1 /*
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 *
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <linux/blk-mq-pci.h>
58 #include <asm/unaligned.h>
59
60 #include "mpt3sas_base.h"
61
62 #define RAID_CHANNEL 1
63
64 #define PCIE_CHANNEL 2
65
66 /* forward proto's */
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
70
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
78 static void
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
82
83 /* global parameters */
84 LIST_HEAD(mpt3sas_ioc_list);
85 /* global ioc lock for list operations */
86 DEFINE_SPINLOCK(gioc_lock);
87
88 MODULE_AUTHOR(MPT3SAS_AUTHOR);
89 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
90 MODULE_LICENSE("GPL");
91 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
92 MODULE_ALIAS("mpt2sas");
93
94 /* local parameters */
95 static u8 scsi_io_cb_idx = -1;
96 static u8 tm_cb_idx = -1;
97 static u8 ctl_cb_idx = -1;
98 static u8 base_cb_idx = -1;
99 static u8 port_enable_cb_idx = -1;
100 static u8 transport_cb_idx = -1;
101 static u8 scsih_cb_idx = -1;
102 static u8 config_cb_idx = -1;
103 static int mpt2_ids;
104 static int mpt3_ids;
105
106 static u8 tm_tr_cb_idx = -1 ;
107 static u8 tm_tr_volume_cb_idx = -1 ;
108 static u8 tm_sas_control_cb_idx = -1;
109
110 /* command line options */
111 static u32 logging_level;
112 MODULE_PARM_DESC(logging_level,
113 " bits for enabling additional logging info (default=0)");
114
115
116 static ushort max_sectors = 0xFFFF;
117 module_param(max_sectors, ushort, 0444);
118 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
119
120
121 static int missing_delay[2] = {-1, -1};
122 module_param_array(missing_delay, int, NULL, 0444);
123 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
124
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
126 #define MPT3SAS_MAX_LUN (16895)
127 static u64 max_lun = MPT3SAS_MAX_LUN;
128 module_param(max_lun, ullong, 0444);
129 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
130
131 static ushort hbas_to_enumerate;
132 module_param(hbas_to_enumerate, ushort, 0444);
133 MODULE_PARM_DESC(hbas_to_enumerate,
134 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
135 1 - enumerates only SAS 2.0 generation HBAs\n \
136 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
137
138 /* diag_buffer_enable is bitwise
139 * bit 0 set = TRACE
140 * bit 1 set = SNAPSHOT
141 * bit 2 set = EXTENDED
142 *
143 * Either bit can be set, or both
144 */
145 static int diag_buffer_enable = -1;
146 module_param(diag_buffer_enable, int, 0444);
147 MODULE_PARM_DESC(diag_buffer_enable,
148 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
149 static int disable_discovery = -1;
150 module_param(disable_discovery, int, 0444);
151 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
152
153
154 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
155 static int prot_mask = -1;
156 module_param(prot_mask, int, 0444);
157 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
158
159 static bool enable_sdev_max_qd;
160 module_param(enable_sdev_max_qd, bool, 0444);
161 MODULE_PARM_DESC(enable_sdev_max_qd,
162 "Enable sdev max qd as can_queue, def=disabled(0)");
163
164 static int multipath_on_hba = -1;
165 module_param(multipath_on_hba, int, 0);
166 MODULE_PARM_DESC(multipath_on_hba,
167 "Multipath support to add same target device\n\t\t"
168 "as many times as it is visible to HBA from various paths\n\t\t"
169 "(by default:\n\t\t"
170 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
171 "\t SAS 3.5 HBA - This will be enabled)");
172
173 static int host_tagset_enable = 1;
174 module_param(host_tagset_enable, int, 0444);
175 MODULE_PARM_DESC(host_tagset_enable,
176 "Shared host tagset enable/disable Default: enable(1)");
177
178 /* raid transport support */
179 static struct raid_template *mpt3sas_raid_template;
180 static struct raid_template *mpt2sas_raid_template;
181
182
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 *
 * Holds the three identifying fields of SCSI sense data in decoded form.
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};
194
195 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
196 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
197 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
198 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
199 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 * Lifetime is managed through @refcount (see fw_event_work_get()/
 * fw_event_work_put()); the object is allocated by alloc_fw_event_work()
 * with the event payload appended after the struct.
 */
struct fw_event_work {
	struct list_head list;
	struct work_struct work;

	struct MPT3SAS_ADAPTER *ioc;
	u16 device_handle;
	u8 VF_ID;
	u8 VP_ID;
	u8 ignore;
	u16 event;
	struct kref refcount;
	/* flexible array member for the event payload; must remain last */
	char event_data[] __aligned(4);
};
228
fw_event_work_free(struct kref * r)229 static void fw_event_work_free(struct kref *r)
230 {
231 kfree(container_of(r, struct fw_event_work, refcount));
232 }
233
fw_event_work_get(struct fw_event_work * fw_work)234 static void fw_event_work_get(struct fw_event_work *fw_work)
235 {
236 kref_get(&fw_work->refcount);
237 }
238
fw_event_work_put(struct fw_event_work * fw_work)239 static void fw_event_work_put(struct fw_event_work *fw_work)
240 {
241 kref_put(&fw_work->refcount, fw_event_work_free);
242 }
243
alloc_fw_event_work(int len)244 static struct fw_event_work *alloc_fw_event_work(int len)
245 {
246 struct fw_event_work *fw_event;
247
248 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
249 if (!fw_event)
250 return NULL;
251
252 kref_init(&fw_event->refcount);
253 return fw_event;
254 }
255
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 VF_ID;
	u8 VP_ID;
	u8 valid_reply;
	/* the following bits are only valid when 'valid_reply = 1' */
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};
303
304 /**
305 * _scsih_set_debug_level - global setting of ioc->logging_level.
306 * @val: ?
307 * @kp: ?
308 *
309 * Note: The logging levels are defined in mpt3sas_debug.h.
310 */
311 static int
_scsih_set_debug_level(const char * val,const struct kernel_param * kp)312 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
313 {
314 int ret = param_set_int(val, kp);
315 struct MPT3SAS_ADAPTER *ioc;
316
317 if (ret)
318 return ret;
319
320 pr_info("setting logging_level(0x%08x)\n", logging_level);
321 spin_lock(&gioc_lock);
322 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
323 ioc->logging_level = logging_level;
324 spin_unlock(&gioc_lock);
325 return 0;
326 }
327 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
328 &logging_level, 0644);
329
330 /**
331 * _scsih_srch_boot_sas_address - search based on sas_address
332 * @sas_address: sas address
333 * @boot_device: boot device object from bios page 2
334 *
335 * Return: 1 when there's a match, 0 means no match.
336 */
337 static inline int
_scsih_srch_boot_sas_address(u64 sas_address,Mpi2BootDeviceSasWwid_t * boot_device)338 _scsih_srch_boot_sas_address(u64 sas_address,
339 Mpi2BootDeviceSasWwid_t *boot_device)
340 {
341 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
342 }
343
344 /**
345 * _scsih_srch_boot_device_name - search based on device name
346 * @device_name: device name specified in INDENTIFY fram
347 * @boot_device: boot device object from bios page 2
348 *
349 * Return: 1 when there's a match, 0 means no match.
350 */
351 static inline int
_scsih_srch_boot_device_name(u64 device_name,Mpi2BootDeviceDeviceName_t * boot_device)352 _scsih_srch_boot_device_name(u64 device_name,
353 Mpi2BootDeviceDeviceName_t *boot_device)
354 {
355 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
356 }
357
358 /**
359 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
360 * @enclosure_logical_id: enclosure logical id
361 * @slot_number: slot number
362 * @boot_device: boot device object from bios page 2
363 *
364 * Return: 1 when there's a match, 0 means no match.
365 */
366 static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id,u16 slot_number,Mpi2BootDeviceEnclosureSlot_t * boot_device)367 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
368 Mpi2BootDeviceEnclosureSlot_t *boot_device)
369 {
370 return (enclosure_logical_id == le64_to_cpu(boot_device->
371 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
372 SlotNumber)) ? 1 : 0;
373 }
374
375 /**
376 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
377 * port number from port list
378 * @ioc: per adapter object
379 * @port_id: port number
380 * @bypass_dirty_port_flag: when set look the matching hba port entry even
381 * if hba port entry is marked as dirty.
382 *
383 * Search for hba port entry corresponding to provided port number,
384 * if available return port object otherwise return NULL.
385 */
386 struct hba_port *
mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 bypass_dirty_port_flag)387 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
388 u8 port_id, u8 bypass_dirty_port_flag)
389 {
390 struct hba_port *port, *port_next;
391
392 /*
393 * When multipath_on_hba is disabled then
394 * search the hba_port entry using default
395 * port id i.e. 255
396 */
397 if (!ioc->multipath_on_hba)
398 port_id = MULTIPATH_DISABLED_PORT_ID;
399
400 list_for_each_entry_safe(port, port_next,
401 &ioc->port_table_list, list) {
402 if (port->port_id != port_id)
403 continue;
404 if (bypass_dirty_port_flag)
405 return port;
406 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
407 continue;
408 return port;
409 }
410
411 /*
412 * Allocate hba_port object for default port id (i.e. 255)
413 * when multipath_on_hba is disabled for the HBA.
414 * And add this object to port_table_list.
415 */
416 if (!ioc->multipath_on_hba) {
417 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
418 if (!port)
419 return NULL;
420
421 port->port_id = port_id;
422 ioc_info(ioc,
423 "hba_port entry: %p, port: %d is added to hba_port list\n",
424 port, port->port_id);
425 list_add_tail(&port->list,
426 &ioc->port_table_list);
427 return port;
428 }
429 return NULL;
430 }
431
432 /**
433 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
434 * @ioc: per adapter object
435 * @port: hba_port object
436 * @phy: phy number
437 *
438 * Return virtual_phy object corresponding to phy number.
439 */
440 struct virtual_phy *
mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port,u32 phy)441 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
442 struct hba_port *port, u32 phy)
443 {
444 struct virtual_phy *vphy, *vphy_next;
445
446 if (!port->vphys_mask)
447 return NULL;
448
449 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
450 if (vphy->phy_mask & (1 << phy))
451 return vphy;
452 }
453 return NULL;
454 }
455
456 /**
457 * _scsih_is_boot_device - search for matching boot device.
458 * @sas_address: sas address
459 * @device_name: device name specified in INDENTIFY fram
460 * @enclosure_logical_id: enclosure logical id
461 * @slot: slot number
462 * @form: specifies boot device form
463 * @boot_device: boot device object from bios page 2
464 *
465 * Return: 1 when there's a match, 0 means no match.
466 */
467 static int
_scsih_is_boot_device(u64 sas_address,u64 device_name,u64 enclosure_logical_id,u16 slot,u8 form,Mpi2BiosPage2BootDevice_t * boot_device)468 _scsih_is_boot_device(u64 sas_address, u64 device_name,
469 u64 enclosure_logical_id, u16 slot, u8 form,
470 Mpi2BiosPage2BootDevice_t *boot_device)
471 {
472 int rc = 0;
473
474 switch (form) {
475 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
476 if (!sas_address)
477 break;
478 rc = _scsih_srch_boot_sas_address(
479 sas_address, &boot_device->SasWwid);
480 break;
481 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
482 if (!enclosure_logical_id)
483 break;
484 rc = _scsih_srch_boot_encl_slot(
485 enclosure_logical_id,
486 slot, &boot_device->EnclosureSlot);
487 break;
488 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
489 if (!device_name)
490 break;
491 rc = _scsih_srch_boot_device_name(
492 device_name, &boot_device->DeviceName);
493 break;
494 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
495 break;
496 }
497
498 return rc;
499 }
500
/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address
 *
 * Reads SAS Device Page 0 for @handle and stores the resolved address
 * into @sas_address.
 *
 * Return: 0 success, non-zero when failure
 */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	/* default to 0 so callers see a benign value on every failure path */
	*sas_address = 0;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For HBA, vSES doesn't return HBA SAS address. Instead return
		 * vSES's sas address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		   MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}
549
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device or pcie_device object
 * @channel: SAS or PCIe channel
 *
 * Determines whether this device should be first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current. This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* Normalize the identifying fields: RAID and PCIe devices only
	 * carry a wwid, so name/enclosure/slot are zeroed for them.
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* Each of the three bios page 2 entries is latched only once:
	 * the first matching device wins and later matches are ignored.
	 */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}
645
646 static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)647 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
648 struct MPT3SAS_TARGET *tgt_priv)
649 {
650 struct _sas_device *ret;
651
652 assert_spin_locked(&ioc->sas_device_lock);
653
654 ret = tgt_priv->sas_dev;
655 if (ret)
656 sas_device_get(ret);
657
658 return ret;
659 }
660
661 static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)662 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
663 struct MPT3SAS_TARGET *tgt_priv)
664 {
665 struct _sas_device *ret;
666 unsigned long flags;
667
668 spin_lock_irqsave(&ioc->sas_device_lock, flags);
669 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
670 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
671
672 return ret;
673 }
674
675 static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)676 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
677 struct MPT3SAS_TARGET *tgt_priv)
678 {
679 struct _pcie_device *ret;
680
681 assert_spin_locked(&ioc->pcie_device_lock);
682
683 ret = tgt_priv->pcie_dev;
684 if (ret)
685 pcie_device_get(ret);
686
687 return ret;
688 }
689
690 /**
691 * mpt3sas_get_pdev_from_target - pcie device search
692 * @ioc: per adapter object
693 * @tgt_priv: starget private object
694 *
695 * Context: This function will acquire ioc->pcie_device_lock and will release
696 * before returning the pcie_device object.
697 *
698 * This searches for pcie_device from target, then return pcie_device object.
699 */
700 static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)701 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
702 struct MPT3SAS_TARGET *tgt_priv)
703 {
704 struct _pcie_device *ret;
705 unsigned long flags;
706
707 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
708 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
709 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
710
711 return ret;
712 }
713
714
715 /**
716 * __mpt3sas_get_sdev_by_rphy - sas device search
717 * @ioc: per adapter object
718 * @rphy: sas_rphy pointer
719 *
720 * Context: This function will acquire ioc->sas_device_lock and will release
721 * before returning the sas_device object.
722 *
723 * This searches for sas_device from rphy object
724 * then return sas_device object.
725 */
726 struct _sas_device *
__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER * ioc,struct sas_rphy * rphy)727 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
728 struct sas_rphy *rphy)
729 {
730 struct _sas_device *sas_device;
731
732 assert_spin_locked(&ioc->sas_device_lock);
733
734 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
735 if (sas_device->rphy != rphy)
736 continue;
737 sas_device_get(sas_device);
738 return sas_device;
739 }
740
741 sas_device = NULL;
742 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
743 if (sas_device->rphy != rphy)
744 continue;
745 sas_device_get(sas_device);
746 return sas_device;
747 }
748
749 return NULL;
750 }
751
752 /**
753 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
754 * sas address from sas_device_list list
755 * @ioc: per adapter object
756 * @sas_address: device sas address
757 * @port: port number
758 *
759 * Search for _sas_device object corresponding to provided sas address,
760 * if available return _sas_device object address otherwise return NULL.
761 */
762 struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)763 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
764 u64 sas_address, struct hba_port *port)
765 {
766 struct _sas_device *sas_device;
767
768 if (!port)
769 return NULL;
770
771 assert_spin_locked(&ioc->sas_device_lock);
772
773 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
774 if (sas_device->sas_address != sas_address)
775 continue;
776 if (sas_device->port != port)
777 continue;
778 sas_device_get(sas_device);
779 return sas_device;
780 }
781
782 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
783 if (sas_device->sas_address != sas_address)
784 continue;
785 if (sas_device->port != port)
786 continue;
787 sas_device_get(sas_device);
788 return sas_device;
789 }
790
791 return NULL;
792 }
793
794 /**
795 * mpt3sas_get_sdev_by_addr - sas device search
796 * @ioc: per adapter object
797 * @sas_address: sas address
798 * @port: hba port entry
799 * Context: Calling function should acquire ioc->sas_device_lock
800 *
801 * This searches for sas_device based on sas_address & port number,
802 * then return sas_device object.
803 */
804 struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)805 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
806 u64 sas_address, struct hba_port *port)
807 {
808 struct _sas_device *sas_device;
809 unsigned long flags;
810
811 spin_lock_irqsave(&ioc->sas_device_lock, flags);
812 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
813 sas_address, port);
814 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
815
816 return sas_device;
817 }
818
819 static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)820 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
821 {
822 struct _sas_device *sas_device;
823
824 assert_spin_locked(&ioc->sas_device_lock);
825
826 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
827 if (sas_device->handle == handle)
828 goto found_device;
829
830 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
831 if (sas_device->handle == handle)
832 goto found_device;
833
834 return NULL;
835
836 found_device:
837 sas_device_get(sas_device);
838 return sas_device;
839 }
840
841 /**
842 * mpt3sas_get_sdev_by_handle - sas device search
843 * @ioc: per adapter object
844 * @handle: sas device handle (assigned by firmware)
845 * Context: Calling function should acquire ioc->sas_device_lock
846 *
847 * This searches for sas_device based on sas_address, then return sas_device
848 * object.
849 */
850 struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)851 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
852 {
853 struct _sas_device *sas_device;
854 unsigned long flags;
855
856 spin_lock_irqsave(&ioc->sas_device_lock, flags);
857 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
858 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
859
860 return sas_device;
861 }
862
/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct
 * @starget: scsi target struct
 *
 * Prints enclosure logical id/slot, enclosure level/connector name and
 * chassis slot (each only when valid).  The output sink is chosen by
 * precedence: @sdev if non-NULL, else @starget if non-NULL, else the
 * adapter-level log.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}
920
/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If sas_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		/* drop the reference the list was holding */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
953
/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks the device up by firmware handle, unlinks it from the list while
 * holding ioc->sas_device_lock, then tears it down outside the lock.
 */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* during host reset the topology is rebuilt; skip removals */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* drop the list's reference while still under the lock */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* teardown may sleep, so it runs outside the spinlock */
		_scsih_remove_device(ioc, sas_device);
		/* drop the reference taken by the lookup above */
		sas_device_put(sas_device);
	}
}
980
/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 * sas address & port number
 * @ioc: per adapter object
 * @sas_address: device sas_address
 * @port: hba port entry
 *
 * Looks the device up by SAS address and port, unlinks it from the list
 * while holding ioc->sas_device_lock, then tears it down outside the lock.
 */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* during host reset the topology is rebuilt; skip removals */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* drop the list's reference while still under the lock */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* teardown may sleep, so it runs outside the spinlock */
		_scsih_remove_device(ioc, sas_device);
		/* drop the reference taken by the lookup above */
		sas_device_put(sas_device);
	}
}
1012
/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object to the ioc->sas_device_list and registering the device
 * with the SAS transport layer; on failure the device is unlinked again.
 */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* the list holds its own reference to the device */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* hidden (warpdrive) members are not exposed to the transport */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	    sas_device->sas_address_parent, sas_device->port)) {
		/* transport registration failed: unlink the device again */
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When asyn scanning is enabled, its not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
1064
/**
 * _scsih_sas_device_init_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object at driver load time to the ioc->sas_device_init_list
 * and evaluating it as a possible boot device.
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* the init list holds its own reference to the device */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
1093
1094
1095 static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1096 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1097 {
1098 struct _pcie_device *pcie_device;
1099
1100 assert_spin_locked(&ioc->pcie_device_lock);
1101
1102 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1103 if (pcie_device->wwid == wwid)
1104 goto found_device;
1105
1106 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1107 if (pcie_device->wwid == wwid)
1108 goto found_device;
1109
1110 return NULL;
1111
1112 found_device:
1113 pcie_device_get(pcie_device);
1114 return pcie_device;
1115 }
1116
1117
1118 /**
1119 * mpt3sas_get_pdev_by_wwid - pcie device search
1120 * @ioc: per adapter object
1121 * @wwid: wwid
1122 *
1123 * Context: This function will acquire ioc->pcie_device_lock and will release
1124 * before returning the pcie_device object.
1125 *
1126 * This searches for pcie_device based on wwid, then return pcie_device object.
1127 */
1128 static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1129 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1130 {
1131 struct _pcie_device *pcie_device;
1132 unsigned long flags;
1133
1134 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1135 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1136 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1137
1138 return pcie_device;
1139 }
1140
1141
1142 static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1143 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1144 int channel)
1145 {
1146 struct _pcie_device *pcie_device;
1147
1148 assert_spin_locked(&ioc->pcie_device_lock);
1149
1150 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1151 if (pcie_device->id == id && pcie_device->channel == channel)
1152 goto found_device;
1153
1154 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1155 if (pcie_device->id == id && pcie_device->channel == channel)
1156 goto found_device;
1157
1158 return NULL;
1159
1160 found_device:
1161 pcie_device_get(pcie_device);
1162 return pcie_device;
1163 }
1164
1165 static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1166 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1167 {
1168 struct _pcie_device *pcie_device;
1169
1170 assert_spin_locked(&ioc->pcie_device_lock);
1171
1172 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1173 if (pcie_device->handle == handle)
1174 goto found_device;
1175
1176 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1177 if (pcie_device->handle == handle)
1178 goto found_device;
1179
1180 return NULL;
1181
1182 found_device:
1183 pcie_device_get(pcie_device);
1184 return pcie_device;
1185 }
1186
1187
1188 /**
1189 * mpt3sas_get_pdev_by_handle - pcie device search
1190 * @ioc: per adapter object
1191 * @handle: Firmware device handle
1192 *
1193 * Context: This function will acquire ioc->pcie_device_lock and will release
1194 * before returning the pcie_device object.
1195 *
1196 * This searches for pcie_device based on handle, then return pcie_device
1197 * object.
1198 */
1199 struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1200 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1201 {
1202 struct _pcie_device *pcie_device;
1203 unsigned long flags;
1204
1205 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1206 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1207 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1208
1209 return pcie_device;
1210 }
1211
1212 /**
1213 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1214 * @ioc: per adapter object
1215 * Context: This function will acquire ioc->pcie_device_lock
1216 *
1217 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1218 * which has reported maximum among all available NVMe drives.
1219 * Minimum max_shutdown_latency will be six seconds.
1220 */
1221 static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER * ioc)1222 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1223 {
1224 struct _pcie_device *pcie_device;
1225 unsigned long flags;
1226 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1227
1228 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1229 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1230 if (pcie_device->shutdown_latency) {
1231 if (shutdown_latency < pcie_device->shutdown_latency)
1232 shutdown_latency =
1233 pcie_device->shutdown_latency;
1234 }
1235 }
1236 ioc->max_shutdown_latency = shutdown_latency;
1237 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1238 }
1239
/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* Tolerate a NULL device so callers do not have to check first. */
	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* unlink under the lock; the actual put happens after unlock */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		/* free the driver-allocated serial number string, then
		 * drop the reference the list was holding */
		kfree(pcie_device->serial_number);
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1290
1291
/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks the device up by firmware handle, unlinks it from the list while
 * holding ioc->pcie_device_lock, then removes it from the SCSI midlayer
 * outside the lock.
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* during host reset the topology is rebuilt; skip removals */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* drop the list's reference while under the lock */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		/* midlayer removal may sleep, so it runs outside the lock */
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		/* drop the reference taken by the lookup above */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1333
/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * This is added to the pcie_device_list link list and exposed to the SCSI
 * midlayer via scsi_add_device(); on failure it is unlinked again.
 */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* the list holds its own reference to the device */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	/* blocked devices are kept on the list but not exposed to the OS */
	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* midlayer registration failed: unlink the device again */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
			/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}
1383
/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list
 * and, unless the device is blocked, evaluating it as a boot device.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* the init list holds its own reference to the device */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
1422 /**
1423 * _scsih_raid_device_find_by_id - raid device search
1424 * @ioc: per adapter object
1425 * @id: sas device target id
1426 * @channel: sas device channel
1427 * Context: Calling function should acquire ioc->raid_device_lock
1428 *
1429 * This searches for raid_device based on target id, then return raid_device
1430 * object.
1431 */
1432 static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1433 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1434 {
1435 struct _raid_device *raid_device, *r;
1436
1437 r = NULL;
1438 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1439 if (raid_device->id == id && raid_device->channel == channel) {
1440 r = raid_device;
1441 goto out;
1442 }
1443 }
1444
1445 out:
1446 return r;
1447 }
1448
1449 /**
1450 * mpt3sas_raid_device_find_by_handle - raid device search
1451 * @ioc: per adapter object
1452 * @handle: sas device handle (assigned by firmware)
1453 * Context: Calling function should acquire ioc->raid_device_lock
1454 *
1455 * This searches for raid_device based on handle, then return raid_device
1456 * object.
1457 */
1458 struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1459 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1460 {
1461 struct _raid_device *raid_device, *r;
1462
1463 r = NULL;
1464 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1465 if (raid_device->handle != handle)
1466 continue;
1467 r = raid_device;
1468 goto out;
1469 }
1470
1471 out:
1472 return r;
1473 }
1474
1475 /**
1476 * _scsih_raid_device_find_by_wwid - raid device search
1477 * @ioc: per adapter object
1478 * @wwid: ?
1479 * Context: Calling function should acquire ioc->raid_device_lock
1480 *
1481 * This searches for raid_device based on wwid, then return raid_device
1482 * object.
1483 */
1484 static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1485 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1486 {
1487 struct _raid_device *raid_device, *r;
1488
1489 r = NULL;
1490 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1491 if (raid_device->wwid != wwid)
1492 continue;
1493 r = raid_device;
1494 goto out;
1495 }
1496
1497 out:
1498 return r;
1499 }
1500
1501 /**
1502 * _scsih_raid_device_add - add raid_device object
1503 * @ioc: per adapter object
1504 * @raid_device: raid_device object
1505 *
1506 * This is added to the raid_device_list link list.
1507 */
1508 static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1509 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1510 struct _raid_device *raid_device)
1511 {
1512 unsigned long flags;
1513
1514 dewtprintk(ioc,
1515 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1516 __func__,
1517 raid_device->handle, (u64)raid_device->wwid));
1518
1519 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1520 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1521 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1522 }
1523
1524 /**
1525 * _scsih_raid_device_remove - delete raid_device object
1526 * @ioc: per adapter object
1527 * @raid_device: raid_device object
1528 *
1529 */
1530 static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1531 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1532 struct _raid_device *raid_device)
1533 {
1534 unsigned long flags;
1535
1536 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1537 list_del(&raid_device->list);
1538 kfree(raid_device);
1539 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1540 }
1541
1542 /**
1543 * mpt3sas_scsih_expander_find_by_handle - expander device search
1544 * @ioc: per adapter object
1545 * @handle: expander handle (assigned by firmware)
1546 * Context: Calling function should acquire ioc->sas_device_lock
1547 *
1548 * This searches for expander device based on handle, then returns the
1549 * sas_node object.
1550 */
1551 struct _sas_node *
mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1552 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1553 {
1554 struct _sas_node *sas_expander, *r;
1555
1556 r = NULL;
1557 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1558 if (sas_expander->handle != handle)
1559 continue;
1560 r = sas_expander;
1561 goto out;
1562 }
1563 out:
1564 return r;
1565 }
1566
1567 /**
1568 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1569 * @ioc: per adapter object
1570 * @handle: enclosure handle (assigned by firmware)
1571 * Context: Calling function should acquire ioc->sas_device_lock
1572 *
1573 * This searches for enclosure device based on handle, then returns the
1574 * enclosure object.
1575 */
1576 static struct _enclosure_node *
mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1577 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1578 {
1579 struct _enclosure_node *enclosure_dev, *r;
1580
1581 r = NULL;
1582 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1583 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1584 continue;
1585 r = enclosure_dev;
1586 goto out;
1587 }
1588 out:
1589 return r;
1590 }
1591 /**
1592 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1593 * @ioc: per adapter object
1594 * @sas_address: sas address
1595 * @port: hba port entry
1596 * Context: Calling function should acquire ioc->sas_node_lock.
1597 *
1598 * This searches for expander device based on sas_address & port number,
1599 * then returns the sas_node object.
1600 */
1601 struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)1602 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1603 u64 sas_address, struct hba_port *port)
1604 {
1605 struct _sas_node *sas_expander, *r = NULL;
1606
1607 if (!port)
1608 return r;
1609
1610 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1611 if (sas_expander->sas_address != sas_address)
1612 continue;
1613 if (sas_expander->port != port)
1614 continue;
1615 r = sas_expander;
1616 goto out;
1617 }
1618 out:
1619 return r;
1620 }
1621
1622 /**
1623 * _scsih_expander_node_add - insert expander device to the list.
1624 * @ioc: per adapter object
1625 * @sas_expander: the sas_device object
1626 * Context: This function will acquire ioc->sas_node_lock.
1627 *
1628 * Adding new object to the ioc->sas_expander_list.
1629 */
1630 static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)1631 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1632 struct _sas_node *sas_expander)
1633 {
1634 unsigned long flags;
1635
1636 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1637 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1638 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1639 }
1640
1641 /**
1642 * _scsih_is_end_device - determines if device is an end device
1643 * @device_info: bitfield providing information about the device.
1644 * Context: none
1645 *
1646 * Return: 1 if end device.
1647 */
1648 static int
_scsih_is_end_device(u32 device_info)1649 _scsih_is_end_device(u32 device_info)
1650 {
1651 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1652 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1653 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1654 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1655 return 1;
1656 else
1657 return 0;
1658 }
1659
1660 /**
1661 * _scsih_is_nvme_pciescsi_device - determines if
1662 * device is an pcie nvme/scsi device
1663 * @device_info: bitfield providing information about the device.
1664 * Context: none
1665 *
1666 * Returns 1 if device is pcie device type nvme/scsi.
1667 */
1668 static int
_scsih_is_nvme_pciescsi_device(u32 device_info)1669 _scsih_is_nvme_pciescsi_device(u32 device_info)
1670 {
1671 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1672 == MPI26_PCIE_DEVINFO_NVME) ||
1673 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1674 == MPI26_PCIE_DEVINFO_SCSI))
1675 return 1;
1676 else
1677 return 0;
1678 }
1679
1680 /**
1681 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1682 * @ioc: per adapter object
1683 * @id: target id
1684 * @channel: channel
1685 * Context: This function will acquire ioc->scsi_lookup_lock.
1686 *
1687 * This will search for a matching channel:id in the scsi_lookup array,
1688 * returning 1 if found.
1689 */
1690 static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1691 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1692 int channel)
1693 {
1694 int smid;
1695 struct scsi_cmnd *scmd;
1696
1697 for (smid = 1;
1698 smid <= ioc->shost->can_queue; smid++) {
1699 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1700 if (!scmd)
1701 continue;
1702 if (scmd->device->id == id &&
1703 scmd->device->channel == channel)
1704 return 1;
1705 }
1706 return 0;
1707 }
1708
1709 /**
1710 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1711 * @ioc: per adapter object
1712 * @id: target id
1713 * @lun: lun number
1714 * @channel: channel
1715 * Context: This function will acquire ioc->scsi_lookup_lock.
1716 *
1717 * This will search for a matching channel:id:lun in the scsi_lookup array,
1718 * returning 1 if found.
1719 */
1720 static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER * ioc,int id,unsigned int lun,int channel)1721 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1722 unsigned int lun, int channel)
1723 {
1724 int smid;
1725 struct scsi_cmnd *scmd;
1726
1727 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1728
1729 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1730 if (!scmd)
1731 continue;
1732 if (scmd->device->id == id &&
1733 scmd->device->channel == channel &&
1734 scmd->device->lun == lun)
1735 return 1;
1736 }
1737 return 0;
1738 }
1739
/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the smid stored scmd pointer.
 * Then will dereference the stored scmd pointer.
 */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	/* blk-mq tags are zero based while smids start at 1 */
	u16 tag = smid - 1;

	/* only smids in the SCSI IO range map to midlayer commands */
	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/*
		 * Rebuild the unique blk-mq tag from the hw queue number
		 * recorded for this smid combined with the per-queue tag.
		 */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle filed must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* filter out commands not really owned by this driver */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}
1781
/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Caps the requested depth at the host limit and, for SATA end devices
 * (unless enable_sdev_max_qd or a gen35 IOC), at MPT3SAS_SATA_QUEUE_DEPTH.
 *
 * Return: queue depth.
 */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
		goto not_sata;

	/* walk down to the underlying device; bail out on any missing link */
	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* RAID volumes are not SATA end devices; skip the SATA cap */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		/* drop the reference taken by the lookup above */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	/* devices without tagged queuing can only take one command */
	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
	    sdev->queue_depth, sdev->tagged_supported,
	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}
1841
1842 /**
1843 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1844 * @sdev: scsi device struct
1845 * @qdepth: requested queue depth
1846 *
1847 * Returns nothing.
1848 */
1849 void
mpt3sas_scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1850 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1851 {
1852 struct Scsi_Host *shost = sdev->host;
1853 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1854
1855 if (ioc->enable_sdev_max_qd)
1856 qdepth = shost->can_queue;
1857
1858 scsih_change_queue_depth(sdev, qdepth);
1859 }
1860
/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Allocates the per-target private data and binds it to the matching
 * raid/pcie/sas device object based on the target's channel.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
				       GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	/* handle is filled in below once the device object is matched */
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			/* for volumes, the wwid doubles as the address */
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
		    starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			/*
			 * The reference taken by the lookup above is kept
			 * in pcie_dev for the lifetime of the target.
			 */
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
				    MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		/*
		 * The reference taken by the lookup above is kept in
		 * sas_dev for the lifetime of the target.
		 */
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		/* member of a RAID volume (pd_handles bitmap)? */
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
			    MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}
1953
/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 *
 * Unbinds the target from its firmware device object, drops the
 * device references held since scsih_target_alloc(), and frees the
 * per-target private data.
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		/* Only clear the back-pointer if it still refers to us. */
		if (pcie_device && (pcie_device->starget == starget) &&
		    (pcie_device->id == starget->id) &&
		    (pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Corresponding get() is in _scsih_target_alloc()
			 *
			 * Two puts are intentional: one balances the
			 * lookup just above, the other releases the
			 * long-lived reference held via ->pcie_dev.
			 */
			sas_target_priv_data->pcie_dev = NULL;
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Corresponding get() is in _scsih_target_alloc()
		 *
		 * As above: first put balances the lookup, second put
		 * drops the reference stored in ->sas_dev at alloc time.
		 */
		sas_target_priv_data->sas_dev = NULL;
		sas_device_put(sas_device);

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}
2028
/**
 * scsih_slave_alloc - device add routine
 * @sdev: scsi device struct
 *
 * Allocates per-LUN private data, links it to the target private data
 * (bumping the LUN count), and re-attaches the scsi_target back-pointer
 * on the matching firmware device object if it was cleared.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
				       GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	/* Target private data was set up in scsih_target_alloc(). */
	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* RAID members are not exposed to upper-layer drivers. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
		    sas_target_priv_data->sas_address);
		/* Restore the back-pointer if it was cleared earlier. */
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : sas_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			sas_device->starget = starget;
		}

		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}
2110
/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 *
 * Drops the per-LUN private data.  When the last LUN of a target goes
 * away, the scsi_target back-pointer on the firmware device object is
 * cleared as well.
 */
static void
scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		/* Last LUN gone: detach the scsi_target back-pointer. */
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc,
		    sas_target_priv_data);
		if (sas_device && !sas_target_priv_data->num_luns)
			sas_device->starget = NULL;

		if (sas_device)
			sas_device_put(sas_device);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}
2163
2164 /**
2165 * _scsih_display_sata_capabilities - sata capabilities
2166 * @ioc: per adapter object
2167 * @handle: device handle
2168 * @sdev: scsi device struct
2169 */
2170 static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER * ioc,u16 handle,struct scsi_device * sdev)2171 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2172 u16 handle, struct scsi_device *sdev)
2173 {
2174 Mpi2ConfigReply_t mpi_reply;
2175 Mpi2SasDevicePage0_t sas_device_pg0;
2176 u32 ioc_status;
2177 u16 flags;
2178 u32 device_info;
2179
2180 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2181 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2182 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2183 __FILE__, __LINE__, __func__);
2184 return;
2185 }
2186
2187 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2188 MPI2_IOCSTATUS_MASK;
2189 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2190 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2191 __FILE__, __LINE__, __func__);
2192 return;
2193 }
2194
2195 flags = le16_to_cpu(sas_device_pg0.Flags);
2196 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2197
2198 sdev_printk(KERN_INFO, sdev,
2199 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2200 "sw_preserve(%s)\n",
2201 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2203 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2204 "n",
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2207 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2208 }
2209
2210 /*
2211 * raid transport support -
2212 * Enabled for SLES11 and newer, in older kernels the driver will panic when
2213 * unloading the driver followed by a load - I believe that the subroutine
2214 * raid_class_release() is not cleaning up properly.
2215 */
2216
2217 /**
2218 * scsih_is_raid - return boolean indicating device is raid volume
2219 * @dev: the device struct object
2220 */
2221 static int
scsih_is_raid(struct device * dev)2222 scsih_is_raid(struct device *dev)
2223 {
2224 struct scsi_device *sdev = to_scsi_device(dev);
2225 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2226
2227 if (ioc->is_warpdrive)
2228 return 0;
2229 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2230 }
2231
2232 static int
scsih_is_nvme(struct device * dev)2233 scsih_is_nvme(struct device *dev)
2234 {
2235 struct scsi_device *sdev = to_scsi_device(dev);
2236
2237 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2238 }
2239
2240 /**
2241 * scsih_get_resync - get raid volume resync percent complete
2242 * @dev: the device struct object
2243 */
2244 static void
scsih_get_resync(struct device * dev)2245 scsih_get_resync(struct device *dev)
2246 {
2247 struct scsi_device *sdev = to_scsi_device(dev);
2248 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2249 static struct _raid_device *raid_device;
2250 unsigned long flags;
2251 Mpi2RaidVolPage0_t vol_pg0;
2252 Mpi2ConfigReply_t mpi_reply;
2253 u32 volume_status_flags;
2254 u8 percent_complete;
2255 u16 handle;
2256
2257 percent_complete = 0;
2258 handle = 0;
2259 if (ioc->is_warpdrive)
2260 goto out;
2261
2262 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2263 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2264 sdev->channel);
2265 if (raid_device) {
2266 handle = raid_device->handle;
2267 percent_complete = raid_device->percent_complete;
2268 }
2269 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2270
2271 if (!handle)
2272 goto out;
2273
2274 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2275 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2276 sizeof(Mpi2RaidVolPage0_t))) {
2277 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2278 __FILE__, __LINE__, __func__);
2279 percent_complete = 0;
2280 goto out;
2281 }
2282
2283 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2284 if (!(volume_status_flags &
2285 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2286 percent_complete = 0;
2287
2288 out:
2289
2290 switch (ioc->hba_mpi_version_belonged) {
2291 case MPI2_VERSION:
2292 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2293 break;
2294 case MPI25_VERSION:
2295 case MPI26_VERSION:
2296 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2297 break;
2298 }
2299 }
2300
2301 /**
2302 * scsih_get_state - get raid volume level
2303 * @dev: the device struct object
2304 */
2305 static void
scsih_get_state(struct device * dev)2306 scsih_get_state(struct device *dev)
2307 {
2308 struct scsi_device *sdev = to_scsi_device(dev);
2309 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2310 static struct _raid_device *raid_device;
2311 unsigned long flags;
2312 Mpi2RaidVolPage0_t vol_pg0;
2313 Mpi2ConfigReply_t mpi_reply;
2314 u32 volstate;
2315 enum raid_state state = RAID_STATE_UNKNOWN;
2316 u16 handle = 0;
2317
2318 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2319 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2320 sdev->channel);
2321 if (raid_device)
2322 handle = raid_device->handle;
2323 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2324
2325 if (!raid_device)
2326 goto out;
2327
2328 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2329 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2330 sizeof(Mpi2RaidVolPage0_t))) {
2331 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2332 __FILE__, __LINE__, __func__);
2333 goto out;
2334 }
2335
2336 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2337 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2338 state = RAID_STATE_RESYNCING;
2339 goto out;
2340 }
2341
2342 switch (vol_pg0.VolumeState) {
2343 case MPI2_RAID_VOL_STATE_OPTIMAL:
2344 case MPI2_RAID_VOL_STATE_ONLINE:
2345 state = RAID_STATE_ACTIVE;
2346 break;
2347 case MPI2_RAID_VOL_STATE_DEGRADED:
2348 state = RAID_STATE_DEGRADED;
2349 break;
2350 case MPI2_RAID_VOL_STATE_FAILED:
2351 case MPI2_RAID_VOL_STATE_MISSING:
2352 state = RAID_STATE_OFFLINE;
2353 break;
2354 }
2355 out:
2356 switch (ioc->hba_mpi_version_belonged) {
2357 case MPI2_VERSION:
2358 raid_set_state(mpt2sas_raid_template, dev, state);
2359 break;
2360 case MPI25_VERSION:
2361 case MPI26_VERSION:
2362 raid_set_state(mpt3sas_raid_template, dev, state);
2363 break;
2364 }
2365 }
2366
2367 /**
2368 * _scsih_set_level - set raid level
2369 * @ioc: ?
2370 * @sdev: scsi device struct
2371 * @volume_type: volume type
2372 */
2373 static void
_scsih_set_level(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev,u8 volume_type)2374 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2375 struct scsi_device *sdev, u8 volume_type)
2376 {
2377 enum raid_level level = RAID_LEVEL_UNKNOWN;
2378
2379 switch (volume_type) {
2380 case MPI2_RAID_VOL_TYPE_RAID0:
2381 level = RAID_LEVEL_0;
2382 break;
2383 case MPI2_RAID_VOL_TYPE_RAID10:
2384 level = RAID_LEVEL_10;
2385 break;
2386 case MPI2_RAID_VOL_TYPE_RAID1E:
2387 level = RAID_LEVEL_1E;
2388 break;
2389 case MPI2_RAID_VOL_TYPE_RAID1:
2390 level = RAID_LEVEL_1;
2391 break;
2392 }
2393
2394 switch (ioc->hba_mpi_version_belonged) {
2395 case MPI2_VERSION:
2396 raid_set_level(mpt2sas_raid_template,
2397 &sdev->sdev_gendev, level);
2398 break;
2399 case MPI25_VERSION:
2400 case MPI26_VERSION:
2401 raid_set_level(mpt3sas_raid_template,
2402 &sdev->sdev_gendev, level);
2403 break;
2404 }
2405 }
2406
2407
2408 /**
2409 * _scsih_get_volume_capabilities - volume capabilities
2410 * @ioc: per adapter object
2411 * @raid_device: the raid_device object
2412 *
2413 * Return: 0 for success, else 1
2414 */
2415 static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)2416 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2417 struct _raid_device *raid_device)
2418 {
2419 Mpi2RaidVolPage0_t *vol_pg0;
2420 Mpi2RaidPhysDiskPage0_t pd_pg0;
2421 Mpi2SasDevicePage0_t sas_device_pg0;
2422 Mpi2ConfigReply_t mpi_reply;
2423 u16 sz;
2424 u8 num_pds;
2425
2426 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2427 &num_pds)) || !num_pds) {
2428 dfailprintk(ioc,
2429 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2430 __FILE__, __LINE__, __func__));
2431 return 1;
2432 }
2433
2434 raid_device->num_pds = num_pds;
2435 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2436 sizeof(Mpi2RaidVol0PhysDisk_t));
2437 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2438 if (!vol_pg0) {
2439 dfailprintk(ioc,
2440 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2441 __FILE__, __LINE__, __func__));
2442 return 1;
2443 }
2444
2445 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2446 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2447 dfailprintk(ioc,
2448 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2449 __FILE__, __LINE__, __func__));
2450 kfree(vol_pg0);
2451 return 1;
2452 }
2453
2454 raid_device->volume_type = vol_pg0->VolumeType;
2455
2456 /* figure out what the underlying devices are by
2457 * obtaining the device_info bits for the 1st device
2458 */
2459 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2460 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2461 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2462 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2463 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2464 le16_to_cpu(pd_pg0.DevHandle)))) {
2465 raid_device->device_info =
2466 le32_to_cpu(sas_device_pg0.DeviceInfo);
2467 }
2468 }
2469
2470 kfree(vol_pg0);
2471 return 0;
2472 }
2473
2474 /**
2475 * _scsih_enable_tlr - setting TLR flags
2476 * @ioc: per adapter object
2477 * @sdev: scsi device struct
2478 *
2479 * Enabling Transaction Layer Retries for tape devices when
2480 * vpd page 0x90 is present
2481 *
2482 */
2483 static void
_scsih_enable_tlr(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev)2484 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2485 {
2486
2487 /* only for TAPE */
2488 if (sdev->type != TYPE_TAPE)
2489 return;
2490
2491 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2492 return;
2493
2494 sas_enable_tlr(sdev);
2495 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2496 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2497 return;
2498
2499 }
2500
/**
 * scsih_slave_configure - device configure routine.
 * @sdev: scsi device struct
 *
 * Finishes per-LUN setup: selects a queue depth appropriate to the
 * device class (RAID volume, NVMe, SSP wide/narrow, SATA/STP), prints
 * the identifying information, and applies request-queue limits.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	int qdepth;
	u8 ssp_target = 0;
	char *ds = "";
	char *r_level = "";
	u16 handle, volume_handle = 0;
	u64 volume_wwid = 0;

	qdepth = 1;
	sas_device_priv_data = sdev->hostdata;
	sas_device_priv_data->configured_lun = 1;
	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	handle = sas_target_priv_data->handle;

	/* raid volume handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (!raid_device) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		/*
		 * WARPDRIVE: Initialize the required data for Direct IO
		 */
		mpt3sas_init_warpdrive_properties(ioc, raid_device);

		/* RAID Queue Depth Support
		 * IS volume = underlying qdepth of drive type, either
		 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
		 */
		if (raid_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
			ds = "SSP";
		} else {
			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
			if (raid_device->device_info &
			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				ds = "SATA";
			else
				ds = "STP";
		}

		switch (raid_device->volume_type) {
		case MPI2_RAID_VOL_TYPE_RAID0:
			r_level = "RAID0";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1E:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			/*
			 * OEM-specific: some manufacturers brand an
			 * even-member RAID1E as RAID10 in the log.
			 */
			if (ioc->manu_pg10.OEMIdentifier &&
			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
			    MFG10_GF0_R10_DISPLAY) &&
			    !(raid_device->num_pds % 2))
				r_level = "RAID10";
			else
				r_level = "RAID1E";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID1";
			break;
		case MPI2_RAID_VOL_TYPE_RAID10:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID10";
			break;
		case MPI2_RAID_VOL_TYPE_UNKNOWN:
		default:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAIDX";
			break;
		}

		if (!ioc->hide_ir_msg)
			sdev_printk(KERN_INFO, sdev,
			    "%s: handle(0x%04x), wwid(0x%016llx),"
			    " pd_count(%d), type(%s)\n",
			    r_level, raid_device->handle,
			    (unsigned long long)raid_device->wwid,
			    raid_device->num_pds, ds);

		/* Cap transfer size for volumes. */
		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    MPT3SAS_RAID_MAX_SECTORS);
			sdev_printk(KERN_INFO, sdev,
			    "Set queue's max_sector to: %u\n",
			    MPT3SAS_RAID_MAX_SECTORS);
		}

		mpt3sas_scsih_change_queue_depth(sdev, qdepth);

		/* raid transport support */
		if (!ioc->is_warpdrive)
			_scsih_set_level(ioc, sdev, raid_device->volume_type);
		return 0;
	}

	/* non-raid handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
		/* For RAID members, record the owning volume's handle/wwid. */
		if (mpt3sas_config_get_volume_handle(ioc, handle,
		    &volume_handle)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
		    volume_handle, &volume_wwid)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
	}

	/* PCIe handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
		    sas_device_priv_data->sas_target->sas_address);
		if (!pcie_device) {
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		qdepth = ioc->max_nvme_qd;
		ds = "NVMe";
		sdev_printk(KERN_INFO, sdev,
			    "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			    ds, handle, (unsigned long long)pcie_device->wwid,
			    pcie_device->port_num);
		if (pcie_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "%s: enclosure logical id(0x%016llx), slot(%d)\n",
			    ds,
			    (unsigned long long)pcie_device->enclosure_logical_id,
			    pcie_device->slot);
		if (pcie_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
				    "%s: enclosure level(0x%04x),"
				    "connector name( %s)\n", ds,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name);

		/*
		 * nvme_mdts appears to be a byte limit, converted here
		 * to 512-byte sectors — TODO confirm against the field's
		 * producer.
		 */
		if (pcie_device->nvme_mdts)
			blk_queue_max_hw_sectors(sdev->request_queue,
			    pcie_device->nvme_mdts/512);

		pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
		/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
		** merged and can eliminate holes created during merging
		** operation.
		**/
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
		    sdev->request_queue);
		blk_queue_virt_boundary(sdev->request_queue,
		    ioc->page_size - 1);
		return 0;
	}

	/* SAS/SATA devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_device_priv_data->sas_target->sas_address,
	    sas_device_priv_data->sas_target->port);
	if (!sas_device) {
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	sas_device->volume_handle = volume_handle;
	sas_device->volume_wwid = volume_wwid;
	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
		/* Wide ports get a deeper queue than narrow ports. */
		qdepth = (sas_device->port_type > 1) ?
			ioc->max_wideport_qd : ioc->max_narrowport_qd;
		ssp_target = 1;
		if (sas_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SEP) {
			sdev_printk(KERN_WARNING, sdev,
			    "set ignore_delay_remove for handle(0x%04x)\n",
			    sas_device_priv_data->sas_target->handle);
			sas_device_priv_data->ignore_delay_remove = 1;
			ds = "SES";
		} else
			ds = "SSP";
	} else {
		qdepth = ioc->max_sata_qd;
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
			ds = "STP";
		else if (sas_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			ds = "SATA";
	}

	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
	    ds, handle, (unsigned long long)sas_device->sas_address,
	    sas_device->phy, (unsigned long long)sas_device->device_name);

	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);

	sas_device_put(sas_device);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* SATA feature report only makes sense for non-SSP devices. */
	if (!ssp_target)
		_scsih_display_sata_capabilities(ioc, handle, sdev);


	mpt3sas_scsih_change_queue_depth(sdev, qdepth);

	if (ssp_target) {
		sas_read_port_mode_page(sdev);
		_scsih_enable_tlr(ioc, sdev);
	}

	return 0;
}
2756
2757 /**
2758 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2759 * @sdev: scsi device struct
2760 * @bdev: pointer to block device context
2761 * @capacity: device size (in 512 byte sectors)
2762 * @params: three element array to place output:
2763 * params[0] number of heads (max 255)
2764 * params[1] number of sectors (max 63)
2765 * params[2] number of cylinders
2766 */
2767 static int
scsih_bios_param(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int params[])2768 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2769 sector_t capacity, int params[])
2770 {
2771 int heads;
2772 int sectors;
2773 sector_t cylinders;
2774 ulong dummy;
2775
2776 heads = 64;
2777 sectors = 32;
2778
2779 dummy = heads * sectors;
2780 cylinders = capacity;
2781 sector_div(cylinders, dummy);
2782
2783 /*
2784 * Handle extended translation size for logical drives
2785 * > 1Gb
2786 */
2787 if ((ulong)capacity >= 0x200000) {
2788 heads = 255;
2789 sectors = 63;
2790 dummy = heads * sectors;
2791 cylinders = capacity;
2792 sector_div(cylinders, dummy);
2793 }
2794
2795 /* return result */
2796 params[0] = heads;
2797 params[1] = sectors;
2798 params[2] = cylinders;
2799
2800 return 0;
2801 }
2802
2803 /**
2804 * _scsih_response_code - translation of device response code
2805 * @ioc: per adapter object
2806 * @response_code: response code returned by the device
2807 */
2808 static void
_scsih_response_code(struct MPT3SAS_ADAPTER * ioc,u8 response_code)2809 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2810 {
2811 char *desc;
2812
2813 switch (response_code) {
2814 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2815 desc = "task management request completed";
2816 break;
2817 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2818 desc = "invalid frame";
2819 break;
2820 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2821 desc = "task management request not supported";
2822 break;
2823 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2824 desc = "task management request failed";
2825 break;
2826 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2827 desc = "task management request succeeded";
2828 break;
2829 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2830 desc = "invalid lun";
2831 break;
2832 case 0xA:
2833 desc = "overlapped tag attempted";
2834 break;
2835 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2836 desc = "task queued, however not sent to target";
2837 break;
2838 default:
2839 desc = "unknown";
2840 break;
2841 }
2842 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2843 }
2844
/**
 * _scsih_tm_done - tm completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using scsih_issue_tm.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *	   0 means the mf is freed from this function.
 */
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* No TM outstanding: stale or unexpected completion, ignore. */
	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	/* Completion is for a different request than the pending TM. */
	if (ioc->tm_cmds.smid != smid)
		return 1;
	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 4-byte units, hence the *4. */
		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
	/* Wake the waiter blocked in scsih_issue_tm(). */
	complete(&ioc->tm_cmds.done);
	return 1;
}
2877
/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During taskmangement request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	/*
	 * NOTE(review): a skip flag with "continue" is used instead of
	 * "break", presumably so the shost_for_each_device() iterator
	 * runs to completion and releases its per-device references —
	 * confirm against the iterator's contract before changing.
	 */
	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			/* Freeze the queue and quiet loginfo messages. */
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}
2905
/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During taskmangement request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	/*
	 * Mirrors mpt3sas_scsih_set_tm_flag(): skip/continue rather than
	 * break so the iterator's device reference handling completes.
	 */
	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			/* Unfreeze the queue and restore loginfo output. */
			sas_device_priv_data->sas_target->tm_busy = 0;
			skip = 1;
			ioc->ignore_loginfos = 0;
		}
	}
}
2933
2934 /**
2935 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2936 * @ioc: per adapter object
2937 * @channel: the channel assigned by the OS
2938 * @id: the id assigned by the OS
2939 * @lun: lun number
2940 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2941 * @smid_task: smid assigned to the task
2942 *
2943 * Look whether TM has aborted the timed out SCSI command, if
2944 * TM has aborted the IO then return SUCCESS else return FAILED.
2945 */
2946 static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER * ioc,uint channel,uint id,uint lun,u8 type,u16 smid_task)2947 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2948 uint id, uint lun, u8 type, u16 smid_task)
2949 {
2950
2951 if (smid_task <= ioc->shost->can_queue) {
2952 switch (type) {
2953 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2954 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2955 id, channel)))
2956 return SUCCESS;
2957 break;
2958 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2959 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2960 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2961 lun, channel)))
2962 return SUCCESS;
2963 break;
2964 default:
2965 return SUCCESS;
2966 }
2967 } else if (smid_task == ioc->scsih_cmds.smid) {
2968 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2969 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2970 return SUCCESS;
2971 } else if (smid_task == ioc->ctl_cmds.smid) {
2972 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2973 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2974 return SUCCESS;
2975 }
2976
2977 return FAILED;
2978 }
2979
2980 /**
2981 * scsih_tm_post_processing - post processing of target & LUN reset
2982 * @ioc: per adapter object
2983 * @handle: device handle
2984 * @channel: the channel assigned by the OS
2985 * @id: the id assigned by the OS
2986 * @lun: lun number
2987 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2988 * @smid_task: smid assigned to the task
2989 *
 * Post processing of target & LUN reset. Due to interrupt latency
 * issues it is possible that the interrupt for an aborted IO has not
 * been received yet. So before returning a failure status, poll the
 * reply descriptor pools for the reply of the timed-out SCSI command.
2994 * Return FAILED status if reply for timed out is not received
2995 * otherwise return SUCCESS.
2996 */
2997 static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER * ioc,u16 handle,uint channel,uint id,uint lun,u8 type,u16 smid_task)2998 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2999 uint channel, uint id, uint lun, u8 type, u16 smid_task)
3000 {
3001 int rc;
3002
3003 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3004 if (rc == SUCCESS)
3005 return rc;
3006
3007 ioc_info(ioc,
3008 "Poll ReplyDescriptor queues for completion of"
3009 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3010 smid_task, type, handle);
3011
3012 /*
3013 * Due to interrupt latency issues, driver may receive interrupt for
3014 * TM first and then for aborted SCSI IO command. So, poll all the
3015 * ReplyDescriptor pools before returning the FAILED status to SML.
3016 */
3017 mpt3sas_base_mask_interrupts(ioc);
3018 mpt3sas_base_sync_reply_irqs(ioc, 1);
3019 mpt3sas_base_unmask_interrupts(ioc);
3020
3021 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3022 }
3023
3024 /**
3025 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3026 * @ioc: per adapter struct
3027 * @handle: device handle
3028 * @channel: the channel assigned by the OS
3029 * @id: the id assigned by the OS
3030 * @lun: lun number
3031 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3032 * @smid_task: smid assigned to the task
3033 * @msix_task: MSIX table index supplied by the OS
3034 * @timeout: timeout in seconds
3035 * @tr_method: Target Reset Method
3036 * Context: user
3037 *
3038 * A generic API for sending task management requests to firmware.
3039 *
3040 * The callback index is set inside `ioc->tm_cb_idx`.
3041 * The caller is responsible to check for outstanding commands.
3042 *
3043 * Return: SUCCESS or FAILED.
3044 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	/* Caller must hold tm_cmds.mutex; see mpt3sas_scsih_issue_locked_tm(). */
	lockdep_assert_held(&ioc->tm_cmds.mutex);

	/* Only one TM may be outstanding at a time. */
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/*
	 * A busy doorbell or a faulted/coredumped IOC cannot service a TM
	 * request; escalate straight to a hard reset in those states.
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests go through the high-priority request queue. */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
	    ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
		     handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* MsgFlags carries the reset method only for abort/query task types. */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	/* Freeze the target's queue for the duration of the TM. */
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		/* macro sets issue_reset when the firmware never replied */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
			    FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
		    ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
			     le16_to_cpu(mpi_reply->IOCStatus),
			     le32_to_cpu(mpi_reply->IOCLogInfo),
			     le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	/* Map the TM outcome to the SUCCESS/FAILED codes SML expects. */
	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If the DevHandle field in smid_task's entry of the request
		 * pool doesn't match the device handle on which this task
		 * abort TM was issued, then the TM successfully aborted the
		 * timed out command: smid_task's entry in the request pool
		 * is memset to zero once the timed out command is returned
		 * to the SML. If the command was not aborted, smid_task's
		 * entry won't be cleared, it will still hold the same
		 * DevHandle this task abort TM was issued on, and the driver
		 * returns the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		/* Poll reply queues before declaring the reset a failure. */
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

 out:
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
3191
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
	u16 msix_task, u8 timeout, u8 tr_method)
{
	int rc;

	/* Serialize TM requests; mpt3sas_scsih_issue_tm() asserts the mutex. */
	mutex_lock(&ioc->tm_cmds.mutex);
	rc = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return rc;
}
3205
3206 /**
3207 * _scsih_tm_display_info - displays info about the device
3208 * @ioc: per adapter struct
3209 * @scmd: pointer to scsi command object
3210 *
3211 * Called by task management callback handlers.
3212 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* WarpDrive controllers hide IR messages; label volumes accordingly. */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		/* RAID volume: handle and wwid come from the target itself. */
		starget_printk(KERN_INFO, starget,
		    "%s handle(0x%04x), %s wwid(0x%016llx)\n",
		    device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* NVMe/PCIe attached device; lookup under pcie_device_lock. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			    pcie_device->handle,
			    (unsigned long long)pcie_device->wwid,
			    pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    pcie_device->enclosure_logical_id,
				    pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
				    "enclosure level(0x%04x), connector name( %s)\n",
				    pcie_device->enclosure_level,
				    pcie_device->connector_name);
			/* drop the reference taken by the lookup above */
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		/* SAS/SATA attached device; lookup under sas_device_lock. */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				    (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			/* drop the reference taken by the lookup above */
			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}
3287
3288 /**
3289 * scsih_abort - eh threads main abort routine
3290 * @scmd: pointer to scsi command object
3291 *
3292 * Return: SUCCESS if command aborted else FAILED
3293 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	/* Device may have been removed while the command was outstanding. */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	/* NVMe devices use the adapter-configured abort timeout. */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3361
3362 /**
3363 * scsih_dev_reset - eh threads main device reset routine
3364 * @scmd: pointer to scsi command object
3365 *
3366 * Return: SUCCESS if command aborted else FAILED
3367 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* Device may have been removed while the command was outstanding. */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices: use per-device timeout and a protocol-level reset. */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	/* drop references taken by the lookups above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
3442
3443 /**
3444 * scsih_target_reset - eh threads main target reset routine
3445 * @scmd: pointer to scsi command object
3446 *
3447 * Return: SUCCESS if command aborted else FAILED
3448 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* Target may have been removed while the command was outstanding. */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices: use per-device timeout and a protocol-level reset. */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, 0,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	/* drop references taken by the lookups above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3520
3521
3522 /**
3523 * scsih_host_reset - eh threads main host reset routine
3524 * @scmd: pointer to scsi command object
3525 *
3526 * Return: SUCCESS if command aborted else FAILED
3527 */
3528 static int
scsih_host_reset(struct scsi_cmnd * scmd)3529 scsih_host_reset(struct scsi_cmnd *scmd)
3530 {
3531 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3532 int r, retval;
3533
3534 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3535 scsi_print_command(scmd);
3536
3537 if (ioc->is_driver_loading || ioc->remove_host) {
3538 ioc_info(ioc, "Blocking the host reset\n");
3539 r = FAILED;
3540 goto out;
3541 }
3542
3543 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3544 r = (retval < 0) ? FAILED : SUCCESS;
3545 out:
3546 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3547 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3548
3549 return r;
3550 }
3551
3552 /**
3553 * _scsih_fw_event_add - insert and queue up fw_event
3554 * @ioc: per adapter object
3555 * @fw_event: object describing the event
3556 * Context: This function will acquire ioc->fw_event_lock.
3557 *
3558 * This adds the firmware event object into link list, then queues it up to
3559 * be processed from user context.
3560 */
3561 static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)3562 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3563 {
3564 unsigned long flags;
3565
3566 if (ioc->firmware_event_thread == NULL)
3567 return;
3568
3569 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3570 fw_event_work_get(fw_event);
3571 INIT_LIST_HEAD(&fw_event->list);
3572 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3573 INIT_WORK(&fw_event->work, _firmware_event_work);
3574 fw_event_work_get(fw_event);
3575 queue_work(ioc->firmware_event_thread, &fw_event->work);
3576 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3577 }
3578
3579 /**
3580 * _scsih_fw_event_del_from_list - delete fw_event from the list
3581 * @ioc: per adapter object
3582 * @fw_event: object describing the event
3583 * Context: This function will acquire ioc->fw_event_lock.
3584 *
3585 * If the fw_event is on the fw_event_list, remove it and do a put.
3586 */
3587 static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)3588 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3589 *fw_event)
3590 {
3591 unsigned long flags;
3592
3593 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3594 if (!list_empty(&fw_event->list)) {
3595 list_del_init(&fw_event->list);
3596 fw_event_work_put(fw_event);
3597 }
3598 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3599 }
3600
3601
3602 /**
3603 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3604 * @ioc: per adapter object
3605 * @event_data: trigger event data
3606 */
3607 void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER * ioc,struct SL_WH_TRIGGERS_EVENT_DATA_T * event_data)3608 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3609 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3610 {
3611 struct fw_event_work *fw_event;
3612 u16 sz;
3613
3614 if (ioc->is_driver_loading)
3615 return;
3616 sz = sizeof(*event_data);
3617 fw_event = alloc_fw_event_work(sz);
3618 if (!fw_event)
3619 return;
3620 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3621 fw_event->ioc = ioc;
3622 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3623 _scsih_fw_event_add(ioc, fw_event);
3624 fw_event_work_put(fw_event);
3625 }
3626
3627 /**
3628 * _scsih_error_recovery_delete_devices - remove devices not responding
3629 * @ioc: per adapter object
3630 */
3631 static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER * ioc)3632 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3633 {
3634 struct fw_event_work *fw_event;
3635
3636 fw_event = alloc_fw_event_work(0);
3637 if (!fw_event)
3638 return;
3639 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3640 fw_event->ioc = ioc;
3641 _scsih_fw_event_add(ioc, fw_event);
3642 fw_event_work_put(fw_event);
3643 }
3644
3645 /**
3646 * mpt3sas_port_enable_complete - port enable completed (fake event)
3647 * @ioc: per adapter object
3648 */
3649 void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER * ioc)3650 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3651 {
3652 struct fw_event_work *fw_event;
3653
3654 fw_event = alloc_fw_event_work(0);
3655 if (!fw_event)
3656 return;
3657 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3658 fw_event->ioc = ioc;
3659 _scsih_fw_event_add(ioc, fw_event);
3660 fw_event_work_put(fw_event);
3661 }
3662
/* Pop the oldest event off fw_event_list, or return NULL if it is empty. */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
		    struct fw_event_work, list);
		list_del_init(&fw_event->list);
		/*
		 * Drops only the list's reference (taken in
		 * _scsih_fw_event_add()); the work item's reference keeps
		 * the object alive for the caller.
		 */
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}
3679
3680 /**
3681 * _scsih_fw_event_cleanup_queue - cleanup event queue
3682 * @ioc: per adapter object
3683 *
3684 * Walk the firmware event queue, either killing timers, or waiting
3685 * for outstanding events to complete
3686 *
3687 * Context: task, can sleep
3688 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	/* Nothing to do with no worker thread or no queued/current events. */
	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	    !ioc->firmware_event_thread)
		return;
	/*
	 * Set current running event as ignore, so that
	 * current running event will exit quickly.
	 * As diag reset has occurred it is of no use
	 * to process remaining stale event data entries.
	 */
	if (ioc->shost_recovery && ioc->current_event)
		ioc->current_event->ignore = 1;

	ioc->fw_events_cleanup = 1;
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	    (fw_event = ioc->current_event)) {

		/*
		 * Don't call cancel_work_sync() for current_event
		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
		 * otherwise we may observe deadlock if current
		 * hard reset issued as part of processing the current_event.
		 *
		 * Original logic of cleaning the current_event is added
		 * for handling the back to back host reset issued by the user.
		 * i.e. during back to back host reset, driver use to process
		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
		 * event back to back and this made the drives to unregister
		 * the devices from SML.
		 */

		if (fw_event == ioc->current_event &&
		    ioc->current_event->event !=
		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
			ioc->current_event = NULL;
			continue;
		}

		/*
		 * Driver has to clear ioc->start_scan flag when
		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
		 * otherwise scsi_scan_host() API waits for the
		 * 5 minute timer to expire. If we exit from
		 * scsi_scan_host() early then we can issue the
		 * new port enable request as part of current diag reset.
		 */
		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			ioc->start_scan = 0;
		}

		/*
		 * Wait on the fw_event to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fw_event.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from _firmware_event_work()
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

	}
	ioc->fw_events_cleanup = 0;
}
3758
3759 /**
3760 * _scsih_internal_device_block - block the sdev device
3761 * @sdev: per device object
3762 * @sas_device_priv_data : per device driver private data
3763 *
3764 * make sure device is blocked without error, if not
3765 * print an error
3766 */
3767 static void
_scsih_internal_device_block(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3768 _scsih_internal_device_block(struct scsi_device *sdev,
3769 struct MPT3SAS_DEVICE *sas_device_priv_data)
3770 {
3771 int r = 0;
3772
3773 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3774 sas_device_priv_data->sas_target->handle);
3775 sas_device_priv_data->block = 1;
3776
3777 r = scsi_internal_device_block_nowait(sdev);
3778 if (r == -EINVAL)
3779 sdev_printk(KERN_WARNING, sdev,
3780 "device_block failed with return(%d) for handle(0x%04x)\n",
3781 r, sas_device_priv_data->sas_target->handle);
3782 }
3783
3784 /**
3785 * _scsih_internal_device_unblock - unblock the sdev device
3786 * @sdev: per device object
3787 * @sas_device_priv_data : per device driver private data
3788 * make sure device is unblocked without error, if not retry
3789 * by blocking and then unblocking
3790 */
3791
static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
	struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped by
		 * our earlier block call. We need to perform a block again
		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */

		sdev_printk(KERN_WARNING, sdev,
		    "device_unblock failed with return(%d) for handle(0x%04x) "
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		/* retry: force the device through SDEV_BLOCK first ... */
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);

		/* ... then back to SDEV_RUNNING */
		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
			    " failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
	}
}
3827
3828 /**
3829 * _scsih_ublock_io_all_device - unblock every device
3830 * @ioc: per adapter object
3831 *
3832 * change the device state from block to running
3833 */
3834 static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER * ioc)3835 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3836 {
3837 struct MPT3SAS_DEVICE *sas_device_priv_data;
3838 struct scsi_device *sdev;
3839
3840 shost_for_each_device(sdev, ioc->shost) {
3841 sas_device_priv_data = sdev->hostdata;
3842 if (!sas_device_priv_data)
3843 continue;
3844 if (!sas_device_priv_data->block)
3845 continue;
3846
3847 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3848 "device_running, handle(0x%04x)\n",
3849 sas_device_priv_data->sas_target->handle));
3850 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3851 }
3852 }
3853
3854
3855 /**
3856 * _scsih_ublock_io_device - prepare device to be deleted
3857 * @ioc: per adapter object
3858 * @sas_address: sas address
3859 * @port: hba port entry
3860 *
3861 * unblock then put device in offline state
3862 */
3863 static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)3864 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3865 u64 sas_address, struct hba_port *port)
3866 {
3867 struct MPT3SAS_DEVICE *sas_device_priv_data;
3868 struct scsi_device *sdev;
3869
3870 shost_for_each_device(sdev, ioc->shost) {
3871 sas_device_priv_data = sdev->hostdata;
3872 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3873 continue;
3874 if (sas_device_priv_data->sas_target->sas_address
3875 != sas_address)
3876 continue;
3877 if (sas_device_priv_data->sas_target->port != port)
3878 continue;
3879 if (sas_device_priv_data->block)
3880 _scsih_internal_device_unblock(sdev,
3881 sas_device_priv_data);
3882 }
3883 }
3884
3885 /**
3886 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3887 * @ioc: per adapter object
3888 *
3889 * During device pull we need to appropriately set the sdev state.
3890 */
3891 static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER * ioc)3892 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3893 {
3894 struct MPT3SAS_DEVICE *sas_device_priv_data;
3895 struct scsi_device *sdev;
3896
3897 shost_for_each_device(sdev, ioc->shost) {
3898 sas_device_priv_data = sdev->hostdata;
3899 if (!sas_device_priv_data)
3900 continue;
3901 if (sas_device_priv_data->block)
3902 continue;
3903 if (sas_device_priv_data->ignore_delay_remove) {
3904 sdev_printk(KERN_INFO, sdev,
3905 "%s skip device_block for SES handle(0x%04x)\n",
3906 __func__, sas_device_priv_data->sas_target->handle);
3907 continue;
3908 }
3909 _scsih_internal_device_block(sdev, sas_device_priv_data);
3910 }
3911 }
3912
3913 /**
3914 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3915 * @ioc: per adapter object
3916 * @handle: device handle
3917 *
3918 * During device pull we need to appropriately set the sdev state.
3919 */
3920 static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)3921 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3922 {
3923 struct MPT3SAS_DEVICE *sas_device_priv_data;
3924 struct scsi_device *sdev;
3925 struct _sas_device *sas_device;
3926
3927 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3928
3929 shost_for_each_device(sdev, ioc->shost) {
3930 sas_device_priv_data = sdev->hostdata;
3931 if (!sas_device_priv_data)
3932 continue;
3933 if (sas_device_priv_data->sas_target->handle != handle)
3934 continue;
3935 if (sas_device_priv_data->block)
3936 continue;
3937 if (sas_device && sas_device->pend_sas_rphy_add)
3938 continue;
3939 if (sas_device_priv_data->ignore_delay_remove) {
3940 sdev_printk(KERN_INFO, sdev,
3941 "%s skip device_block for SES handle(0x%04x)\n",
3942 __func__, sas_device_priv_data->sas_target->handle);
3943 continue;
3944 }
3945 _scsih_internal_device_block(sdev, sas_device_priv_data);
3946 }
3947
3948 if (sas_device)
3949 sas_device_put(sas_device);
3950 }
3951
/**
 * _scsih_block_io_to_children_attached_to_ex - block devices below an expander
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	/* first pass: record the handle of every end device on this
	 * expander in the blocking_handles bitmap; the caller drains
	 * that bitmap and performs the actual blocking
	 */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			if (sas_device) {
				set_bit(sas_device->handle,
				    ioc->blocking_handles);
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* second pass: recurse into any child expanders so the whole
	 * subtree below this expander gets marked
	 */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}
4006
4007 /**
4008 * _scsih_block_io_to_children_attached_directly
4009 * @ioc: per adapter object
4010 * @event_data: topology change event data
4011 *
4012 * This routine set sdev state to SDEV_BLOCK for all devices
4013 * direct attached during device pull.
4014 */
4015 static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)4016 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4017 Mpi2EventDataSasTopologyChangeList_t *event_data)
4018 {
4019 int i;
4020 u16 handle;
4021 u16 reason_code;
4022
4023 for (i = 0; i < event_data->NumEntries; i++) {
4024 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4025 if (!handle)
4026 continue;
4027 reason_code = event_data->PHY[i].PhyStatus &
4028 MPI2_EVENT_SAS_TOPO_RC_MASK;
4029 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4030 _scsih_block_io_device(ioc, handle);
4031 }
4032 }
4033
4034 /**
4035 * _scsih_block_io_to_pcie_children_attached_directly
4036 * @ioc: per adapter object
4037 * @event_data: topology change event data
4038 *
4039 * This routine set sdev state to SDEV_BLOCK for all devices
4040 * direct attached during device pull/reconnect.
4041 */
4042 static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)4043 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4044 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4045 {
4046 int i;
4047 u16 handle;
4048 u16 reason_code;
4049
4050 for (i = 0; i < event_data->NumEntries; i++) {
4051 handle =
4052 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4053 if (!handle)
4054 continue;
4055 reason_code = event_data->PortEntry[i].PortStatus;
4056 if (reason_code ==
4057 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4058 _scsih_block_io_device(ioc, handle);
4059 }
4060 }
4061 /**
4062 * _scsih_tm_tr_send - send task management request
4063 * @ioc: per adapter object
4064 * @handle: device handle
4065 * Context: interrupt time.
4066 *
4067 * This code is to initiate the device removal handshake protocol
4068 * with controller firmware. This function will issue target reset
4069 * using high priority request queue. It will send a sas iounit
4070 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4071 *
4072 * This is designed to send muliple task management request at the same
4073 * time to the fifo. If the fifo is full, we will append the request,
4074 * and process it in a future completion.
4075 */
4076 static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER * ioc,u16 handle)4077 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4078 {
4079 Mpi2SCSITaskManagementRequest_t *mpi_request;
4080 u16 smid;
4081 struct _sas_device *sas_device = NULL;
4082 struct _pcie_device *pcie_device = NULL;
4083 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4084 u64 sas_address = 0;
4085 unsigned long flags;
4086 struct _tr_list *delayed_tr;
4087 u32 ioc_state;
4088 u8 tr_method = 0;
4089 struct hba_port *port = NULL;
4090
4091 if (ioc->pci_error_recovery) {
4092 dewtprintk(ioc,
4093 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4094 __func__, handle));
4095 return;
4096 }
4097 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4098 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4099 dewtprintk(ioc,
4100 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4101 __func__, handle));
4102 return;
4103 }
4104
4105 /* if PD, then return */
4106 if (test_bit(handle, ioc->pd_handles))
4107 return;
4108
4109 clear_bit(handle, ioc->pend_os_device_add);
4110
4111 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4112 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4113 if (sas_device && sas_device->starget &&
4114 sas_device->starget->hostdata) {
4115 sas_target_priv_data = sas_device->starget->hostdata;
4116 sas_target_priv_data->deleted = 1;
4117 sas_address = sas_device->sas_address;
4118 port = sas_device->port;
4119 }
4120 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4121 if (!sas_device) {
4122 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4123 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4124 if (pcie_device && pcie_device->starget &&
4125 pcie_device->starget->hostdata) {
4126 sas_target_priv_data = pcie_device->starget->hostdata;
4127 sas_target_priv_data->deleted = 1;
4128 sas_address = pcie_device->wwid;
4129 }
4130 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4131 if (pcie_device && (!ioc->tm_custom_handling) &&
4132 (!(mpt3sas_scsih_is_pcie_scsi_device(
4133 pcie_device->device_info))))
4134 tr_method =
4135 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4136 else
4137 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4138 }
4139 if (sas_target_priv_data) {
4140 dewtprintk(ioc,
4141 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4142 handle, (u64)sas_address));
4143 if (sas_device) {
4144 if (sas_device->enclosure_handle != 0)
4145 dewtprintk(ioc,
4146 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4147 (u64)sas_device->enclosure_logical_id,
4148 sas_device->slot));
4149 if (sas_device->connector_name[0] != '\0')
4150 dewtprintk(ioc,
4151 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4152 sas_device->enclosure_level,
4153 sas_device->connector_name));
4154 } else if (pcie_device) {
4155 if (pcie_device->enclosure_handle != 0)
4156 dewtprintk(ioc,
4157 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4158 (u64)pcie_device->enclosure_logical_id,
4159 pcie_device->slot));
4160 if (pcie_device->connector_name[0] != '\0')
4161 dewtprintk(ioc,
4162 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4163 pcie_device->enclosure_level,
4164 pcie_device->connector_name));
4165 }
4166 _scsih_ublock_io_device(ioc, sas_address, port);
4167 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4168 }
4169
4170 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4171 if (!smid) {
4172 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4173 if (!delayed_tr)
4174 goto out;
4175 INIT_LIST_HEAD(&delayed_tr->list);
4176 delayed_tr->handle = handle;
4177 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4178 dewtprintk(ioc,
4179 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4180 handle));
4181 goto out;
4182 }
4183
4184 dewtprintk(ioc,
4185 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4186 handle, smid, ioc->tm_tr_cb_idx));
4187 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4188 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4189 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4190 mpi_request->DevHandle = cpu_to_le16(handle);
4191 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4192 mpi_request->MsgFlags = tr_method;
4193 set_bit(handle, ioc->device_remove_in_progress);
4194 ioc->put_smid_hi_priority(ioc, smid, 0);
4195 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4196
4197 out:
4198 if (sas_device)
4199 sas_device_put(sas_device);
4200 if (pcie_device)
4201 pcie_device_put(pcie_device);
4202 }
4203
/**
 * _scsih_tm_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	/* drop the completion when the controller cannot make progress */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}
	/* cross-check request vs reply device handle to catch a
	 * completion that doesn't belong to this request
	 */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle,
				   le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* no free smid for the follow-up sas iounit control request:
	 * queue it on the delayed list and replay it later
	 */
	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
				    handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	/* second half of the removal handshake: ask firmware to
	 * remove the device now that the target reset completed
	 */
	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	return _scsih_check_for_pending_tm(ioc, smid);
}
4294
4295 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4296 * issue to IOC or not.
4297 * @ioc: per adapter object
4298 * @scmd: pointer to scsi command object
4299 *
4300 * Returns true if scmd can be issued to IOC otherwise returns false.
4301 */
_scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd)4302 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4303 struct scsi_cmnd *scmd)
4304 {
4305
4306 if (ioc->pci_error_recovery)
4307 return false;
4308
4309 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4310 if (ioc->remove_host)
4311 return false;
4312
4313 return true;
4314 }
4315
4316 if (ioc->remove_host) {
4317
4318 switch (scmd->cmnd[0]) {
4319 case SYNCHRONIZE_CACHE:
4320 case START_STOP:
4321 return true;
4322 default:
4323 return false;
4324 }
4325 }
4326
4327 return true;
4328 }
4329
4330 /**
4331 * _scsih_sas_control_complete - completion routine
4332 * @ioc: per adapter object
4333 * @smid: system request message index
4334 * @msix_index: MSIX table index supplied by the OS
4335 * @reply: reply message frame(lower 32bit addr)
4336 * Context: interrupt time.
4337 *
4338 * This is the sas iounit control completion routine.
4339 * This code is part of the code to initiate the device removal
4340 * handshake protocol with controller firmware.
4341 *
4342 * Return: 1 meaning mf should be freed from _base_interrupt
4343 * 0 means the mf is freed from this function.
4344 */
4345 static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4346 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4347 u8 msix_index, u32 reply)
4348 {
4349 Mpi2SasIoUnitControlReply_t *mpi_reply =
4350 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4351
4352 if (likely(mpi_reply)) {
4353 dewtprintk(ioc,
4354 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4355 le16_to_cpu(mpi_reply->DevHandle), smid,
4356 le16_to_cpu(mpi_reply->IOCStatus),
4357 le32_to_cpu(mpi_reply->IOCLogInfo)));
4358 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4359 MPI2_IOCSTATUS_SUCCESS) {
4360 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4361 ioc->device_remove_in_progress);
4362 }
4363 } else {
4364 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4365 __FILE__, __LINE__, __func__);
4366 }
4367 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4368 }
4369
/**
 * _scsih_tm_tr_volume_send - send target reset request for volumes
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This is designed to send multiple task management request at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _tr_list *delayed_tr;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return;
	}

	/* no free high-priority smid: queue the request on the delayed
	 * volume list; it is replayed from a future completion
	 */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			return;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		return;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_volume_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	ioc->put_smid_hi_priority(ioc, smid, 0);
}
4418
/**
 * _scsih_tm_volume_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	/* drop the completion if a reset/recovery is already under way */
	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* cross-check request vs reply device handle to catch a
	 * completion that doesn't belong to this request
	 */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle, le16_to_cpu(mpi_reply->DevHandle),
				   smid));
		return 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	return _scsih_check_for_pending_tm(ioc, smid);
}
4469
/**
 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @event: Event ID
 * @event_context: used to track events uniquely
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
	U32 event_context)
{
	Mpi2EventAckRequest_t *ack_request;
	/* index of this smid into the internal command lookup table */
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
			    le16_to_cpu(event), smid, ioc->base_cb_idx));
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	/* event/context are MPI-endian (U16/U32) values copied through as-is */
	ack_request->Event = event;
	ack_request->EventContext = event_context;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);
}
4507
/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
 *					sas_io_unit_ctrl messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
	u16 smid, u16 handle)
{
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u32 ioc_state;
	/* index of this smid into the internal command lookup table */
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	/* bail out when the controller cannot accept requests */
	if (ioc->remove_host) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host has been removed\n",
				    __func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return;
	}

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}
4563
4564 /**
4565 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4566 * @ioc: per adapter object
4567 * @smid: system request message index
4568 *
4569 * Context: Executed in interrupt context
4570 *
4571 * This will check delayed internal messages list, and process the
4572 * next request.
4573 *
4574 * Return: 1 meaning mf should be freed from _base_interrupt
4575 * 0 means the mf is freed from this function.
4576 */
4577 u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER * ioc,u16 smid)4578 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4579 {
4580 struct _sc_list *delayed_sc;
4581 struct _event_ack_list *delayed_event_ack;
4582
4583 if (!list_empty(&ioc->delayed_event_ack_list)) {
4584 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4585 struct _event_ack_list, list);
4586 _scsih_issue_delayed_event_ack(ioc, smid,
4587 delayed_event_ack->Event, delayed_event_ack->EventContext);
4588 list_del(&delayed_event_ack->list);
4589 kfree(delayed_event_ack);
4590 return 0;
4591 }
4592
4593 if (!list_empty(&ioc->delayed_sc_list)) {
4594 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4595 struct _sc_list, list);
4596 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4597 delayed_sc->handle);
4598 list_del(&delayed_sc->list);
4599 kfree(delayed_sc);
4600 return 0;
4601 }
4602 return 1;
4603 }
4604
4605 /**
4606 * _scsih_check_for_pending_tm - check for pending task management
4607 * @ioc: per adapter object
4608 * @smid: system request message index
4609 *
4610 * This will check delayed target reset list, and feed the
4611 * next reqeust.
4612 *
4613 * Return: 1 meaning mf should be freed from _base_interrupt
4614 * 0 means the mf is freed from this function.
4615 */
4616 static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER * ioc,u16 smid)4617 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4618 {
4619 struct _tr_list *delayed_tr;
4620
4621 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4622 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4623 struct _tr_list, list);
4624 mpt3sas_base_free_smid(ioc, smid);
4625 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4626 list_del(&delayed_tr->list);
4627 kfree(delayed_tr);
4628 return 0;
4629 }
4630
4631 if (!list_empty(&ioc->delayed_tr_list)) {
4632 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4633 struct _tr_list, list);
4634 mpt3sas_base_free_smid(ioc, smid);
4635 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4636 list_del(&delayed_tr->list);
4637 kfree(delayed_tr);
4638 return 0;
4639 }
4640
4641 return 1;
4642 }
4643
/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot. When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* kick off the removal handshake for every phy reported as
	 * "target not responding"
	 */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* a handle below num_phys means the parent is the HBA itself,
	 * i.e. these are direct attached devices
	 */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* drain the blocking_handles bitmap populated above,
		 * blocking each recorded handle in turn
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
		    fw_event->event_data;
		/* void any queued add/responding event for the same
		 * expander that just went away
		 */
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4724
/**
 * _scsih_check_pcie_topo_remove_events - sanity check on PCIe topo
 * events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot. When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/* Send a target reset for every attached device reported gone. */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	if (!switch_handle) {
		/* No switch: devices hang directly off the root port. */
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);
		return;
	}
	/* TODO We are not supporting cascaded PCIe Switch removal yet*/
	if ((event_data->SwitchStatus
	    == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
	    (event_data->SwitchStatus ==
	    MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);

	/*
	 * NOTE(review): the checks below compare PCIe SwitchStatus against
	 * MPI2_EVENT_SAS_TOPO_ES_* constants rather than the MPI26 PCIe
	 * equivalents; presumably the numeric values coincide -- confirm
	 * against the MPI headers before changing.
	 */
	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data =
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			/* Queued add for a switch that is now gone: void it. */
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
			    switch_handle) {
				dewtprintk(ioc,
				    ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4796
/**
 * _scsih_set_volume_delete_flag - setting volume delete flag
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Marks the target private data of the RAID volume matching @handle as
 * deleted, under the raid_device_lock, so no further I/O is issued to it.
 */
static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	/* Only flag volumes that are exposed to the SCSI midlayer. */
	if (raid_device && raid_device->starget &&
	    raid_device->starget->hostdata) {
		sas_target_priv_data =
		    raid_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		dewtprintk(ioc,
		    ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
			     handle, (u64)raid_device->wwid));
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
4824
4825 /**
4826 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4827 * @handle: input handle
4828 * @a: handle for volume a
4829 * @b: handle for volume b
4830 *
4831 * IR firmware only supports two raid volumes. The purpose of this
4832 * routine is to set the volume handle in either a or b. When the given
4833 * input handle is non-zero, or when a and b have not been set before.
4834 */
4835 static void
_scsih_set_volume_handle_for_tr(u16 handle,u16 * a,u16 * b)4836 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4837 {
4838 if (!handle || handle == *a || handle == *b)
4839 return;
4840 if (!*a)
4841 *a = handle;
4842 else if (!*b)
4843 *b = handle;
4844 }
4845
/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This routine will send target reset to volume, followed by target
 * resets to the PDs. This is called when a PD has been removed, or
 * volume has been deleted or removed. When the target reset is sent
 * to volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	/* a/b hold the (at most two) volume handles needing a reset. */
	a = 0;
	b = 0;

	/* WarpDrive hides IR details from the upper layers; nothing to do. */
	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		/* Foreign configurations are not managed by this host. */
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		/* Device is no longer a hidden physical disk. */
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/*
			 * PD belongs to a volume being reset above; queue its
			 * reset to run after the volume reset completes.
			 */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
			    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				     handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}
4929
4930
4931 /**
4932 * _scsih_check_volume_delete_events - set delete flag for volumes
4933 * @ioc: per adapter object
4934 * @event_data: the event data payload
4935 * Context: interrupt time.
4936 *
4937 * This will handle the case when the cable connected to entire volume is
4938 * pulled. We will take care of setting the deleted flag so normal IO will
4939 * not be sent.
4940 */
4941 static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrVolume_t * event_data)4942 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4943 Mpi2EventDataIrVolume_t *event_data)
4944 {
4945 u32 state;
4946
4947 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4948 return;
4949 state = le32_to_cpu(event_data->NewValue);
4950 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4951 MPI2_RAID_VOL_STATE_FAILED)
4952 _scsih_set_volume_delete_flag(ioc,
4953 le16_to_cpu(event_data->VolDevHandle));
4954 }
4955
/**
 * _scsih_temp_threshold_events - display temperature threshold exceeded events
 * @ioc: per adapter object
 * @event_data: the temp threshold event data
 * Context: interrupt time.
 */
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataTemperature_t *event_data)
{
	u32 doorbell;

	/* Only report sensors the IOC told us it actually has. */
	if (ioc->temp_sensors_count >= event_data->SensorNum) {
		/* Status bits 0-3 correspond to threshold levels 0-3. */
		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
			event_data->SensorNum);
		ioc_err(ioc, "Current Temp In Celsius: %d\n",
			event_data->CurrentTemperature);
		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
			/* Overheat may have faulted the IOC; dump its state. */
			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_COREDUMP) {
				mpt3sas_print_coredump_info(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			}
		}
	}
}
4990
/**
 * _scsih_set_satl_pending - track an in-flight ATA passthrough command
 * @scmd: pointer to scsi command object
 * @pending: true to mark pending, false to clear
 *
 * Firmware SATL bug workaround: at most one ATA_12/ATA_16 passthrough
 * command may be outstanding per device.
 *
 * Return: 1 if @pending was requested but another ATA passthrough is
 * already outstanding, otherwise 0.
 */
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;

	/* Only ATA passthrough opcodes are tracked. */
	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
		return 0;

	if (!pending) {
		clear_bit(0, &priv->ata_command_pending);
		return 0;
	}

	/* Atomically claim the slot; non-zero means it was already taken. */
	return test_and_set_bit(0, &priv->ata_command_pending);
}
5004
/**
 * _scsih_flush_running_cmds - completing outstanding commands.
 * @ioc: per adapter object
 *
 * The flushing out of all pending scmd commands following host reset,
 * where all IO is dropped to the floor.
 */
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 smid;
	int count = 0;

	/* Walk every possible SCSI IO smid and complete whatever is active. */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		count++;
		/* Clear the SATL workaround flag for ATA passthrough cmds. */
		_scsih_set_satl_pending(scmd, false);
		st = scsi_cmd_priv(scmd);
		mpt3sas_base_clear_st(ioc, st);
		scsi_dma_unmap(scmd);
		/* No retry possible when the host is going away. */
		if (ioc->pci_error_recovery || ioc->remove_host)
			scmd->result = DID_NO_CONNECT << 16;
		else
			scmd->result = DID_RESET << 16;
		scsi_done(scmd);
	}
	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
5037
/**
 * _scsih_setup_eedp - setup MPI request for EEDP transfer
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_request: pointer to the SCSI_IO request message frame
 *
 * Supporting protection 1 and 3.
 */
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi25SCSIIORequest_t *mpi_request)
{
	u16 eedp_flags;
	Mpi25SCSIIORequest_t *mpi_request_3v =
	    (Mpi25SCSIIORequest_t *)mpi_request;

	switch (scsi_get_prot_op(scmd)) {
	case SCSI_PROT_READ_STRIP:
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
		break;
	default:
		/* No supported protection op; leave the frame untouched. */
		return;
	}

	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;

	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;

		/* Reference tag is carried big-endian in the CDB union. */
		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
		    cpu_to_be32(scsi_prot_ref_tag(scmd));
	}

	mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));

	if (ioc->is_gen35_ioc)
		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}
5084
5085 /**
5086 * _scsih_eedp_error_handling - return sense code for EEDP errors
5087 * @scmd: pointer to scsi command object
5088 * @ioc_status: ioc status
5089 */
5090 static void
_scsih_eedp_error_handling(struct scsi_cmnd * scmd,u16 ioc_status)5091 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5092 {
5093 u8 ascq;
5094
5095 switch (ioc_status) {
5096 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5097 ascq = 0x01;
5098 break;
5099 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5100 ascq = 0x02;
5101 break;
5102 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5103 ascq = 0x03;
5104 break;
5105 default:
5106 ascq = 0x00;
5107 break;
5108 }
5109 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5110 set_host_byte(scmd, DID_ABORT);
5111 }
5112
/**
 * scsih_qcmd - main scsi request entry point
 * @shost: SCSI host pointer
 * @scmd: pointer to scsi command object
 *
 * The callback index is set inside `ioc->scsi_io_cb_idx`.
 *
 * Return: 0 on success. If there's a failure, return either:
 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
 */
static int
scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _raid_device *raid_device;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int class;
	Mpi25SCSIIORequest_t *mpi_request;
	struct _pcie_device *pcie_device = NULL;
	u32 mpi_control;
	u16 smid;
	u16 handle;

	if (ioc->logging_level & MPT_DEBUG_SCSI)
		scsi_print_command(scmd);

	/* Fail fast when the device was never (or is no longer) attached. */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}

	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}

	sas_target_priv_data = sas_device_priv_data->sas_target;

	/* invalid device handle */
	handle = sas_target_priv_data->handle;

	/*
	 * Avoid error handling escallation when device is disconnected
	 */
	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
		if (scmd->device->host->shost_state == SHOST_RECOVERY &&
		    scmd->cmnd[0] == TEST_UNIT_READY) {
			/* UNIT ATTENTION 29/07: reset occurred; retriable. */
			scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
			scsi_done(scmd);
			return 0;
		}
	}

	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}


	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
		/* host recovery or link resets sent via IOCTLs */
		return SCSI_MLQUEUE_HOST_BUSY;
	} else if (sas_target_priv_data->deleted) {
		/* device has been deleted */
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	} else if (sas_target_priv_data->tm_busy ||
		   sas_device_priv_data->block) {
		/* device busy with task management */
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/*
	 * Bug work around for firmware SATL handling. The loop
	 * is based on atomic operations and ensures consistency
	 * since we're lockless at this point
	 */
	do {
		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
			return SCSI_MLQUEUE_DEVICE_BUSY;
	} while (_scsih_set_satl_pending(scmd, true));

	/* Translate data direction into MPI control flags. */
	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;

	/* set tags */
	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	/* NCQ Prio supported, make sure control indicated high priority */
	if (sas_device_priv_data->ncq_prio_enable) {
		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (class == IOPRIO_CLASS_RT)
			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
	}
	/* Make sure Device is not raid volume.
	 * We do not expose raid functionality to upper layer for warpdrive.
	 */
	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
	    && !scsih_is_nvme(&scmd->device->sdev_gendev))
	    && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;

	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		/* Undo the SATL-pending claim taken above before failing. */
		_scsih_set_satl_pending(scmd, false);
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	_scsih_setup_eedp(ioc, scmd, mpi_request);

	if (scmd->cmd_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	/* RAID components take the passthrough function instead. */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT)
		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
	else
		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
	mpi_request->Control = cpu_to_le32(mpi_control);
	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
	mpi_request->SenseBufferLowAddress =
	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
	    mpi_request->LUN);
	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);

	if (mpi_request->DataLength) {
		pcie_device = sas_target_priv_data->pcie_dev;
		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
			/* SG build failed: release smid and SATL claim. */
			mpt3sas_base_free_smid(ioc, smid);
			_scsih_set_satl_pending(scmd, false);
			goto out;
		}
	} else
		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);

	raid_device = sas_target_priv_data->raid_device;
	if (raid_device && raid_device->direct_io_enabled)
		mpt3sas_setup_direct_io(ioc, scmd,
		    raid_device, mpi_request);

	/* Post the request on the appropriate queue. */
	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
			ioc->put_smid_fast_path(ioc, smid, handle);
		} else
			ioc->put_smid_scsi_io(ioc, smid,
			    le16_to_cpu(mpi_request->DevHandle));
	} else
		ioc->put_smid_default(ioc, smid);
	return 0;

 out:
	return SCSI_MLQUEUE_HOST_BUSY;
}
5287
5288 /**
5289 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5290 * @sense_buffer: sense data returned by target
5291 * @data: normalized skey/asc/ascq
5292 */
5293 static void
_scsih_normalize_sense(char * sense_buffer,struct sense_info * data)5294 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5295 {
5296 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5297 /* descriptor format */
5298 data->skey = sense_buffer[1] & 0x0F;
5299 data->asc = sense_buffer[2];
5300 data->ascq = sense_buffer[3];
5301 } else {
5302 /* fixed format */
5303 data->skey = sense_buffer[2] & 0x0F;
5304 data->asc = sense_buffer[12];
5305 data->ascq = sense_buffer[13];
5306 }
5307 }
5308
/**
 * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index of the failed IO
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/*
	 * NOTE(review): ioc->tmp_string is a shared scratch buffer --
	 * presumably callers are serialized; confirm before relying on it.
	 */
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* This loginfo code is deliberately not reported. */
	if (log_info == 0x31170000)
		return;

	/* Translate the IOC status code to a human-readable string. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Translate the SCSI status byte to a human-readable string. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* Build a space-separated list of the SCSI state flag names. */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* Identify the device: RAID volume, NVMe (PCIe) or SAS. */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}
5520
/**
 * _scsih_turn_on_pfa_led - illuminate PFA LED
 * @ioc: per adapter object
 * @handle: device handle
 * Context: process
 *
 * Sends a SCSI Enclosure Processor WRITE_STATUS request setting the
 * predicted-fault slot status for the device identified by @handle.
 */
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		return;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus =
	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
	mpi_request.DevHandle = cpu_to_le16(handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	/* Remember the LED state so it can be turned off later. */
	sas_device->pfa_led_on = 1;

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
		    ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
			     le16_to_cpu(mpi_reply.IOCStatus),
			     le32_to_cpu(mpi_reply.IOCLogInfo)));
		goto out;
	}
 out:
	/* Drop the reference taken by mpt3sas_get_sdev_by_handle(). */
	sas_device_put(sas_device);
}
5563
/**
 * _scsih_turn_off_pfa_led - turn off Fault LED
 * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to turned off
 * Context: process
 *
 * Sends a SCSI Enclosure Processor WRITE_STATUS request with a zero
 * slot status, addressed by enclosure handle and slot number.
 */
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	/* Clearing SlotStatus turns the predicted-fault LED off. */
	mpi_request.SlotStatus = 0;
	mpi_request.Slot = cpu_to_le16(sas_device->slot);
	mpi_request.DevHandle = 0;
	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
	/*
	 * Addressed by enclosure/slot rather than device handle --
	 * presumably because the device handle may already be invalid
	 * when the device has been removed; confirm against callers.
	 */
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
		    ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
			     le16_to_cpu(mpi_reply.IOCStatus),
			     le32_to_cpu(mpi_reply.IOCLogInfo)));
		return;
	}
}
5600
/**
 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 *
 * Queues a MPT3SAS_TURN_ON_PFA_LED work item so the LED write (which
 * requires process context) happens outside the interrupt path.
 */
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
	fw_event->device_handle = handle;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	/* presumably _scsih_fw_event_add() holds its own reference;
	 * drop ours -- TODO confirm against its definition. */
	fw_event_work_put(fw_event);
}
5621
/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 *
 * Invoked from _scsih_io_done() when sense data reports a SMART
 * predicted-fault (asc 0x5D).  RAID components and volumes are skipped.
 * For IBM-branded adapters a delayed event is fired to light the PFA
 * LED, and a synthesized SAS device-status-change event is inserted
 * into the driver's event log.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: interrupt context (see Context above) */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;	/* lengths are in 32-bit dwords */
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	event_data->ASC = 0x5D;	/* failure prediction threshold exceeded */
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
 out:
	/* NULL only when reached via out_unlock before the lookup hit */
	if (sas_device)
		sas_device_put(sas_device);
	return;

 out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}
5692
/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Decodes the MPI reply for a completed SCSI I/O, maps the IOC status
 * and SCSI state/status onto a midlayer result in scmd->result, copies
 * sense data when valid, and completes the command.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	/* No command tracked for this smid: let the caller free the frame. */
	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* No reply frame means successful completion with nothing to decode. */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK)
	     != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		/* reuse the same smid, retargeted at the volume handle */
		st->direct_io = 0;
		st->scmd = scmd;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		/* command re-issued: smid still in flight, frame not freed */
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* only snoop the first completion per device for TLR support */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/*
	 * Underrun with zero bytes moved and a busy-class SCSI status is
	 * promoted to SUCCESS so the raw SCSI status reaches the midlayer.
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	/* Map the MPI IOC status onto a SCSI midlayer result. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		/* NOTE(review): 0x31110630 appears to be a specific firmware
		 * loginfo code; meaning not derivable from this file. After a
		 * few retries the device is taken offline. */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				     SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		/* valid sense data already describes the failure */
		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/* zero-length REPORT LUNS: synthesize an ILLEGAL
			 * REQUEST check condition (asc 0x20) */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
					 0x20, 0);
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scsi_done(scmd);
	return 0;
}
5931
/**
 * _scsih_update_vphys_after_reset - update the Port's
 *			vphys_list after reset
 * @ioc: per adapter object
 *
 * After a host reset the Port IDs of the HBA's virtual phys (vSES
 * devices) may have changed.  Mark all virtual_phy objects dirty,
 * re-read the hardware state, and re-associate each surviving vphy
 * with the hba_port that now owns its Port ID.
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
	    (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is conformed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equals
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
						&ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					/*
					 * NOTE(review): mpt3sas_get_sdev_by_addr()
					 * looks like it returns a referenced
					 * sas_device; no sas_device_put() here —
					 * confirm whether a reference is leaked.
					 */
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirty.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
 out:
	kfree(sas_iounit_pg0);
}
6129
6130 /**
6131 * _scsih_get_port_table_after_reset - Construct temporary port table
6132 * @ioc: per adapter object
6133 * @port_table: address where port table needs to be constructed
6134 *
6135 * return number of HBA port entries available after reset.
6136 */
6137 static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table)6138 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6139 struct hba_port *port_table)
6140 {
6141 u16 sz, ioc_status;
6142 int i, j;
6143 Mpi2ConfigReply_t mpi_reply;
6144 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6145 u16 attached_handle;
6146 u64 attached_sas_addr;
6147 u8 found = 0, port_count = 0, port_id;
6148
6149 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6150 * sizeof(Mpi2SasIOUnit0PhyData_t));
6151 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6152 if (!sas_iounit_pg0) {
6153 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6154 __FILE__, __LINE__, __func__);
6155 return port_count;
6156 }
6157
6158 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6159 sas_iounit_pg0, sz)) != 0)
6160 goto out;
6161 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6162 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6163 goto out;
6164 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6165 found = 0;
6166 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6167 MPI2_SAS_NEG_LINK_RATE_1_5)
6168 continue;
6169 attached_handle =
6170 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6171 if (_scsih_get_sas_address(
6172 ioc, attached_handle, &attached_sas_addr) != 0) {
6173 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6174 __FILE__, __LINE__, __func__);
6175 continue;
6176 }
6177
6178 for (j = 0; j < port_count; j++) {
6179 port_id = sas_iounit_pg0->PhyData[i].Port;
6180 if (port_table[j].port_id == port_id &&
6181 port_table[j].sas_address == attached_sas_addr) {
6182 port_table[j].phy_mask |= (1 << i);
6183 found = 1;
6184 break;
6185 }
6186 }
6187
6188 if (found)
6189 continue;
6190
6191 port_id = sas_iounit_pg0->PhyData[i].Port;
6192 port_table[port_count].port_id = port_id;
6193 port_table[port_count].phy_mask = (1 << i);
6194 port_table[port_count].sas_address = attached_sas_addr;
6195 port_count++;
6196 }
6197 out:
6198 kfree(sas_iounit_pg0);
6199 return port_count;
6200 }
6201
/*
 * Result codes returned by _scsih_look_and_get_matched_port_entry(),
 * ordered from no match to the various match strengths it reports.
 */
enum hba_port_matched_codes {
	NOT_MATCHED = 0,	/* no dirty entry shares this SAS address */
	MATCHED_WITH_ADDR_AND_PHYMASK,	/* address and full phy_mask match */
	MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, /* address, partial phy_mask and port id match */
	MATCHED_WITH_ADDR_AND_SUBPHYMASK, /* address and partial phy_mask match */
	MATCHED_WITH_ADDR,	/* only the SAS address matches */
};
6209
/**
 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
 *					from HBA port table
 * @ioc: per adapter object
 * @port_entry: hba port entry from temporary port table which needs to be
 *		searched for matched entry in the HBA port table
 * @matched_port_entry: save matched hba port entry here
 * @count: count of matched entries
 *
 * Scans only entries still flagged HBA_PORT_FLAG_DIRTY_PORT.  Match
 * strength is ranked: exact address+phymask wins outright (breaks the
 * loop); address + overlapping phymask + same port id beats address +
 * overlapping phymask, which beats an address-only match.  A weaker
 * match never overwrites a stronger one already found.
 *
 * return type of matched entry found.
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;	/* number of address-only matches seen */
	*matched_port_entry = NULL;

	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* strongest match: identical address and phy mask */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* address, overlapping phy mask and same port id */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/* address and overlapping phy mask only */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* weakest: address alone; count how many such entries */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	/* count is only meaningful for ambiguous address-only matches */
	if (matched_code == MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}
6277
6278 /**
6279 * _scsih_del_phy_part_of_anther_port - remove phy if it
6280 * is a part of anther port
6281 *@ioc: per adapter object
6282 *@port_table: port table after reset
6283 *@index: hba port entry index
6284 *@port_count: number of ports available after host reset
6285 *@offset: HBA phy bit offset
6286 *
6287 */
6288 static void
_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table,int index,u8 port_count,int offset)6289 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6290 struct hba_port *port_table,
6291 int index, u8 port_count, int offset)
6292 {
6293 struct _sas_node *sas_node = &ioc->sas_hba;
6294 u32 i, found = 0;
6295
6296 for (i = 0; i < port_count; i++) {
6297 if (i == index)
6298 continue;
6299
6300 if (port_table[i].phy_mask & (1 << offset)) {
6301 mpt3sas_transport_del_phy_from_an_existing_port(
6302 ioc, sas_node, &sas_node->phy[offset]);
6303 found = 1;
6304 break;
6305 }
6306 }
6307 if (!found)
6308 port_table[index].phy_mask |= (1 << offset);
6309 }
6310
6311 /**
6312 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6313 * right port
6314 *@ioc: per adapter object
6315 *@hba_port_entry: hba port table entry
6316 *@port_table: temporary port table
6317 *@index: hba port entry index
6318 *@port_count: number of ports available after host reset
6319 *
6320 */
6321 static void
_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * hba_port_entry,struct hba_port * port_table,int index,int port_count)6322 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6323 struct hba_port *hba_port_entry, struct hba_port *port_table,
6324 int index, int port_count)
6325 {
6326 u32 phy_mask, offset = 0;
6327 struct _sas_node *sas_node = &ioc->sas_hba;
6328
6329 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6330
6331 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6332 if (phy_mask & (1 << offset)) {
6333 if (!(port_table[index].phy_mask & (1 << offset))) {
6334 _scsih_del_phy_part_of_anther_port(
6335 ioc, port_table, index, port_count,
6336 offset);
6337 continue;
6338 }
6339 if (sas_node->phy[offset].phy_belongs_to_port)
6340 mpt3sas_transport_del_phy_from_an_existing_port(
6341 ioc, sas_node, &sas_node->phy[offset]);
6342 mpt3sas_transport_add_phy_to_an_existing_port(
6343 ioc, sas_node, &sas_node->phy[offset],
6344 hba_port_entry->sas_address,
6345 hba_port_entry);
6346 }
6347 }
6348 }
6349
6350 /**
6351 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6352 * @ioc: per adapter object
6353 *
6354 * Returns nothing.
6355 */
6356 static void
_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER * ioc)6357 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6358 {
6359 struct hba_port *port, *port_next;
6360 struct virtual_phy *vphy, *vphy_next;
6361
6362 list_for_each_entry_safe(port, port_next,
6363 &ioc->port_table_list, list) {
6364 if (!port->vphys_mask)
6365 continue;
6366 list_for_each_entry_safe(vphy, vphy_next,
6367 &port->vphys_list, list) {
6368 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6369 drsprintk(ioc, ioc_info(ioc,
6370 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6371 vphy, port->port_id,
6372 vphy->phy_mask));
6373 port->vphys_mask &= ~vphy->phy_mask;
6374 list_del(&vphy->list);
6375 kfree(vphy);
6376 }
6377 }
6378 if (!port->vphys_mask && !port->sas_address)
6379 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6380 }
6381 }
6382
6383 /**
6384 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6385 * after host reset
6386 *@ioc: per adapter object
6387 *
6388 */
6389 static void
_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER * ioc)6390 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6391 {
6392 struct hba_port *port, *port_next;
6393
6394 list_for_each_entry_safe(port, port_next,
6395 &ioc->port_table_list, list) {
6396 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6397 port->flags & HBA_PORT_FLAG_NEW_PORT)
6398 continue;
6399
6400 drsprintk(ioc, ioc_info(ioc,
6401 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6402 port, port->port_id, port->phy_mask));
6403 list_del(&port->list);
6404 kfree(port);
6405 }
6406 }
6407
6408 /**
6409 * _scsih_sas_port_refresh - Update HBA port table after host reset
6410 * @ioc: per adapter object
6411 */
6412 static void
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER * ioc)6413 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6414 {
6415 u32 port_count = 0;
6416 struct hba_port *port_table;
6417 struct hba_port *port_table_entry;
6418 struct hba_port *port_entry = NULL;
6419 int i, j, count = 0, lcount = 0;
6420 int ret;
6421 u64 sas_addr;
6422 u8 num_phys;
6423
6424 drsprintk(ioc, ioc_info(ioc,
6425 "updating ports for sas_host(0x%016llx)\n",
6426 (unsigned long long)ioc->sas_hba.sas_address));
6427
6428 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6429 if (!num_phys) {
6430 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6431 __FILE__, __LINE__, __func__);
6432 return;
6433 }
6434
6435 if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6436 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6437 __FILE__, __LINE__, __func__);
6438 return;
6439 }
6440 ioc->sas_hba.num_phys = num_phys;
6441
6442 port_table = kcalloc(ioc->sas_hba.num_phys,
6443 sizeof(struct hba_port), GFP_KERNEL);
6444 if (!port_table)
6445 return;
6446
6447 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6448 if (!port_count)
6449 return;
6450
6451 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6452 for (j = 0; j < port_count; j++)
6453 drsprintk(ioc, ioc_info(ioc,
6454 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6455 port_table[j].port_id,
6456 port_table[j].phy_mask, port_table[j].sas_address));
6457
6458 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6459 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6460
6461 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6462 port_table_entry = NULL;
6463 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6464 drsprintk(ioc, ioc_info(ioc,
6465 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6466 port_table_entry->port_id,
6467 port_table_entry->phy_mask,
6468 port_table_entry->sas_address));
6469 }
6470
6471 for (j = 0; j < port_count; j++) {
6472 ret = _scsih_look_and_get_matched_port_entry(ioc,
6473 &port_table[j], &port_entry, &count);
6474 if (!port_entry) {
6475 drsprintk(ioc, ioc_info(ioc,
6476 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6477 port_table[j].sas_address,
6478 port_table[j].port_id));
6479 continue;
6480 }
6481
6482 switch (ret) {
6483 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6484 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6485 _scsih_add_or_del_phys_from_existing_port(ioc,
6486 port_entry, port_table, j, port_count);
6487 break;
6488 case MATCHED_WITH_ADDR:
6489 sas_addr = port_table[j].sas_address;
6490 for (i = 0; i < port_count; i++) {
6491 if (port_table[i].sas_address == sas_addr)
6492 lcount++;
6493 }
6494
6495 if (count > 1 || lcount > 1)
6496 port_entry = NULL;
6497 else
6498 _scsih_add_or_del_phys_from_existing_port(ioc,
6499 port_entry, port_table, j, port_count);
6500 }
6501
6502 if (!port_entry)
6503 continue;
6504
6505 if (port_entry->port_id != port_table[j].port_id)
6506 port_entry->port_id = port_table[j].port_id;
6507 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6508 port_entry->phy_mask = port_table[j].phy_mask;
6509 }
6510
6511 port_table_entry = NULL;
6512 }
6513
6514 /**
6515 * _scsih_alloc_vphy - allocate virtual_phy object
6516 * @ioc: per adapter object
6517 * @port_id: Port ID number
6518 * @phy_num: HBA Phy number
6519 *
6520 * Returns allocated virtual_phy object.
6521 */
6522 static struct virtual_phy *
_scsih_alloc_vphy(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 phy_num)6523 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6524 {
6525 struct virtual_phy *vphy;
6526 struct hba_port *port;
6527
6528 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6529 if (!port)
6530 return NULL;
6531
6532 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6533 if (!vphy) {
6534 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6535 if (!vphy)
6536 return NULL;
6537
6538 if (!port->vphys_mask)
6539 INIT_LIST_HEAD(&port->vphys_list);
6540
6541 /*
6542 * Enable bit corresponding to HBA phy number on its
6543 * parent hba_port object's vphys_mask field.
6544 */
6545 port->vphys_mask |= (1 << phy_num);
6546 vphy->phy_mask |= (1 << phy_num);
6547
6548 list_add_tail(&vphy->list, &port->vphys_list);
6549
6550 ioc_info(ioc,
6551 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6552 vphy, port->port_id, phy_num);
6553 }
6554 return vphy;
6555 }
6556
6557 /**
6558 * _scsih_sas_host_refresh - refreshing sas host object contents
6559 * @ioc: per adapter object
6560 * Context: user
6561 *
6562 * During port enable, fw will send topology events for every device. Its
6563 * possible that the handles may change from the previous setting, so this
6564 * code keeping handles updating if changed.
6565 */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
			   (u64)ioc->sas_hba.sas_address));

	/* SAS IO unit page 0 carries one PhyData entry per HBA phy. */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		/* Current negotiated link rate lives in the upper nibble. */
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		/* The controller device handle is the same for all phys;
		 * refresh it from the first entry only.
		 */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		port_id = sas_iounit_pg0->PhyData[i].Port;
		/* First sighting of this port id: create an hba_port entry. */
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			/* Tag ports discovered during recovery so later
			 * processing can tell them apart from old ones.
			 */
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				goto out;
			}
			/* Only virtual phys get a vphy object. */
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		/*
		 * Add new HBA phys to STL if these new phys got added as part
		 * of HBA Firmware upgrade/downgrade operation.
		 */
		if (!ioc->sas_hba.phy[i].phy) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				continue;
			}
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				continue;
			}
			ioc->sas_hba.phy[i].phy_id = i;
			mpt3sas_transport_add_host_phy(ioc,
			    &ioc->sas_hba.phy[i], phy_pg0,
			    ioc->sas_hba.parent_dev);
			continue;
		}
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* Report at least 1.5 Gbps when a device is attached. */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
	/*
	 * Clear the phy details if this phy got disabled as part of
	 * HBA Firmware upgrade/downgrade operation.
	 */
	for (i = ioc->sas_hba.num_phys;
	    i < ioc->sas_hba.nr_phys_allocated; i++) {
		if (ioc->sas_hba.phy[i].phy &&
		    ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
		    SAS_LINK_RATE_1_5_GBPS)
			mpt3sas_transport_update_links(ioc,
			    ioc->sas_hba.sas_address, 0, i,
			    MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
	}
 out:
	kfree(sas_iounit_pg0);
}
6693
6694 /**
6695 * _scsih_sas_host_add - create sas host object
6696 * @ioc: per adapter object
6697 *
6698 * Creating host side data object, stored in ioc->sas_hba
6699 */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}

	/*
	 * Over-allocate the phy array (at least MPT_MAX_HBA_NUM_PHYS
	 * entries) so that phys appearing later, e.g. after a firmware
	 * upgrade, fit without reallocation (see the nr_phys_allocated
	 * loop in _scsih_sas_host_refresh()).
	 */
	ioc->sas_hba.nr_phys_allocated = max_t(u8,
	    MPT_MAX_HBA_NUM_PHYS, num_phys);
	ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1 */
	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit1PhyData_t));
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/*
	 * Cache missing-delay settings from SAS IO unit page 1.  The
	 * report-device-missing value is either in units of 16 seconds
	 * or of 1 second, selected by the UNIT_16 flag.
	 */
	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}

		/* Controller device handle is common to all phys. */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		port_id = sas_iounit_pg0->PhyData[i].Port;
		/* First sighting of this port id: create an hba_port entry. */
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/*
			 * Allocate a virtual_phy object for vSES device.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	/* Pull the HBA's own sas_address/enclosure from device page 0. */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
		 ioc->sas_hba.handle,
		 (u64)ioc->sas_hba.sas_address,
		 ioc->sas_hba.num_phys);

	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		    ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}
6873
6874 /**
6875 * _scsih_expander_add - creating expander object
6876 * @ioc: per adapter object
6877 * @handle: expander handle
6878 *
6879 * Creating expander object, stored in ioc->sas_expander_list.
6880 *
6881 * Return: 0 for success, else error.
6882 */
6883 static int
_scsih_expander_add(struct MPT3SAS_ADAPTER * ioc,u16 handle)6884 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6885 {
6886 struct _sas_node *sas_expander;
6887 struct _enclosure_node *enclosure_dev;
6888 Mpi2ConfigReply_t mpi_reply;
6889 Mpi2ExpanderPage0_t expander_pg0;
6890 Mpi2ExpanderPage1_t expander_pg1;
6891 u32 ioc_status;
6892 u16 parent_handle;
6893 u64 sas_address, sas_address_parent = 0;
6894 int i;
6895 unsigned long flags;
6896 struct _sas_port *mpt3sas_port = NULL;
6897 u8 port_id;
6898
6899 int rc = 0;
6900
6901 if (!handle)
6902 return -1;
6903
6904 if (ioc->shost_recovery || ioc->pci_error_recovery)
6905 return -1;
6906
6907 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6908 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6909 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6910 __FILE__, __LINE__, __func__);
6911 return -1;
6912 }
6913
6914 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6915 MPI2_IOCSTATUS_MASK;
6916 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6917 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6918 __FILE__, __LINE__, __func__);
6919 return -1;
6920 }
6921
6922 /* handle out of order topology events */
6923 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6924 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6925 != 0) {
6926 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6927 __FILE__, __LINE__, __func__);
6928 return -1;
6929 }
6930
6931 port_id = expander_pg0.PhysicalPort;
6932 if (sas_address_parent != ioc->sas_hba.sas_address) {
6933 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6934 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6935 sas_address_parent,
6936 mpt3sas_get_port_by_id(ioc, port_id, 0));
6937 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6938 if (!sas_expander) {
6939 rc = _scsih_expander_add(ioc, parent_handle);
6940 if (rc != 0)
6941 return rc;
6942 }
6943 }
6944
6945 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6946 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6947 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6948 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6949 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6950
6951 if (sas_expander)
6952 return 0;
6953
6954 sas_expander = kzalloc(sizeof(struct _sas_node),
6955 GFP_KERNEL);
6956 if (!sas_expander) {
6957 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6958 __FILE__, __LINE__, __func__);
6959 return -1;
6960 }
6961
6962 sas_expander->handle = handle;
6963 sas_expander->num_phys = expander_pg0.NumPhys;
6964 sas_expander->sas_address_parent = sas_address_parent;
6965 sas_expander->sas_address = sas_address;
6966 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6967 if (!sas_expander->port) {
6968 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6969 __FILE__, __LINE__, __func__);
6970 rc = -1;
6971 goto out_fail;
6972 }
6973
6974 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6975 handle, parent_handle,
6976 (u64)sas_expander->sas_address, sas_expander->num_phys);
6977
6978 if (!sas_expander->num_phys) {
6979 rc = -1;
6980 goto out_fail;
6981 }
6982 sas_expander->phy = kcalloc(sas_expander->num_phys,
6983 sizeof(struct _sas_phy), GFP_KERNEL);
6984 if (!sas_expander->phy) {
6985 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6986 __FILE__, __LINE__, __func__);
6987 rc = -1;
6988 goto out_fail;
6989 }
6990
6991 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6992 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6993 sas_address_parent, sas_expander->port);
6994 if (!mpt3sas_port) {
6995 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6996 __FILE__, __LINE__, __func__);
6997 rc = -1;
6998 goto out_fail;
6999 }
7000 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
7001 sas_expander->rphy = mpt3sas_port->rphy;
7002
7003 for (i = 0 ; i < sas_expander->num_phys ; i++) {
7004 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
7005 &expander_pg1, i, handle))) {
7006 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7007 __FILE__, __LINE__, __func__);
7008 rc = -1;
7009 goto out_fail;
7010 }
7011 sas_expander->phy[i].handle = handle;
7012 sas_expander->phy[i].phy_id = i;
7013 sas_expander->phy[i].port =
7014 mpt3sas_get_port_by_id(ioc, port_id, 0);
7015
7016 if ((mpt3sas_transport_add_expander_phy(ioc,
7017 &sas_expander->phy[i], expander_pg1,
7018 sas_expander->parent_dev))) {
7019 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7020 __FILE__, __LINE__, __func__);
7021 rc = -1;
7022 goto out_fail;
7023 }
7024 }
7025
7026 if (sas_expander->enclosure_handle) {
7027 enclosure_dev =
7028 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7029 sas_expander->enclosure_handle);
7030 if (enclosure_dev)
7031 sas_expander->enclosure_logical_id =
7032 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7033 }
7034
7035 _scsih_expander_node_add(ioc, sas_expander);
7036 return 0;
7037
7038 out_fail:
7039
7040 if (mpt3sas_port)
7041 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
7042 sas_address_parent, sas_expander->port);
7043 kfree(sas_expander);
7044 return rc;
7045 }
7046
7047 /**
7048 * mpt3sas_expander_remove - removing expander object
7049 * @ioc: per adapter object
7050 * @sas_address: expander sas_address
7051 * @port: hba port entry
7052 */
7053 void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)7054 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7055 struct hba_port *port)
7056 {
7057 struct _sas_node *sas_expander;
7058 unsigned long flags;
7059
7060 if (ioc->shost_recovery)
7061 return;
7062
7063 if (!port)
7064 return;
7065
7066 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7067 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7068 sas_address, port);
7069 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7070 if (sas_expander)
7071 _scsih_expander_node_remove(ioc, sas_expander);
7072 }
7073
7074 /**
7075 * _scsih_done - internal SCSI_IO callback handler.
7076 * @ioc: per adapter object
7077 * @smid: system request message index
7078 * @msix_index: MSIX table index supplied by the OS
7079 * @reply: reply message frame(lower 32bit addr)
7080 *
7081 * Callback handler when sending internal generated SCSI_IO.
7082 * The callback index passed is `ioc->scsih_cb_idx`
7083 *
7084 * Return: 1 meaning mf should be freed from _base_interrupt
7085 * 0 means the mf is freed from this function.
7086 */
7087 static u8
_scsih_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)7088 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7089 {
7090 MPI2DefaultReply_t *mpi_reply;
7091
7092 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7093 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7094 return 1;
7095 if (ioc->scsih_cmds.smid != smid)
7096 return 1;
7097 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7098 if (mpi_reply) {
7099 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7100 mpi_reply->MsgLength*4);
7101 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7102 }
7103 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7104 complete(&ioc->scsih_cmds.done);
7105 return 1;
7106 }
7107
7108
7109
7110
7111 #define MPT3_MAX_LUNS (255)
7112
7113
7114 /**
7115 * _scsih_check_access_status - check access flags
7116 * @ioc: per adapter object
7117 * @sas_address: sas address
7118 * @handle: sas device handle
7119 * @access_status: errors returned during discovery of the device
7120 *
7121 * Return: 0 for success, else failure
7122 */
7123 static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,u16 handle,u8 access_status)7124 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7125 u16 handle, u8 access_status)
7126 {
7127 u8 rc = 1;
7128 char *desc = NULL;
7129
7130 switch (access_status) {
7131 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7132 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7133 rc = 0;
7134 break;
7135 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7136 desc = "sata capability failed";
7137 break;
7138 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7139 desc = "sata affiliation conflict";
7140 break;
7141 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7142 desc = "route not addressable";
7143 break;
7144 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7145 desc = "smp error not addressable";
7146 break;
7147 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7148 desc = "device blocked";
7149 break;
7150 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7151 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7152 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7153 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7154 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7155 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7156 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7157 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7158 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7159 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7160 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7161 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7162 desc = "sata initialization failed";
7163 break;
7164 default:
7165 desc = "unknown";
7166 break;
7167 }
7168
7169 if (!rc)
7170 return 0;
7171
7172 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7173 desc, (u64)sas_address, handle);
7174 return rc;
7175 }
7176
7177 /**
7178 * _scsih_check_device - checking device responsiveness
7179 * @ioc: per adapter object
7180 * @parent_sas_address: sas address of parent expander or sas host
7181 * @handle: attached device handle
7182 * @phy_number: phy number
7183 * @link_rate: new link rate
7184 */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;
	struct hba_port *port;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* wide port handling ~ we need only handle device once for the phy that
	 * is matched in sas device page zero
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	/* The sas_device lookup and the handle/enclosure updates below are
	 * done under sas_device_lock; error paths leave through out_unlock
	 * so the lock is always released and the reference dropped.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
	if (!port)
		goto out_unlock;
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);

	if (!sas_device)
		goto out_unlock;

	/* Firmware may have re-assigned the device handle (e.g. across
	 * port enable); migrate the cached handle and refresh enclosure
	 * data when that happens.
	 */
	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
		    sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		if (le16_to_cpu(sas_device_pg0.Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			    sas_device_pg0.EnclosureLevel;
			/* ConnectorName is a fixed 4-byte field; terminate. */
			memcpy(sas_device->connector_name,
			    sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
		    le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
		    sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
			handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	/* Device is responsive: release the lock before unblocking I/O. */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_ublock_io_device(ioc, sas_address, port);

	if (sas_device)
		sas_device_put(sas_device);
	return;

 out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}
7293
7294 /**
7295 * _scsih_add_device - creating sas device object
7296 * @ioc: per adapter object
7297 * @handle: sas device handle
7298 * @phy_num: phy number end device attached to
7299 * @is_pd: is this hidden raid component
7300 *
7301 * Creating end device object, stored in ioc->sas_device_list.
7302 *
7303 * Return: 0 for success, non-zero for failure.
7304 */
7305 static int
_scsih_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 phy_num,u8 is_pd)7306 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7307 u8 is_pd)
7308 {
7309 Mpi2ConfigReply_t mpi_reply;
7310 Mpi2SasDevicePage0_t sas_device_pg0;
7311 struct _sas_device *sas_device;
7312 struct _enclosure_node *enclosure_dev = NULL;
7313 u32 ioc_status;
7314 u64 sas_address;
7315 u32 device_info;
7316 u8 port_id;
7317
7318 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7319 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7320 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7321 __FILE__, __LINE__, __func__);
7322 return -1;
7323 }
7324
7325 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7326 MPI2_IOCSTATUS_MASK;
7327 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7328 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7329 __FILE__, __LINE__, __func__);
7330 return -1;
7331 }
7332
7333 /* check if this is end device */
7334 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7335 if (!(_scsih_is_end_device(device_info)))
7336 return -1;
7337 set_bit(handle, ioc->pend_os_device_add);
7338 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7339
7340 /* check if device is present */
7341 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7342 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7343 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7344 handle);
7345 return -1;
7346 }
7347
7348 /* check if there were any issues with discovery */
7349 if (_scsih_check_access_status(ioc, sas_address, handle,
7350 sas_device_pg0.AccessStatus))
7351 return -1;
7352
7353 port_id = sas_device_pg0.PhysicalPort;
7354 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7355 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7356 if (sas_device) {
7357 clear_bit(handle, ioc->pend_os_device_add);
7358 sas_device_put(sas_device);
7359 return -1;
7360 }
7361
7362 if (sas_device_pg0.EnclosureHandle) {
7363 enclosure_dev =
7364 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7365 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7366 if (enclosure_dev == NULL)
7367 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7368 sas_device_pg0.EnclosureHandle);
7369 }
7370
7371 sas_device = kzalloc(sizeof(struct _sas_device),
7372 GFP_KERNEL);
7373 if (!sas_device) {
7374 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7375 __FILE__, __LINE__, __func__);
7376 return 0;
7377 }
7378
7379 kref_init(&sas_device->refcount);
7380 sas_device->handle = handle;
7381 if (_scsih_get_sas_address(ioc,
7382 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7383 &sas_device->sas_address_parent) != 0)
7384 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7385 __FILE__, __LINE__, __func__);
7386 sas_device->enclosure_handle =
7387 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7388 if (sas_device->enclosure_handle != 0)
7389 sas_device->slot =
7390 le16_to_cpu(sas_device_pg0.Slot);
7391 sas_device->device_info = device_info;
7392 sas_device->sas_address = sas_address;
7393 sas_device->phy = sas_device_pg0.PhyNum;
7394 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7395 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7396 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7397 if (!sas_device->port) {
7398 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7399 __FILE__, __LINE__, __func__);
7400 goto out;
7401 }
7402
7403 if (le16_to_cpu(sas_device_pg0.Flags)
7404 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7405 sas_device->enclosure_level =
7406 sas_device_pg0.EnclosureLevel;
7407 memcpy(sas_device->connector_name,
7408 sas_device_pg0.ConnectorName, 4);
7409 sas_device->connector_name[4] = '\0';
7410 } else {
7411 sas_device->enclosure_level = 0;
7412 sas_device->connector_name[0] = '\0';
7413 }
7414 /* get enclosure_logical_id & chassis_slot*/
7415 sas_device->is_chassis_slot_valid = 0;
7416 if (enclosure_dev) {
7417 sas_device->enclosure_logical_id =
7418 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7419 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7420 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7421 sas_device->is_chassis_slot_valid = 1;
7422 sas_device->chassis_slot =
7423 enclosure_dev->pg0.ChassisSlot;
7424 }
7425 }
7426
7427 /* get device name */
7428 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7429 sas_device->port_type = sas_device_pg0.MaxPortConnections;
7430 ioc_info(ioc,
7431 "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
7432 handle, sas_device->sas_address, sas_device->port_type);
7433
7434 if (ioc->wait_for_discovery_to_complete)
7435 _scsih_sas_device_init_add(ioc, sas_device);
7436 else
7437 _scsih_sas_device_add(ioc, sas_device);
7438
7439 out:
7440 sas_device_put(sas_device);
7441 return 0;
7442 }
7443
7444 /**
7445 * _scsih_remove_device - removing sas device object
7446 * @ioc: per adapter object
7447 * @sas_device: the sas_device object
7448 */
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* IBM branded controllers: turn off the predictive-failure LED
	 * if it was left on for this device.
	 */
	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
	    (sas_device->pfa_led_on)) {
		_scsih_turn_off_pfa_led(ioc, sas_device);
		sas_device->pfa_led_on = 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Mark the target deleted and invalidate its handle before
	 * unblocking I/O, so new commands are rejected.
	 */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address,
		    sas_device->port);
		sas_target_priv_data->handle =
		     MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* Hidden RAID components are not registered with the transport
	 * layer, so skip the transport removal for them.
	 */
	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent,
		    sas_device->port);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}
7496
7497 /**
7498 * _scsih_sas_topology_change_event_debug - debug for topology event
7499 * @ioc: per adapter object
7500 * @event_data: event data payload
7501 * Context: user.
7502 */
7503 static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)7504 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7505 Mpi2EventDataSasTopologyChangeList_t *event_data)
7506 {
7507 int i;
7508 u16 handle;
7509 u16 reason_code;
7510 u8 phy_number;
7511 char *status_str = NULL;
7512 u8 link_rate, prev_link_rate;
7513
7514 switch (event_data->ExpStatus) {
7515 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7516 status_str = "add";
7517 break;
7518 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7519 status_str = "remove";
7520 break;
7521 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7522 case 0:
7523 status_str = "responding";
7524 break;
7525 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7526 status_str = "remove delay";
7527 break;
7528 default:
7529 status_str = "unknown status";
7530 break;
7531 }
7532 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7533 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7534 "start_phy(%02d), count(%d)\n",
7535 le16_to_cpu(event_data->ExpanderDevHandle),
7536 le16_to_cpu(event_data->EnclosureHandle),
7537 event_data->StartPhyNum, event_data->NumEntries);
7538 for (i = 0; i < event_data->NumEntries; i++) {
7539 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7540 if (!handle)
7541 continue;
7542 phy_number = event_data->StartPhyNum + i;
7543 reason_code = event_data->PHY[i].PhyStatus &
7544 MPI2_EVENT_SAS_TOPO_RC_MASK;
7545 switch (reason_code) {
7546 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7547 status_str = "target add";
7548 break;
7549 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7550 status_str = "target remove";
7551 break;
7552 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7553 status_str = "delay target remove";
7554 break;
7555 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7556 status_str = "link rate change";
7557 break;
7558 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7559 status_str = "target responding";
7560 break;
7561 default:
7562 status_str = "unknown";
7563 break;
7564 }
7565 link_rate = event_data->PHY[i].LinkRate >> 4;
7566 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7567 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7568 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7569 handle, status_str, link_rate, prev_link_rate);
7570
7571 }
7572 }
7573
/**
 * _scsih_sas_topology_change_event - handle topology changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Return: 0 always.
 */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	struct hba_port *port;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	/* No topology processing while the host is resetting or going away. */
	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	/* Bring the HBA's own phy bookkeeping up to date before walking
	 * the event entries.
	 */
	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	/* Resolve the parent of the phys in this event under sas_node_lock:
	 * either a known expander or a direct-attached HBA phy.  Bail out
	 * when neither matches.
	 */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
		port = sas_expander->port;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* Re-check abort conditions on every iteration; the event
		 * may be invalidated while this loop runs.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
			    ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* A vacant phy carries no device, unless the entry reports
		 * a device removal.
		 */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
			continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		/* LinkRate packs new rate in the high nibble, old rate in
		 * the low nibble.
		 */
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			/* Fall into the add path only when this handle is
			 * still pending an OS-level device add.
			 */
			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			fallthrough;

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address, port);

	return 0;
}
7709
/**
 * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
7716 static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)7717 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7718 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7719 {
7720 char *reason_str = NULL;
7721
7722 switch (event_data->ReasonCode) {
7723 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7724 reason_str = "smart data";
7725 break;
7726 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7727 reason_str = "unsupported device discovered";
7728 break;
7729 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7730 reason_str = "internal device reset";
7731 break;
7732 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7733 reason_str = "internal task abort";
7734 break;
7735 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7736 reason_str = "internal task abort set";
7737 break;
7738 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7739 reason_str = "internal clear task set";
7740 break;
7741 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7742 reason_str = "internal query task";
7743 break;
7744 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7745 reason_str = "sata init failure";
7746 break;
7747 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7748 reason_str = "internal device reset complete";
7749 break;
7750 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7751 reason_str = "internal task abort complete";
7752 break;
7753 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7754 reason_str = "internal async notification";
7755 break;
7756 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7757 reason_str = "expander reduced functionality";
7758 break;
7759 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7760 reason_str = "expander reduced functionality complete";
7761 break;
7762 default:
7763 reason_str = "unknown reason";
7764 break;
7765 }
7766 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7767 reason_str, le16_to_cpu(event_data->DevHandle),
7768 (u64)le64_to_cpu(event_data->SASAddress),
7769 le16_to_cpu(event_data->TaskTag));
7770 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7771 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7772 event_data->ASC, event_data->ASCQ);
7773 pr_cont("\n");
7774 }
7775
7776 /**
7777 * _scsih_sas_device_status_change_event - handle device status change
7778 * @ioc: per adapter object
7779 * @event_data: The fw event
7780 * Context: user.
7781 */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;

	/* In MPI Revision K (0xC), the internal device reset complete was
	 * implemented, so avoid setting tm_busy flag for older firmware.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;

	/* Only the internal-device-reset start/complete pair is acted on;
	 * every other reason code is informational here.
	 */
	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	   event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	/* Look up the device under sas_device_lock; the lookup takes a
	 * reference that is dropped at "out".
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address,
	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));

	if (!sas_device || !sas_device->starget)
		goto out;

	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* tm_busy is set while the firmware's internal device reset is in
	 * flight and cleared when the completion event arrives.
	 */
	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		ioc_info(ioc,
		    "%s tm_busy flag for handle(0x%04x)\n",
		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
		    target_priv_data->handle);

 out:
	/* Drop the reference taken by the lookup above, if any. */
	if (sas_device)
		sas_device_put(sas_device);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
7834
7835
7836 /**
7837 * _scsih_check_pcie_access_status - check access flags
7838 * @ioc: per adapter object
7839 * @wwid: wwid
7840 * @handle: sas device handle
7841 * @access_status: errors returned during discovery of the device
7842 *
7843 * Return: 0 for success, else failure
7844 */
7845 static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER * ioc,u64 wwid,u16 handle,u8 access_status)7846 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7847 u16 handle, u8 access_status)
7848 {
7849 u8 rc = 1;
7850 char *desc = NULL;
7851
7852 switch (access_status) {
7853 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7854 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7855 rc = 0;
7856 break;
7857 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7858 desc = "PCIe device capability failed";
7859 break;
7860 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7861 desc = "PCIe device blocked";
7862 ioc_info(ioc,
7863 "Device with Access Status (%s): wwid(0x%016llx), "
7864 "handle(0x%04x)\n ll only be added to the internal list",
7865 desc, (u64)wwid, handle);
7866 rc = 0;
7867 break;
7868 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7869 desc = "PCIe device mem space access failed";
7870 break;
7871 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7872 desc = "PCIe device unsupported";
7873 break;
7874 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7875 desc = "PCIe device MSIx Required";
7876 break;
7877 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7878 desc = "PCIe device init fail max";
7879 break;
7880 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7881 desc = "PCIe device status unknown";
7882 break;
7883 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7884 desc = "nvme ready timeout";
7885 break;
7886 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7887 desc = "nvme device configuration unsupported";
7888 break;
7889 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7890 desc = "nvme identify failed";
7891 break;
7892 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7893 desc = "nvme qconfig failed";
7894 break;
7895 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7896 desc = "nvme qcreation failed";
7897 break;
7898 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7899 desc = "nvme eventcfg failed";
7900 break;
7901 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7902 desc = "nvme get feature stat failed";
7903 break;
7904 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7905 desc = "nvme idle timeout";
7906 break;
7907 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7908 desc = "nvme failure status";
7909 break;
7910 default:
7911 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7912 access_status, (u64)wwid, handle);
7913 return rc;
7914 }
7915
7916 if (!rc)
7917 return rc;
7918
7919 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7920 desc, (u64)wwid, handle);
7921 return rc;
7922 }
7923
7924 /**
7925 * _scsih_pcie_device_remove_from_sml - removing pcie device
7926 * from SML and free up associated memory
7927 * @ioc: per adapter object
7928 * @pcie_device: the pcie_device object
7929 */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* Entry trace: identify the device, plus enclosure/connector info
	 * when available.
	 */
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Mark the target deleted and unblock any queued I/O before the
	 * handle is invalidated, so outstanding commands can complete.
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* Blocked devices were never exposed to the SCSI midlayer, so only
	 * detach the scsi_target for devices that actually were.
	 */
	if (pcie_device->starget && (pcie_device->access_status !=
				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Free the cached serial number string owned by this device. */
	kfree(pcie_device->serial_number);
}
7993
7994
7995 /**
7996 * _scsih_pcie_check_device - checking device responsiveness
7997 * @ioc: per adapter object
7998 * @handle: attached device handle
7999 */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	u32 ioc_status;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	/* Re-read PCIe Device Page 0 for this handle; silently bail on any
	 * config-page failure.
	 */
	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
		return;

	/* Match by WWID (stable across handle changes); lookup takes a
	 * reference dropped before returning.
	 */
	wwid = le64_to_cpu(pcie_device_pg0.WWID);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return;
	}

	/* Firmware handle changed (e.g. after reset): resync the cached
	 * handle and enclosure data.
	 * NOTE(review): starget is dereferenced below without a NULL check;
	 * presumably a device whose handle changed always has a scsi_target
	 * attached — confirm against the add/remove paths.
	 */
	if (unlikely(pcie_device->handle != handle)) {
		starget = pcie_device->starget;
		sas_target_priv_data = starget->hostdata;
		pcie_device->access_status = pcie_device_pg0.AccessStatus;
		starget_printk(KERN_INFO, starget,
		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
		    pcie_device->handle, handle);
		sas_target_priv_data->handle = handle;
		pcie_device->handle = handle;

		if (le32_to_cpu(pcie_device_pg0.Flags) &
		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
			pcie_device->enclosure_level =
			    pcie_device_pg0.EnclosureLevel;
			memcpy(&pcie_device->connector_name[0],
			    &pcie_device_pg0.ConnectorName[0], 4);
		} else {
			pcie_device->enclosure_level = 0;
			pcie_device->connector_name[0] = '\0';
		}
	}

	/* check if device is present */
	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
			 handle);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
	    pcie_device_pg0.AccessStatus)) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	pcie_device_put(pcie_device);

	/* Device is present and healthy: let queued I/O flow again. */
	_scsih_ublock_io_device(ioc, wwid, NULL);

	return;
}
8082
8083 /**
8084 * _scsih_pcie_add_device - creating pcie device object
8085 * @ioc: per adapter object
8086 * @handle: pcie device handle
8087 *
8088 * Creating end device object, stored in ioc->pcie_device_list.
8089 *
8090 * Return: 1 means queue the event later, 0 means complete the event
8091 */
8092 static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)8093 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8094 {
8095 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8096 Mpi26PCIeDevicePage2_t pcie_device_pg2;
8097 Mpi2ConfigReply_t mpi_reply;
8098 struct _pcie_device *pcie_device;
8099 struct _enclosure_node *enclosure_dev;
8100 u32 ioc_status;
8101 u64 wwid;
8102
8103 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8104 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8105 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8106 __FILE__, __LINE__, __func__);
8107 return 0;
8108 }
8109 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8110 MPI2_IOCSTATUS_MASK;
8111 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8112 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8113 __FILE__, __LINE__, __func__);
8114 return 0;
8115 }
8116
8117 set_bit(handle, ioc->pend_os_device_add);
8118 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8119
8120 /* check if device is present */
8121 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8122 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8123 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8124 handle);
8125 return 0;
8126 }
8127
8128 /* check if there were any issues with discovery */
8129 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8130 pcie_device_pg0.AccessStatus))
8131 return 0;
8132
8133 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8134 (pcie_device_pg0.DeviceInfo))))
8135 return 0;
8136
8137 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8138 if (pcie_device) {
8139 clear_bit(handle, ioc->pend_os_device_add);
8140 pcie_device_put(pcie_device);
8141 return 0;
8142 }
8143
8144 /* PCIe Device Page 2 contains read-only information about a
8145 * specific NVMe device; therefore, this page is only
8146 * valid for NVMe devices and skip for pcie devices of type scsi.
8147 */
8148 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8149 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8150 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8151 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8152 handle)) {
8153 ioc_err(ioc,
8154 "failure at %s:%d/%s()!\n", __FILE__,
8155 __LINE__, __func__);
8156 return 0;
8157 }
8158
8159 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8160 MPI2_IOCSTATUS_MASK;
8161 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8162 ioc_err(ioc,
8163 "failure at %s:%d/%s()!\n", __FILE__,
8164 __LINE__, __func__);
8165 return 0;
8166 }
8167 }
8168
8169 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8170 if (!pcie_device) {
8171 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8172 __FILE__, __LINE__, __func__);
8173 return 0;
8174 }
8175
8176 kref_init(&pcie_device->refcount);
8177 pcie_device->id = ioc->pcie_target_id++;
8178 pcie_device->channel = PCIE_CHANNEL;
8179 pcie_device->handle = handle;
8180 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8181 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8182 pcie_device->wwid = wwid;
8183 pcie_device->port_num = pcie_device_pg0.PortNum;
8184 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8185 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8186
8187 pcie_device->enclosure_handle =
8188 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8189 if (pcie_device->enclosure_handle != 0)
8190 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8191
8192 if (le32_to_cpu(pcie_device_pg0.Flags) &
8193 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8194 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8195 memcpy(&pcie_device->connector_name[0],
8196 &pcie_device_pg0.ConnectorName[0], 4);
8197 } else {
8198 pcie_device->enclosure_level = 0;
8199 pcie_device->connector_name[0] = '\0';
8200 }
8201
8202 /* get enclosure_logical_id */
8203 if (pcie_device->enclosure_handle) {
8204 enclosure_dev =
8205 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8206 pcie_device->enclosure_handle);
8207 if (enclosure_dev)
8208 pcie_device->enclosure_logical_id =
8209 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8210 }
8211 /* TODO -- Add device name once FW supports it */
8212 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8213 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8214 pcie_device->nvme_mdts =
8215 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8216 pcie_device->shutdown_latency =
8217 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8218 /*
8219 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8220 * if drive's RTD3 Entry Latency is greater then IOC's
8221 * max_shutdown_latency.
8222 */
8223 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8224 ioc->max_shutdown_latency =
8225 pcie_device->shutdown_latency;
8226 if (pcie_device_pg2.ControllerResetTO)
8227 pcie_device->reset_timeout =
8228 pcie_device_pg2.ControllerResetTO;
8229 else
8230 pcie_device->reset_timeout = 30;
8231 } else
8232 pcie_device->reset_timeout = 30;
8233
8234 if (ioc->wait_for_discovery_to_complete)
8235 _scsih_pcie_device_init_add(ioc, pcie_device);
8236 else
8237 _scsih_pcie_device_add(ioc, pcie_device);
8238
8239 pcie_device_put(pcie_device);
8240 return 0;
8241 }
8242
8243 /**
8244 * _scsih_pcie_topology_change_event_debug - debug for topology
8245 * event
8246 * @ioc: per adapter object
8247 * @event_data: event data payload
8248 * Context: user.
8249 */
8250 static void
_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)8251 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8252 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8253 {
8254 int i;
8255 u16 handle;
8256 u16 reason_code;
8257 u8 port_number;
8258 char *status_str = NULL;
8259 u8 link_rate, prev_link_rate;
8260
8261 switch (event_data->SwitchStatus) {
8262 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8263 status_str = "add";
8264 break;
8265 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8266 status_str = "remove";
8267 break;
8268 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8269 case 0:
8270 status_str = "responding";
8271 break;
8272 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8273 status_str = "remove delay";
8274 break;
8275 default:
8276 status_str = "unknown status";
8277 break;
8278 }
8279 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8280 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8281 "start_port(%02d), count(%d)\n",
8282 le16_to_cpu(event_data->SwitchDevHandle),
8283 le16_to_cpu(event_data->EnclosureHandle),
8284 event_data->StartPortNum, event_data->NumEntries);
8285 for (i = 0; i < event_data->NumEntries; i++) {
8286 handle =
8287 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8288 if (!handle)
8289 continue;
8290 port_number = event_data->StartPortNum + i;
8291 reason_code = event_data->PortEntry[i].PortStatus;
8292 switch (reason_code) {
8293 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8294 status_str = "target add";
8295 break;
8296 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8297 status_str = "target remove";
8298 break;
8299 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8300 status_str = "delay target remove";
8301 break;
8302 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8303 status_str = "link rate change";
8304 break;
8305 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8306 status_str = "target responding";
8307 break;
8308 default:
8309 status_str = "unknown";
8310 break;
8311 }
8312 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8313 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8314 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8315 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8316 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8317 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8318 handle, status_str, link_rate, prev_link_rate);
8319 }
8320 }
8321
8322 /**
8323 * _scsih_pcie_topology_change_event - handle PCIe topology
8324 * changes
8325 * @ioc: per adapter object
8326 * @fw_event: The fw_event_work object
8327 * Context: user.
8328 *
8329 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* No topology processing while the host is resetting or going away. */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* Re-check abort conditions on every iteration. */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime. Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			/* Rewrite this entry's status in place so it is
			 * handled as a device add by the next case.
			 */
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}
8435
/**
 * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
8442 static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeDeviceStatusChange_t * event_data)8443 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8444 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8445 {
8446 char *reason_str = NULL;
8447
8448 switch (event_data->ReasonCode) {
8449 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8450 reason_str = "smart data";
8451 break;
8452 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8453 reason_str = "unsupported device discovered";
8454 break;
8455 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8456 reason_str = "internal device reset";
8457 break;
8458 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8459 reason_str = "internal task abort";
8460 break;
8461 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8462 reason_str = "internal task abort set";
8463 break;
8464 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8465 reason_str = "internal clear task set";
8466 break;
8467 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8468 reason_str = "internal query task";
8469 break;
8470 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8471 reason_str = "device init failure";
8472 break;
8473 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8474 reason_str = "internal device reset complete";
8475 break;
8476 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8477 reason_str = "internal task abort complete";
8478 break;
8479 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8480 reason_str = "internal async notification";
8481 break;
8482 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8483 reason_str = "pcie hot reset failed";
8484 break;
8485 default:
8486 reason_str = "unknown reason";
8487 break;
8488 }
8489
8490 ioc_info(ioc, "PCIE device status change: (%s)\n"
8491 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8492 reason_str, le16_to_cpu(event_data->DevHandle),
8493 (u64)le64_to_cpu(event_data->WWID),
8494 le16_to_cpu(event_data->TaskTag));
8495 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8496 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8497 event_data->ASC, event_data->ASCQ);
8498 pr_cont("\n");
8499 }
8500
8501 /**
8502 * _scsih_pcie_device_status_change_event - handle device status
8503 * change
8504 * @ioc: per adapter object
8505 * @fw_event: The fw_event_work object
8506 * Context: user.
8507 */
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_device_status_change_event_debug(ioc,
							     event_data);

	/* Only the internal-device-reset start/complete pair is acted on;
	 * every other reason code is informational here.
	 */
	if (event_data->ReasonCode !=
		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
		event_data->ReasonCode !=
		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	/* Look up the device under pcie_device_lock; the lookup takes a
	 * reference that is dropped at "out".
	 */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	wwid = le64_to_cpu(event_data->WWID);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device || !pcie_device->starget)
		goto out;

	target_priv_data = pcie_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* tm_busy is set while the firmware's internal device reset is in
	 * flight and cleared when the completion event arrives.
	 */
	if (event_data->ReasonCode ==
		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;
out:
	/* Drop the reference taken by the lookup above, if any. */
	if (pcie_device)
		pcie_device_put(pcie_device);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
8550
8551 /**
8552 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8553 * event
8554 * @ioc: per adapter object
8555 * @event_data: event data payload
8556 * Context: user.
8557 */
8558 static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasEnclDevStatusChange_t * event_data)8559 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8560 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8561 {
8562 char *reason_str = NULL;
8563
8564 switch (event_data->ReasonCode) {
8565 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8566 reason_str = "enclosure add";
8567 break;
8568 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8569 reason_str = "enclosure remove";
8570 break;
8571 default:
8572 reason_str = "unknown reason";
8573 break;
8574 }
8575
8576 ioc_info(ioc, "enclosure status change: (%s)\n"
8577 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8578 reason_str,
8579 le16_to_cpu(event_data->EnclosureHandle),
8580 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8581 le16_to_cpu(event_data->StartSlot));
8582 }
8583
8584 /**
8585 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8586 * @ioc: per adapter object
8587 * @fw_event: The fw_event_work object
8588 * Context: user.
8589 */
8590 static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8591 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8592 struct fw_event_work *fw_event)
8593 {
8594 Mpi2ConfigReply_t mpi_reply;
8595 struct _enclosure_node *enclosure_dev = NULL;
8596 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8597 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8598 int rc;
8599 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8600
8601 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8602 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8603 (Mpi2EventDataSasEnclDevStatusChange_t *)
8604 fw_event->event_data);
8605 if (ioc->shost_recovery)
8606 return;
8607
8608 if (enclosure_handle)
8609 enclosure_dev =
8610 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8611 enclosure_handle);
8612 switch (event_data->ReasonCode) {
8613 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8614 if (!enclosure_dev) {
8615 enclosure_dev =
8616 kzalloc(sizeof(struct _enclosure_node),
8617 GFP_KERNEL);
8618 if (!enclosure_dev) {
8619 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8620 __FILE__, __LINE__, __func__);
8621 return;
8622 }
8623 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8624 &enclosure_dev->pg0,
8625 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8626 enclosure_handle);
8627
8628 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8629 MPI2_IOCSTATUS_MASK)) {
8630 kfree(enclosure_dev);
8631 return;
8632 }
8633
8634 list_add_tail(&enclosure_dev->list,
8635 &ioc->enclosure_list);
8636 }
8637 break;
8638 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8639 if (enclosure_dev) {
8640 list_del(&enclosure_dev->list);
8641 kfree(enclosure_dev);
8642 }
8643 break;
8644 default:
8645 break;
8646 }
8647 }
8648
/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * A broadcast primitive AEN indicates outstanding I/O on the SAS domain
 * may have been disturbed.  Every active SCSI IO is checked with a
 * QUERY_TASK task management request; IOs the IOC/target no longer owns
 * are torn down with ABORT_TASK.  The whole scan restarts when another
 * broadcast AEN arrives while it is running, bounded by retry caps.
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	/* tm_cmds is the shared internal TM slot; serialize its use */
	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	/* quiesce new I/O while the outstanding IOs are audited */
	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		/* full-scan retry cap: give up rather than loop forever */
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	/* walk every possible outstanding SCSI IO by smid */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		/* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* drop the lock: issuing a TM sleeps until the reply */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			/* reacquire the lock and restart the whole scan */
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
			    "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
			    ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			/* per-IO abort retry cap; restart the full scan */
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		/*
		 * NOTE(review): cb_idx != 0xFF appears to mean the command is
		 * still outstanding, so the abort is retried - confirm against
		 * the scsiio_tracker free path.
		 */
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	if (ioc->broadcast_aen_pending) {
		/* another broadcast AEN arrived mid-scan: audit again */
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}
8813
8814 /**
8815 * _scsih_sas_discovery_event - handle discovery events
8816 * @ioc: per adapter object
8817 * @fw_event: The fw_event_work object
8818 * Context: user.
8819 */
8820 static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8821 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8822 struct fw_event_work *fw_event)
8823 {
8824 Mpi2EventDataSasDiscovery_t *event_data =
8825 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8826
8827 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8828 ioc_info(ioc, "discovery event: (%s)",
8829 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8830 "start" : "stop");
8831 if (event_data->DiscoveryStatus)
8832 pr_cont("discovery_status(0x%08x)",
8833 le32_to_cpu(event_data->DiscoveryStatus));
8834 pr_cont("\n");
8835 }
8836
8837 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8838 !ioc->sas_hba.num_phys) {
8839 if (disable_discovery > 0 && ioc->shost_recovery) {
8840 /* Wait for the reset to complete */
8841 while (ioc->shost_recovery)
8842 ssleep(1);
8843 }
8844 _scsih_sas_host_add(ioc);
8845 }
8846 }
8847
8848 /**
8849 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8850 * events
8851 * @ioc: per adapter object
8852 * @fw_event: The fw_event_work object
8853 * Context: user.
8854 */
8855 static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8856 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8857 struct fw_event_work *fw_event)
8858 {
8859 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8860 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8861
8862 switch (event_data->ReasonCode) {
8863 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8864 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8865 le16_to_cpu(event_data->DevHandle),
8866 (u64)le64_to_cpu(event_data->SASAddress),
8867 event_data->PhysicalPort);
8868 break;
8869 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8870 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8871 le16_to_cpu(event_data->DevHandle),
8872 (u64)le64_to_cpu(event_data->SASAddress),
8873 event_data->PhysicalPort);
8874 break;
8875 default:
8876 break;
8877 }
8878 }
8879
8880 /**
8881 * _scsih_pcie_enumeration_event - handle enumeration events
8882 * @ioc: per adapter object
8883 * @fw_event: The fw_event_work object
8884 * Context: user.
8885 */
8886 static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8887 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8888 struct fw_event_work *fw_event)
8889 {
8890 Mpi26EventDataPCIeEnumeration_t *event_data =
8891 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8892
8893 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8894 return;
8895
8896 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8897 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8898 "started" : "completed",
8899 event_data->Flags);
8900 if (event_data->EnumerationStatus)
8901 pr_cont("enumeration_status(0x%08x)",
8902 le32_to_cpu(event_data->EnumerationStatus));
8903 pr_cont("\n");
8904 }
8905
/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Sends a MPI2_RAID_ACTION_PHYSDISK_HIDDEN RAID-action request to the
 * firmware through the shared scsih_cmds internal command slot and
 * waits for the reply.
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* not applicable to MPI 2.0 controllers */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	/* scsih_cmds is a single shared slot; serialize access to it */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	/* build the RAID_ACTION request in the message frame */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	/* fire the request and wait up to 10 seconds for completion */
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/*
		 * Timed out.  NOTE(review): mpt3sas_check_cmd_timeout is
		 * presumably a macro that writes its last argument
		 * (issue_reset) to request a host reset - confirm in
		 * mpt3sas_base.h.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		/* log info is only meaningful when the firmware flags it */
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	/* release the internal command slot before any reset is issued */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}
8996
8997 /**
8998 * _scsih_reprobe_lun - reprobing lun
8999 * @sdev: scsi device struct
9000 * @no_uld_attach: sdev->no_uld_attach flag setting
9001 *
9002 **/
9003 static void
_scsih_reprobe_lun(struct scsi_device * sdev,void * no_uld_attach)9004 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
9005 {
9006 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
9007 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
9008 sdev->no_uld_attach ? "hiding" : "exposing");
9009 WARN_ON(scsi_device_reprobe(sdev));
9010 }
9011
9012 /**
9013 * _scsih_sas_volume_add - add new volume
9014 * @ioc: per adapter object
9015 * @element: IR config element data
9016 * Context: user.
9017 */
9018 static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9019 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
9020 Mpi2EventIrConfigElement_t *element)
9021 {
9022 struct _raid_device *raid_device;
9023 unsigned long flags;
9024 u64 wwid;
9025 u16 handle = le16_to_cpu(element->VolDevHandle);
9026 int rc;
9027
9028 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9029 if (!wwid) {
9030 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9031 __FILE__, __LINE__, __func__);
9032 return;
9033 }
9034
9035 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9036 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
9037 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9038
9039 if (raid_device)
9040 return;
9041
9042 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9043 if (!raid_device) {
9044 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9045 __FILE__, __LINE__, __func__);
9046 return;
9047 }
9048
9049 raid_device->id = ioc->sas_id++;
9050 raid_device->channel = RAID_CHANNEL;
9051 raid_device->handle = handle;
9052 raid_device->wwid = wwid;
9053 _scsih_raid_device_add(ioc, raid_device);
9054 if (!ioc->wait_for_discovery_to_complete) {
9055 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9056 raid_device->id, 0);
9057 if (rc)
9058 _scsih_raid_device_remove(ioc, raid_device);
9059 } else {
9060 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9061 _scsih_determine_boot_device(ioc, raid_device, 1);
9062 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9063 }
9064 }
9065
9066 /**
9067 * _scsih_sas_volume_delete - delete volume
9068 * @ioc: per adapter object
9069 * @handle: volume device handle
9070 * Context: user.
9071 */
9072 static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER * ioc,u16 handle)9073 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
9074 {
9075 struct _raid_device *raid_device;
9076 unsigned long flags;
9077 struct MPT3SAS_TARGET *sas_target_priv_data;
9078 struct scsi_target *starget = NULL;
9079
9080 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9081 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9082 if (raid_device) {
9083 if (raid_device->starget) {
9084 starget = raid_device->starget;
9085 sas_target_priv_data = starget->hostdata;
9086 sas_target_priv_data->deleted = 1;
9087 }
9088 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9089 raid_device->handle, (u64)raid_device->wwid);
9090 list_del(&raid_device->list);
9091 kfree(raid_device);
9092 }
9093 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9094 if (starget)
9095 scsi_remove_target(&starget->dev);
9096 }
9097
9098 /**
9099 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
9100 * @ioc: per adapter object
9101 * @element: IR config element data
9102 * Context: user.
9103 */
9104 static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9105 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9106 Mpi2EventIrConfigElement_t *element)
9107 {
9108 struct _sas_device *sas_device;
9109 struct scsi_target *starget = NULL;
9110 struct MPT3SAS_TARGET *sas_target_priv_data;
9111 unsigned long flags;
9112 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9113
9114 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9115 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9116 if (sas_device) {
9117 sas_device->volume_handle = 0;
9118 sas_device->volume_wwid = 0;
9119 clear_bit(handle, ioc->pd_handles);
9120 if (sas_device->starget && sas_device->starget->hostdata) {
9121 starget = sas_device->starget;
9122 sas_target_priv_data = starget->hostdata;
9123 sas_target_priv_data->flags &=
9124 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9125 }
9126 }
9127 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9128 if (!sas_device)
9129 return;
9130
9131 /* exposing raid component */
9132 if (starget)
9133 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9134
9135 sas_device_put(sas_device);
9136 }
9137
9138 /**
9139 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9140 * @ioc: per adapter object
9141 * @element: IR config element data
9142 * Context: user.
9143 */
9144 static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9145 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9146 Mpi2EventIrConfigElement_t *element)
9147 {
9148 struct _sas_device *sas_device;
9149 struct scsi_target *starget = NULL;
9150 struct MPT3SAS_TARGET *sas_target_priv_data;
9151 unsigned long flags;
9152 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9153 u16 volume_handle = 0;
9154 u64 volume_wwid = 0;
9155
9156 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9157 if (volume_handle)
9158 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9159 &volume_wwid);
9160
9161 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9162 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9163 if (sas_device) {
9164 set_bit(handle, ioc->pd_handles);
9165 if (sas_device->starget && sas_device->starget->hostdata) {
9166 starget = sas_device->starget;
9167 sas_target_priv_data = starget->hostdata;
9168 sas_target_priv_data->flags |=
9169 MPT_TARGET_FLAGS_RAID_COMPONENT;
9170 sas_device->volume_handle = volume_handle;
9171 sas_device->volume_wwid = volume_wwid;
9172 }
9173 }
9174 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9175 if (!sas_device)
9176 return;
9177
9178 /* hiding raid component */
9179 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9180
9181 if (starget)
9182 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9183
9184 sas_device_put(sas_device);
9185 }
9186
9187 /**
9188 * _scsih_sas_pd_delete - delete pd component
9189 * @ioc: per adapter object
9190 * @element: IR config element data
9191 * Context: user.
9192 */
9193 static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9194 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9195 Mpi2EventIrConfigElement_t *element)
9196 {
9197 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9198
9199 _scsih_device_remove_by_handle(ioc, handle);
9200 }
9201
9202 /**
9203 * _scsih_sas_pd_add - remove pd component
9204 * @ioc: per adapter object
9205 * @element: IR config element data
9206 * Context: user.
9207 */
9208 static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9209 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9210 Mpi2EventIrConfigElement_t *element)
9211 {
9212 struct _sas_device *sas_device;
9213 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9214 Mpi2ConfigReply_t mpi_reply;
9215 Mpi2SasDevicePage0_t sas_device_pg0;
9216 u32 ioc_status;
9217 u64 sas_address;
9218 u16 parent_handle;
9219
9220 set_bit(handle, ioc->pd_handles);
9221
9222 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9223 if (sas_device) {
9224 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9225 sas_device_put(sas_device);
9226 return;
9227 }
9228
9229 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9230 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9231 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9232 __FILE__, __LINE__, __func__);
9233 return;
9234 }
9235
9236 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9237 MPI2_IOCSTATUS_MASK;
9238 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9239 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9240 __FILE__, __LINE__, __func__);
9241 return;
9242 }
9243
9244 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9245 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9246 mpt3sas_transport_update_links(ioc, sas_address, handle,
9247 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9248 mpt3sas_get_port_by_id(ioc,
9249 sas_device_pg0.PhysicalPort, 0));
9250
9251 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9252 _scsih_add_device(ioc, handle, 0, 1);
9253 }
9254
9255 /**
9256 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9257 * @ioc: per adapter object
9258 * @event_data: event data payload
9259 * Context: user.
9260 */
9261 static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrConfigChangeList_t * event_data)9262 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9263 Mpi2EventDataIrConfigChangeList_t *event_data)
9264 {
9265 Mpi2EventIrConfigElement_t *element;
9266 u8 element_type;
9267 int i;
9268 char *reason_str = NULL, *element_str = NULL;
9269
9270 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9271
9272 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9273 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9274 "foreign" : "native",
9275 event_data->NumElements);
9276 for (i = 0; i < event_data->NumElements; i++, element++) {
9277 switch (element->ReasonCode) {
9278 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9279 reason_str = "add";
9280 break;
9281 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9282 reason_str = "remove";
9283 break;
9284 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9285 reason_str = "no change";
9286 break;
9287 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9288 reason_str = "hide";
9289 break;
9290 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9291 reason_str = "unhide";
9292 break;
9293 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9294 reason_str = "volume_created";
9295 break;
9296 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9297 reason_str = "volume_deleted";
9298 break;
9299 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9300 reason_str = "pd_created";
9301 break;
9302 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9303 reason_str = "pd_deleted";
9304 break;
9305 default:
9306 reason_str = "unknown reason";
9307 break;
9308 }
9309 element_type = le16_to_cpu(element->ElementFlags) &
9310 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9311 switch (element_type) {
9312 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9313 element_str = "volume";
9314 break;
9315 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9316 element_str = "phys disk";
9317 break;
9318 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9319 element_str = "hot spare";
9320 break;
9321 default:
9322 element_str = "unknown element";
9323 break;
9324 }
9325 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9326 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9327 reason_str, le16_to_cpu(element->VolDevHandle),
9328 le16_to_cpu(element->PhysDiskDevHandle),
9329 element->PhysDiskNum);
9330 }
9331 }
9332
9333 /**
9334 * _scsih_sas_ir_config_change_event - handle ir configuration change events
9335 * @ioc: per adapter object
9336 * @fw_event: The fw_event_work object
9337 * Context: user.
9338 */
9339 static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9340 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9341 struct fw_event_work *fw_event)
9342 {
9343 Mpi2EventIrConfigElement_t *element;
9344 int i;
9345 u8 foreign_config;
9346 Mpi2EventDataIrConfigChangeList_t *event_data =
9347 (Mpi2EventDataIrConfigChangeList_t *)
9348 fw_event->event_data;
9349
9350 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9351 (!ioc->hide_ir_msg))
9352 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9353
9354 foreign_config = (le32_to_cpu(event_data->Flags) &
9355 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9356
9357 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9358 if (ioc->shost_recovery &&
9359 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9360 for (i = 0; i < event_data->NumElements; i++, element++) {
9361 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9362 _scsih_ir_fastpath(ioc,
9363 le16_to_cpu(element->PhysDiskDevHandle),
9364 element->PhysDiskNum);
9365 }
9366 return;
9367 }
9368
9369 for (i = 0; i < event_data->NumElements; i++, element++) {
9370
9371 switch (element->ReasonCode) {
9372 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9373 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9374 if (!foreign_config)
9375 _scsih_sas_volume_add(ioc, element);
9376 break;
9377 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9378 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9379 if (!foreign_config)
9380 _scsih_sas_volume_delete(ioc,
9381 le16_to_cpu(element->VolDevHandle));
9382 break;
9383 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9384 if (!ioc->is_warpdrive)
9385 _scsih_sas_pd_hide(ioc, element);
9386 break;
9387 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9388 if (!ioc->is_warpdrive)
9389 _scsih_sas_pd_expose(ioc, element);
9390 break;
9391 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9392 if (!ioc->is_warpdrive)
9393 _scsih_sas_pd_add(ioc, element);
9394 break;
9395 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9396 if (!ioc->is_warpdrive)
9397 _scsih_sas_pd_delete(ioc, element);
9398 break;
9399 }
9400 }
9401 }
9402
9403 /**
9404 * _scsih_sas_ir_volume_event - IR volume event
9405 * @ioc: per adapter object
9406 * @fw_event: The fw_event_work object
9407 * Context: user.
9408 */
9409 static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9410 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9411 struct fw_event_work *fw_event)
9412 {
9413 u64 wwid;
9414 unsigned long flags;
9415 struct _raid_device *raid_device;
9416 u16 handle;
9417 u32 state;
9418 int rc;
9419 Mpi2EventDataIrVolume_t *event_data =
9420 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
9421
9422 if (ioc->shost_recovery)
9423 return;
9424
9425 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9426 return;
9427
9428 handle = le16_to_cpu(event_data->VolDevHandle);
9429 state = le32_to_cpu(event_data->NewValue);
9430 if (!ioc->hide_ir_msg)
9431 dewtprintk(ioc,
9432 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9433 __func__, handle,
9434 le32_to_cpu(event_data->PreviousValue),
9435 state));
9436 switch (state) {
9437 case MPI2_RAID_VOL_STATE_MISSING:
9438 case MPI2_RAID_VOL_STATE_FAILED:
9439 _scsih_sas_volume_delete(ioc, handle);
9440 break;
9441
9442 case MPI2_RAID_VOL_STATE_ONLINE:
9443 case MPI2_RAID_VOL_STATE_DEGRADED:
9444 case MPI2_RAID_VOL_STATE_OPTIMAL:
9445
9446 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9447 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9448 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9449
9450 if (raid_device)
9451 break;
9452
9453 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9454 if (!wwid) {
9455 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9456 __FILE__, __LINE__, __func__);
9457 break;
9458 }
9459
9460 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9461 if (!raid_device) {
9462 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9463 __FILE__, __LINE__, __func__);
9464 break;
9465 }
9466
9467 raid_device->id = ioc->sas_id++;
9468 raid_device->channel = RAID_CHANNEL;
9469 raid_device->handle = handle;
9470 raid_device->wwid = wwid;
9471 _scsih_raid_device_add(ioc, raid_device);
9472 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9473 raid_device->id, 0);
9474 if (rc)
9475 _scsih_raid_device_remove(ioc, raid_device);
9476 break;
9477
9478 case MPI2_RAID_VOL_STATE_INITIALIZING:
9479 default:
9480 break;
9481 }
9482 }
9483
/**
 * _scsih_sas_ir_physical_disk_event - PD event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Handles an IR physical-disk state-change event.  When the disk moves
 * into an active state (online/degraded/rebuilding/optimal/hot spare),
 * its handle is recorded in the pd_handles bitmap and, if the device is
 * not already known to the driver, its SAS Device Page 0 is read, the
 * parent's phy links are refreshed and the device is added.  Events for
 * offline/not-configured/not-compatible states are ignored here.
 */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	/* Defer all discovery work while host reset is in progress. */
	if (ioc->shost_recovery)
		return;

	/* Only state-change events carry the transitions handled below. */
	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));

	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		/* pd_handles tracking is skipped on warpdrive controllers. */
		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		/* Already known: drop the lookup reference and bail. */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			return;
		}

		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		/* Update the parent's phy link before adding the device. */
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc,
			    sas_device_pg0.PhysicalPort, 0));

		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}
9570
9571 /**
9572 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9573 * @ioc: per adapter object
9574 * @event_data: event data payload
9575 * Context: user.
9576 */
9577 static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrOperationStatus_t * event_data)9578 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9579 Mpi2EventDataIrOperationStatus_t *event_data)
9580 {
9581 char *reason_str = NULL;
9582
9583 switch (event_data->RAIDOperation) {
9584 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9585 reason_str = "resync";
9586 break;
9587 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9588 reason_str = "online capacity expansion";
9589 break;
9590 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9591 reason_str = "consistency check";
9592 break;
9593 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9594 reason_str = "background init";
9595 break;
9596 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9597 reason_str = "make data consistent";
9598 break;
9599 }
9600
9601 if (!reason_str)
9602 return;
9603
9604 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9605 reason_str,
9606 le16_to_cpu(event_data->VolDevHandle),
9607 event_data->PercentComplete);
9608 }
9609
9610 /**
9611 * _scsih_sas_ir_operation_status_event - handle RAID operation events
9612 * @ioc: per adapter object
9613 * @fw_event: The fw_event_work object
9614 * Context: user.
9615 */
9616 static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9617 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9618 struct fw_event_work *fw_event)
9619 {
9620 Mpi2EventDataIrOperationStatus_t *event_data =
9621 (Mpi2EventDataIrOperationStatus_t *)
9622 fw_event->event_data;
9623 static struct _raid_device *raid_device;
9624 unsigned long flags;
9625 u16 handle;
9626
9627 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9628 (!ioc->hide_ir_msg))
9629 _scsih_sas_ir_operation_status_event_debug(ioc,
9630 event_data);
9631
9632 /* code added for raid transport support */
9633 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9634
9635 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9636 handle = le16_to_cpu(event_data->VolDevHandle);
9637 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9638 if (raid_device)
9639 raid_device->percent_complete =
9640 event_data->PercentComplete;
9641 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9642 }
9643 }
9644
9645 /**
9646 * _scsih_prep_device_scan - initialize parameters prior to device scan
9647 * @ioc: per adapter object
9648 *
9649 * Set the deleted flag prior to device scan. If the device is found during
9650 * the scan, then we clear the deleted flag.
9651 */
9652 static void
_scsih_prep_device_scan(struct MPT3SAS_ADAPTER * ioc)9653 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9654 {
9655 struct MPT3SAS_DEVICE *sas_device_priv_data;
9656 struct scsi_device *sdev;
9657
9658 shost_for_each_device(sdev, ioc->shost) {
9659 sas_device_priv_data = sdev->hostdata;
9660 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9661 sas_device_priv_data->sas_target->deleted = 1;
9662 }
9663 }
9664
9665 /**
9666 * _scsih_update_device_qdepth - Update QD during Reset.
9667 * @ioc: per adapter object
9668 *
9669 */
9670 static void
_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER * ioc)9671 _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
9672 {
9673 struct MPT3SAS_DEVICE *sas_device_priv_data;
9674 struct MPT3SAS_TARGET *sas_target_priv_data;
9675 struct _sas_device *sas_device;
9676 struct scsi_device *sdev;
9677 u16 qdepth;
9678
9679 ioc_info(ioc, "Update devices with firmware reported queue depth\n");
9680 shost_for_each_device(sdev, ioc->shost) {
9681 sas_device_priv_data = sdev->hostdata;
9682 if (sas_device_priv_data && sas_device_priv_data->sas_target) {
9683 sas_target_priv_data = sas_device_priv_data->sas_target;
9684 sas_device = sas_device_priv_data->sas_target->sas_dev;
9685 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
9686 qdepth = ioc->max_nvme_qd;
9687 else if (sas_device &&
9688 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
9689 qdepth = (sas_device->port_type > 1) ?
9690 ioc->max_wideport_qd : ioc->max_narrowport_qd;
9691 else if (sas_device &&
9692 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
9693 qdepth = ioc->max_sata_qd;
9694 else
9695 continue;
9696 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
9697 }
9698 }
9699 }
9700
/**
 * _scsih_mark_responding_sas_device - mark a sas_device as responding
 * @ioc: per adapter object
 * @sas_device_pg0: SAS Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Matches a firmware-reported device (by SAS address, slot, and port)
 * against the driver's sas_device_list, sets its responding flag,
 * refreshes cached enclosure data, and updates the device handle if
 * firmware assigned a new one across the reset.
 * Used in _scsih_remove_unresponsive_sas_devices.
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	/* Resolve cached enclosure data, if the device reports one. */
	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* Match on SAS address + slot + port; handles may change. */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			sas_target_priv_data = starget->hostdata;
			/* Device is back: clear stale TM/deleted state. */
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    sas_device->enclosure_logical_id,
				    sas_device->slot);
		}
		/* Refresh enclosure level / connector name from page 0. */
		if (le16_to_cpu(sas_device_pg0->Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			    sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
			    &sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
		    le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
			    enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
				    enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* Handle unchanged across the reset: done. */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
			sas_device->handle);
		sas_device->handle = le16_to_cpu(
		    sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
9800
9801 /**
9802 * _scsih_create_enclosure_list_after_reset - Free Existing list,
9803 * And create enclosure list by scanning all Enclosure Page(0)s
9804 * @ioc: per adapter object
9805 */
9806 static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER * ioc)9807 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9808 {
9809 struct _enclosure_node *enclosure_dev;
9810 Mpi2ConfigReply_t mpi_reply;
9811 u16 enclosure_handle;
9812 int rc;
9813
9814 /* Free existing enclosure list */
9815 mpt3sas_free_enclosure_list(ioc);
9816
9817 /* Re constructing enclosure list after reset*/
9818 enclosure_handle = 0xFFFF;
9819 do {
9820 enclosure_dev =
9821 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9822 if (!enclosure_dev) {
9823 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9824 __FILE__, __LINE__, __func__);
9825 return;
9826 }
9827 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9828 &enclosure_dev->pg0,
9829 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9830 enclosure_handle);
9831
9832 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9833 MPI2_IOCSTATUS_MASK)) {
9834 kfree(enclosure_dev);
9835 return;
9836 }
9837 list_add_tail(&enclosure_dev->list,
9838 &ioc->enclosure_list);
9839 enclosure_handle =
9840 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9841 } while (1);
9842 }
9843
9844 /**
9845 * _scsih_search_responding_sas_devices -
9846 * @ioc: per adapter object
9847 *
9848 * After host reset, find out whether devices are still responding.
9849 * If not remove.
9850 */
9851 static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER * ioc)9852 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9853 {
9854 Mpi2SasDevicePage0_t sas_device_pg0;
9855 Mpi2ConfigReply_t mpi_reply;
9856 u16 ioc_status;
9857 u16 handle;
9858 u32 device_info;
9859
9860 ioc_info(ioc, "search for end-devices: start\n");
9861
9862 if (list_empty(&ioc->sas_device_list))
9863 goto out;
9864
9865 handle = 0xFFFF;
9866 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9867 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9868 handle))) {
9869 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9870 MPI2_IOCSTATUS_MASK;
9871 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9872 break;
9873 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9874 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9875 if (!(_scsih_is_end_device(device_info)))
9876 continue;
9877 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9878 }
9879
9880 out:
9881 ioc_info(ioc, "search for end-devices: complete\n");
9882 }
9883
/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Matches a firmware-reported PCIe device (by WWID and slot) against
 * the driver's pcie_device_list, marks it responding, refreshes cached
 * enclosure data, and updates the device handle if firmware assigned a
 * new one across the reset.
 * Used in _scsih_remove_unresponding_devices.
 */
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		/* Match on WWID + slot; handles may change over reset. */
		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
		    && (pcie_device->slot == le16_to_cpu(
		    pcie_device_pg0->Slot))) {
			pcie_device->access_status =
			    pcie_device_pg0->AccessStatus;
			pcie_device->responding = 1;
			starget = pcie_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* Device is back: clear stale TM/deleted state. */
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget) {
				starget_printk(KERN_INFO, starget,
				    "handle(0x%04x), wwid(0x%016llx) ",
				    pcie_device->handle,
				    (unsigned long long)pcie_device->wwid);
				if (pcie_device->enclosure_handle != 0)
					starget_printk(KERN_INFO, starget,
					    "enclosure logical id(0x%016llx), "
					    "slot(%d)\n",
					    (unsigned long long)
					    pcie_device->enclosure_logical_id,
					    pcie_device->slot);
			}

			/* Enclosure level is not reported on MPI2-era HBAs. */
			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
				pcie_device->enclosure_level =
				    pcie_device_pg0->EnclosureLevel;
				memcpy(&pcie_device->connector_name[0],
				    &pcie_device_pg0->ConnectorName[0], 4);
			} else {
				pcie_device->enclosure_level = 0;
				pcie_device->connector_name[0] = '\0';
			}

			/* Handle unchanged across the reset: done. */
			if (pcie_device->handle == le16_to_cpu(
			    pcie_device_pg0->DevHandle))
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
				pcie_device->handle);
			pcie_device->handle = le16_to_cpu(
			    pcie_device_pg0->DevHandle);
			if (sas_target_priv_data)
				sas_target_priv_data->handle =
				    le16_to_cpu(pcie_device_pg0->DevHandle);
			goto out;
		}
	}

 out:
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
9959
9960 /**
9961 * _scsih_search_responding_pcie_devices -
9962 * @ioc: per adapter object
9963 *
9964 * After host reset, find out whether devices are still responding.
9965 * If not remove.
9966 */
9967 static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER * ioc)9968 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9969 {
9970 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9971 Mpi2ConfigReply_t mpi_reply;
9972 u16 ioc_status;
9973 u16 handle;
9974 u32 device_info;
9975
9976 ioc_info(ioc, "search for end-devices: start\n");
9977
9978 if (list_empty(&ioc->pcie_device_list))
9979 goto out;
9980
9981 handle = 0xFFFF;
9982 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9983 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9984 handle))) {
9985 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9986 MPI2_IOCSTATUS_MASK;
9987 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9988 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9989 __func__, ioc_status,
9990 le32_to_cpu(mpi_reply.IOCLogInfo));
9991 break;
9992 }
9993 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9994 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9995 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9996 continue;
9997 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9998 }
9999 out:
10000 ioc_info(ioc, "search for PCIe end-devices: complete\n");
10001 }
10002
/**
 * _scsih_mark_responding_raid_device - mark a raid_device as responding
 * @ioc: per adapter object
 * @wwid: world wide identifier for raid volume
 * @handle: device handle
 *
 * After host reset, find out whether devices are still responding.
 * Matches the volume by WWID, marks it responding, re-initializes
 * warpdrive direct-I/O properties, and updates the cached handle if
 * firmware assigned a new one across the reset.
 * Used in _scsih_remove_unresponsive_raid_devices.
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/*
			 * The lock is dropped for the log message and the
			 * warpdrive re-initialization call (presumably
			 * because the latter may block -- NOTE(review):
			 * confirm against mpt3sas_init_warpdrive_properties).
			 * The iteration is never resumed afterwards: both
			 * paths below return.
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			/* Handle unchanged across the reset: done. */
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			pr_info("\thandle changed from(0x%04x)!!!\n",
				raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
10059
10060 /**
10061 * _scsih_search_responding_raid_devices -
10062 * @ioc: per adapter object
10063 *
10064 * After host reset, find out whether devices are still responding.
10065 * If not remove.
10066 */
10067 static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER * ioc)10068 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
10069 {
10070 Mpi2RaidVolPage1_t volume_pg1;
10071 Mpi2RaidVolPage0_t volume_pg0;
10072 Mpi2RaidPhysDiskPage0_t pd_pg0;
10073 Mpi2ConfigReply_t mpi_reply;
10074 u16 ioc_status;
10075 u16 handle;
10076 u8 phys_disk_num;
10077
10078 if (!ioc->ir_firmware)
10079 return;
10080
10081 ioc_info(ioc, "search for raid volumes: start\n");
10082
10083 if (list_empty(&ioc->raid_device_list))
10084 goto out;
10085
10086 handle = 0xFFFF;
10087 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10088 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10089 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10090 MPI2_IOCSTATUS_MASK;
10091 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10092 break;
10093 handle = le16_to_cpu(volume_pg1.DevHandle);
10094
10095 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10096 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10097 sizeof(Mpi2RaidVolPage0_t)))
10098 continue;
10099
10100 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10101 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10102 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
10103 _scsih_mark_responding_raid_device(ioc,
10104 le64_to_cpu(volume_pg1.WWID), handle);
10105 }
10106
10107 /* refresh the pd_handles */
10108 if (!ioc->is_warpdrive) {
10109 phys_disk_num = 0xFF;
10110 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
10111 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10112 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10113 phys_disk_num))) {
10114 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10115 MPI2_IOCSTATUS_MASK;
10116 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10117 break;
10118 phys_disk_num = pd_pg0.PhysDiskNum;
10119 handle = le16_to_cpu(pd_pg0.DevHandle);
10120 set_bit(handle, ioc->pd_handles);
10121 }
10122 }
10123 out:
10124 ioc_info(ioc, "search for responding raid volumes: complete\n");
10125 }
10126
10127 /**
10128 * _scsih_mark_responding_expander - mark a expander as responding
10129 * @ioc: per adapter object
10130 * @expander_pg0:SAS Expander Config Page0
10131 *
10132 * After host reset, find out whether devices are still responding.
10133 * Used in _scsih_remove_unresponsive_expanders.
10134 */
10135 static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER * ioc,Mpi2ExpanderPage0_t * expander_pg0)10136 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10137 Mpi2ExpanderPage0_t *expander_pg0)
10138 {
10139 struct _sas_node *sas_expander = NULL;
10140 unsigned long flags;
10141 int i;
10142 struct _enclosure_node *enclosure_dev = NULL;
10143 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10144 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10145 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10146 struct hba_port *port = mpt3sas_get_port_by_id(
10147 ioc, expander_pg0->PhysicalPort, 0);
10148
10149 if (enclosure_handle)
10150 enclosure_dev =
10151 mpt3sas_scsih_enclosure_find_by_handle(ioc,
10152 enclosure_handle);
10153
10154 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10155 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10156 if (sas_expander->sas_address != sas_address)
10157 continue;
10158 if (sas_expander->port != port)
10159 continue;
10160 sas_expander->responding = 1;
10161
10162 if (enclosure_dev) {
10163 sas_expander->enclosure_logical_id =
10164 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10165 sas_expander->enclosure_handle =
10166 le16_to_cpu(expander_pg0->EnclosureHandle);
10167 }
10168
10169 if (sas_expander->handle == handle)
10170 goto out;
10171 pr_info("\texpander(0x%016llx): handle changed" \
10172 " from(0x%04x) to (0x%04x)!!!\n",
10173 (unsigned long long)sas_expander->sas_address,
10174 sas_expander->handle, handle);
10175 sas_expander->handle = handle;
10176 for (i = 0 ; i < sas_expander->num_phys ; i++)
10177 sas_expander->phy[i].handle = handle;
10178 goto out;
10179 }
10180 out:
10181 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10182 }
10183
10184 /**
10185 * _scsih_search_responding_expanders -
10186 * @ioc: per adapter object
10187 *
10188 * After host reset, find out whether devices are still responding.
10189 * If not remove.
10190 */
10191 static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER * ioc)10192 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10193 {
10194 Mpi2ExpanderPage0_t expander_pg0;
10195 Mpi2ConfigReply_t mpi_reply;
10196 u16 ioc_status;
10197 u64 sas_address;
10198 u16 handle;
10199 u8 port;
10200
10201 ioc_info(ioc, "search for expanders: start\n");
10202
10203 if (list_empty(&ioc->sas_expander_list))
10204 goto out;
10205
10206 handle = 0xFFFF;
10207 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10208 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10209
10210 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10211 MPI2_IOCSTATUS_MASK;
10212 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10213 break;
10214
10215 handle = le16_to_cpu(expander_pg0.DevHandle);
10216 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10217 port = expander_pg0.PhysicalPort;
10218 pr_info(
10219 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10220 handle, (unsigned long long)sas_address,
10221 (ioc->multipath_on_hba ?
10222 port : MULTIPATH_DISABLED_PORT_ID));
10223 _scsih_mark_responding_expander(ioc, &expander_pg0);
10224 }
10225
10226 out:
10227 ioc_info(ioc, "search for expanders: complete\n");
10228 }
10229
/**
 * _scsih_remove_unresponding_devices - removing unresponding devices
 * @ioc: per adapter object
 *
 * Invoked after host reset: every SAS end-device, PCIe device, RAID
 * volume and expander that was not marked responding by the preceding
 * "search for responding" passes is torn down here.  Devices that did
 * respond simply have their flag cleared for the next reset cycle, and
 * all blocked devices are unblocked at the end.
 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Clean up the sas_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_init_list, list) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* Move non-responders onto 'head'; reset flags on responders. */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	/* Reuse 'head' (now empty again) for the PCIe pruning pass. */
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/*
	 * Clean up the pcie_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_init_list, list) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	/* Actual expander teardown happens outside the lock. */
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}
10346
10347 static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander,u16 handle)10348 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10349 struct _sas_node *sas_expander, u16 handle)
10350 {
10351 Mpi2ExpanderPage1_t expander_pg1;
10352 Mpi2ConfigReply_t mpi_reply;
10353 int i;
10354
10355 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10356 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10357 &expander_pg1, i, handle))) {
10358 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10359 __FILE__, __LINE__, __func__);
10360 return;
10361 }
10362
10363 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10364 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10365 expander_pg1.NegotiatedLinkRate >> 4,
10366 sas_expander->port);
10367 }
10368 }
10369
10370 /**
10371 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10372 * @ioc: per adapter object
10373 */
10374 static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER * ioc)10375 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10376 {
10377 Mpi2ExpanderPage0_t expander_pg0;
10378 Mpi2SasDevicePage0_t sas_device_pg0;
10379 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10380 Mpi2RaidVolPage1_t *volume_pg1;
10381 Mpi2RaidVolPage0_t *volume_pg0;
10382 Mpi2RaidPhysDiskPage0_t pd_pg0;
10383 Mpi2EventIrConfigElement_t element;
10384 Mpi2ConfigReply_t mpi_reply;
10385 u8 phys_disk_num, port_id;
10386 u16 ioc_status;
10387 u16 handle, parent_handle;
10388 u64 sas_address;
10389 struct _sas_device *sas_device;
10390 struct _pcie_device *pcie_device;
10391 struct _sas_node *expander_device;
10392 static struct _raid_device *raid_device;
10393 u8 retry_count;
10394 unsigned long flags;
10395
10396 volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
10397 if (!volume_pg0)
10398 return;
10399
10400 volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
10401 if (!volume_pg1) {
10402 kfree(volume_pg0);
10403 return;
10404 }
10405
10406 ioc_info(ioc, "scan devices: start\n");
10407
10408 _scsih_sas_host_refresh(ioc);
10409
10410 ioc_info(ioc, "\tscan devices: expanders start\n");
10411
10412 /* expanders */
10413 handle = 0xFFFF;
10414 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10415 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10416 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10417 MPI2_IOCSTATUS_MASK;
10418 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10419 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10420 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10421 break;
10422 }
10423 handle = le16_to_cpu(expander_pg0.DevHandle);
10424 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10425 port_id = expander_pg0.PhysicalPort;
10426 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10427 ioc, le64_to_cpu(expander_pg0.SASAddress),
10428 mpt3sas_get_port_by_id(ioc, port_id, 0));
10429 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10430 if (expander_device)
10431 _scsih_refresh_expander_links(ioc, expander_device,
10432 handle);
10433 else {
10434 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10435 handle,
10436 (u64)le64_to_cpu(expander_pg0.SASAddress));
10437 _scsih_expander_add(ioc, handle);
10438 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10439 handle,
10440 (u64)le64_to_cpu(expander_pg0.SASAddress));
10441 }
10442 }
10443
10444 ioc_info(ioc, "\tscan devices: expanders complete\n");
10445
10446 if (!ioc->ir_firmware)
10447 goto skip_to_sas;
10448
10449 ioc_info(ioc, "\tscan devices: phys disk start\n");
10450
10451 /* phys disk */
10452 phys_disk_num = 0xFF;
10453 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10454 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10455 phys_disk_num))) {
10456 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10457 MPI2_IOCSTATUS_MASK;
10458 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10459 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10460 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10461 break;
10462 }
10463 phys_disk_num = pd_pg0.PhysDiskNum;
10464 handle = le16_to_cpu(pd_pg0.DevHandle);
10465 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10466 if (sas_device) {
10467 sas_device_put(sas_device);
10468 continue;
10469 }
10470 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10471 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10472 handle) != 0)
10473 continue;
10474 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10475 MPI2_IOCSTATUS_MASK;
10476 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10477 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10478 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10479 break;
10480 }
10481 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10482 if (!_scsih_get_sas_address(ioc, parent_handle,
10483 &sas_address)) {
10484 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10485 handle,
10486 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10487 port_id = sas_device_pg0.PhysicalPort;
10488 mpt3sas_transport_update_links(ioc, sas_address,
10489 handle, sas_device_pg0.PhyNum,
10490 MPI2_SAS_NEG_LINK_RATE_1_5,
10491 mpt3sas_get_port_by_id(ioc, port_id, 0));
10492 set_bit(handle, ioc->pd_handles);
10493 retry_count = 0;
10494 /* This will retry adding the end device.
10495 * _scsih_add_device() will decide on retries and
10496 * return "1" when it should be retried
10497 */
10498 while (_scsih_add_device(ioc, handle, retry_count++,
10499 1)) {
10500 ssleep(1);
10501 }
10502 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10503 handle,
10504 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10505 }
10506 }
10507
10508 ioc_info(ioc, "\tscan devices: phys disk complete\n");
10509
10510 ioc_info(ioc, "\tscan devices: volumes start\n");
10511
10512 /* volumes */
10513 handle = 0xFFFF;
10514 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10515 volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10516 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10517 MPI2_IOCSTATUS_MASK;
10518 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10519 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10520 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10521 break;
10522 }
10523 handle = le16_to_cpu(volume_pg1->DevHandle);
10524 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10525 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10526 le64_to_cpu(volume_pg1->WWID));
10527 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10528 if (raid_device)
10529 continue;
10530 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10531 volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10532 sizeof(Mpi2RaidVolPage0_t)))
10533 continue;
10534 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10535 MPI2_IOCSTATUS_MASK;
10536 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10537 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10538 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10539 break;
10540 }
10541 if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10542 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10543 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10544 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10545 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10546 element.VolDevHandle = volume_pg1->DevHandle;
10547 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10548 volume_pg1->DevHandle);
10549 _scsih_sas_volume_add(ioc, &element);
10550 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10551 volume_pg1->DevHandle);
10552 }
10553 }
10554
10555 ioc_info(ioc, "\tscan devices: volumes complete\n");
10556
10557 skip_to_sas:
10558
10559 ioc_info(ioc, "\tscan devices: end devices start\n");
10560
10561 /* sas devices */
10562 handle = 0xFFFF;
10563 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10564 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10565 handle))) {
10566 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10567 MPI2_IOCSTATUS_MASK;
10568 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10569 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10570 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10571 break;
10572 }
10573 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10574 if (!(_scsih_is_end_device(
10575 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10576 continue;
10577 port_id = sas_device_pg0.PhysicalPort;
10578 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10579 le64_to_cpu(sas_device_pg0.SASAddress),
10580 mpt3sas_get_port_by_id(ioc, port_id, 0));
10581 if (sas_device) {
10582 sas_device_put(sas_device);
10583 continue;
10584 }
10585 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10586 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10587 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10588 handle,
10589 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10590 mpt3sas_transport_update_links(ioc, sas_address, handle,
10591 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10592 mpt3sas_get_port_by_id(ioc, port_id, 0));
10593 retry_count = 0;
10594 /* This will retry adding the end device.
10595 * _scsih_add_device() will decide on retries and
10596 * return "1" when it should be retried
10597 */
10598 while (_scsih_add_device(ioc, handle, retry_count++,
10599 0)) {
10600 ssleep(1);
10601 }
10602 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10603 handle,
10604 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10605 }
10606 }
10607 ioc_info(ioc, "\tscan devices: end devices complete\n");
10608 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10609
10610 /* pcie devices */
10611 handle = 0xFFFF;
10612 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10613 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10614 handle))) {
10615 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10616 & MPI2_IOCSTATUS_MASK;
10617 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10618 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10619 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10620 break;
10621 }
10622 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10623 if (!(_scsih_is_nvme_pciescsi_device(
10624 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10625 continue;
10626 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10627 le64_to_cpu(pcie_device_pg0.WWID));
10628 if (pcie_device) {
10629 pcie_device_put(pcie_device);
10630 continue;
10631 }
10632 retry_count = 0;
10633 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10634 _scsih_pcie_add_device(ioc, handle);
10635
10636 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10637 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10638 }
10639
10640 kfree(volume_pg0);
10641 kfree(volume_pg1);
10642
10643 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10644 ioc_info(ioc, "scan devices: complete\n");
10645 }
10646
10647 /**
10648 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
10649 * @ioc: per adapter object
10650 *
10651 * The handler for doing any required cleanup or initialization.
10652 */
mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER * ioc)10653 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
10654 {
10655 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10656 }
10657
10658 /**
10659 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10660 * scsi & tm cmds.
10661 * @ioc: per adapter object
10662 *
10663 * The handler for doing any required cleanup or initialization.
10664 */
10665 void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER * ioc)10666 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
10667 {
10668 dtmprintk(ioc,
10669 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
10670 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10671 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10672 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10673 complete(&ioc->scsih_cmds.done);
10674 }
10675 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10676 ioc->tm_cmds.status |= MPT3_CMD_RESET;
10677 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10678 complete(&ioc->tm_cmds.done);
10679 }
10680
10681 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10682 memset(ioc->device_remove_in_progress, 0,
10683 ioc->device_remove_in_progress_sz);
10684 _scsih_fw_event_cleanup_queue(ioc);
10685 _scsih_flush_running_cmds(ioc);
10686 }
10687
10688 /**
10689 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10690 * @ioc: per adapter object
10691 *
10692 * The handler for doing any required cleanup or initialization.
10693 */
10694 void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER * ioc)10695 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10696 {
10697 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
10698 if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
10699 if (ioc->multipath_on_hba) {
10700 _scsih_sas_port_refresh(ioc);
10701 _scsih_update_vphys_after_reset(ioc);
10702 }
10703 _scsih_prep_device_scan(ioc);
10704 _scsih_create_enclosure_list_after_reset(ioc);
10705 _scsih_search_responding_sas_devices(ioc);
10706 _scsih_search_responding_pcie_devices(ioc);
10707 _scsih_search_responding_raid_devices(ioc);
10708 _scsih_search_responding_expanders(ioc);
10709 _scsih_error_recovery_delete_devices(ioc);
10710 }
10711 }
10712
10713 /**
10714 * _mpt3sas_fw_work - delayed task for processing firmware events
10715 * @ioc: per adapter object
10716 * @fw_event: The fw_event_work object
10717 * Context: user.
10718 */
10719 static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)10720 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10721 {
10722 ioc->current_event = fw_event;
10723 _scsih_fw_event_del_from_list(ioc, fw_event);
10724
10725 /* the queue is being flushed so ignore this event */
10726 if (ioc->remove_host || ioc->pci_error_recovery) {
10727 fw_event_work_put(fw_event);
10728 ioc->current_event = NULL;
10729 return;
10730 }
10731
10732 switch (fw_event->event) {
10733 case MPT3SAS_PROCESS_TRIGGER_DIAG:
10734 mpt3sas_process_trigger_data(ioc,
10735 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10736 fw_event->event_data);
10737 break;
10738 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
10739 while (scsi_host_in_recovery(ioc->shost) ||
10740 ioc->shost_recovery) {
10741 /*
10742 * If we're unloading or cancelling the work, bail.
10743 * Otherwise, this can become an infinite loop.
10744 */
10745 if (ioc->remove_host || ioc->fw_events_cleanup)
10746 goto out;
10747 ssleep(1);
10748 }
10749 _scsih_remove_unresponding_devices(ioc);
10750 _scsih_del_dirty_vphy(ioc);
10751 _scsih_del_dirty_port_entries(ioc);
10752 if (ioc->is_gen35_ioc)
10753 _scsih_update_device_qdepth(ioc);
10754 _scsih_scan_for_devices_after_reset(ioc);
10755 /*
10756 * If diag reset has occurred during the driver load
10757 * then driver has to complete the driver load operation
10758 * by executing the following items:
10759 *- Register the devices from sas_device_init_list to SML
10760 *- clear is_driver_loading flag,
10761 *- start the watchdog thread.
10762 * In happy driver load path, above things are taken care of when
10763 * driver executes scsih_scan_finished().
10764 */
10765 if (ioc->is_driver_loading)
10766 _scsih_complete_devices_scanning(ioc);
10767 _scsih_set_nvme_max_shutdown_latency(ioc);
10768 break;
10769 case MPT3SAS_PORT_ENABLE_COMPLETE:
10770 ioc->start_scan = 0;
10771 if (missing_delay[0] != -1 && missing_delay[1] != -1)
10772 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10773 missing_delay[1]);
10774 dewtprintk(ioc,
10775 ioc_info(ioc, "port enable: complete from worker thread\n"));
10776 break;
10777 case MPT3SAS_TURN_ON_PFA_LED:
10778 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10779 break;
10780 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10781 _scsih_sas_topology_change_event(ioc, fw_event);
10782 break;
10783 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10784 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10785 _scsih_sas_device_status_change_event_debug(ioc,
10786 (Mpi2EventDataSasDeviceStatusChange_t *)
10787 fw_event->event_data);
10788 break;
10789 case MPI2_EVENT_SAS_DISCOVERY:
10790 _scsih_sas_discovery_event(ioc, fw_event);
10791 break;
10792 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10793 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10794 break;
10795 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10796 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10797 break;
10798 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10799 _scsih_sas_enclosure_dev_status_change_event(ioc,
10800 fw_event);
10801 break;
10802 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10803 _scsih_sas_ir_config_change_event(ioc, fw_event);
10804 break;
10805 case MPI2_EVENT_IR_VOLUME:
10806 _scsih_sas_ir_volume_event(ioc, fw_event);
10807 break;
10808 case MPI2_EVENT_IR_PHYSICAL_DISK:
10809 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10810 break;
10811 case MPI2_EVENT_IR_OPERATION_STATUS:
10812 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10813 break;
10814 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10815 _scsih_pcie_device_status_change_event(ioc, fw_event);
10816 break;
10817 case MPI2_EVENT_PCIE_ENUMERATION:
10818 _scsih_pcie_enumeration_event(ioc, fw_event);
10819 break;
10820 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10821 _scsih_pcie_topology_change_event(ioc, fw_event);
10822 ioc->current_event = NULL;
10823 return;
10824 }
10825 out:
10826 fw_event_work_put(fw_event);
10827 ioc->current_event = NULL;
10828 }
10829
10830 /**
10831 * _firmware_event_work
10832 * @work: The fw_event_work object
10833 * Context: user.
10834 *
10835 * wrappers for the work thread handling firmware events
10836 */
10837
10838 static void
_firmware_event_work(struct work_struct * work)10839 _firmware_event_work(struct work_struct *work)
10840 {
10841 struct fw_event_work *fw_event = container_of(work,
10842 struct fw_event_work, work);
10843
10844 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10845 }
10846
10847 /**
10848 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10849 * @ioc: per adapter object
10850 * @msix_index: MSIX table index supplied by the OS
10851 * @reply: reply message frame(lower 32bit addr)
10852 * Context: interrupt.
10853 *
10854 * This function merely adds a new work task into ioc->firmware_event_thread.
10855 * The tasks are worked from _firmware_event_work in user context.
10856 *
10857 * Return: 1 meaning mf should be freed from _base_interrupt
10858 * 0 means the mf is freed from this function.
10859 */
10860 u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER * ioc,u8 msix_index,u32 reply)10861 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10862 u32 reply)
10863 {
10864 struct fw_event_work *fw_event;
10865 Mpi2EventNotificationReply_t *mpi_reply;
10866 u16 event;
10867 u16 sz;
10868 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10869
10870 /* events turned off due to host reset */
10871 if (ioc->pci_error_recovery)
10872 return 1;
10873
10874 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10875
10876 if (unlikely(!mpi_reply)) {
10877 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10878 __FILE__, __LINE__, __func__);
10879 return 1;
10880 }
10881
10882 event = le16_to_cpu(mpi_reply->Event);
10883
10884 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10885 mpt3sas_trigger_event(ioc, event, 0);
10886
10887 switch (event) {
10888 /* handle these */
10889 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10890 {
10891 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10892 (Mpi2EventDataSasBroadcastPrimitive_t *)
10893 mpi_reply->EventData;
10894
10895 if (baen_data->Primitive !=
10896 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
10897 return 1;
10898
10899 if (ioc->broadcast_aen_busy) {
10900 ioc->broadcast_aen_pending++;
10901 return 1;
10902 } else
10903 ioc->broadcast_aen_busy = 1;
10904 break;
10905 }
10906
10907 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10908 _scsih_check_topo_delete_events(ioc,
10909 (Mpi2EventDataSasTopologyChangeList_t *)
10910 mpi_reply->EventData);
10911 /*
10912 * No need to add the topology change list
10913 * event to fw event work queue when
10914 * diag reset is going on. Since during diag
10915 * reset driver scan the devices by reading
10916 * sas device page0's not by processing the
10917 * events.
10918 */
10919 if (ioc->shost_recovery)
10920 return 1;
10921 break;
10922 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10923 _scsih_check_pcie_topo_remove_events(ioc,
10924 (Mpi26EventDataPCIeTopologyChangeList_t *)
10925 mpi_reply->EventData);
10926 if (ioc->shost_recovery)
10927 return 1;
10928 break;
10929 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10930 _scsih_check_ir_config_unhide_events(ioc,
10931 (Mpi2EventDataIrConfigChangeList_t *)
10932 mpi_reply->EventData);
10933 break;
10934 case MPI2_EVENT_IR_VOLUME:
10935 _scsih_check_volume_delete_events(ioc,
10936 (Mpi2EventDataIrVolume_t *)
10937 mpi_reply->EventData);
10938 break;
10939 case MPI2_EVENT_LOG_ENTRY_ADDED:
10940 {
10941 Mpi2EventDataLogEntryAdded_t *log_entry;
10942 u32 log_code;
10943
10944 if (!ioc->is_warpdrive)
10945 break;
10946
10947 log_entry = (Mpi2EventDataLogEntryAdded_t *)
10948 mpi_reply->EventData;
10949 log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);
10950
10951 if (le16_to_cpu(log_entry->LogEntryQualifier)
10952 != MPT2_WARPDRIVE_LOGENTRY)
10953 break;
10954
10955 switch (log_code) {
10956 case MPT2_WARPDRIVE_LC_SSDT:
10957 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10958 break;
10959 case MPT2_WARPDRIVE_LC_SSDLW:
10960 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10961 break;
10962 case MPT2_WARPDRIVE_LC_SSDLF:
10963 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10964 break;
10965 case MPT2_WARPDRIVE_LC_BRMF:
10966 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10967 break;
10968 }
10969
10970 break;
10971 }
10972 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10973 _scsih_sas_device_status_change_event(ioc,
10974 (Mpi2EventDataSasDeviceStatusChange_t *)
10975 mpi_reply->EventData);
10976 break;
10977 case MPI2_EVENT_IR_OPERATION_STATUS:
10978 case MPI2_EVENT_SAS_DISCOVERY:
10979 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10980 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10981 case MPI2_EVENT_IR_PHYSICAL_DISK:
10982 case MPI2_EVENT_PCIE_ENUMERATION:
10983 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10984 break;
10985
10986 case MPI2_EVENT_TEMP_THRESHOLD:
10987 _scsih_temp_threshold_events(ioc,
10988 (Mpi2EventDataTemperature_t *)
10989 mpi_reply->EventData);
10990 break;
10991 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10992 ActiveCableEventData =
10993 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10994 switch (ActiveCableEventData->ReasonCode) {
10995 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10996 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10997 ActiveCableEventData->ReceptacleID);
10998 pr_notice("cannot be powered and devices connected\n");
10999 pr_notice("to this active cable will not be seen\n");
11000 pr_notice("This active cable requires %d mW of power\n",
11001 le32_to_cpu(
11002 ActiveCableEventData->ActiveCablePowerRequirement));
11003 break;
11004
11005 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
11006 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
11007 ActiveCableEventData->ReceptacleID);
11008 pr_notice(
11009 "is not running at optimal speed(12 Gb/s rate)\n");
11010 break;
11011 }
11012
11013 break;
11014
11015 default: /* ignore the rest */
11016 return 1;
11017 }
11018
11019 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
11020 fw_event = alloc_fw_event_work(sz);
11021 if (!fw_event) {
11022 ioc_err(ioc, "failure at %s:%d/%s()!\n",
11023 __FILE__, __LINE__, __func__);
11024 return 1;
11025 }
11026
11027 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
11028 fw_event->ioc = ioc;
11029 fw_event->VF_ID = mpi_reply->VF_ID;
11030 fw_event->VP_ID = mpi_reply->VP_ID;
11031 fw_event->event = event;
11032 _scsih_fw_event_add(ioc, fw_event);
11033 fw_event_work_put(fw_event);
11034 return 1;
11035 }
11036
11037 /**
11038 * _scsih_expander_node_remove - removing expander device from list.
11039 * @ioc: per adapter object
11040 * @sas_expander: the sas_device object
11041 *
11042 * Removing object and freeing associated memory from the
11043 * ioc->sas_expander_list.
11044 */
11045 static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)11046 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
11047 struct _sas_node *sas_expander)
11048 {
11049 struct _sas_port *mpt3sas_port, *next;
11050 unsigned long flags;
11051 int port_id;
11052
11053 /* remove sibling ports attached to this expander */
11054 list_for_each_entry_safe(mpt3sas_port, next,
11055 &sas_expander->sas_port_list, port_list) {
11056 if (ioc->shost_recovery)
11057 return;
11058 if (mpt3sas_port->remote_identify.device_type ==
11059 SAS_END_DEVICE)
11060 mpt3sas_device_remove_by_sas_address(ioc,
11061 mpt3sas_port->remote_identify.sas_address,
11062 mpt3sas_port->hba_port);
11063 else if (mpt3sas_port->remote_identify.device_type ==
11064 SAS_EDGE_EXPANDER_DEVICE ||
11065 mpt3sas_port->remote_identify.device_type ==
11066 SAS_FANOUT_EXPANDER_DEVICE)
11067 mpt3sas_expander_remove(ioc,
11068 mpt3sas_port->remote_identify.sas_address,
11069 mpt3sas_port->hba_port);
11070 }
11071
11072 port_id = sas_expander->port->port_id;
11073
11074 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
11075 sas_expander->sas_address_parent, sas_expander->port);
11076
11077 ioc_info(ioc,
11078 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
11079 sas_expander->handle, (unsigned long long)
11080 sas_expander->sas_address,
11081 port_id);
11082
11083 spin_lock_irqsave(&ioc->sas_node_lock, flags);
11084 list_del(&sas_expander->list);
11085 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
11086
11087 kfree(sas_expander->phy);
11088 kfree(sas_expander);
11089 }
11090
11091 /**
11092 * _scsih_nvme_shutdown - NVMe shutdown notification
11093 * @ioc: per adapter object
11094 *
11095 * Sending IoUnitControl request with shutdown operation code to alert IOC that
11096 * the host system is shutting down so that IOC can issue NVMe shutdown to
11097 * NVMe drives attached to it.
11098 */
11099 static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER * ioc)11100 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
11101 {
11102 Mpi26IoUnitControlRequest_t *mpi_request;
11103 Mpi26IoUnitControlReply_t *mpi_reply;
11104 u16 smid;
11105
11106 /* are there any NVMe devices ? */
11107 if (list_empty(&ioc->pcie_device_list))
11108 return;
11109
11110 mutex_lock(&ioc->scsih_cmds.mutex);
11111
11112 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11113 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11114 goto out;
11115 }
11116
11117 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11118
11119 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11120 if (!smid) {
11121 ioc_err(ioc,
11122 "%s: failed obtaining a smid\n", __func__);
11123 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11124 goto out;
11125 }
11126
11127 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11128 ioc->scsih_cmds.smid = smid;
11129 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
11130 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
11131 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
11132
11133 init_completion(&ioc->scsih_cmds.done);
11134 ioc->put_smid_default(ioc, smid);
11135 /* Wait for max_shutdown_latency seconds */
11136 ioc_info(ioc,
11137 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
11138 ioc->max_shutdown_latency);
11139 wait_for_completion_timeout(&ioc->scsih_cmds.done,
11140 ioc->max_shutdown_latency*HZ);
11141
11142 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11143 ioc_err(ioc, "%s: timeout\n", __func__);
11144 goto out;
11145 }
11146
11147 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11148 mpi_reply = ioc->scsih_cmds.reply;
11149 ioc_info(ioc, "Io Unit Control shutdown (complete):"
11150 "ioc_status(0x%04x), loginfo(0x%08x)\n",
11151 le16_to_cpu(mpi_reply->IOCStatus),
11152 le32_to_cpu(mpi_reply->IOCLogInfo));
11153 }
11154 out:
11155 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11156 mutex_unlock(&ioc->scsih_cmds.mutex);
11157 }
11158
11159
11160 /**
11161 * _scsih_ir_shutdown - IR shutdown notification
11162 * @ioc: per adapter object
11163 *
11164 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
11165 * the host system is shutting down.
11166 */
11167 static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER * ioc)11168 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
11169 {
11170 Mpi2RaidActionRequest_t *mpi_request;
11171 Mpi2RaidActionReply_t *mpi_reply;
11172 u16 smid;
11173
11174 /* is IR firmware build loaded ? */
11175 if (!ioc->ir_firmware)
11176 return;
11177
11178 /* are there any volumes ? */
11179 if (list_empty(&ioc->raid_device_list))
11180 return;
11181
11182 mutex_lock(&ioc->scsih_cmds.mutex);
11183
11184 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11185 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11186 goto out;
11187 }
11188 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11189
11190 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11191 if (!smid) {
11192 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
11193 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11194 goto out;
11195 }
11196
11197 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11198 ioc->scsih_cmds.smid = smid;
11199 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11200
11201 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11202 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
11203
11204 if (!ioc->hide_ir_msg)
11205 ioc_info(ioc, "IR shutdown (sending)\n");
11206 init_completion(&ioc->scsih_cmds.done);
11207 ioc->put_smid_default(ioc, smid);
11208 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11209
11210 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11211 ioc_err(ioc, "%s: timeout\n", __func__);
11212 goto out;
11213 }
11214
11215 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11216 mpi_reply = ioc->scsih_cmds.reply;
11217 if (!ioc->hide_ir_msg)
11218 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11219 le16_to_cpu(mpi_reply->IOCStatus),
11220 le32_to_cpu(mpi_reply->IOCLogInfo));
11221 }
11222
11223 out:
11224 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11225 mutex_unlock(&ioc->scsih_cmds.mutex);
11226 }
11227
11228 /**
11229 * _scsih_get_shost_and_ioc - get shost and ioc
11230 * and verify whether they are NULL or not
11231 * @pdev: PCI device struct
11232 * @shost: address of scsi host pointer
11233 * @ioc: address of HBA adapter pointer
11234 *
11235 * Return zero if *shost and *ioc are not NULL otherwise return error number.
11236 */
11237 static int
_scsih_get_shost_and_ioc(struct pci_dev * pdev,struct Scsi_Host ** shost,struct MPT3SAS_ADAPTER ** ioc)11238 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11239 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11240 {
11241 *shost = pci_get_drvdata(pdev);
11242 if (*shost == NULL) {
11243 dev_err(&pdev->dev, "pdev's driver data is null\n");
11244 return -ENXIO;
11245 }
11246
11247 *ioc = shost_priv(*shost);
11248 if (*ioc == NULL) {
11249 dev_err(&pdev->dev, "shost's private data is null\n");
11250 return -ENXIO;
11251 }
11252
11253 return 0;
11254 }
11255
11256 /**
11257 * scsih_remove - detach and remove add host
11258 * @pdev: PCI device struct
11259 *
11260 * Routine called when unloading the driver.
11261 */
scsih_remove(struct pci_dev * pdev)11262 static void scsih_remove(struct pci_dev *pdev)
11263 {
11264 struct Scsi_Host *shost;
11265 struct MPT3SAS_ADAPTER *ioc;
11266 struct _sas_port *mpt3sas_port, *next_port;
11267 struct _raid_device *raid_device, *next;
11268 struct MPT3SAS_TARGET *sas_target_priv_data;
11269 struct _pcie_device *pcie_device, *pcienext;
11270 struct workqueue_struct *wq;
11271 unsigned long flags;
11272 Mpi2ConfigReply_t mpi_reply;
11273 struct hba_port *port, *port_next;
11274
11275 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11276 return;
11277
11278 ioc->remove_host = 1;
11279
11280 if (!pci_device_is_present(pdev)) {
11281 mpt3sas_base_pause_mq_polling(ioc);
11282 _scsih_flush_running_cmds(ioc);
11283 }
11284
11285 _scsih_fw_event_cleanup_queue(ioc);
11286
11287 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11288 wq = ioc->firmware_event_thread;
11289 ioc->firmware_event_thread = NULL;
11290 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11291 if (wq)
11292 destroy_workqueue(wq);
11293 /*
11294 * Copy back the unmodified ioc page1. so that on next driver load,
11295 * current modified changes on ioc page1 won't take effect.
11296 */
11297 if (ioc->is_aero_ioc)
11298 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11299 &ioc->ioc_pg1_copy);
11300 /* release all the volumes */
11301 _scsih_ir_shutdown(ioc);
11302 mpt3sas_destroy_debugfs(ioc);
11303 sas_remove_host(shost);
11304 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11305 list) {
11306 if (raid_device->starget) {
11307 sas_target_priv_data =
11308 raid_device->starget->hostdata;
11309 sas_target_priv_data->deleted = 1;
11310 scsi_remove_target(&raid_device->starget->dev);
11311 }
11312 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11313 raid_device->handle, (u64)raid_device->wwid);
11314 _scsih_raid_device_remove(ioc, raid_device);
11315 }
11316 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11317 list) {
11318 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11319 list_del_init(&pcie_device->list);
11320 pcie_device_put(pcie_device);
11321 }
11322
11323 /* free ports attached to the sas_host */
11324 list_for_each_entry_safe(mpt3sas_port, next_port,
11325 &ioc->sas_hba.sas_port_list, port_list) {
11326 if (mpt3sas_port->remote_identify.device_type ==
11327 SAS_END_DEVICE)
11328 mpt3sas_device_remove_by_sas_address(ioc,
11329 mpt3sas_port->remote_identify.sas_address,
11330 mpt3sas_port->hba_port);
11331 else if (mpt3sas_port->remote_identify.device_type ==
11332 SAS_EDGE_EXPANDER_DEVICE ||
11333 mpt3sas_port->remote_identify.device_type ==
11334 SAS_FANOUT_EXPANDER_DEVICE)
11335 mpt3sas_expander_remove(ioc,
11336 mpt3sas_port->remote_identify.sas_address,
11337 mpt3sas_port->hba_port);
11338 }
11339
11340 list_for_each_entry_safe(port, port_next,
11341 &ioc->port_table_list, list) {
11342 list_del(&port->list);
11343 kfree(port);
11344 }
11345
11346 /* free phys attached to the sas_host */
11347 if (ioc->sas_hba.num_phys) {
11348 kfree(ioc->sas_hba.phy);
11349 ioc->sas_hba.phy = NULL;
11350 ioc->sas_hba.num_phys = 0;
11351 }
11352
11353 mpt3sas_base_detach(ioc);
11354 spin_lock(&gioc_lock);
11355 list_del(&ioc->list);
11356 spin_unlock(&gioc_lock);
11357 scsi_host_put(shost);
11358 }
11359
/**
 * scsih_shutdown - routine call during system shutdown
 * @pdev: PCI device struct
 *
 * Quiesces the controller: flushes firmware events, restores IOC page 1
 * on Aero controllers, shuts down RAID/NVMe state, and soft-resets the
 * IOC back to ready state with interrupts masked.
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* device gone from the bus: outstanding I/O cannot complete, flush */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the firmware event workqueue under the lock, destroy after */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);

	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_stop_watchdog(ioc);
	/* flag recovery around the reset so other paths back off */
	ioc->shost_recovery = 1;
	mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
	ioc->shost_recovery = 0;
	mpt3sas_base_free_irq(ioc);
	mpt3sas_base_disable_msix(ioc);
}
11409
11410
/**
 * _scsih_probe_boot_devices - reports 1st device
 * @ioc: per adapter object
 *
 * If specified in bios page 2, this routine reports the 1st
 * device scsi-ml or sas transport for persistent boot device
 * purposes. Please refer to function _scsih_determine_boot_device()
 */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/*
	 * Select the boot device in priority order:
	 * requested, then alternate requested, then current.
	 */
	device = NULL;
	if (ioc->req_boot_device.device) {
		device = ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device = ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device = ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	if (channel == RAID_CHANNEL) {
		raid_device = device;
		/*
		 * If this boot vd is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (raid_device->starget)
			return;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		/*
		 * If this boot NVMe device is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (pcie_device->starget)
			return;
		/* move from the init list to the active list under the lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		sas_device = device;
		/*
		 * If this boot sas/sata device is already registered with SML
		 * then no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (sas_device->starget)
			return;
		/* snapshot fields and move to the active list under the lock */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		if (ioc->hide_drives)
			return;

		if (!port)
			return;

		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent, port)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			/*
			 * Can't remove while async scanning is in progress;
			 * only clean up once the driver is fully loaded.
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent, port);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}
11519
11520 /**
11521 * _scsih_probe_raid - reporting raid volumes to scsi-ml
11522 * @ioc: per adapter object
11523 *
11524 * Called during initial loading of the driver.
11525 */
11526 static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER * ioc)11527 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11528 {
11529 struct _raid_device *raid_device, *raid_next;
11530 int rc;
11531
11532 list_for_each_entry_safe(raid_device, raid_next,
11533 &ioc->raid_device_list, list) {
11534 if (raid_device->starget)
11535 continue;
11536 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11537 raid_device->id, 0);
11538 if (rc)
11539 _scsih_raid_device_remove(ioc, raid_device);
11540 }
11541 }
11542
get_next_sas_device(struct MPT3SAS_ADAPTER * ioc)11543 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11544 {
11545 struct _sas_device *sas_device = NULL;
11546 unsigned long flags;
11547
11548 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11549 if (!list_empty(&ioc->sas_device_init_list)) {
11550 sas_device = list_first_entry(&ioc->sas_device_init_list,
11551 struct _sas_device, list);
11552 sas_device_get(sas_device);
11553 }
11554 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11555
11556 return sas_device;
11557 }
11558
sas_device_make_active(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)11559 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11560 struct _sas_device *sas_device)
11561 {
11562 unsigned long flags;
11563
11564 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11565
11566 /*
11567 * Since we dropped the lock during the call to port_add(), we need to
11568 * be careful here that somebody else didn't move or delete this item
11569 * while we were busy with other things.
11570 *
11571 * If it was on the list, we need a put() for the reference the list
11572 * had. Either way, we need a get() for the destination list.
11573 */
11574 if (!list_empty(&sas_device->list)) {
11575 list_del_init(&sas_device->list);
11576 sas_device_put(sas_device);
11577 }
11578
11579 sas_device_get(sas_device);
11580 list_add_tail(&sas_device->list, &ioc->sas_device_list);
11581
11582 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11583 }
11584
/**
 * _scsih_probe_sas - reporting sas devices to sas transport
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver. Drains the sas device
 * init list: each device is registered with the sas transport and, on
 * success, moved to the active device list.
 */
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device;

	/* warpdrive-style configurations hide physical drives */
	if (ioc->hide_drives)
		return;

	/* each iteration holds one reference, dropped before continuing */
	while ((sas_device = get_next_sas_device(ioc))) {
		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
		    sas_device->sas_address_parent, sas_device->port)) {
			/* transport registration failed: discard the device */
			_scsih_sas_device_remove(ioc, sas_device);
			sas_device_put(sas_device);
			continue;
		} else if (!sas_device->starget) {
			/*
			 * When asyn scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_device->sas_address,
				    sas_device->sas_address_parent,
				    sas_device->port);
				_scsih_sas_device_remove(ioc, sas_device);
				sas_device_put(sas_device);
				continue;
			}
		}
		/* success: move from init list to active device list */
		sas_device_make_active(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
11626
11627 /**
11628 * get_next_pcie_device - Get the next pcie device
11629 * @ioc: per adapter object
11630 *
11631 * Get the next pcie device from pcie_device_init_list list.
11632 *
11633 * Return: pcie device structure if pcie_device_init_list list is not empty
11634 * otherwise returns NULL
11635 */
get_next_pcie_device(struct MPT3SAS_ADAPTER * ioc)11636 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11637 {
11638 struct _pcie_device *pcie_device = NULL;
11639 unsigned long flags;
11640
11641 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11642 if (!list_empty(&ioc->pcie_device_init_list)) {
11643 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11644 struct _pcie_device, list);
11645 pcie_device_get(pcie_device);
11646 }
11647 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11648
11649 return pcie_device;
11650 }
11651
11652 /**
11653 * pcie_device_make_active - Add pcie device to pcie_device_list list
11654 * @ioc: per adapter object
11655 * @pcie_device: pcie device object
11656 *
11657 * Add the pcie device which has registered with SCSI Transport Later to
11658 * pcie_device_list list
11659 */
pcie_device_make_active(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)11660 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11661 struct _pcie_device *pcie_device)
11662 {
11663 unsigned long flags;
11664
11665 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11666
11667 if (!list_empty(&pcie_device->list)) {
11668 list_del_init(&pcie_device->list);
11669 pcie_device_put(pcie_device);
11670 }
11671 pcie_device_get(pcie_device);
11672 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11673
11674 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11675 }
11676
/**
 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver. Drains the pcie device
 * init list: each device is registered with the SCSI midlayer and, on
 * success, moved to the active device list.
 */
static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	int rc;

	/* PCIe Device List */
	/* each iteration holds one reference, dropped before continuing */
	while ((pcie_device = get_next_pcie_device(ioc))) {
		if (pcie_device->starget) {
			/* already registered with the midlayer */
			pcie_device_put(pcie_device);
			continue;
		}
		if (pcie_device->access_status ==
		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
			/* blocked device: track it but don't expose to SML */
			pcie_device_make_active(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		}
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
		    pcie_device->id, 0);
		if (rc) {
			_scsih_pcie_device_remove(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		} else if (!pcie_device->starget) {
			/*
			 * When async scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				/* TODO-- Need to find out whether this condition will
				 * occur or not
				 */
				_scsih_pcie_device_remove(ioc, pcie_device);
				pcie_device_put(pcie_device);
				continue;
			}
		}
		/* success: move from init list to active device list */
		pcie_device_make_active(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}
}
11727
11728 /**
11729 * _scsih_probe_devices - probing for devices
11730 * @ioc: per adapter object
11731 *
11732 * Called during initial loading of the driver.
11733 */
11734 static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER * ioc)11735 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11736 {
11737 u16 volume_mapping_flags;
11738
11739 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11740 return; /* return when IOC doesn't support initiator mode */
11741
11742 _scsih_probe_boot_devices(ioc);
11743
11744 if (ioc->ir_firmware) {
11745 volume_mapping_flags =
11746 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11747 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11748 if (volume_mapping_flags ==
11749 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11750 _scsih_probe_raid(ioc);
11751 _scsih_probe_sas(ioc);
11752 } else {
11753 _scsih_probe_sas(ioc);
11754 _scsih_probe_raid(ioc);
11755 }
11756 } else {
11757 _scsih_probe_sas(ioc);
11758 _scsih_probe_pcie(ioc);
11759 }
11760 }
11761
11762 /**
11763 * scsih_scan_start - scsi lld callback for .scan_start
11764 * @shost: SCSI host pointer
11765 *
11766 * The shost has the ability to discover targets on its own instead
11767 * of scanning the entire bus. In our implemention, we will kick off
11768 * firmware discovery.
11769 */
11770 static void
scsih_scan_start(struct Scsi_Host * shost)11771 scsih_scan_start(struct Scsi_Host *shost)
11772 {
11773 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11774 int rc;
11775 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11776 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11777 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11778 mpt3sas_enable_diag_buffer(ioc, 1);
11779
11780 if (disable_discovery > 0)
11781 return;
11782
11783 ioc->start_scan = 1;
11784 rc = mpt3sas_port_enable(ioc);
11785
11786 if (rc != 0)
11787 ioc_info(ioc, "port enable: FAILED\n");
11788 }
11789
11790 /**
11791 * _scsih_complete_devices_scanning - add the devices to sml and
11792 * complete ioc initialization.
11793 * @ioc: per adapter object
11794 *
11795 * Return nothing.
11796 */
_scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER * ioc)11797 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
11798 {
11799
11800 if (ioc->wait_for_discovery_to_complete) {
11801 ioc->wait_for_discovery_to_complete = 0;
11802 _scsih_probe_devices(ioc);
11803 }
11804
11805 mpt3sas_base_start_watchdog(ioc);
11806 ioc->is_driver_loading = 0;
11807 }
11808
/**
 * scsih_scan_finished - scsi lld callback for .scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our
 * implementation, we wait for firmware discovery to complete, then return 1.
 */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 ioc_state;
	int issue_hard_reset = 0;

	/* discovery disabled by module parameter: report scan complete */
	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* give port enable at most 300 seconds before giving up */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* scan still in progress: keep polling unless the IOC has failed */
	if (ioc->start_scan) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			issue_hard_reset = 1;
			goto out;
		} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
				MPI2_IOC_STATE_COREDUMP) {
			mpt3sas_base_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
			issue_hard_reset = 1;
			goto out;
		}
		return 0;
	}

	/* port enable finished: inspect how it ended */
	if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
		ioc_info(ioc,
		    "port enable: aborted due to diag reset\n");
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	_scsih_complete_devices_scanning(ioc);

 out:
	if (issue_hard_reset) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
			ioc->is_driver_loading = 0;
	}
	return 1;
}
11883
/**
 * scsih_map_queues - map reply queues with request queues
 * @shost: SCSI host pointer
 *
 * Distributes the host's hardware queues across the blk-mq map types:
 * the default map covers the IRQ-driven MSI-X vectors past the
 * high-iops queues, and the poll map covers the IRQ-less io-poll queues.
 */
static void scsih_map_queues(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
	struct blk_mq_queue_map *map;
	int i, qoff, offset;
	int nr_msix_vectors = ioc->iopoll_q_start_index;
	int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;

	/* single hardware queue: nothing to spread */
	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		map = &shost->tag_set.map[i];
		map->nr_queues = 0;
		offset = 0;
		if (i == HCTX_TYPE_DEFAULT) {
			/* default map begins after the high-iops queues */
			map->nr_queues =
			    nr_msix_vectors - ioc->high_iops_queues;
			offset = ioc->high_iops_queues;
		} else if (i == HCTX_TYPE_POLL)
			map->nr_queues = iopoll_q_count;

		/* the default map must always end up with queues */
		if (!map->nr_queues)
			BUG_ON(i == HCTX_TYPE_DEFAULT);

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, ioc->pdev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
11927
/* shost template for SAS 2.0 HBA devices */
static struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* can_queue is a placeholder; the real depth is set at attach time */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};
11957
/* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	/* cookie matches blocks against the owning host template */
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
11965
/* shost template for SAS 3.0 HBA devices */
static struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* can_queue is a placeholder; the real depth is set at attach time */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 128,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
	/* SAS 3.0+ only: blk-mq queue mapping and io-poll support */
	.map_queues			= scsih_map_queues,
	.mq_poll			= mpt3sas_blk_mq_poll,
};
11998
/* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	/* cookie matches blocks against the owning host template */
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
12006
/**
 * _scsih_determine_hba_mpi_version - determine in which MPI version class
 * this device belongs to.
 * @pdev: PCI device struct
 *
 * Return: MPI2_VERSION for SAS 2.0 HBA devices,
 * MPI25_VERSION for SAS 3.0 HBA devices,
 * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices, and
 * 0 when the PCI device id is not recognized.
 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{
	switch (pdev->device) {
	/* SAS 2.0 generation controllers */
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	/* SAS 3.0 generation controllers */
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	/* later SAS 3.0 / 3.5 generation controllers */
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	/* unrecognized device id: caller treats 0 as "not ours" */
	return 0;
}
12078
12079 /**
12080 * _scsih_probe - attach and add scsi host
12081 * @pdev: PCI device struct
12082 * @id: pci device id
12083 *
12084 * Return: 0 success, anything else error.
12085 */
12086 static int
_scsih_probe(struct pci_dev * pdev,const struct pci_device_id * id)12087 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
12088 {
12089 struct MPT3SAS_ADAPTER *ioc;
12090 struct Scsi_Host *shost = NULL;
12091 int rv;
12092 u16 hba_mpi_version;
12093 int iopoll_q_count = 0;
12094
12095 /* Determine in which MPI version class this pci device belongs */
12096 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
12097 if (hba_mpi_version == 0)
12098 return -ENODEV;
12099
12100 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
12101 * for other generation HBA's return with -ENODEV
12102 */
12103 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
12104 return -ENODEV;
12105
12106 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
12107 * for other generation HBA's return with -ENODEV
12108 */
12109 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
12110 || hba_mpi_version == MPI26_VERSION)))
12111 return -ENODEV;
12112
12113 switch (hba_mpi_version) {
12114 case MPI2_VERSION:
12115 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
12116 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
12117 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
12118 shost = scsi_host_alloc(&mpt2sas_driver_template,
12119 sizeof(struct MPT3SAS_ADAPTER));
12120 if (!shost)
12121 return -ENODEV;
12122 ioc = shost_priv(shost);
12123 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12124 ioc->hba_mpi_version_belonged = hba_mpi_version;
12125 ioc->id = mpt2_ids++;
12126 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
12127 switch (pdev->device) {
12128 case MPI2_MFGPAGE_DEVID_SSS6200:
12129 ioc->is_warpdrive = 1;
12130 ioc->hide_ir_msg = 1;
12131 break;
12132 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12133 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12134 ioc->is_mcpu_endpoint = 1;
12135 break;
12136 default:
12137 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
12138 break;
12139 }
12140
12141 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12142 ioc->multipath_on_hba = 0;
12143 else
12144 ioc->multipath_on_hba = 1;
12145
12146 break;
12147 case MPI25_VERSION:
12148 case MPI26_VERSION:
12149 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
12150 shost = scsi_host_alloc(&mpt3sas_driver_template,
12151 sizeof(struct MPT3SAS_ADAPTER));
12152 if (!shost)
12153 return -ENODEV;
12154 ioc = shost_priv(shost);
12155 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12156 ioc->hba_mpi_version_belonged = hba_mpi_version;
12157 ioc->id = mpt3_ids++;
12158 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
12159 switch (pdev->device) {
12160 case MPI26_MFGPAGE_DEVID_SAS3508:
12161 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12162 case MPI26_MFGPAGE_DEVID_SAS3408:
12163 case MPI26_MFGPAGE_DEVID_SAS3516:
12164 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12165 case MPI26_MFGPAGE_DEVID_SAS3416:
12166 case MPI26_MFGPAGE_DEVID_SAS3616:
12167 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12168 ioc->is_gen35_ioc = 1;
12169 break;
12170 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12171 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12172 dev_err(&pdev->dev,
12173 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
12174 pdev->device, pdev->subsystem_vendor,
12175 pdev->subsystem_device);
12176 return 1;
12177 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12178 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12179 dev_err(&pdev->dev,
12180 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
12181 pdev->device, pdev->subsystem_vendor,
12182 pdev->subsystem_device);
12183 return 1;
12184 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12185 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12186 dev_info(&pdev->dev,
12187 "HBA is in Configurable Secure mode\n");
12188 fallthrough;
12189 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12190 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12191 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
12192 break;
12193 default:
12194 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
12195 }
12196 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12197 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
12198 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12199 ioc->combined_reply_queue = 1;
12200 if (ioc->is_gen35_ioc)
12201 ioc->combined_reply_index_count =
12202 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12203 else
12204 ioc->combined_reply_index_count =
12205 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
12206 }
12207
12208 switch (ioc->is_gen35_ioc) {
12209 case 0:
12210 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12211 ioc->multipath_on_hba = 0;
12212 else
12213 ioc->multipath_on_hba = 1;
12214 break;
12215 case 1:
12216 if (multipath_on_hba == -1 || multipath_on_hba > 0)
12217 ioc->multipath_on_hba = 1;
12218 else
12219 ioc->multipath_on_hba = 0;
12220 break;
12221 default:
12222 break;
12223 }
12224
12225 break;
12226 default:
12227 return -ENODEV;
12228 }
12229
12230 INIT_LIST_HEAD(&ioc->list);
12231 spin_lock(&gioc_lock);
12232 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
12233 spin_unlock(&gioc_lock);
12234 ioc->shost = shost;
12235 ioc->pdev = pdev;
12236 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12237 ioc->tm_cb_idx = tm_cb_idx;
12238 ioc->ctl_cb_idx = ctl_cb_idx;
12239 ioc->base_cb_idx = base_cb_idx;
12240 ioc->port_enable_cb_idx = port_enable_cb_idx;
12241 ioc->transport_cb_idx = transport_cb_idx;
12242 ioc->scsih_cb_idx = scsih_cb_idx;
12243 ioc->config_cb_idx = config_cb_idx;
12244 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12245 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12246 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12247 ioc->logging_level = logging_level;
12248 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
12249 /* Host waits for minimum of six seconds */
12250 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
12251 /*
12252 * Enable MEMORY MOVE support flag.
12253 */
12254 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
12255 /* Enable ADDITIONAL QUERY support flag. */
12256 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
12257
12258 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12259
12260 /* misc semaphores and spin locks */
12261 mutex_init(&ioc->reset_in_progress_mutex);
12262 /* initializing pci_access_mutex lock */
12263 mutex_init(&ioc->pci_access_mutex);
12264 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12265 spin_lock_init(&ioc->scsi_lookup_lock);
12266 spin_lock_init(&ioc->sas_device_lock);
12267 spin_lock_init(&ioc->sas_node_lock);
12268 spin_lock_init(&ioc->fw_event_lock);
12269 spin_lock_init(&ioc->raid_device_lock);
12270 spin_lock_init(&ioc->pcie_device_lock);
12271 spin_lock_init(&ioc->diag_trigger_lock);
12272
12273 INIT_LIST_HEAD(&ioc->sas_device_list);
12274 INIT_LIST_HEAD(&ioc->sas_device_init_list);
12275 INIT_LIST_HEAD(&ioc->sas_expander_list);
12276 INIT_LIST_HEAD(&ioc->enclosure_list);
12277 INIT_LIST_HEAD(&ioc->pcie_device_list);
12278 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
12279 INIT_LIST_HEAD(&ioc->fw_event_list);
12280 INIT_LIST_HEAD(&ioc->raid_device_list);
12281 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12282 INIT_LIST_HEAD(&ioc->delayed_tr_list);
12283 INIT_LIST_HEAD(&ioc->delayed_sc_list);
12284 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
12285 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12286 INIT_LIST_HEAD(&ioc->reply_queue_list);
12287 INIT_LIST_HEAD(&ioc->port_table_list);
12288
12289 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12290
12291 /* init shost parameters */
12292 shost->max_cmd_len = 32;
12293 shost->max_lun = max_lun;
12294 shost->transportt = mpt3sas_transport_template;
12295 shost->unique_id = ioc->id;
12296
12297 if (ioc->is_mcpu_endpoint) {
12298 /* mCPU MPI support 64K max IO */
12299 shost->max_sectors = 128;
12300 ioc_info(ioc, "The max_sectors value is set to %d\n",
12301 shost->max_sectors);
12302 } else {
12303 if (max_sectors != 0xFFFF) {
12304 if (max_sectors < 64) {
12305 shost->max_sectors = 64;
12306 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12307 max_sectors);
12308 } else if (max_sectors > 32767) {
12309 shost->max_sectors = 32767;
12310 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12311 max_sectors);
12312 } else {
12313 shost->max_sectors = max_sectors & 0xFFFE;
12314 ioc_info(ioc, "The max_sectors value is set to %d\n",
12315 shost->max_sectors);
12316 }
12317 }
12318 }
12319 /* register EEDP capabilities with SCSI layer */
12320 if (prot_mask >= 0)
12321 scsi_host_set_prot(shost, (prot_mask & 0x07));
12322 else
12323 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12324 | SHOST_DIF_TYPE2_PROTECTION
12325 | SHOST_DIF_TYPE3_PROTECTION);
12326
12327 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12328
12329 /* event thread */
12330 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12331 "fw_event_%s%d", ioc->driver_name, ioc->id);
12332 ioc->firmware_event_thread = alloc_ordered_workqueue(
12333 ioc->firmware_event_name, 0);
12334 if (!ioc->firmware_event_thread) {
12335 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12336 __FILE__, __LINE__, __func__);
12337 rv = -ENODEV;
12338 goto out_thread_fail;
12339 }
12340
12341 shost->host_tagset = 0;
12342
12343 if (ioc->is_gen35_ioc && host_tagset_enable)
12344 shost->host_tagset = 1;
12345
12346 ioc->is_driver_loading = 1;
12347 if ((mpt3sas_base_attach(ioc))) {
12348 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12349 __FILE__, __LINE__, __func__);
12350 rv = -ENODEV;
12351 goto out_attach_fail;
12352 }
12353
12354 if (ioc->is_warpdrive) {
12355 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12356 ioc->hide_drives = 0;
12357 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12358 ioc->hide_drives = 1;
12359 else {
12360 if (mpt3sas_get_num_volumes(ioc))
12361 ioc->hide_drives = 1;
12362 else
12363 ioc->hide_drives = 0;
12364 }
12365 } else
12366 ioc->hide_drives = 0;
12367
12368 shost->nr_hw_queues = 1;
12369
12370 if (shost->host_tagset) {
12371 shost->nr_hw_queues =
12372 ioc->reply_queue_count - ioc->high_iops_queues;
12373
12374 iopoll_q_count =
12375 ioc->reply_queue_count - ioc->iopoll_q_start_index;
12376
12377 shost->nr_maps = iopoll_q_count ? 3 : 1;
12378
12379 dev_info(&ioc->pdev->dev,
12380 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12381 shost->can_queue, shost->nr_hw_queues);
12382 }
12383
12384 rv = scsi_add_host(shost, &pdev->dev);
12385 if (rv) {
12386 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12387 __FILE__, __LINE__, __func__);
12388 goto out_add_shost_fail;
12389 }
12390
12391 scsi_scan_host(shost);
12392 mpt3sas_setup_debugfs(ioc);
12393 return 0;
12394 out_add_shost_fail:
12395 mpt3sas_base_detach(ioc);
12396 out_attach_fail:
12397 destroy_workqueue(ioc->firmware_event_thread);
12398 out_thread_fail:
12399 spin_lock(&gioc_lock);
12400 list_del(&ioc->list);
12401 spin_unlock(&gioc_lock);
12402 scsi_host_put(shost);
12403 return rv;
12404 }
12405
12406 /**
12407 * scsih_suspend - power management suspend main entry point
12408 * @dev: Device struct
12409 *
12410 * Return: 0 success, anything else error.
12411 */
12412 static int __maybe_unused
scsih_suspend(struct device * dev)12413 scsih_suspend(struct device *dev)
12414 {
12415 struct pci_dev *pdev = to_pci_dev(dev);
12416 struct Scsi_Host *shost;
12417 struct MPT3SAS_ADAPTER *ioc;
12418 int rc;
12419
12420 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12421 if (rc)
12422 return rc;
12423
12424 mpt3sas_base_stop_watchdog(ioc);
12425 scsi_block_requests(shost);
12426 _scsih_nvme_shutdown(ioc);
12427 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12428 pdev, pci_name(pdev));
12429
12430 mpt3sas_base_free_resources(ioc);
12431 return 0;
12432 }
12433
12434 /**
12435 * scsih_resume - power management resume main entry point
12436 * @dev: Device struct
12437 *
12438 * Return: 0 success, anything else error.
12439 */
12440 static int __maybe_unused
scsih_resume(struct device * dev)12441 scsih_resume(struct device *dev)
12442 {
12443 struct pci_dev *pdev = to_pci_dev(dev);
12444 struct Scsi_Host *shost;
12445 struct MPT3SAS_ADAPTER *ioc;
12446 pci_power_t device_state = pdev->current_state;
12447 int r;
12448
12449 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12450 if (r)
12451 return r;
12452
12453 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12454 pdev, pci_name(pdev), device_state);
12455
12456 ioc->pdev = pdev;
12457 r = mpt3sas_base_map_resources(ioc);
12458 if (r)
12459 return r;
12460 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12461 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12462 scsi_unblock_requests(shost);
12463 mpt3sas_base_start_watchdog(ioc);
12464 return 0;
12465 }
12466
12467 /**
12468 * scsih_pci_error_detected - Called when a PCI error is detected.
12469 * @pdev: PCI device struct
12470 * @state: PCI channel state
12471 *
12472 * Description: Called when a PCI error is detected.
12473 *
12474 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12475 */
12476 static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev * pdev,pci_channel_state_t state)12477 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12478 {
12479 struct Scsi_Host *shost;
12480 struct MPT3SAS_ADAPTER *ioc;
12481
12482 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12483 return PCI_ERS_RESULT_DISCONNECT;
12484
12485 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12486
12487 switch (state) {
12488 case pci_channel_io_normal:
12489 return PCI_ERS_RESULT_CAN_RECOVER;
12490 case pci_channel_io_frozen:
12491 /* Fatal error, prepare for slot reset */
12492 ioc->pci_error_recovery = 1;
12493 scsi_block_requests(ioc->shost);
12494 mpt3sas_base_stop_watchdog(ioc);
12495 mpt3sas_base_free_resources(ioc);
12496 return PCI_ERS_RESULT_NEED_RESET;
12497 case pci_channel_io_perm_failure:
12498 /* Permanent error, prepare for device removal */
12499 ioc->pci_error_recovery = 1;
12500 mpt3sas_base_stop_watchdog(ioc);
12501 mpt3sas_base_pause_mq_polling(ioc);
12502 _scsih_flush_running_cmds(ioc);
12503 return PCI_ERS_RESULT_DISCONNECT;
12504 }
12505 return PCI_ERS_RESULT_NEED_RESET;
12506 }
12507
12508 /**
12509 * scsih_pci_slot_reset - Called when PCI slot has been reset.
12510 * @pdev: PCI device struct
12511 *
12512 * Description: This routine is called by the pci error recovery
12513 * code after the PCI slot has been reset, just before we
12514 * should resume normal operations.
12515 */
12516 static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev * pdev)12517 scsih_pci_slot_reset(struct pci_dev *pdev)
12518 {
12519 struct Scsi_Host *shost;
12520 struct MPT3SAS_ADAPTER *ioc;
12521 int rc;
12522
12523 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12524 return PCI_ERS_RESULT_DISCONNECT;
12525
12526 ioc_info(ioc, "PCI error: slot reset callback!!\n");
12527
12528 ioc->pci_error_recovery = 0;
12529 ioc->pdev = pdev;
12530 pci_restore_state(pdev);
12531 rc = mpt3sas_base_map_resources(ioc);
12532 if (rc)
12533 return PCI_ERS_RESULT_DISCONNECT;
12534
12535 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12536 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12537
12538 ioc_warn(ioc, "hard reset: %s\n",
12539 (rc == 0) ? "success" : "failed");
12540
12541 if (!rc)
12542 return PCI_ERS_RESULT_RECOVERED;
12543 else
12544 return PCI_ERS_RESULT_DISCONNECT;
12545 }
12546
12547 /**
12548 * scsih_pci_resume() - resume normal ops after PCI reset
12549 * @pdev: pointer to PCI device
12550 *
12551 * Called when the error recovery driver tells us that its
12552 * OK to resume normal operation. Use completion to allow
12553 * halted scsi ops to resume.
12554 */
12555 static void
scsih_pci_resume(struct pci_dev * pdev)12556 scsih_pci_resume(struct pci_dev *pdev)
12557 {
12558 struct Scsi_Host *shost;
12559 struct MPT3SAS_ADAPTER *ioc;
12560
12561 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12562 return;
12563
12564 ioc_info(ioc, "PCI error: resume callback!!\n");
12565
12566 mpt3sas_base_start_watchdog(ioc);
12567 scsi_unblock_requests(ioc->shost);
12568 }
12569
12570 /**
12571 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12572 * @pdev: pointer to PCI device
12573 */
12574 static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev * pdev)12575 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12576 {
12577 struct Scsi_Host *shost;
12578 struct MPT3SAS_ADAPTER *ioc;
12579
12580 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12581 return PCI_ERS_RESULT_DISCONNECT;
12582
12583 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12584
12585 /* TODO - dump whatever for debugging purposes */
12586
12587 /* This called only if scsih_pci_error_detected returns
12588 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12589 * works, no need to reset slot.
12590 */
12591 return PCI_ERS_RESULT_RECOVERED;
12592 }
12593
12594 /**
12595 * scsih_ncq_prio_supp - Check for NCQ command priority support
12596 * @sdev: scsi device struct
12597 *
12598 * This is called when a user indicates they would like to enable
12599 * ncq command priorities. This works only on SATA devices.
12600 */
scsih_ncq_prio_supp(struct scsi_device * sdev)12601 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12602 {
12603 struct scsi_vpd *vpd;
12604 bool ncq_prio_supp = false;
12605
12606 rcu_read_lock();
12607 vpd = rcu_dereference(sdev->vpd_pg89);
12608 if (!vpd || vpd->len < 214)
12609 goto out;
12610
12611 ncq_prio_supp = (vpd->data[213] >> 4) & 1;
12612 out:
12613 rcu_read_unlock();
12614
12615 return ncq_prio_supp;
12616 }
12617 /*
12618 * The pci device ids are defined in mpi/mpi2_cnfg.h.
12619 */
12620 static const struct pci_device_id mpt3sas_pci_table[] = {
12621 /* Spitfire ~ 2004 */
12622 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12623 PCI_ANY_ID, PCI_ANY_ID },
12624 /* Falcon ~ 2008 */
12625 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12626 PCI_ANY_ID, PCI_ANY_ID },
12627 /* Liberator ~ 2108 */
12628 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12629 PCI_ANY_ID, PCI_ANY_ID },
12630 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12631 PCI_ANY_ID, PCI_ANY_ID },
12632 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12633 PCI_ANY_ID, PCI_ANY_ID },
12634 /* Meteor ~ 2116 */
12635 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12636 PCI_ANY_ID, PCI_ANY_ID },
12637 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12638 PCI_ANY_ID, PCI_ANY_ID },
12639 /* Thunderbolt ~ 2208 */
12640 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12641 PCI_ANY_ID, PCI_ANY_ID },
12642 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12643 PCI_ANY_ID, PCI_ANY_ID },
12644 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12645 PCI_ANY_ID, PCI_ANY_ID },
12646 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12647 PCI_ANY_ID, PCI_ANY_ID },
12648 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12649 PCI_ANY_ID, PCI_ANY_ID },
12650 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12651 PCI_ANY_ID, PCI_ANY_ID },
12652 /* Mustang ~ 2308 */
12653 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12654 PCI_ANY_ID, PCI_ANY_ID },
12655 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12656 PCI_ANY_ID, PCI_ANY_ID },
12657 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12658 PCI_ANY_ID, PCI_ANY_ID },
12659 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12660 PCI_ANY_ID, PCI_ANY_ID },
12661 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12662 PCI_ANY_ID, PCI_ANY_ID },
12663 /* SSS6200 */
12664 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12665 PCI_ANY_ID, PCI_ANY_ID },
12666 /* Fury ~ 3004 and 3008 */
12667 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12668 PCI_ANY_ID, PCI_ANY_ID },
12669 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12670 PCI_ANY_ID, PCI_ANY_ID },
12671 /* Invader ~ 3108 */
12672 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12673 PCI_ANY_ID, PCI_ANY_ID },
12674 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12675 PCI_ANY_ID, PCI_ANY_ID },
12676 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12677 PCI_ANY_ID, PCI_ANY_ID },
12678 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12679 PCI_ANY_ID, PCI_ANY_ID },
12680 /* Cutlass ~ 3216 and 3224 */
12681 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12682 PCI_ANY_ID, PCI_ANY_ID },
12683 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12684 PCI_ANY_ID, PCI_ANY_ID },
12685 /* Intruder ~ 3316 and 3324 */
12686 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12687 PCI_ANY_ID, PCI_ANY_ID },
12688 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12689 PCI_ANY_ID, PCI_ANY_ID },
12690 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12691 PCI_ANY_ID, PCI_ANY_ID },
12692 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12693 PCI_ANY_ID, PCI_ANY_ID },
12694 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12695 PCI_ANY_ID, PCI_ANY_ID },
12696 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12697 PCI_ANY_ID, PCI_ANY_ID },
12698 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12699 PCI_ANY_ID, PCI_ANY_ID },
12700 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12701 PCI_ANY_ID, PCI_ANY_ID },
12702 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12703 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12704 PCI_ANY_ID, PCI_ANY_ID },
12705 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12706 PCI_ANY_ID, PCI_ANY_ID },
12707 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12708 PCI_ANY_ID, PCI_ANY_ID },
12709 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12710 PCI_ANY_ID, PCI_ANY_ID },
12711 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12712 PCI_ANY_ID, PCI_ANY_ID },
12713 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12714 PCI_ANY_ID, PCI_ANY_ID },
12715 /* Mercator ~ 3616*/
12716 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12717 PCI_ANY_ID, PCI_ANY_ID },
12718
12719 /* Aero SI 0x00E1 Configurable Secure
12720 * 0x00E2 Hard Secure
12721 */
12722 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12723 PCI_ANY_ID, PCI_ANY_ID },
12724 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12725 PCI_ANY_ID, PCI_ANY_ID },
12726
12727 /*
12728 * Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered
12729 */
12730 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12731 PCI_ANY_ID, PCI_ANY_ID },
12732 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12733 PCI_ANY_ID, PCI_ANY_ID },
12734
12735 /* Atlas PCIe Switch Management Port */
12736 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12737 PCI_ANY_ID, PCI_ANY_ID },
12738
12739 /* Sea SI 0x00E5 Configurable Secure
12740 * 0x00E6 Hard Secure
12741 */
12742 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12743 PCI_ANY_ID, PCI_ANY_ID },
12744 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12745 PCI_ANY_ID, PCI_ANY_ID },
12746
12747 /*
12748 * ATTO Branded ExpressSAS H12xx GT
12749 */
12750 { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12751 PCI_ANY_ID, PCI_ANY_ID },
12752
12753 /*
12754 * Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
12755 */
12756 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12757 PCI_ANY_ID, PCI_ANY_ID },
12758 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12759 PCI_ANY_ID, PCI_ANY_ID },
12760
12761 {0} /* Terminating entry */
12762 };
12763 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12764
12765 static struct pci_error_handlers _mpt3sas_err_handler = {
12766 .error_detected = scsih_pci_error_detected,
12767 .mmio_enabled = scsih_pci_mmio_enabled,
12768 .slot_reset = scsih_pci_slot_reset,
12769 .resume = scsih_pci_resume,
12770 };
12771
/* Suspend/resume hooks; only wired up when CONFIG_PM is enabled
 * (the handlers themselves are __maybe_unused for !CONFIG_PM builds).
 */
static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);

/* PCI driver glue: binds mpt3sas_pci_table ids to the probe/remove
 * entry points defined earlier in this file.
 */
static struct pci_driver mpt3sas_driver = {
	.name = MPT3SAS_DRIVER_NAME,
	.id_table = mpt3sas_pci_table,
	.probe = _scsih_probe,
	.remove = scsih_remove,
	.shutdown = scsih_shutdown,
	.err_handler = &_mpt3sas_err_handler,
	.driver.pm = &scsih_pm_ops,
};
12783
12784 /**
12785 * scsih_init - main entry point for this driver.
12786 *
12787 * Return: 0 success, anything else error.
12788 */
12789 static int
scsih_init(void)12790 scsih_init(void)
12791 {
12792 mpt2_ids = 0;
12793 mpt3_ids = 0;
12794
12795 mpt3sas_base_initialize_callback_handler();
12796
12797 /* queuecommand callback hander */
12798 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12799
12800 /* task management callback handler */
12801 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12802
12803 /* base internal commands callback handler */
12804 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12805 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12806 mpt3sas_port_enable_done);
12807
12808 /* transport internal commands callback handler */
12809 transport_cb_idx = mpt3sas_base_register_callback_handler(
12810 mpt3sas_transport_done);
12811
12812 /* scsih internal commands callback handler */
12813 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12814
12815 /* configuration page API internal commands callback handler */
12816 config_cb_idx = mpt3sas_base_register_callback_handler(
12817 mpt3sas_config_done);
12818
12819 /* ctl module callback handler */
12820 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
12821
12822 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12823 _scsih_tm_tr_complete);
12824
12825 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12826 _scsih_tm_volume_tr_complete);
12827
12828 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12829 _scsih_sas_control_complete);
12830
12831 mpt3sas_init_debugfs();
12832 return 0;
12833 }
12834
12835 /**
12836 * scsih_exit - exit point for this driver (when it is a module).
12837 *
12838 * Return: 0 success, anything else error.
12839 */
12840 static void
scsih_exit(void)12841 scsih_exit(void)
12842 {
12843
12844 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12845 mpt3sas_base_release_callback_handler(tm_cb_idx);
12846 mpt3sas_base_release_callback_handler(base_cb_idx);
12847 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12848 mpt3sas_base_release_callback_handler(transport_cb_idx);
12849 mpt3sas_base_release_callback_handler(scsih_cb_idx);
12850 mpt3sas_base_release_callback_handler(config_cb_idx);
12851 mpt3sas_base_release_callback_handler(ctl_cb_idx);
12852
12853 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12854 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12855 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12856
12857 /* raid transport support */
12858 if (hbas_to_enumerate != 1)
12859 raid_class_release(mpt3sas_raid_template);
12860 if (hbas_to_enumerate != 2)
12861 raid_class_release(mpt2sas_raid_template);
12862 sas_release_transport(mpt3sas_transport_template);
12863 mpt3sas_exit_debugfs();
12864 }
12865
12866 /**
12867 * _mpt3sas_init - main entry point for this driver.
12868 *
12869 * Return: 0 success, anything else error.
12870 */
12871 static int __init
_mpt3sas_init(void)12872 _mpt3sas_init(void)
12873 {
12874 int error;
12875
12876 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12877 MPT3SAS_DRIVER_VERSION);
12878
12879 mpt3sas_transport_template =
12880 sas_attach_transport(&mpt3sas_transport_functions);
12881 if (!mpt3sas_transport_template)
12882 return -ENODEV;
12883
12884 /* No need attach mpt3sas raid functions template
12885 * if hbas_to_enumarate value is one.
12886 */
12887 if (hbas_to_enumerate != 1) {
12888 mpt3sas_raid_template =
12889 raid_class_attach(&mpt3sas_raid_functions);
12890 if (!mpt3sas_raid_template) {
12891 sas_release_transport(mpt3sas_transport_template);
12892 return -ENODEV;
12893 }
12894 }
12895
12896 /* No need to attach mpt2sas raid functions template
12897 * if hbas_to_enumarate value is two
12898 */
12899 if (hbas_to_enumerate != 2) {
12900 mpt2sas_raid_template =
12901 raid_class_attach(&mpt2sas_raid_functions);
12902 if (!mpt2sas_raid_template) {
12903 sas_release_transport(mpt3sas_transport_template);
12904 return -ENODEV;
12905 }
12906 }
12907
12908 error = scsih_init();
12909 if (error) {
12910 scsih_exit();
12911 return error;
12912 }
12913
12914 mpt3sas_ctl_init(hbas_to_enumerate);
12915
12916 error = pci_register_driver(&mpt3sas_driver);
12917 if (error)
12918 scsih_exit();
12919
12920 return error;
12921 }
12922
12923 /**
12924 * _mpt3sas_exit - exit point for this driver (when it is a module).
12925 *
12926 */
12927 static void __exit
_mpt3sas_exit(void)12928 _mpt3sas_exit(void)
12929 {
12930 pr_info("mpt3sas version %s unloading\n",
12931 MPT3SAS_DRIVER_VERSION);
12932
12933 mpt3sas_ctl_exit(hbas_to_enumerate);
12934
12935 pci_unregister_driver(&mpt3sas_driver);
12936
12937 scsih_exit();
12938 }
12939
12940 module_init(_mpt3sas_init);
12941 module_exit(_mpt3sas_exit);
12942