/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

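/*
 * Maps each supported PCI vendor/device ID pair to its interrupt type
 * (legacy LSI vs. MSI), SIS interface level (32 vs. 64 bit), access
 * method flag (IPR_PCI_CFG vs. IPR_MMIO), and the register layout in
 * ipr_chip_cfg[] above.
 */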
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

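/* Maximum bus speeds, indexed by the max_speed module parameter (0-2) */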
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
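
/*
 * Example (illustrative only): load the driver with the SCSI buses capped
 * at 80 MB/s and debug logging enabled:
 *
 *	modprobe ipr max_speed=0 debug=1
 */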

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

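/*
 * Enclosure table: each entry gives a product ID, a per-byte compare map,
 * and the maximum bus speed (MB/s) allowed for that enclosure. In the
 * compare map an 'X' means the corresponding product ID byte must match,
 * while any other character (e.g. '*') marks a don't-care byte (assumption
 * based on how ipr_find_ses_entry() walks this table elsewhere in the
 * driver).
 */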
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

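	/*
	 * Assumption (based on the trace_index declaration in ipr.h, not
	 * shown in this excerpt): the index is sized so the post-increment
	 * below wraps within the trace array, making the trace a ring.
	 */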
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
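	/* Read back the sense register to post the mask/clear writes above */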
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR the
 * appropriate bits into the low-order address bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

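	/*
	 * The caller holds the host lock; drop it while we sleep so the
	 * done function can run from interrupt context.
	 */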
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
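		/* Allocation length for the HCAM buffer, MSB first in CDB bytes 7-8 */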
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
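
/*
 * Example: ipr_format_res_path() turns a res_path of { 0x00, 0x01, 0xFF, ... }
 * into "00-01"; the first 0xFF byte terminates the path.
 */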

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
					sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NUL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
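
/*
 * Example: strip_and_pad_whitespace(5, buf) with buf = "IBM   " leaves
 * "IBM " in buf and returns 4, the index at which the caller can append
 * the next field.
 */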

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
1554 
/**
 * ipr_log_sis64_config_error - Log a sis64 configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			 ipr_format_res_path(dev_entry->res_path, buffer,
					     sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

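		/*
		 * The 18 possible members are split across two fixed-size
		 * arrays in the hostrcb, so switch to the second array
		 * once the first ten entries have been walked.
		 */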
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:		IOA error data
 * @len:		data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

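	/*
	 * len is in bytes; each pass prints 16 bytes (four big-endian
	 * words) prefixed with the byte offset, e.g. a line of the form:
	 *
	 *	00000010: 04448200 00000000 FFFFFFFF 00000001
	 */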
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(fabric->res_path, buffer,
							 sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

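/* SAS link rates, indexed by (link_rate & IPR_PHY_LINK_RATE_MASK) */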
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(cfg->res_path, buffer,
							 sizeof(buffer)),
				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

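	/*
	 * Walk each fabric descriptor and log its path elements; any
	 * data trailing the descriptors is dumped as raw hex below.
	 */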
	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {

		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			 ipr_format_res_path(array_entry->res_path, buffer,
					     sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			 ipr_format_res_path(array_entry->expected_res_path,
					     buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}

/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async message (HCAM) from the adapter. It will log
 * the error and send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

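	/* Drop the host lock while waiting for the reset/reload to finish */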
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/*
	 * If we got hit with a host reset while we were already resetting
	 * the adapter for some reason, the reset may have failed, leaving
	 * the adapter dead.
	 */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

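	/*
	 * An 'X' in compare_product_id_byte marks the bytes that must
	 * match; every other position is treated as a wildcard.
	 */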
	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:		SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

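		/*
		 * Scale the SES table limit by the bus width so the
		 * result stays in the 100 kHz units described above.
		 */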
		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:		ioa config struct
 * @max_delay:		max delay in microseconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA by busy-waiting.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

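		/* Exponential backoff: double the delay on each pass */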
		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

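	/*
	 * Select each word through the dump address register, then
	 * read it back through the dump data register.
	 */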
	for (i = 0; i < length_in_words; i++) {
		writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:				destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:			length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	else
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;

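	/* Copy a page at a time, allocating dump pages lazily as needed */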
	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:		dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, max_num_entries, start_off, end_off;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/*
	 * First entries in sdt are actually a list of dump addresses and
	 * lengths to gather the real dump data.  sdt represents the pointer
	 * to the ioa generated dump table.  Dump data will be extracted based
	 * on entries in this table.
	 */
	sdt = &ioa_dump->sdt;

	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
			(max_num_entries * sizeof(struct ipr_sdt_entry));
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > max_num_entries)
		num_entries = max_num_entries;

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

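	/* Walk the SDT and copy each valid region from the adapter */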
	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > max_dump_size) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif

/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
	kfree(dump);
	LEAVE;
}

/**
 * ipr_worker_thread - Worker thread
 * @work:		ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

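	/* Handle all device removals first, then process any additions */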
3120 restart:
3121 	do {
3122 		did_work = 0;
3123 		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3124 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3125 			return;
3126 		}
3127 
3128 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3129 			if (res->del_from_ml && res->sdev) {
3130 				did_work = 1;
3131 				sdev = res->sdev;
3132 				if (!scsi_device_get(sdev)) {
3133 					if (!res->add_to_ml)
3134 						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3135 					else
3136 						res->del_from_ml = 0;
3137 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3138 					scsi_remove_device(sdev);
3139 					scsi_device_put(sdev);
3140 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3141 				}
3142 				break;
3143 			}
3144 		}
3145 	} while(did_work);
3146 
3147 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3148 		if (res->add_to_ml) {
3149 			bus = res->bus;
3150 			target = res->target;
3151 			lun = res->lun;
3152 			res->add_to_ml = 0;
3153 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3154 			scsi_add_device(ioa_cfg->host, bus, target, lun);
3155 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3156 			goto restart;
3157 		}
3158 	}
3159 
3160 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3161 	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3162 	LEAVE;
3163 }
3164 
3165 #ifdef CONFIG_SCSI_IPR_TRACE
3166 /**
3167  * ipr_read_trace - Dump the adapter trace
3168  * @filp:		open sysfs file
3169  * @kobj:		kobject struct
3170  * @bin_attr:		bin_attribute struct
3171  * @buf:		buffer
3172  * @off:		offset
3173  * @count:		buffer size
3174  *
3175  * Return value:
3176  *	number of bytes printed to buffer
3177  **/
3178 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3179 			      struct bin_attribute *bin_attr,
3180 			      char *buf, loff_t off, size_t count)
3181 {
3182 	struct device *dev = container_of(kobj, struct device, kobj);
3183 	struct Scsi_Host *shost = class_to_shost(dev);
3184 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3185 	unsigned long lock_flags = 0;
3186 	ssize_t ret;
3187 
3188 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3189 	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3190 				IPR_TRACE_SIZE);
3191 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3192 
3193 	return ret;
3194 }
3195 
3196 static struct bin_attribute ipr_trace_attr = {
3197 	.attr =	{
3198 		.name = "trace",
3199 		.mode = S_IRUGO,
3200 	},
3201 	.size = 0,
3202 	.read = ipr_read_trace,
3203 };
3204 #endif
3205 
3206 /**
3207  * ipr_show_fw_version - Show the firmware version
3208  * @dev:	class device struct
3209  * @buf:	buffer
3210  *
3211  * Return value:
3212  *	number of bytes printed to buffer
3213  **/
3214 static ssize_t ipr_show_fw_version(struct device *dev,
3215 				   struct device_attribute *attr, char *buf)
3216 {
3217 	struct Scsi_Host *shost = class_to_shost(dev);
3218 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3219 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3220 	unsigned long lock_flags = 0;
3221 	int len;
3222 
3223 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3225 		       ucode_vpd->major_release, ucode_vpd->card_type,
3226 		       ucode_vpd->minor_release[0],
3227 		       ucode_vpd->minor_release[1]);
3228 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3229 	return len;
3230 }
3231 
3232 static struct device_attribute ipr_fw_version_attr = {
3233 	.attr = {
3234 		.name =		"fw_version",
3235 		.mode =		S_IRUGO,
3236 	},
3237 	.show = ipr_show_fw_version,
3238 };
3239 
3240 /**
3241  * ipr_show_log_level - Show the adapter's error logging level
3242  * @dev:	class device struct
3243  * @buf:	buffer
3244  *
3245  * Return value:
3246  * 	number of bytes printed to buffer
3247  **/
3248 static ssize_t ipr_show_log_level(struct device *dev,
3249 				   struct device_attribute *attr, char *buf)
3250 {
3251 	struct Scsi_Host *shost = class_to_shost(dev);
3252 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3253 	unsigned long lock_flags = 0;
3254 	int len;
3255 
3256 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3257 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3258 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259 	return len;
3260 }
3261 
3262 /**
3263  * ipr_store_log_level - Change the adapter's error logging level
3264  * @dev:	class device struct
3265  * @buf:	buffer
3266  *
3267  * Return value:
3268  * 	number of bytes consumed from the buffer
3269  **/
3270 static ssize_t ipr_store_log_level(struct device *dev,
3271 			           struct device_attribute *attr,
3272 				   const char *buf, size_t count)
3273 {
3274 	struct Scsi_Host *shost = class_to_shost(dev);
3275 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3276 	unsigned long lock_flags = 0;
3277 
3278 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3279 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3280 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3281 	return strlen(buf);
3282 }
3283 
3284 static struct device_attribute ipr_log_level_attr = {
3285 	.attr = {
3286 		.name =		"log_level",
3287 		.mode =		S_IRUGO | S_IWUSR,
3288 	},
3289 	.show = ipr_show_log_level,
3290 	.store = ipr_store_log_level
3291 };
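/*
 * Usage sketch (assuming this adapter registered as SCSI host0; the host
 * number on a given system may differ). The attribute reads back the
 * current level and accepts a new decimal level:
 *
 *	cat /sys/class/scsi_host/host0/log_level
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 */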
3292 
3293 /**
3294  * ipr_store_diagnostics - IOA Diagnostics interface
3295  * @dev:	device struct
3296  * @buf:	buffer
3297  * @count:	buffer size
3298  *
3299  * This function will reset the adapter and wait a reasonable
3300  * amount of time for any errors that the adapter might log.
3301  *
3302  * Return value:
3303  * 	count on success / other on failure
3304  **/
3305 static ssize_t ipr_store_diagnostics(struct device *dev,
3306 				     struct device_attribute *attr,
3307 				     const char *buf, size_t count)
3308 {
3309 	struct Scsi_Host *shost = class_to_shost(dev);
3310 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3311 	unsigned long lock_flags = 0;
3312 	int rc = count;
3313 
3314 	if (!capable(CAP_SYS_ADMIN))
3315 		return -EACCES;
3316 
3317 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3318 	while(ioa_cfg->in_reset_reload) {
3319 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3320 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3321 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3322 	}
3323 
3324 	ioa_cfg->errors_logged = 0;
3325 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3326 
3327 	if (ioa_cfg->in_reset_reload) {
3328 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3330 
3331 		/* Wait for a second for any errors to be logged */
3332 		msleep(1000);
3333 	} else {
3334 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3335 		return -EIO;
3336 	}
3337 
3338 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3339 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3340 		rc = -EIO;
3341 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3342 
3343 	return rc;
3344 }
3345 
3346 static struct device_attribute ipr_diagnostics_attr = {
3347 	.attr = {
3348 		.name =		"run_diagnostics",
3349 		.mode =		S_IWUSR,
3350 	},
3351 	.store = ipr_store_diagnostics
3352 };
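/*
 * Usage sketch (assuming SCSI host0). Writing any value triggers a normal
 * shutdown/reset cycle; the write fails with EIO if the adapter logged
 * errors during the reset:
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */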
3353 
3354 /**
3355  * ipr_show_adapter_state - Show the adapter's state
3356  * @dev:	device struct
3357  * @buf:	buffer
3358  *
3359  * Return value:
3360  * 	number of bytes printed to buffer
3361  **/
3362 static ssize_t ipr_show_adapter_state(struct device *dev,
3363 				      struct device_attribute *attr, char *buf)
3364 {
3365 	struct Scsi_Host *shost = class_to_shost(dev);
3366 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3367 	unsigned long lock_flags = 0;
3368 	int len;
3369 
3370 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3371 	if (ioa_cfg->ioa_is_dead)
3372 		len = snprintf(buf, PAGE_SIZE, "offline\n");
3373 	else
3374 		len = snprintf(buf, PAGE_SIZE, "online\n");
3375 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3376 	return len;
3377 }
3378 
3379 /**
3380  * ipr_store_adapter_state - Change adapter state
3381  * @dev:	device struct
3382  * @buf:	buffer
3383  * @count:	buffer size
3384  *
3385  * This function will change the adapter's state.
3386  *
3387  * Return value:
3388  * 	count on success / other on failure
3389  **/
3390 static ssize_t ipr_store_adapter_state(struct device *dev,
3391 				       struct device_attribute *attr,
3392 				       const char *buf, size_t count)
3393 {
3394 	struct Scsi_Host *shost = class_to_shost(dev);
3395 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3396 	unsigned long lock_flags;
3397 	int result = count;
3398 
3399 	if (!capable(CAP_SYS_ADMIN))
3400 		return -EACCES;
3401 
3402 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3403 	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3404 		ioa_cfg->ioa_is_dead = 0;
3405 		ioa_cfg->reset_retries = 0;
3406 		ioa_cfg->in_ioa_bringdown = 0;
3407 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3408 	}
3409 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3410 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3411 
3412 	return result;
3413 }
3414 
3415 static struct device_attribute ipr_ioa_state_attr = {
3416 	.attr = {
3417 		.name =		"online_state",
3418 		.mode =		S_IRUGO | S_IWUSR,
3419 	},
3420 	.show = ipr_show_adapter_state,
3421 	.store = ipr_store_adapter_state
3422 };
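/*
 * Usage sketch (assuming SCSI host0). Only the dead -> online transition
 * is acted upon; writing anything other than "online" to an offline
 * adapter is silently ignored:
 *
 *	cat /sys/class/scsi_host/host0/online_state
 *	echo online > /sys/class/scsi_host/host0/online_state
 */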
3423 
3424 /**
3425  * ipr_store_reset_adapter - Reset the adapter
3426  * @dev:	device struct
3427  * @buf:	buffer
3428  * @count:	buffer size
3429  *
3430  * This function will reset the adapter.
3431  *
3432  * Return value:
3433  * 	count on success / other on failure
3434  **/
3435 static ssize_t ipr_store_reset_adapter(struct device *dev,
3436 				       struct device_attribute *attr,
3437 				       const char *buf, size_t count)
3438 {
3439 	struct Scsi_Host *shost = class_to_shost(dev);
3440 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3441 	unsigned long lock_flags;
3442 	int result = count;
3443 
3444 	if (!capable(CAP_SYS_ADMIN))
3445 		return -EACCES;
3446 
3447 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3448 	if (!ioa_cfg->in_reset_reload)
3449 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3450 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3451 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3452 
3453 	return result;
3454 }
3455 
3456 static struct device_attribute ipr_ioa_reset_attr = {
3457 	.attr = {
3458 		.name =		"reset_host",
3459 		.mode =		S_IWUSR,
3460 	},
3461 	.store = ipr_store_reset_adapter
3462 };
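/*
 * Usage sketch (assuming SCSI host0). The write blocks until the
 * reset/reload sequence completes:
 *
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 */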
3463 
3464 /**
3465  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3466  * @buf_len:		buffer length
3467  *
3468  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3469  * list to use for microcode download
3470  *
3471  * Return value:
3472  * 	pointer to sglist / NULL on failure
3473  **/
3474 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3475 {
3476 	int sg_size, order, bsize_elem, num_elem, i, j;
3477 	struct ipr_sglist *sglist;
3478 	struct scatterlist *scatterlist;
3479 	struct page *page;
3480 
3481 	/* Get the minimum size per scatter/gather element */
3482 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3483 
3484 	/* Get the actual size per element */
3485 	order = get_order(sg_size);
3486 
3487 	/* Determine the actual number of bytes per element */
3488 	bsize_elem = PAGE_SIZE * (1 << order);
3489 
3490 	/* Determine the actual number of sg entries needed */
3491 	if (buf_len % bsize_elem)
3492 		num_elem = (buf_len / bsize_elem) + 1;
3493 	else
3494 		num_elem = buf_len / bsize_elem;
3495 
3496 	/* Allocate a scatter/gather list for the DMA */
3497 	sglist = kzalloc(sizeof(struct ipr_sglist) +
3498 			 (sizeof(struct scatterlist) * (num_elem - 1)),
3499 			 GFP_KERNEL);
3500 
3501 	if (sglist == NULL) {
3502 		ipr_trace;
3503 		return NULL;
3504 	}
3505 
3506 	scatterlist = sglist->scatterlist;
3507 	sg_init_table(scatterlist, num_elem);
3508 
3509 	sglist->order = order;
3510 	sglist->num_sg = num_elem;
3511 
3512 	/* Allocate a bunch of sg elements */
3513 	for (i = 0; i < num_elem; i++) {
3514 		page = alloc_pages(GFP_KERNEL, order);
3515 		if (!page) {
3516 			ipr_trace;
3517 
3518 			/* Free up what we already allocated */
3519 			for (j = i - 1; j >= 0; j--)
3520 				__free_pages(sg_page(&scatterlist[j]), order);
3521 			kfree(sglist);
3522 			return NULL;
3523 		}
3524 
3525 		sg_set_page(&scatterlist[i], page, 0, 0);
3526 	}
3527 
3528 	return sglist;
3529 }
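/*
 * Worked sizing example (assuming IPR_MAX_SGLIST is 64 and 4K pages; the
 * actual constant lives in ipr.h): for a 2 MB image, sg_size becomes
 * 2097152 / 63 = 33288 bytes, get_order() yields order 4, so bsize_elem
 * is 64K per chunk and num_elem = 2097152 / 65536 = 32 scatter/gather
 * elements are allocated.
 */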
3530 
3531 /**
3532  * ipr_free_ucode_buffer - Frees a microcode download buffer
3533  * @sglist:		scatter/gather list pointer
3534  *
3535  * Free a DMA'able ucode download buffer previously allocated with
3536  * ipr_alloc_ucode_buffer
3537  *
3538  * Return value:
3539  * 	nothing
3540  **/
3541 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3542 {
3543 	int i;
3544 
3545 	for (i = 0; i < sglist->num_sg; i++)
3546 		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3547 
3548 	kfree(sglist);
3549 }
3550 
3551 /**
3552  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3553  * @sglist:		scatter/gather list pointer
3554  * @buffer:		buffer pointer
3555  * @len:		buffer length
3556  *
3557  * Copy a microcode image from a user buffer into a buffer allocated by
3558  * ipr_alloc_ucode_buffer
3559  *
3560  * Return value:
3561  * 	0 on success / other on failure
3562  **/
3563 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3564 				 u8 *buffer, u32 len)
3565 {
3566 	int bsize_elem, i, result = 0;
3567 	struct scatterlist *scatterlist;
3568 	void *kaddr;
3569 
3570 	/* Determine the actual number of bytes per element */
3571 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3572 
3573 	scatterlist = sglist->scatterlist;
3574 
3575 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3576 		struct page *page = sg_page(&scatterlist[i]);
3577 
3578 		kaddr = kmap(page);
3579 		memcpy(kaddr, buffer, bsize_elem);
3580 		kunmap(page);
3581 
3582 		scatterlist[i].length = bsize_elem;
3583 
3584 		if (result != 0) {
3585 			ipr_trace;
3586 			return result;
3587 		}
3588 	}
3589 
3590 	if (len % bsize_elem) {
3591 		struct page *page = sg_page(&scatterlist[i]);
3592 
3593 		kaddr = kmap(page);
3594 		memcpy(kaddr, buffer, len % bsize_elem);
3595 		kunmap(page);
3596 
3597 		scatterlist[i].length = len % bsize_elem;
3598 	}
3599 
3600 	sglist->buffer_len = len;
3601 	return result;
3602 }
3603 
3604 /**
3605  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3606  * @ipr_cmd:		ipr command struct
3607  * @sglist:		scatter/gather list
3608  *
3609  * Builds a microcode download IOA data list (IOADL).
3610  *
3611  **/
3612 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3613 				    struct ipr_sglist *sglist)
3614 {
3615 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3616 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3617 	struct scatterlist *scatterlist = sglist->scatterlist;
3618 	int i;
3619 
3620 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3621 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3622 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3623 
3624 	ioarcb->ioadl_len =
3625 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3626 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3627 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3628 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3629 		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3630 	}
3631 
3632 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3633 }
3634 
3635 /**
3636  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3637  * @ipr_cmd:	ipr command struct
3638  * @sglist:		scatter/gather list
3639  *
3640  * Builds a microcode download IOA data list (IOADL).
3641  *
3642  **/
3643 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3644 				  struct ipr_sglist *sglist)
3645 {
3646 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3647 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3648 	struct scatterlist *scatterlist = sglist->scatterlist;
3649 	int i;
3650 
3651 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3652 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3653 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3654 
3655 	ioarcb->ioadl_len =
3656 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3657 
3658 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3659 		ioadl[i].flags_and_data_len =
3660 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3661 		ioadl[i].address =
3662 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3663 	}
3664 
3665 	ioadl[i-1].flags_and_data_len |=
3666 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3667 }
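/*
 * The two IOADL builders above differ only in descriptor layout: SIS-64
 * adapters take a separate 64-bit address per ipr_ioadl64_desc, while the
 * older format packs the flags and data length into a single 32-bit word.
 */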
3668 
3669 /**
3670  * ipr_update_ioa_ucode - Update IOA's microcode
3671  * @ioa_cfg:	ioa config struct
3672  * @sglist:		scatter/gather list
3673  *
3674  * Initiate an adapter reset to update the IOA's microcode
3675  *
3676  * Return value:
3677  * 	0 on success / -EIO on failure
3678  **/
3679 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3680 				struct ipr_sglist *sglist)
3681 {
3682 	unsigned long lock_flags;
3683 
3684 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3685 	while(ioa_cfg->in_reset_reload) {
3686 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3687 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3688 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3689 	}
3690 
3691 	if (ioa_cfg->ucode_sglist) {
3692 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3693 		dev_err(&ioa_cfg->pdev->dev,
3694 			"Microcode download already in progress\n");
3695 		return -EIO;
3696 	}
3697 
3698 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3699 					sglist->num_sg, DMA_TO_DEVICE);
3700 
3701 	if (!sglist->num_dma_sg) {
3702 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3703 		dev_err(&ioa_cfg->pdev->dev,
3704 			"Failed to map microcode download buffer!\n");
3705 		return -EIO;
3706 	}
3707 
3708 	ioa_cfg->ucode_sglist = sglist;
3709 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3710 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3711 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3712 
3713 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3714 	ioa_cfg->ucode_sglist = NULL;
3715 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3716 	return 0;
3717 }
3718 
3719 /**
3720  * ipr_store_update_fw - Update the firmware on the adapter
3721  * @dev:	device struct
3722  * @buf:	buffer
3723  * @count:	buffer size
3724  *
3725  * This function will update the firmware on the adapter.
3726  *
3727  * Return value:
3728  * 	count on success / other on failure
3729  **/
3730 static ssize_t ipr_store_update_fw(struct device *dev,
3731 				   struct device_attribute *attr,
3732 				   const char *buf, size_t count)
3733 {
3734 	struct Scsi_Host *shost = class_to_shost(dev);
3735 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3736 	struct ipr_ucode_image_header *image_hdr;
3737 	const struct firmware *fw_entry;
3738 	struct ipr_sglist *sglist;
3739 	char fname[100];
3740 	char *src;
3741 	int len, result, dnld_size;
3742 
3743 	if (!capable(CAP_SYS_ADMIN))
3744 		return -EACCES;
3745 
3746 	len = snprintf(fname, 99, "%s", buf);
3747 	fname[len-1] = '\0';
3748 
3749 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3750 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3751 		return -EIO;
3752 	}
3753 
3754 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3755 
3756 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3757 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3758 	sglist = ipr_alloc_ucode_buffer(dnld_size);
3759 
3760 	if (!sglist) {
3761 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3762 		release_firmware(fw_entry);
3763 		return -ENOMEM;
3764 	}
3765 
3766 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3767 
3768 	if (result) {
3769 		dev_err(&ioa_cfg->pdev->dev,
3770 			"Microcode buffer copy to DMA buffer failed\n");
3771 		goto out;
3772 	}
3773 
3774 	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
3775 
3776 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3777 
3778 	if (!result)
3779 		result = count;
3780 out:
3781 	ipr_free_ucode_buffer(sglist);
3782 	release_firmware(fw_entry);
3783 	return result;
3784 }
3785 
3786 static struct device_attribute ipr_update_fw_attr = {
3787 	.attr = {
3788 		.name =		"update_fw",
3789 		.mode =		S_IWUSR,
3790 	},
3791 	.store = ipr_store_update_fw
3792 };
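/*
 * Usage sketch (assuming SCSI host0 and a hypothetical image name). The
 * image must be placed where the firmware loader can find it, typically
 * /lib/firmware; the write blocks until the download and the accompanying
 * adapter reset finish:
 *
 *	cp ipr-ucode.bin /lib/firmware/
 *	echo ipr-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */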
3793 
3794 /**
3795  * ipr_show_fw_type - Show the adapter's firmware type.
3796  * @dev:	class device struct
3797  * @buf:	buffer
3798  *
3799  * Return value:
3800  *	number of bytes printed to buffer
3801  **/
3802 static ssize_t ipr_show_fw_type(struct device *dev,
3803 				struct device_attribute *attr, char *buf)
3804 {
3805 	struct Scsi_Host *shost = class_to_shost(dev);
3806 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3807 	unsigned long lock_flags = 0;
3808 	int len;
3809 
3810 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3811 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3812 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3813 	return len;
3814 }
3815 
3816 static struct device_attribute ipr_ioa_fw_type_attr = {
3817 	.attr = {
3818 		.name =		"fw_type",
3819 		.mode =		S_IRUGO,
3820 	},
3821 	.show = ipr_show_fw_type
3822 };
3823 
3824 static struct device_attribute *ipr_ioa_attrs[] = {
3825 	&ipr_fw_version_attr,
3826 	&ipr_log_level_attr,
3827 	&ipr_diagnostics_attr,
3828 	&ipr_ioa_state_attr,
3829 	&ipr_ioa_reset_attr,
3830 	&ipr_update_fw_attr,
3831 	&ipr_ioa_fw_type_attr,
3832 	NULL,
3833 };
3834 
3835 #ifdef CONFIG_SCSI_IPR_DUMP
3836 /**
3837  * ipr_read_dump - Dump the adapter
3838  * @filp:		open sysfs file
3839  * @kobj:		kobject struct
3840  * @bin_attr:		bin_attribute struct
3841  * @buf:		buffer
3842  * @off:		offset
3843  * @count:		buffer size
3844  *
3845  * Return value:
3846  *	number of bytes printed to buffer
3847  **/
3848 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3849 			     struct bin_attribute *bin_attr,
3850 			     char *buf, loff_t off, size_t count)
3851 {
3852 	struct device *cdev = container_of(kobj, struct device, kobj);
3853 	struct Scsi_Host *shost = class_to_shost(cdev);
3854 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3855 	struct ipr_dump *dump;
3856 	unsigned long lock_flags = 0;
3857 	char *src;
3858 	int len, sdt_end;
3859 	size_t rc = count;
3860 
3861 	if (!capable(CAP_SYS_ADMIN))
3862 		return -EACCES;
3863 
3864 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3865 	dump = ioa_cfg->dump;
3866 
3867 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3868 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3869 		return 0;
3870 	}
3871 	kref_get(&dump->kref);
3872 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3873 
3874 	if (off > dump->driver_dump.hdr.len) {
3875 		kref_put(&dump->kref, ipr_release_dump);
3876 		return 0;
3877 	}
3878 
3879 	if (off + count > dump->driver_dump.hdr.len) {
3880 		count = dump->driver_dump.hdr.len - off;
3881 		rc = count;
3882 	}
3883 
3884 	if (count && off < sizeof(dump->driver_dump)) {
3885 		if (off + count > sizeof(dump->driver_dump))
3886 			len = sizeof(dump->driver_dump) - off;
3887 		else
3888 			len = count;
3889 		src = (u8 *)&dump->driver_dump + off;
3890 		memcpy(buf, src, len);
3891 		buf += len;
3892 		off += len;
3893 		count -= len;
3894 	}
3895 
3896 	off -= sizeof(dump->driver_dump);
3897 
3898 	if (ioa_cfg->sis64)
3899 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3900 			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
3901 			   sizeof(struct ipr_sdt_entry));
3902 	else
3903 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3904 			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
3905 
3906 	if (count && off < sdt_end) {
3907 		if (off + count > sdt_end)
3908 			len = sdt_end - off;
3909 		else
3910 			len = count;
3911 		src = (u8 *)&dump->ioa_dump + off;
3912 		memcpy(buf, src, len);
3913 		buf += len;
3914 		off += len;
3915 		count -= len;
3916 	}
3917 
3918 	off -= sdt_end;
3919 
3920 	while (count) {
3921 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3922 			len = PAGE_ALIGN(off) - off;
3923 		else
3924 			len = count;
3925 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3926 		src += off & ~PAGE_MASK;
3927 		memcpy(buf, src, len);
3928 		buf += len;
3929 		off += len;
3930 		count -= len;
3931 	}
3932 
3933 	kref_put(&dump->kref, ipr_release_dump);
3934 	return rc;
3935 }
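/*
 * The dump image read back above consists of three consecutive regions,
 * which is what the offset arithmetic in ipr_read_dump() walks through:
 * first sizeof(driver_dump) bytes of driver dump header, then sdt_end
 * bytes of SDT header plus entries, then the captured IOA data. Offsets
 * past the second region index into the page array ioa_data[] one page
 * at a time, which is why reads are split at page boundaries.
 */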
3936 
3937 /**
3938  * ipr_alloc_dump - Prepare for adapter dump
3939  * @ioa_cfg:	ioa config struct
3940  *
3941  * Return value:
3942  *	0 on success / other on failure
3943  **/
3944 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3945 {
3946 	struct ipr_dump *dump;
3947 	__be32 **ioa_data;
3948 	unsigned long lock_flags = 0;
3949 
3950 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3951 
3952 	if (!dump) {
3953 		ipr_err("Dump memory allocation failed\n");
3954 		return -ENOMEM;
3955 	}
3956 
3957 	if (ioa_cfg->sis64)
3958 		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3959 	else
3960 		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3961 
3962 	if (!ioa_data) {
3963 		ipr_err("Dump memory allocation failed\n");
3964 		kfree(dump);
3965 		return -ENOMEM;
3966 	}
3967 
3968 	dump->ioa_dump.ioa_data = ioa_data;
3969 
3970 	kref_init(&dump->kref);
3971 	dump->ioa_cfg = ioa_cfg;
3972 
3973 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3974 
3975 	if (INACTIVE != ioa_cfg->sdt_state) {
3976 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3977 		vfree(dump->ioa_dump.ioa_data);
3978 		kfree(dump);
3979 		return 0;
3980 	}
3981 
3982 	ioa_cfg->dump = dump;
3983 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3984 	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3985 		ioa_cfg->dump_taken = 1;
3986 		schedule_work(&ioa_cfg->work_q);
3987 	}
3988 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3989 
3990 	return 0;
3991 }
3992 
3993 /**
3994  * ipr_free_dump - Free adapter dump memory
3995  * @ioa_cfg:	ioa config struct
3996  *
3997  * Return value:
3998  *	0 on success / other on failure
3999  **/
4000 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4001 {
4002 	struct ipr_dump *dump;
4003 	unsigned long lock_flags = 0;
4004 
4005 	ENTER;
4006 
4007 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4008 	dump = ioa_cfg->dump;
4009 	if (!dump) {
4010 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4011 		return 0;
4012 	}
4013 
4014 	ioa_cfg->dump = NULL;
4015 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4016 
4017 	kref_put(&dump->kref, ipr_release_dump);
4018 
4019 	LEAVE;
4020 	return 0;
4021 }
4022 
4023 /**
4024  * ipr_write_dump - Setup dump state of adapter
4025  * @filp:		open sysfs file
4026  * @kobj:		kobject struct
4027  * @bin_attr:		bin_attribute struct
4028  * @buf:		buffer
4029  * @off:		offset
4030  * @count:		buffer size
4031  *
4032  * Return value:
4033  *	count on success / other on failure
4034  **/
4035 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4036 			      struct bin_attribute *bin_attr,
4037 			      char *buf, loff_t off, size_t count)
4038 {
4039 	struct device *cdev = container_of(kobj, struct device, kobj);
4040 	struct Scsi_Host *shost = class_to_shost(cdev);
4041 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4042 	int rc;
4043 
4044 	if (!capable(CAP_SYS_ADMIN))
4045 		return -EACCES;
4046 
4047 	if (buf[0] == '1')
4048 		rc = ipr_alloc_dump(ioa_cfg);
4049 	else if (buf[0] == '0')
4050 		rc = ipr_free_dump(ioa_cfg);
4051 	else
4052 		return -EINVAL;
4053 
4054 	if (rc)
4055 		return rc;
4056 	else
4057 		return count;
4058 }
4059 
4060 static struct bin_attribute ipr_dump_attr = {
4061 	.attr =	{
4062 		.name = "dump",
4063 		.mode = S_IRUSR | S_IWUSR,
4064 	},
4065 	.size = 0,
4066 	.read = ipr_read_dump,
4067 	.write = ipr_write_dump
4068 };
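/*
 * Usage sketch (assuming SCSI host0): writing '1' arms dump collection,
 * the binary attribute is read back once the dump has been obtained, and
 * writing '0' frees the dump memory:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump
 *	dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr_dump bs=4k
 *	echo 0 > /sys/class/scsi_host/host0/dump
 */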
4069 #else
4070 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4071 #endif
4072 
4073 /**
4074  * ipr_change_queue_depth - Change the device's queue depth
4075  * @sdev:	scsi device struct
4076  * @qdepth:	depth to set
4077  * @reason:	calling context
4078  *
4079  * Return value:
4080  * 	actual depth set
4081  **/
4082 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4083 				  int reason)
4084 {
4085 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4086 	struct ipr_resource_entry *res;
4087 	unsigned long lock_flags = 0;
4088 
4089 	if (reason != SCSI_QDEPTH_DEFAULT)
4090 		return -EOPNOTSUPP;
4091 
4092 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4093 	res = (struct ipr_resource_entry *)sdev->hostdata;
4094 
4095 	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4096 		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4097 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4098 
4099 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4100 	return sdev->queue_depth;
4101 }
4102 
4103 /**
4104  * ipr_change_queue_type - Change the device's queue type
4105  * @sdev:		scsi device struct
4106  * @tag_type:	type of tags to use
4107  *
4108  * Return value:
4109  * 	actual queue type set
4110  **/
4111 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4112 {
4113 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4114 	struct ipr_resource_entry *res;
4115 	unsigned long lock_flags = 0;
4116 
4117 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4118 	res = (struct ipr_resource_entry *)sdev->hostdata;
4119 
4120 	if (res) {
4121 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4122 			/*
4123 			 * We don't bother quiescing the device here since the
4124 			 * adapter firmware does it for us.
4125 			 */
4126 			scsi_set_tag_type(sdev, tag_type);
4127 
4128 			if (tag_type)
4129 				scsi_activate_tcq(sdev, sdev->queue_depth);
4130 			else
4131 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
4132 		} else
4133 			tag_type = 0;
4134 	} else
4135 		tag_type = 0;
4136 
4137 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4138 	return tag_type;
4139 }
4140 
4141 /**
4142  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4143  * @dev:	device struct
4144  * @attr:	device attribute structure
4145  * @buf:	buffer
4146  *
4147  * Return value:
4148  * 	number of bytes printed to buffer
4149  **/
4150 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4151 {
4152 	struct scsi_device *sdev = to_scsi_device(dev);
4153 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4154 	struct ipr_resource_entry *res;
4155 	unsigned long lock_flags = 0;
4156 	ssize_t len = -ENXIO;
4157 
4158 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4159 	res = (struct ipr_resource_entry *)sdev->hostdata;
4160 	if (res)
4161 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4162 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4163 	return len;
4164 }
4165 
4166 static struct device_attribute ipr_adapter_handle_attr = {
4167 	.attr = {
4168 		.name = 	"adapter_handle",
4169 		.mode =		S_IRUSR,
4170 	},
4171 	.show = ipr_show_adapter_handle
4172 };
4173 
4174 /**
4175  * ipr_show_resource_path - Show the resource path or the resource address for
4176  *			    this device.
4177  * @dev:	device struct
4178  * @attr:	device attribute structure
4179  * @buf:	buffer
4180  *
4181  * Return value:
4182  * 	number of bytes printed to buffer
4183  **/
4184 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4185 {
4186 	struct scsi_device *sdev = to_scsi_device(dev);
4187 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4188 	struct ipr_resource_entry *res;
4189 	unsigned long lock_flags = 0;
4190 	ssize_t len = -ENXIO;
4191 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4192 
4193 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4194 	res = (struct ipr_resource_entry *)sdev->hostdata;
4195 	if (res && ioa_cfg->sis64)
4196 		len = snprintf(buf, PAGE_SIZE, "%s\n",
4197 			       ipr_format_res_path(res->res_path, buffer,
4198 						   sizeof(buffer)));
4199 	else if (res)
4200 		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4201 			       res->bus, res->target, res->lun);
4202 
4203 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4204 	return len;
4205 }
4206 
4207 static struct device_attribute ipr_resource_path_attr = {
4208 	.attr = {
4209 		.name = 	"resource_path",
4210 		.mode =		S_IRUGO,
4211 	},
4212 	.show = ipr_show_resource_path
4213 };
4214 
4215 /**
4216  * ipr_show_device_id - Show the device_id for this device.
4217  * @dev:	device struct
4218  * @attr:	device attribute structure
4219  * @buf:	buffer
4220  *
4221  * Return value:
4222  *	number of bytes printed to buffer
4223  **/
4224 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4225 {
4226 	struct scsi_device *sdev = to_scsi_device(dev);
4227 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4228 	struct ipr_resource_entry *res;
4229 	unsigned long lock_flags = 0;
4230 	ssize_t len = -ENXIO;
4231 
4232 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4233 	res = (struct ipr_resource_entry *)sdev->hostdata;
4234 	if (res && ioa_cfg->sis64)
4235 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4236 	else if (res)
4237 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4238 
4239 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4240 	return len;
4241 }
4242 
4243 static struct device_attribute ipr_device_id_attr = {
4244 	.attr = {
4245 		.name =		"device_id",
4246 		.mode =		S_IRUGO,
4247 	},
4248 	.show = ipr_show_device_id
4249 };
4250 
4251 /**
4252  * ipr_show_resource_type - Show the resource type for this device.
4253  * @dev:	device struct
4254  * @attr:	device attribute structure
4255  * @buf:	buffer
4256  *
4257  * Return value:
4258  *	number of bytes printed to buffer
4259  **/
4260 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4261 {
4262 	struct scsi_device *sdev = to_scsi_device(dev);
4263 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4264 	struct ipr_resource_entry *res;
4265 	unsigned long lock_flags = 0;
4266 	ssize_t len = -ENXIO;
4267 
4268 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4269 	res = (struct ipr_resource_entry *)sdev->hostdata;
4270 
4271 	if (res)
4272 		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4273 
4274 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4275 	return len;
4276 }
4277 
4278 static struct device_attribute ipr_resource_type_attr = {
4279 	.attr = {
4280 		.name =		"resource_type",
4281 		.mode =		S_IRUGO,
4282 	},
4283 	.show = ipr_show_resource_type
4284 };
4285 
4286 static struct device_attribute *ipr_dev_attrs[] = {
4287 	&ipr_adapter_handle_attr,
4288 	&ipr_resource_path_attr,
4289 	&ipr_device_id_attr,
4290 	&ipr_resource_type_attr,
4291 	NULL,
4292 };
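/*
 * Usage sketch: the per-device attributes above appear under each SCSI
 * device's sysfs directory (paths shown for a hypothetical 2:0:1:0
 * device):
 *
 *	cat /sys/bus/scsi/devices/2:0:1:0/resource_path
 *	cat /sys/bus/scsi/devices/2:0:1:0/device_id
 */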
4293 
4294 /**
4295  * ipr_biosparam - Return the HSC mapping
4296  * @sdev:			scsi device struct
4297  * @block_device:	block device pointer
4298  * @capacity:		capacity of the device
4299  * @parm:			Array containing returned HSC values.
4300  *
4301  * This function generates the HSC parms that fdisk uses.
4302  * We want to make sure we return something that places partitions
4303  * on 4k boundaries for best performance with the IOA.
4304  *
4305  * Return value:
4306  * 	0 on success
4307  **/
4308 static int ipr_biosparam(struct scsi_device *sdev,
4309 			 struct block_device *block_device,
4310 			 sector_t capacity, int *parm)
4311 {
4312 	int heads, sectors;
4313 	sector_t cylinders;
4314 
4315 	heads = 128;
4316 	sectors = 32;
4317 
4318 	cylinders = capacity;
4319 	sector_div(cylinders, (128 * 32));
4320 
4321 	/* return result */
4322 	parm[0] = heads;
4323 	parm[1] = sectors;
4324 	parm[2] = cylinders;
4325 
4326 	return 0;
4327 }
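/*
 * Worked example: with 128 heads and 32 sectors, each cylinder covers
 * 128 * 32 = 4096 sectors (2 MB at 512 bytes/sector), so cylinder
 * boundaries always fall on 4k multiples. A 4 GiB disk of 8388608
 * sectors therefore reports 8388608 / 4096 = 2048 cylinders.
 */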
4328 
4329 /**
4330  * ipr_find_starget - Find target based on bus/target.
4331  * @starget:	scsi target struct
4332  *
4333  * Return value:
4334  * 	resource entry pointer if found / NULL if not found
4335  **/
4336 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4337 {
4338 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4339 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4340 	struct ipr_resource_entry *res;
4341 
4342 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4343 		if ((res->bus == starget->channel) &&
4344 		    (res->target == starget->id)) {
4345 			return res;
4346 		}
4347 	}
4348 
4349 	return NULL;
4350 }
4351 
4352 static struct ata_port_info sata_port_info;
4353 
4354 /**
4355  * ipr_target_alloc - Prepare for commands to a SCSI target
4356  * @starget:	scsi target struct
4357  *
4358  * If the device is a SATA device, this function allocates an
4359  * ATA port with libata, else it does nothing.
4360  *
4361  * Return value:
4362  * 	0 on success / non-0 on failure
4363  **/
4364 static int ipr_target_alloc(struct scsi_target *starget)
4365 {
4366 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4367 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4368 	struct ipr_sata_port *sata_port;
4369 	struct ata_port *ap;
4370 	struct ipr_resource_entry *res;
4371 	unsigned long lock_flags;
4372 
4373 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4374 	res = ipr_find_starget(starget);
4375 	starget->hostdata = NULL;
4376 
4377 	if (res && ipr_is_gata(res)) {
4378 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4379 		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4380 		if (!sata_port)
4381 			return -ENOMEM;
4382 
4383 		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4384 		if (ap) {
4385 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4386 			sata_port->ioa_cfg = ioa_cfg;
4387 			sata_port->ap = ap;
4388 			sata_port->res = res;
4389 
4390 			res->sata_port = sata_port;
4391 			ap->private_data = sata_port;
4392 			starget->hostdata = sata_port;
4393 		} else {
4394 			kfree(sata_port);
4395 			return -ENOMEM;
4396 		}
4397 	}
4398 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4399 
4400 	return 0;
4401 }
4402 
4403 /**
4404  * ipr_target_destroy - Destroy a SCSI target
4405  * @starget:	scsi target struct
4406  *
4407  * If the device was a SATA device, this function frees the libata
4408  * ATA port, else it does nothing.
4409  *
4410  **/
4411 static void ipr_target_destroy(struct scsi_target *starget)
4412 {
4413 	struct ipr_sata_port *sata_port = starget->hostdata;
4414 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4415 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4416 
4417 	if (ioa_cfg->sis64) {
4418 		if (!ipr_find_starget(starget)) {
4419 			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4420 				clear_bit(starget->id, ioa_cfg->array_ids);
4421 			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4422 				clear_bit(starget->id, ioa_cfg->vset_ids);
4423 			else if (starget->channel == 0)
4424 				clear_bit(starget->id, ioa_cfg->target_ids);
4425 		}
4426 	}
4427 
4428 	if (sata_port) {
4429 		starget->hostdata = NULL;
4430 		ata_sas_port_destroy(sata_port->ap);
4431 		kfree(sata_port);
4432 	}
4433 }
4434 
4435 /**
4436  * ipr_find_sdev - Find device based on bus/target/lun.
4437  * @sdev:	scsi device struct
4438  *
4439  * Return value:
4440  * 	resource entry pointer if found / NULL if not found
4441  **/
4442 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4443 {
4444 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4445 	struct ipr_resource_entry *res;
4446 
4447 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4448 		if ((res->bus == sdev->channel) &&
4449 		    (res->target == sdev->id) &&
4450 		    (res->lun == sdev->lun))
4451 			return res;
4452 	}
4453 
4454 	return NULL;
4455 }
4456 
4457 /**
4458  * ipr_slave_destroy - Unconfigure a SCSI device
4459  * @sdev:	scsi device struct
4460  *
4461  * Return value:
4462  * 	nothing
4463  **/
4464 static void ipr_slave_destroy(struct scsi_device *sdev)
4465 {
4466 	struct ipr_resource_entry *res;
4467 	struct ipr_ioa_cfg *ioa_cfg;
4468 	unsigned long lock_flags = 0;
4469 
4470 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4471 
4472 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4473 	res = (struct ipr_resource_entry *) sdev->hostdata;
4474 	if (res) {
4475 		if (res->sata_port)
4476 			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4477 		sdev->hostdata = NULL;
4478 		res->sdev = NULL;
4479 		res->sata_port = NULL;
4480 	}
4481 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4482 }
4483 
4484 /**
4485  * ipr_slave_configure - Configure a SCSI device
4486  * @sdev:	scsi device struct
4487  *
4488  * This function configures the specified scsi device.
4489  *
4490  * Return value:
4491  * 	0 on success
4492  **/
4493 static int ipr_slave_configure(struct scsi_device *sdev)
4494 {
4495 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4496 	struct ipr_resource_entry *res;
4497 	struct ata_port *ap = NULL;
4498 	unsigned long lock_flags = 0;
4499 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4500 
4501 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4502 	res = sdev->hostdata;
4503 	if (res) {
4504 		if (ipr_is_af_dasd_device(res))
4505 			sdev->type = TYPE_RAID;
4506 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4507 			sdev->scsi_level = 4;
4508 			sdev->no_uld_attach = 1;
4509 		}
4510 		if (ipr_is_vset_device(res)) {
4511 			blk_queue_rq_timeout(sdev->request_queue,
4512 					     IPR_VSET_RW_TIMEOUT);
4513 			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4514 		}
4515 		if (ipr_is_gata(res) && res->sata_port)
4516 			ap = res->sata_port->ap;
4517 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4518 
4519 		if (ap) {
4520 			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4521 			ata_sas_slave_configure(sdev, ap);
4522 		} else
4523 			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4524 		if (ioa_cfg->sis64)
4525 			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4526 				    ipr_format_res_path(res->res_path, buffer,
4527 							sizeof(buffer)));
4528 		return 0;
4529 	}
4530 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4531 	return 0;
4532 }
4533 
4534 /**
4535  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4536  * @sdev:	scsi device struct
4537  *
4538  * This function initializes an ATA port so that future commands
4539  * sent through queuecommand will work.
4540  *
4541  * Return value:
4542  * 	0 on success
4543  **/
4544 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4545 {
4546 	struct ipr_sata_port *sata_port = NULL;
4547 	int rc = -ENXIO;
4548 
4549 	ENTER;
4550 	if (sdev->sdev_target)
4551 		sata_port = sdev->sdev_target->hostdata;
4552 	if (sata_port) {
4553 		rc = ata_sas_port_init(sata_port->ap);
4554 		if (rc == 0)
4555 			rc = ata_sas_sync_probe(sata_port->ap);
4556 	}
4557 
4558 	if (rc)
4559 		ipr_slave_destroy(sdev);
4560 
4561 	LEAVE;
4562 	return rc;
4563 }
4564 
4565 /**
4566  * ipr_slave_alloc - Prepare for commands to a device.
4567  * @sdev:	scsi device struct
4568  *
4569  * This function saves a pointer to the resource entry
4570  * in the scsi device struct if the device exists. We
4571  * can then use this pointer in ipr_queuecommand when
4572  * handling new commands.
4573  *
4574  * Return value:
4575  * 	0 on success / -ENXIO if device does not exist
4576  **/
4577 static int ipr_slave_alloc(struct scsi_device *sdev)
4578 {
4579 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4580 	struct ipr_resource_entry *res;
4581 	unsigned long lock_flags;
4582 	int rc = -ENXIO;
4583 
4584 	sdev->hostdata = NULL;
4585 
4586 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4587 
4588 	res = ipr_find_sdev(sdev);
4589 	if (res) {
4590 		res->sdev = sdev;
4591 		res->add_to_ml = 0;
4592 		res->in_erp = 0;
4593 		sdev->hostdata = res;
4594 		if (!ipr_is_naca_model(res))
4595 			res->needs_sync_complete = 1;
4596 		rc = 0;
4597 		if (ipr_is_gata(res)) {
4598 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4599 			return ipr_ata_slave_alloc(sdev);
4600 		}
4601 	}
4602 
4603 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4604 
4605 	return rc;
4606 }
4607 
4608 /**
4609  * ipr_eh_host_reset - Reset the host adapter
4610  * @scsi_cmd:	scsi command struct
4611  *
4612  * Return value:
4613  * 	SUCCESS / FAILED
4614  **/
4615 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4616 {
4617 	struct ipr_ioa_cfg *ioa_cfg;
4618 	int rc;
4619 
4620 	ENTER;
4621 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4622 
4623 	if (!ioa_cfg->in_reset_reload) {
4624 		dev_err(&ioa_cfg->pdev->dev,
4625 			"Adapter being reset as a result of error recovery.\n");
4626 
4627 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4628 			ioa_cfg->sdt_state = GET_DUMP;
4629 	}
4630 
4631 	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4632 
4633 	LEAVE;
4634 	return rc;
4635 }
4636 
4637 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4638 {
4639 	int rc;
4640 
4641 	spin_lock_irq(cmd->device->host->host_lock);
4642 	rc = __ipr_eh_host_reset(cmd);
4643 	spin_unlock_irq(cmd->device->host->host_lock);
4644 
4645 	return rc;
4646 }
4647 
4648 /**
4649  * ipr_device_reset - Reset the device
4650  * @ioa_cfg:	ioa config struct
4651  * @res:		resource entry struct
4652  *
4653  * This function issues a device reset to the affected device.
4654  * If the device is a SCSI device, a LUN reset will be sent
4655  * to the device first. If that does not work, a target reset
4656  * will be sent. If the device is a SATA device, a PHY reset will
4657  * be sent.
4658  *
4659  * Return value:
4660  *	0 on success / non-zero on failure
4661  **/
4662 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4663 			    struct ipr_resource_entry *res)
4664 {
4665 	struct ipr_cmnd *ipr_cmd;
4666 	struct ipr_ioarcb *ioarcb;
4667 	struct ipr_cmd_pkt *cmd_pkt;
4668 	struct ipr_ioarcb_ata_regs *regs;
4669 	u32 ioasc;
4670 
4671 	ENTER;
4672 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4673 	ioarcb = &ipr_cmd->ioarcb;
4674 	cmd_pkt = &ioarcb->cmd_pkt;
4675 
4676 	if (ipr_cmd->ioa_cfg->sis64) {
4677 		regs = &ipr_cmd->i.ata_ioadl.regs;
4678 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4679 	} else
4680 		regs = &ioarcb->u.add_data.u.regs;
4681 
4682 	ioarcb->res_handle = res->res_handle;
4683 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4684 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4685 	if (ipr_is_gata(res)) {
4686 		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4687 		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4688 		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4689 	}
4690 
4691 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4692 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4693 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4694 	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4695 		if (ipr_cmd->ioa_cfg->sis64)
4696 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4697 			       sizeof(struct ipr_ioasa_gata));
4698 		else
4699 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4700 			       sizeof(struct ipr_ioasa_gata));
4701 	}
4702 
4703 	LEAVE;
4704 	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4705 }
4706 
4707 /**
4708  * ipr_sata_reset - Reset the SATA port
4709  * @link:	SATA link to reset
4710  * @classes:	class of the attached device
4711  *
4712  * This function issues a SATA phy reset to the affected ATA link.
4713  *
4714  * Return value:
4715  *	0 on success / non-zero on failure
4716  **/
4717 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4718 				unsigned long deadline)
4719 {
4720 	struct ipr_sata_port *sata_port = link->ap->private_data;
4721 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4722 	struct ipr_resource_entry *res;
4723 	unsigned long lock_flags = 0;
4724 	int rc = -ENXIO;
4725 
4726 	ENTER;
4727 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4728 	while(ioa_cfg->in_reset_reload) {
4729 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4730 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4731 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4732 	}
4733 
4734 	res = sata_port->res;
4735 	if (res) {
4736 		rc = ipr_device_reset(ioa_cfg, res);
4737 		*classes = res->ata_class;
4738 	}
4739 
4740 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4741 	LEAVE;
4742 	return rc;
4743 }
4744 
4745 /**
4746  * ipr_eh_dev_reset - Reset the device
4747  * @scsi_cmd:	scsi command struct
4748  *
4749  * This function issues a device reset to the affected device.
4750  * A LUN reset will be sent to the device first. If that does
4751  * not work, a target reset will be sent.
4752  *
4753  * Return value:
4754  *	SUCCESS / FAILED
4755  **/
4756 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4757 {
4758 	struct ipr_cmnd *ipr_cmd;
4759 	struct ipr_ioa_cfg *ioa_cfg;
4760 	struct ipr_resource_entry *res;
4761 	struct ata_port *ap;
4762 	int rc = 0;
4763 
4764 	ENTER;
4765 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4766 	res = scsi_cmd->device->hostdata;
4767 
4768 	if (!res)
4769 		return FAILED;
4770 
4771 	/*
4772 	 * If we are currently going through reset/reload, return failed. This will force the
4773 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4774 	 * reset to complete
4775 	 */
4776 	if (ioa_cfg->in_reset_reload)
4777 		return FAILED;
4778 	if (ioa_cfg->ioa_is_dead)
4779 		return FAILED;
4780 
4781 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4782 		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4783 			if (ipr_cmd->scsi_cmd)
4784 				ipr_cmd->done = ipr_scsi_eh_done;
4785 			if (ipr_cmd->qc)
4786 				ipr_cmd->done = ipr_sata_eh_done;
4787 			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4788 				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4789 				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4790 			}
4791 		}
4792 	}
4793 
4794 	res->resetting_device = 1;
4795 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4796 
4797 	if (ipr_is_gata(res) && res->sata_port) {
4798 		ap = res->sata_port->ap;
4799 		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4800 		ata_std_error_handler(ap);
4801 		spin_lock_irq(scsi_cmd->device->host->host_lock);
4802 
4803 		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4804 			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4805 				rc = -EIO;
4806 				break;
4807 			}
4808 		}
4809 	} else
4810 		rc = ipr_device_reset(ioa_cfg, res);
4811 	res->resetting_device = 0;
4812 
4813 	LEAVE;
4814 	return (rc ? FAILED : SUCCESS);
4815 }
4816 
4817 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4818 {
4819 	int rc;
4820 
4821 	spin_lock_irq(cmd->device->host->host_lock);
4822 	rc = __ipr_eh_dev_reset(cmd);
4823 	spin_unlock_irq(cmd->device->host->host_lock);
4824 
4825 	return rc;
4826 }
4827 
4828 /**
4829  * ipr_bus_reset_done - Op done function for bus reset.
4830  * @ipr_cmd:	ipr command struct
4831  *
4832  * This function is the op done function for a bus reset
4833  *
4834  * Return value:
4835  * 	none
4836  **/
4837 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4838 {
4839 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4840 	struct ipr_resource_entry *res;
4841 
4842 	ENTER;
4843 	if (!ioa_cfg->sis64)
4844 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4845 			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4846 				scsi_report_bus_reset(ioa_cfg->host, res->bus);
4847 				break;
4848 			}
4849 		}
4850 
4851 	/*
4852 	 * If abort has not completed, indicate the reset has, else call the
4853 	 * abort's done function to wake the sleeping eh thread
4854 	 */
4855 	if (ipr_cmd->sibling->sibling)
4856 		ipr_cmd->sibling->sibling = NULL;
4857 	else
4858 		ipr_cmd->sibling->done(ipr_cmd->sibling);
4859 
4860 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4861 	LEAVE;
4862 }
4863 
4864 /**
4865  * ipr_abort_timeout - An abort task has timed out
4866  * @ipr_cmd:	ipr command struct
4867  *
4868  * This function handles when an abort task times out. If this
4869  * happens we issue a bus reset since we have resources tied
4870  * up that must be freed before returning to the midlayer.
4871  *
4872  * Return value:
4873  *	none
4874  **/
4875 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4876 {
4877 	struct ipr_cmnd *reset_cmd;
4878 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4879 	struct ipr_cmd_pkt *cmd_pkt;
4880 	unsigned long lock_flags = 0;
4881 
4882 	ENTER;
4883 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4884 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4885 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4886 		return;
4887 	}
4888 
4889 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4890 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4891 	ipr_cmd->sibling = reset_cmd;
4892 	reset_cmd->sibling = ipr_cmd;
4893 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4894 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4895 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4896 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4897 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4898 
4899 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4900 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4901 	LEAVE;
4902 }
4903 
4904 /**
4905  * ipr_cancel_op - Cancel specified op
4906  * @scsi_cmd:	scsi command struct
4907  *
4908  * This function cancels specified op.
4909  *
4910  * Return value:
4911  *	SUCCESS / FAILED
4912  **/
4913 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4914 {
4915 	struct ipr_cmnd *ipr_cmd;
4916 	struct ipr_ioa_cfg *ioa_cfg;
4917 	struct ipr_resource_entry *res;
4918 	struct ipr_cmd_pkt *cmd_pkt;
4919 	u32 ioasc, int_reg;
4920 	int op_found = 0;
4921 
4922 	ENTER;
4923 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4924 	res = scsi_cmd->device->hostdata;
4925 
4926 	/* If we are currently going through reset/reload, return failed.
4927 	 * This will force the mid-layer to call ipr_eh_host_reset,
4928 	 * which will then go to sleep and wait for the reset to complete
4929 	 */
4930 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4931 		return FAILED;
4932 	if (!res)
4933 		return FAILED;
4934 
4935 	/*
4936 	 * If we are aborting a timed out op, chances are that the timeout was caused
4937 	 * by an EEH error that has not yet been detected. In such cases, reading a
4938 	 * register will trigger the EEH recovery infrastructure.
4939 	 */
4940 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4941 
4942 	if (!ipr_is_gscsi(res))
4943 		return FAILED;
4944 
4945 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4946 		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4947 			ipr_cmd->done = ipr_scsi_eh_done;
4948 			op_found = 1;
4949 			break;
4950 		}
4951 	}
4952 
4953 	if (!op_found)
4954 		return SUCCESS;
4955 
4956 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4957 	ipr_cmd->ioarcb.res_handle = res->res_handle;
4958 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4959 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4960 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4961 	ipr_cmd->u.sdev = scsi_cmd->device;
4962 
4963 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4964 		    scsi_cmd->cmnd[0]);
4965 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4966 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4967 
4968 	/*
4969 	 * If the abort task timed out and we sent a bus reset, we will get
4970 	 * one of the following responses to the abort
4971 	 */
4972 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4973 		ioasc = 0;
4974 		ipr_trace;
4975 	}
4976 
4977 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4978 	if (!ipr_is_naca_model(res))
4979 		res->needs_sync_complete = 1;
4980 
4981 	LEAVE;
4982 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4983 }
4984 
4985 /**
4986  * ipr_eh_abort - Abort a single op
4987  * @scsi_cmd:	scsi command struct
4988  *
4989  * Return value:
4990  * 	SUCCESS / FAILED
4991  **/
4992 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4993 {
4994 	unsigned long flags;
4995 	int rc;
4996 
4997 	ENTER;
4998 
4999 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5000 	rc = ipr_cancel_op(scsi_cmd);
5001 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5002 
5003 	LEAVE;
5004 	return rc;
5005 }
5006 
5007 /**
5008  * ipr_handle_other_interrupt - Handle "other" interrupts
5009  * @ioa_cfg:	ioa config struct
5010  * @int_reg:	interrupt register
5011  *
5012  * Return value:
5013  * 	IRQ_NONE / IRQ_HANDLED
5014  **/
5015 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5016 					      u32 int_reg)
5017 {
5018 	irqreturn_t rc = IRQ_HANDLED;
5019 	u32 int_mask_reg;
5020 
5021 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5022 	int_reg &= ~int_mask_reg;
5023 
5024 	/* If an operational interrupt did not occur on the adapter, ignore it.
5025 	 * On SIS 64, however, also check for a stage change interrupt.
5026 	 */
5027 	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5028 		if (ioa_cfg->sis64) {
5029 			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5030 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5031 			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5032 
5033 				/* clear stage change */
5034 				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5035 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5036 				list_del(&ioa_cfg->reset_cmd->queue);
5037 				del_timer(&ioa_cfg->reset_cmd->timer);
5038 				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5039 				return IRQ_HANDLED;
5040 			}
5041 		}
5042 
5043 		return IRQ_NONE;
5044 	}
5045 
5046 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5047 		/* Mask the interrupt */
5048 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5049 
5050 		/* Clear the interrupt */
5051 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5052 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5053 
5054 		list_del(&ioa_cfg->reset_cmd->queue);
5055 		del_timer(&ioa_cfg->reset_cmd->timer);
5056 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5057 	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5058 		if (ioa_cfg->clear_isr) {
5059 			if (ipr_debug && printk_ratelimit())
5060 				dev_err(&ioa_cfg->pdev->dev,
5061 					"Spurious interrupt detected. 0x%08X\n", int_reg);
5062 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5063 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5064 			return IRQ_NONE;
5065 		}
5066 	} else {
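		/*
		 * Anything else is a fatal adapter condition: either a unit
		 * check, meaning the IOA failed and may have dump data to
		 * collect, or a permanent IOA failure. Reset the adapter.
		 */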
5067 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5068 			ioa_cfg->ioa_unit_checked = 1;
5069 		else
5070 			dev_err(&ioa_cfg->pdev->dev,
5071 				"Permanent IOA failure. 0x%08X\n", int_reg);
5072 
5073 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5074 			ioa_cfg->sdt_state = GET_DUMP;
5075 
5076 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5077 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5078 	}
5079 
5080 	return rc;
5081 }
5082 
5083 /**
5084  * ipr_isr_eh - Interrupt service routine error handler
5085  * @ioa_cfg:	ioa config struct
5086  * @msg:	message to log
5087  *
5088  * Return value:
5089  * 	none
5090  **/
5091 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5092 {
5093 	ioa_cfg->errors_logged++;
5094 	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
5095 
5096 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5097 		ioa_cfg->sdt_state = GET_DUMP;
5098 
5099 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5100 }
5101 
5102 /**
5103  * ipr_isr - Interrupt service routine
5104  * @irq:	irq number
5105  * @devp:	pointer to ioa config struct
5106  *
5107  * Return value:
5108  * 	IRQ_NONE / IRQ_HANDLED
5109  **/
5110 static irqreturn_t ipr_isr(int irq, void *devp)
5111 {
5112 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5113 	unsigned long lock_flags = 0;
5114 	u32 int_reg = 0;
5115 	u32 ioasc;
5116 	u16 cmd_index;
5117 	int num_hrrq = 0;
5118 	int irq_none = 0;
5119 	struct ipr_cmnd *ipr_cmd;
5120 	irqreturn_t rc = IRQ_NONE;
5121 
5122 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5123 
5124 	/* If interrupts are disabled, ignore the interrupt */
5125 	if (!ioa_cfg->allow_interrupts) {
5126 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5127 		return IRQ_NONE;
5128 	}
5129 
5130 	while (1) {
5131 		ipr_cmd = NULL;
5132 
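		/*
		 * The host RRQ is a circular queue of response handles. An
		 * entry is new when its toggle bit matches ours; the bit is
		 * flipped each time the queue wraps.
		 */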
5133 		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5134 		       ioa_cfg->toggle_bit) {
5135 
5136 			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5137 				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5138 
5139 			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5140 				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5141 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5142 				return IRQ_HANDLED;
5143 			}
5144 
5145 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5146 
5147 			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5148 
5149 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5150 
5151 			list_del(&ipr_cmd->queue);
5152 			del_timer(&ipr_cmd->timer);
5153 			ipr_cmd->done(ipr_cmd);
5154 
5155 			rc = IRQ_HANDLED;
5156 
5157 			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5158 				ioa_cfg->hrrq_curr++;
5159 			} else {
5160 				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5161 				ioa_cfg->toggle_bit ^= 1u;
5162 			}
5163 		}
5164 
5165 		if (ipr_cmd && !ioa_cfg->clear_isr)
5166 			break;
5167 
5168 		if (ipr_cmd != NULL) {
5169 			/* Clear the PCI interrupt */
5170 			num_hrrq = 0;
5171 			do {
5172 				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5173 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5174 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5175 					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5176 
5177 		} else if (rc == IRQ_NONE && irq_none == 0) {
5178 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5179 			irq_none++;
5180 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5181 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
5182 			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5183 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5184 			return IRQ_HANDLED;
5185 		} else
5186 			break;
5187 	}
5188 
5189 	if (unlikely(rc == IRQ_NONE))
5190 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5191 
5192 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5193 	return rc;
5194 }
5195 
5196 /**
5197  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5198  * @ioa_cfg:	ioa config struct
5199  * @ipr_cmd:	ipr command struct
5200  *
5201  * Return value:
5202  * 	0 on success / -1 on failure
5203  **/
5204 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5205 			     struct ipr_cmnd *ipr_cmd)
5206 {
5207 	int i, nseg;
5208 	struct scatterlist *sg;
5209 	u32 length;
5210 	u32 ioadl_flags = 0;
5211 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5212 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5213 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5214 
5215 	length = scsi_bufflen(scsi_cmd);
5216 	if (!length)
5217 		return 0;
5218 
5219 	nseg = scsi_dma_map(scsi_cmd);
5220 	if (nseg < 0) {
5221 		if (printk_ratelimit())
5222 			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5223 		return -1;
5224 	}
5225 
5226 	ipr_cmd->dma_use_sg = nseg;
5227 
5228 	ioarcb->data_transfer_length = cpu_to_be32(length);
5229 	ioarcb->ioadl_len =
5230 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5231 
5232 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5233 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5234 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5235 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5236 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5237 
5238 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5239 		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5240 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5241 		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5242 	}
5243 
5244 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5245 	return 0;
5246 }
5247 
5248 /**
5249  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5250  * @ioa_cfg:	ioa config struct
5251  * @ipr_cmd:	ipr command struct
5252  *
5253  * Return value:
5254  * 	0 on success / -1 on failure
5255  **/
5256 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5257 			   struct ipr_cmnd *ipr_cmd)
5258 {
5259 	int i, nseg;
5260 	struct scatterlist *sg;
5261 	u32 length;
5262 	u32 ioadl_flags = 0;
5263 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5264 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5265 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5266 
5267 	length = scsi_bufflen(scsi_cmd);
5268 	if (!length)
5269 		return 0;
5270 
5271 	nseg = scsi_dma_map(scsi_cmd);
5272 	if (nseg < 0) {
5273 		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5274 		return -1;
5275 	}
5276 
5277 	ipr_cmd->dma_use_sg = nseg;
5278 
5279 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5280 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5281 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5282 		ioarcb->data_transfer_length = cpu_to_be32(length);
5283 		ioarcb->ioadl_len =
5284 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5285 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5286 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5287 		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5288 		ioarcb->read_ioadl_len =
5289 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5290 	}
5291 
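	/*
	 * If the scatter/gather list fits in the IOARCB's inline descriptor
	 * area, use it so the adapter need not fetch a separate IOADL from
	 * memory.
	 */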
5292 	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5293 		ioadl = ioarcb->u.add_data.u.ioadl;
5294 		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5295 				    offsetof(struct ipr_ioarcb, u.add_data));
5296 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5297 	}
5298 
5299 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5300 		ioadl[i].flags_and_data_len =
5301 			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5302 		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5303 	}
5304 
5305 	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5306 	return 0;
5307 }
5308 
5309 /**
5310  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5311  * @scsi_cmd:	scsi command struct
5312  *
5313  * Return value:
5314  * 	task attributes
5315  **/
5316 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5317 {
5318 	u8 tag[2];
5319 	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5320 
5321 	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5322 		switch (tag[0]) {
5323 		case MSG_SIMPLE_TAG:
5324 			rc = IPR_FLAGS_LO_SIMPLE_TASK;
5325 			break;
5326 		case MSG_HEAD_TAG:
5327 			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5328 			break;
5329 		case MSG_ORDERED_TAG:
5330 			rc = IPR_FLAGS_LO_ORDERED_TASK;
5331 			break;
5332 		}
5333 	}
5334 
5335 	return rc;
5336 }
5337 
5338 /**
5339  * ipr_erp_done - Process completion of ERP for a device
5340  * @ipr_cmd:		ipr command struct
5341  *
5342  * This function copies the sense buffer into the scsi_cmd
5343  * struct and pushes the scsi_done function.
5344  *
5345  * Return value:
5346  * 	nothing
5347  **/
5348 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5349 {
5350 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5351 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5352 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5353 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5354 
5355 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5356 		scsi_cmd->result |= (DID_ERROR << 16);
5357 		scmd_printk(KERN_ERR, scsi_cmd,
5358 			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5359 	} else {
5360 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5361 		       SCSI_SENSE_BUFFERSIZE);
5362 	}
5363 
5364 	if (res) {
5365 		if (!ipr_is_naca_model(res))
5366 			res->needs_sync_complete = 1;
5367 		res->in_erp = 0;
5368 	}
5369 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5370 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5371 	scsi_cmd->scsi_done(scsi_cmd);
5372 }
5373 
5374 /**
5375  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5376  * @ipr_cmd:	ipr command struct
5377  *
5378  * Return value:
5379  * 	none
5380  **/
5381 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5382 {
5383 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5384 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5385 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5386 
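	/*
	 * Clear the command packet and status areas, then point the IOADL
	 * address back at this command's own descriptor space so the block
	 * can be reused for the ERP request.
	 */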
5387 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5388 	ioarcb->data_transfer_length = 0;
5389 	ioarcb->read_data_transfer_length = 0;
5390 	ioarcb->ioadl_len = 0;
5391 	ioarcb->read_ioadl_len = 0;
5392 	ioasa->hdr.ioasc = 0;
5393 	ioasa->hdr.residual_data_len = 0;
5394 
5395 	if (ipr_cmd->ioa_cfg->sis64)
5396 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5397 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5398 	else {
5399 		ioarcb->write_ioadl_addr =
5400 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5401 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5402 	}
5403 }
5404 
5405 /**
5406  * ipr_erp_request_sense - Send request sense to a device
5407  * @ipr_cmd:	ipr command struct
5408  *
5409  * This function sends a request sense to a device as a result
5410  * of a check condition.
5411  *
5412  * Return value:
5413  * 	nothing
5414  **/
5415 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5416 {
5417 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5418 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5419 
5420 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5421 		ipr_erp_done(ipr_cmd);
5422 		return;
5423 	}
5424 
5425 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5426 
5427 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5428 	cmd_pkt->cdb[0] = REQUEST_SENSE;
5429 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5430 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5431 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5432 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5433 
5434 	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5435 		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5436 
5437 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5438 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5439 }
5440 
5441 /**
5442  * ipr_erp_cancel_all - Send cancel all to a device
5443  * @ipr_cmd:	ipr command struct
5444  *
5445  * This function sends a cancel all to a device to clear the
5446  * queue. If we are running TCQ on the device, QERR is set to 1,
5447  * which means all outstanding ops have been dropped on the floor.
5448  * Cancel all will return them to us.
5449  *
5450  * Return value:
5451  * 	nothing
5452  **/
5453 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5454 {
5455 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5456 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5457 	struct ipr_cmd_pkt *cmd_pkt;
5458 
5459 	res->in_erp = 1;
5460 
5461 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5462 
5463 	if (!scsi_get_tag_type(scsi_cmd->device)) {
5464 		ipr_erp_request_sense(ipr_cmd);
5465 		return;
5466 	}
5467 
5468 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5469 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5470 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5471 
5472 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5473 		   IPR_CANCEL_ALL_TIMEOUT);
5474 }
5475 
5476 /**
5477  * ipr_dump_ioasa - Dump contents of IOASA
5478  * @ioa_cfg:	ioa config struct
5479  * @ipr_cmd:	ipr command struct
5480  * @res:		resource entry struct
5481  *
5482  * This function is invoked by the interrupt handler when ops
5483  * fail. It will log the IOASA if appropriate. Only called
5484  * for GPDD ops.
5485  *
5486  * Return value:
5487  * 	none
5488  **/
5489 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5490 			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5491 {
5492 	int i;
5493 	u16 data_len;
5494 	u32 ioasc, fd_ioasc;
5495 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5496 	__be32 *ioasa_data = (__be32 *)ioasa;
5497 	int error_index;
5498 
5499 	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5500 	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5501 
5502 	if (0 == ioasc)
5503 		return;
5504 
5505 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5506 		return;
5507 
5508 	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5509 		error_index = ipr_get_error(fd_ioasc);
5510 	else
5511 		error_index = ipr_get_error(ioasc);
5512 
5513 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5514 		/* Don't log an error if the IOA already logged one */
5515 		if (ioasa->hdr.ilid != 0)
5516 			return;
5517 
5518 		if (!ipr_is_gscsi(res))
5519 			return;
5520 
5521 		if (ipr_error_table[error_index].log_ioasa == 0)
5522 			return;
5523 	}
5524 
5525 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5526 
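	/* Clamp the dump length to the size of the IOASA format in use */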
5527 	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5528 	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5529 		data_len = sizeof(struct ipr_ioasa64);
5530 	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5531 		data_len = sizeof(struct ipr_ioasa);
5532 
5533 	ipr_err("IOASA Dump:\n");
5534 
5535 	for (i = 0; i < data_len / 4; i += 4) {
5536 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5537 			be32_to_cpu(ioasa_data[i]),
5538 			be32_to_cpu(ioasa_data[i+1]),
5539 			be32_to_cpu(ioasa_data[i+2]),
5540 			be32_to_cpu(ioasa_data[i+3]));
5541 	}
5542 }
5543 
5544 /**
5545  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5546  * @ipr_cmd:	ipr command struct
5548  *
5549  * Return value:
5550  * 	none
5551  **/
5552 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5553 {
5554 	u32 failing_lba;
5555 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5556 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5557 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5558 	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5559 
5560 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5561 
5562 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5563 		return;
5564 
5565 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5566 
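	/*
	 * A volume set reporting a failing LBA above 32 bits requires
	 * descriptor format sense data (response code 0x72); all other
	 * cases fit in fixed format sense data (response code 0x70).
	 */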
5567 	if (ipr_is_vset_device(res) &&
5568 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5569 	    ioasa->u.vset.failing_lba_hi != 0) {
5570 		sense_buf[0] = 0x72;
5571 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5572 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5573 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5574 
5575 		sense_buf[7] = 12;
5576 		sense_buf[8] = 0;
5577 		sense_buf[9] = 0x0A;
5578 		sense_buf[10] = 0x80;
5579 
5580 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5581 
5582 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5583 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5584 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5585 		sense_buf[15] = failing_lba & 0x000000ff;
5586 
5587 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5588 
5589 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5590 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5591 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5592 		sense_buf[19] = failing_lba & 0x000000ff;
5593 	} else {
5594 		sense_buf[0] = 0x70;
5595 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5596 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5597 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5598 
5599 		/* Illegal request */
5600 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5601 		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5602 			sense_buf[7] = 10;	/* additional length */
5603 
5604 			/* IOARCB was in error */
5605 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5606 				sense_buf[15] = 0xC0;
5607 			else	/* Parameter data was invalid */
5608 				sense_buf[15] = 0x80;
5609 
5610 			sense_buf[16] =
5611 			    ((IPR_FIELD_POINTER_MASK &
5612 			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5613 			sense_buf[17] =
5614 			    (IPR_FIELD_POINTER_MASK &
5615 			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5616 		} else {
5617 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5618 				if (ipr_is_vset_device(res))
5619 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5620 				else
5621 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5622 
5623 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
5624 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5625 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5626 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5627 				sense_buf[6] = failing_lba & 0x000000ff;
5628 			}
5629 
5630 			sense_buf[7] = 6;	/* additional length */
5631 		}
5632 	}
5633 }
5634 
5635 /**
5636  * ipr_get_autosense - Copy autosense data to sense buffer
5637  * @ipr_cmd:	ipr command struct
5638  *
5639  * This function copies the autosense buffer to the buffer
5640  * in the scsi_cmd, if there is autosense available.
5641  *
5642  * Return value:
5643  *	1 if autosense was available / 0 if not
5644  **/
5645 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5646 {
5647 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5648 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5649 
5650 	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5651 		return 0;
5652 
5653 	if (ipr_cmd->ioa_cfg->sis64)
5654 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5655 		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5656 			   SCSI_SENSE_BUFFERSIZE));
5657 	else
5658 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5659 		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5660 			   SCSI_SENSE_BUFFERSIZE));
5661 	return 1;
5662 }
5663 
5664 /**
5665  * ipr_erp_start - Process an error response for a SCSI op
5666  * @ioa_cfg:	ioa config struct
5667  * @ipr_cmd:	ipr command struct
5668  *
5669  * This function determines whether or not to initiate ERP
5670  * on the affected device.
5671  *
5672  * Return value:
5673  * 	nothing
5674  **/
5675 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5676 			      struct ipr_cmnd *ipr_cmd)
5677 {
5678 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5679 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5680 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5681 	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5682 
5683 	if (!res) {
5684 		ipr_scsi_eh_done(ipr_cmd);
5685 		return;
5686 	}
5687 
5688 	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5689 		ipr_gen_sense(ipr_cmd);
5690 
5691 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5692 
5693 	switch (masked_ioasc) {
5694 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5695 		if (ipr_is_naca_model(res))
5696 			scsi_cmd->result |= (DID_ABORT << 16);
5697 		else
5698 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
5699 		break;
5700 	case IPR_IOASC_IR_RESOURCE_HANDLE:
5701 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5702 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5703 		break;
5704 	case IPR_IOASC_HW_SEL_TIMEOUT:
5705 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5706 		if (!ipr_is_naca_model(res))
5707 			res->needs_sync_complete = 1;
5708 		break;
5709 	case IPR_IOASC_SYNC_REQUIRED:
5710 		if (!res->in_erp)
5711 			res->needs_sync_complete = 1;
5712 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
5713 		break;
5714 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5715 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5716 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5717 		break;
5718 	case IPR_IOASC_BUS_WAS_RESET:
5719 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5720 		/*
5721 		 * Report the bus reset and ask for a retry. The device
5722 		 * will give CC/UA the next command.
5723 		 */
5724 		if (!res->resetting_device)
5725 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5726 		scsi_cmd->result |= (DID_ERROR << 16);
5727 		if (!ipr_is_naca_model(res))
5728 			res->needs_sync_complete = 1;
5729 		break;
5730 	case IPR_IOASC_HW_DEV_BUS_STATUS:
5731 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5732 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5733 			if (!ipr_get_autosense(ipr_cmd)) {
5734 				if (!ipr_is_naca_model(res)) {
5735 					ipr_erp_cancel_all(ipr_cmd);
5736 					return;
5737 				}
5738 			}
5739 		}
5740 		if (!ipr_is_naca_model(res))
5741 			res->needs_sync_complete = 1;
5742 		break;
5743 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5744 		break;
5745 	default:
5746 		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5747 			scsi_cmd->result |= (DID_ERROR << 16);
5748 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5749 			res->needs_sync_complete = 1;
5750 		break;
5751 	}
5752 
5753 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5754 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5755 	scsi_cmd->scsi_done(scsi_cmd);
5756 }
5757 
5758 /**
5759  * ipr_scsi_done - mid-layer done function
5760  * @ipr_cmd:	ipr command struct
5761  *
5762  * This function is invoked by the interrupt handler for
5763  * ops generated by the SCSI mid-layer
5764  *
5765  * Return value:
5766  * 	none
5767  **/
5768 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5769 {
5770 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5771 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5772 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5773 
5774 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5775 
5776 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5777 		scsi_dma_unmap(ipr_cmd->scsi_cmd);
5778 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5779 		scsi_cmd->scsi_done(scsi_cmd);
5780 	} else
5781 		ipr_erp_start(ioa_cfg, ipr_cmd);
5782 }
5783 
5784 /**
5785  * ipr_queuecommand - Queue a mid-layer request
5786  * @scsi_cmd:	scsi command struct
5787  * @done:		done function
5788  *
5789  * This function queues a request generated by the mid-layer.
5790  *
5791  * Return value:
5792  *	0 on success
5793  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5794  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
5795  **/
5796 static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
5797 			    void (*done) (struct scsi_cmnd *))
5798 {
5799 	struct ipr_ioa_cfg *ioa_cfg;
5800 	struct ipr_resource_entry *res;
5801 	struct ipr_ioarcb *ioarcb;
5802 	struct ipr_cmnd *ipr_cmd;
5803 	int rc = 0;
5804 
5805 	scsi_cmd->scsi_done = done;
5806 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5807 	res = scsi_cmd->device->hostdata;
5808 	scsi_cmd->result = (DID_OK << 16);
5809 
5810 	/*
5811 	 * We are currently blocking all devices due to a host reset.
5812 	 * We have told the host to stop giving us new requests, but
5813 	 * ERP ops don't count. FIXME
5814 	 */
5815 	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5816 		return SCSI_MLQUEUE_HOST_BUSY;
5817 
5818 	/*
5819 	 * FIXME - Create scsi_set_host_offline interface
5820 	 *  and the ioa_is_dead check can be removed
5821 	 */
5822 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5823 		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5824 		scsi_cmd->result = (DID_NO_CONNECT << 16);
5825 		scsi_cmd->scsi_done(scsi_cmd);
5826 		return 0;
5827 	}
5828 
5829 	if (ipr_is_gata(res) && res->sata_port)
5830 		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
5831 
5832 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5833 	ioarcb = &ipr_cmd->ioarcb;
5834 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5835 
5836 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5837 	ipr_cmd->scsi_cmd = scsi_cmd;
5838 	ioarcb->res_handle = res->res_handle;
5839 	ipr_cmd->done = ipr_scsi_done;
5840 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5841 
5842 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5843 		if (scsi_cmd->underflow == 0)
5844 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5845 
5846 		if (res->needs_sync_complete) {
5847 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5848 			res->needs_sync_complete = 0;
5849 		}
5850 
5851 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5852 		if (ipr_is_gscsi(res))
5853 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5854 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5855 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5856 	}
5857 
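	/*
	 * CDB opcodes 0xC0 and above are vendor specific. Send them to the
	 * IOA itself rather than the device, except on generic SCSI devices,
	 * where only IPR_QUERY_RSRC_STATE is routed to the IOA.
	 */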
5858 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
5859 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5860 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5861 
5862 	if (likely(rc == 0)) {
5863 		if (ioa_cfg->sis64)
5864 			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5865 		else
5866 			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5867 	}
5868 
5869 	if (unlikely(rc != 0)) {
5870 		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5871 		return SCSI_MLQUEUE_HOST_BUSY;
5872 	}
5873 
5874 	ipr_send_command(ipr_cmd);
5875 	return 0;
5876 }
5877 
5878 static DEF_SCSI_QCMD(ipr_queuecommand)
5879 
5880 /**
5881  * ipr_ioctl - IOCTL handler
5882  * @sdev:	scsi device struct
5883  * @cmd:	IOCTL cmd
5884  * @arg:	IOCTL arg
5885  *
5886  * Return value:
5887  * 	0 on success / other on failure
5888  **/
5889 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5890 {
5891 	struct ipr_resource_entry *res;
5892 
5893 	res = (struct ipr_resource_entry *)sdev->hostdata;
5894 	if (res && ipr_is_gata(res)) {
5895 		if (cmd == HDIO_GET_IDENTITY)
5896 			return -ENOTTY;
5897 		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5898 	}
5899 
5900 	return -EINVAL;
5901 }
5902 
5903 /**
5904  * ipr_ioa_info - Get information about the card/driver
5905  * @host:	scsi host struct
5906  *
5907  * Return value:
5908  * 	pointer to buffer with description string
5909  **/
5910 static const char * ipr_ioa_info(struct Scsi_Host *host)
5911 {
5912 	static char buffer[512];
5913 	struct ipr_ioa_cfg *ioa_cfg;
5914 	unsigned long lock_flags = 0;
5915 
5916 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5917 
5918 	spin_lock_irqsave(host->host_lock, lock_flags);
5919 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5920 	spin_unlock_irqrestore(host->host_lock, lock_flags);
5921 
5922 	return buffer;
5923 }
5924 
5925 static struct scsi_host_template driver_template = {
5926 	.module = THIS_MODULE,
5927 	.name = "IPR",
5928 	.info = ipr_ioa_info,
5929 	.ioctl = ipr_ioctl,
5930 	.queuecommand = ipr_queuecommand,
5931 	.eh_abort_handler = ipr_eh_abort,
5932 	.eh_device_reset_handler = ipr_eh_dev_reset,
5933 	.eh_host_reset_handler = ipr_eh_host_reset,
5934 	.slave_alloc = ipr_slave_alloc,
5935 	.slave_configure = ipr_slave_configure,
5936 	.slave_destroy = ipr_slave_destroy,
5937 	.target_alloc = ipr_target_alloc,
5938 	.target_destroy = ipr_target_destroy,
5939 	.change_queue_depth = ipr_change_queue_depth,
5940 	.change_queue_type = ipr_change_queue_type,
5941 	.bios_param = ipr_biosparam,
5942 	.can_queue = IPR_MAX_COMMANDS,
5943 	.this_id = -1,
5944 	.sg_tablesize = IPR_MAX_SGLIST,
5945 	.max_sectors = IPR_IOA_MAX_SECTORS,
5946 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5947 	.use_clustering = ENABLE_CLUSTERING,
5948 	.shost_attrs = ipr_ioa_attrs,
5949 	.sdev_attrs = ipr_dev_attrs,
5950 	.proc_name = IPR_NAME
5951 };
5952 
5953 /**
5954  * ipr_ata_phy_reset - libata phy_reset handler
5955  * @ap:		ata port to reset
5956  *
5957  **/
5958 static void ipr_ata_phy_reset(struct ata_port *ap)
5959 {
5960 	unsigned long flags;
5961 	struct ipr_sata_port *sata_port = ap->private_data;
5962 	struct ipr_resource_entry *res = sata_port->res;
5963 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5964 	int rc;
5965 
5966 	ENTER;
5967 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5968 	while (ioa_cfg->in_reset_reload) {
5969 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5970 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5971 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5972 	}
5973 
5974 	if (!ioa_cfg->allow_cmds)
5975 		goto out_unlock;
5976 
5977 	rc = ipr_device_reset(ioa_cfg, res);
5978 
5979 	if (rc) {
5980 		ap->link.device[0].class = ATA_DEV_NONE;
5981 		goto out_unlock;
5982 	}
5983 
5984 	ap->link.device[0].class = res->ata_class;
5985 	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5986 		ap->link.device[0].class = ATA_DEV_NONE;
5987 
5988 out_unlock:
5989 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5990 	LEAVE;
5991 }
5992 
5993 /**
5994  * ipr_ata_post_internal - Cleanup after an internal command
5995  * @qc:	ATA queued command
5996  *
5997  * Return value:
5998  * 	none
5999  **/
6000 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6001 {
6002 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6003 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6004 	struct ipr_cmnd *ipr_cmd;
6005 	unsigned long flags;
6006 
6007 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6008 	while (ioa_cfg->in_reset_reload) {
6009 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6010 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6011 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6012 	}
6013 
6014 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
6015 		if (ipr_cmd->qc == qc) {
6016 			ipr_device_reset(ioa_cfg, sata_port->res);
6017 			break;
6018 		}
6019 	}
6020 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6021 }
6022 
6023 /**
6024  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6025  * @regs:	destination
6026  * @tf:	source ATA taskfile
6027  *
6028  * Return value:
6029  * 	none
6030  **/
6031 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6032 			     struct ata_taskfile *tf)
6033 {
6034 	regs->feature = tf->feature;
6035 	regs->nsect = tf->nsect;
6036 	regs->lbal = tf->lbal;
6037 	regs->lbam = tf->lbam;
6038 	regs->lbah = tf->lbah;
6039 	regs->device = tf->device;
6040 	regs->command = tf->command;
6041 	regs->hob_feature = tf->hob_feature;
6042 	regs->hob_nsect = tf->hob_nsect;
6043 	regs->hob_lbal = tf->hob_lbal;
6044 	regs->hob_lbam = tf->hob_lbam;
6045 	regs->hob_lbah = tf->hob_lbah;
6046 	regs->ctl = tf->ctl;
6047 }
6048 
6049 /**
6050  * ipr_sata_done - done function for SATA commands
6051  * @ipr_cmd:	ipr command struct
6052  *
6053  * This function is invoked by the interrupt handler for
6054  * ops generated by the SCSI mid-layer to SATA devices
6055  *
6056  * Return value:
6057  * 	none
6058  **/
6059 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6060 {
6061 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6062 	struct ata_queued_cmd *qc = ipr_cmd->qc;
6063 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6064 	struct ipr_resource_entry *res = sata_port->res;
6065 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6066 
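	/* Save the returned ATA status/taskfile for ipr_qc_fill_rtf */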
6067 	if (ipr_cmd->ioa_cfg->sis64)
6068 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6069 		       sizeof(struct ipr_ioasa_gata));
6070 	else
6071 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6072 		       sizeof(struct ipr_ioasa_gata));
6073 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6074 
6075 	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6076 		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6077 
6078 	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6079 		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6080 	else
6081 		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6082 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6083 	ata_qc_complete(qc);
6084 }
6085 
6086 /**
6087  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6088  * @ipr_cmd:	ipr command struct
6089  * @qc:		ATA queued command
6090  *
6091  **/
6092 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6093 				  struct ata_queued_cmd *qc)
6094 {
6095 	u32 ioadl_flags = 0;
6096 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6097 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6098 	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6099 	int len = qc->nbytes;
6100 	struct scatterlist *sg;
6101 	unsigned int si;
6102 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
6103 
6104 	if (len == 0)
6105 		return;
6106 
6107 	if (qc->dma_dir == DMA_TO_DEVICE) {
6108 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6109 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6110 	} else if (qc->dma_dir == DMA_FROM_DEVICE)
6111 		ioadl_flags = IPR_IOADL_FLAGS_READ;
6112 
6113 	ioarcb->data_transfer_length = cpu_to_be32(len);
6114 	ioarcb->ioadl_len =
6115 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6116 	ioarcb->u.sis64_addr_data.data_ioadl_addr =
6117 		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6118 
6119 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6120 		ioadl64->flags = cpu_to_be32(ioadl_flags);
6121 		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6122 		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6123 
6124 		last_ioadl64 = ioadl64;
6125 		ioadl64++;
6126 	}
6127 
6128 	if (likely(last_ioadl64))
6129 		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6130 }
6131 
6132 /**
6133  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6134  * @ipr_cmd:	ipr command struct
6135  * @qc:		ATA queued command
6136  *
6137  **/
6138 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6139 				struct ata_queued_cmd *qc)
6140 {
6141 	u32 ioadl_flags = 0;
6142 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6143 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6144 	struct ipr_ioadl_desc *last_ioadl = NULL;
6145 	int len = qc->nbytes;
6146 	struct scatterlist *sg;
6147 	unsigned int si;
6148 
6149 	if (len == 0)
6150 		return;
6151 
6152 	if (qc->dma_dir == DMA_TO_DEVICE) {
6153 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6154 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6155 		ioarcb->data_transfer_length = cpu_to_be32(len);
6156 		ioarcb->ioadl_len =
6157 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6158 	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
6159 		ioadl_flags = IPR_IOADL_FLAGS_READ;
6160 		ioarcb->read_data_transfer_length = cpu_to_be32(len);
6161 		ioarcb->read_ioadl_len =
6162 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6163 	}
6164 
6165 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6166 		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6167 		ioadl->address = cpu_to_be32(sg_dma_address(sg));
6168 
6169 		last_ioadl = ioadl;
6170 		ioadl++;
6171 	}
6172 
6173 	if (likely(last_ioadl))
6174 		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6175 }
6176 
6177 /**
6178  * ipr_qc_issue - Issue a SATA qc to a device
6179  * @qc:	queued command
6180  *
6181  * Return value:
6182  * 	0 if success
6183  **/
6184 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6185 {
6186 	struct ata_port *ap = qc->ap;
6187 	struct ipr_sata_port *sata_port = ap->private_data;
6188 	struct ipr_resource_entry *res = sata_port->res;
6189 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6190 	struct ipr_cmnd *ipr_cmd;
6191 	struct ipr_ioarcb *ioarcb;
6192 	struct ipr_ioarcb_ata_regs *regs;
6193 
6194 	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6195 		return AC_ERR_SYSTEM;
6196 
6197 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6198 	ioarcb = &ipr_cmd->ioarcb;
6199 
6200 	if (ioa_cfg->sis64) {
6201 		regs = &ipr_cmd->i.ata_ioadl.regs;
6202 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6203 	} else
6204 		regs = &ioarcb->u.add_data.u.regs;
6205 
6206 	memset(regs, 0, sizeof(*regs));
6207 	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6208 
6209 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6210 	ipr_cmd->qc = qc;
6211 	ipr_cmd->done = ipr_sata_done;
6212 	ipr_cmd->ioarcb.res_handle = res->res_handle;
6213 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6214 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6215 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6216 	ipr_cmd->dma_use_sg = qc->n_elem;
6217 
6218 	if (ioa_cfg->sis64)
6219 		ipr_build_ata_ioadl64(ipr_cmd, qc);
6220 	else
6221 		ipr_build_ata_ioadl(ipr_cmd, qc);
6222 
6223 	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6224 	ipr_copy_sata_tf(regs, &qc->tf);
6225 	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6226 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6227 
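	/*
	 * Translate the ATA protocol into IOA ATA flags: DMA protocols set
	 * the DMA transfer type; ATAPI protocols are sent as packet commands.
	 */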
6228 	switch (qc->tf.protocol) {
6229 	case ATA_PROT_NODATA:
6230 	case ATA_PROT_PIO:
6231 		break;
6232 
6233 	case ATA_PROT_DMA:
6234 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6235 		break;
6236 
6237 	case ATAPI_PROT_PIO:
6238 	case ATAPI_PROT_NODATA:
6239 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6240 		break;
6241 
6242 	case ATAPI_PROT_DMA:
6243 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6244 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6245 		break;
6246 
6247 	default:
6248 		WARN_ON(1);
6249 		return AC_ERR_INVALID;
6250 	}
6251 
6252 	ipr_send_command(ipr_cmd);
6253 
6254 	return 0;
6255 }
6256 
6257 /**
6258  * ipr_qc_fill_rtf - Read result TF
6259  * @qc: ATA queued command
6260  *
6261  * Return value:
6262  * 	true
6263  **/
6264 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6265 {
6266 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6267 	struct ipr_ioasa_gata *g = &sata_port->ioasa;
6268 	struct ata_taskfile *tf = &qc->result_tf;
6269 
6270 	tf->feature = g->error;
6271 	tf->nsect = g->nsect;
6272 	tf->lbal = g->lbal;
6273 	tf->lbam = g->lbam;
6274 	tf->lbah = g->lbah;
6275 	tf->device = g->device;
6276 	tf->command = g->status;
6277 	tf->hob_nsect = g->hob_nsect;
6278 	tf->hob_lbal = g->hob_lbal;
6279 	tf->hob_lbam = g->hob_lbam;
6280 	tf->hob_lbah = g->hob_lbah;
6281 	tf->ctl = g->alt_status;
6282 
6283 	return true;
6284 }
6285 
6286 static struct ata_port_operations ipr_sata_ops = {
6287 	.phy_reset = ipr_ata_phy_reset,
6288 	.hardreset = ipr_sata_reset,
6289 	.post_internal_cmd = ipr_ata_post_internal,
6290 	.qc_prep = ata_noop_qc_prep,
6291 	.qc_issue = ipr_qc_issue,
6292 	.qc_fill_rtf = ipr_qc_fill_rtf,
6293 	.port_start = ata_sas_port_start,
6294 	.port_stop = ata_sas_port_stop
6295 };
6296 
6297 static struct ata_port_info sata_port_info = {
6298 	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6299 	.pio_mask	= ATA_PIO4_ONLY,
6300 	.mwdma_mask	= ATA_MWDMA2,
6301 	.udma_mask	= ATA_UDMA6,
6302 	.port_ops	= &ipr_sata_ops
6303 };
6304 
6305 #ifdef CONFIG_PPC_PSERIES
6306 static const u16 ipr_blocked_processors[] = {
6307 	PV_NORTHSTAR,
6308 	PV_PULSAR,
6309 	PV_POWER4,
6310 	PV_ICESTAR,
6311 	PV_SSTAR,
6312 	PV_POWER4p,
6313 	PV_630,
6314 	PV_630p
6315 };
6316 
6317 /**
6318  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6319  * @ioa_cfg:	ioa cfg struct
6320  *
6321  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6322  * certain pSeries hardware. This function determines if the given
6323  * adapter is in one of these configurations or not.
6324  *
6325  * Return value:
6326  * 	1 if adapter is not supported / 0 if adapter is supported
6327  **/
6328 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6329 {
6330 	int i;
6331 
6332 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6333 		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6334 			if (__is_processor(ipr_blocked_processors[i]))
6335 				return 1;
6336 		}
6337 	}
6338 	return 0;
6339 }
6340 #else
6341 #define ipr_invalid_adapter(ioa_cfg) 0
6342 #endif
6343 
6344 /**
6345  * ipr_ioa_bringdown_done - IOA bring down completion.
6346  * @ipr_cmd:	ipr command struct
6347  *
6348  * This function processes the completion of an adapter bring down.
6349  * It wakes any reset sleepers.
6350  *
6351  * Return value:
6352  * 	IPR_RC_JOB_RETURN
6353  **/
6354 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6355 {
6356 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6357 
6358 	ENTER;
6359 	ioa_cfg->in_reset_reload = 0;
6360 	ioa_cfg->reset_retries = 0;
6361 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6362 	wake_up_all(&ioa_cfg->reset_wait_q);
6363 
6364 	spin_unlock_irq(ioa_cfg->host->host_lock);
6365 	scsi_unblock_requests(ioa_cfg->host);
6366 	spin_lock_irq(ioa_cfg->host->host_lock);
6367 	LEAVE;
6368 
6369 	return IPR_RC_JOB_RETURN;
6370 }
6371 
6372 /**
6373  * ipr_ioa_reset_done - IOA reset completion.
6374  * @ipr_cmd:	ipr command struct
6375  *
6376  * This function processes the completion of an adapter reset.
6377  * It schedules any necessary mid-layer add/removes and
6378  * wakes any reset sleepers.
6379  *
6380  * Return value:
6381  * 	IPR_RC_JOB_RETURN
6382  **/
6383 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6384 {
6385 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6386 	struct ipr_resource_entry *res;
6387 	struct ipr_hostrcb *hostrcb, *temp;
6388 	int i = 0;
6389 
6390 	ENTER;
6391 	ioa_cfg->in_reset_reload = 0;
6392 	ioa_cfg->allow_cmds = 1;
6393 	ioa_cfg->reset_cmd = NULL;
6394 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6395 
6396 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6397 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6398 			ipr_trace;
6399 			break;
6400 		}
6401 	}
6402 	schedule_work(&ioa_cfg->work_q);
6403 
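	/*
	 * Repost the free host RCBs to the adapter: the first
	 * IPR_NUM_LOG_HCAMS as error log HCAMs, the rest as configuration
	 * change HCAMs.
	 */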
6404 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6405 		list_del(&hostrcb->queue);
6406 		if (i++ < IPR_NUM_LOG_HCAMS)
6407 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6408 		else
6409 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6410 	}
6411 
6412 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6413 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6414 
6415 	ioa_cfg->reset_retries = 0;
6416 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6417 	wake_up_all(&ioa_cfg->reset_wait_q);
6418 
6419 	spin_unlock(ioa_cfg->host->host_lock);
6420 	scsi_unblock_requests(ioa_cfg->host);
6421 	spin_lock(ioa_cfg->host->host_lock);
6422 
6423 	if (!ioa_cfg->allow_cmds)
6424 		scsi_block_requests(ioa_cfg->host);
6425 
6426 	LEAVE;
6427 	return IPR_RC_JOB_RETURN;
6428 }
6429 
6430 /**
6431  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6432  * @supported_dev:	supported device struct
6433  * @vpids:			vendor product id struct
6434  *
6435  * Return value:
6436  * 	none
6437  **/
6438 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6439 				 struct ipr_std_inq_vpids *vpids)
6440 {
6441 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6442 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6443 	supported_dev->num_records = 1;
6444 	supported_dev->data_length =
6445 		cpu_to_be16(sizeof(struct ipr_supported_device));
6446 	supported_dev->reserved = 0;
6447 }
6448 
6449 /**
6450  * ipr_set_supported_devs - Send Set Supported Devices for a device
6451  * @ipr_cmd:	ipr command struct
6452  *
6453  * This function sends a Set Supported Devices to the adapter
6454  *
6455  * Return value:
6456  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6457  **/
6458 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6459 {
6460 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6461 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6462 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6463 	struct ipr_resource_entry *res = ipr_cmd->u.res;
6464 
6465 	ipr_cmd->job_step = ipr_ioa_reset_done;
6466 
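	/*
	 * Send one Set Supported Devices per SCSI disk resource. On 32-bit
	 * SIS adapters the job step re-enters this function for each disk;
	 * on SIS64 a single command covers all devices.
	 */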
6467 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6468 		if (!ipr_is_scsi_disk(res))
6469 			continue;
6470 
6471 		ipr_cmd->u.res = res;
6472 		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6473 
6474 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6475 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6476 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6477 
6478 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6479 		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6480 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6481 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6482 
6483 		ipr_init_ioadl(ipr_cmd,
6484 			       ioa_cfg->vpd_cbs_dma +
6485 				 offsetof(struct ipr_misc_cbs, supp_dev),
6486 			       sizeof(struct ipr_supported_device),
6487 			       IPR_IOADL_FLAGS_WRITE_LAST);
6488 
6489 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6490 			   IPR_SET_SUP_DEVICE_TIMEOUT);
6491 
6492 		if (!ioa_cfg->sis64)
6493 			ipr_cmd->job_step = ipr_set_supported_devs;
6494 		return IPR_RC_JOB_RETURN;
6495 	}
6496 
6497 	return IPR_RC_JOB_CONTINUE;
6498 }
6499 
6500 /**
6501  * ipr_get_mode_page - Locate specified mode page
6502  * @mode_pages:	mode page buffer
6503  * @page_code:	page code to find
6504  * @len:		minimum required length for mode page
6505  *
6506  * Return value:
6507  * 	pointer to mode page / NULL on failure
6508  **/
6509 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6510 			       u32 page_code, u32 len)
6511 {
6512 	struct ipr_mode_page_hdr *mode_hdr;
6513 	u32 page_length;
6514 	u32 length;
6515 
6516 	if (!mode_pages || (mode_pages->hdr.length == 0))
6517 		return NULL;
6518 
6519 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6520 	mode_hdr = (struct ipr_mode_page_hdr *)
6521 		(mode_pages->data + mode_pages->hdr.block_desc_len);
6522 
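	/*
	 * Walk the mode page headers, which follow the block descriptors,
	 * until the requested page code is found with sufficient length.
	 */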
6523 	while (length) {
6524 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6525 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6526 				return mode_hdr;
6527 			break;
6528 		} else {
6529 			page_length = (sizeof(struct ipr_mode_page_hdr) +
6530 				       mode_hdr->page_length);
6531 			length -= page_length;
6532 			mode_hdr = (struct ipr_mode_page_hdr *)
6533 				((unsigned long)mode_hdr + page_length);
6534 		}
6535 	}
6536 	return NULL;
6537 }
6538 
6539 /**
6540  * ipr_check_term_power - Check for term power errors
6541  * @ioa_cfg:	ioa config struct
6542  * @mode_pages:	IOAFP mode pages buffer
6543  *
6544  * Check the IOAFP's mode page 28 for term power errors
6545  *
6546  * Return value:
6547  * 	nothing
6548  **/
6549 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6550 				 struct ipr_mode_pages *mode_pages)
6551 {
6552 	int i;
6553 	int entry_length;
6554 	struct ipr_dev_bus_entry *bus;
6555 	struct ipr_mode_page28 *mode_page;
6556 
6557 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6558 				      sizeof(struct ipr_mode_page28));
6559 
6560 	entry_length = mode_page->entry_length;
6561 
6562 	bus = mode_page->bus;
6563 
6564 	for (i = 0; i < mode_page->num_entries; i++) {
6565 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6566 			dev_err(&ioa_cfg->pdev->dev,
6567 				"Term power is absent on scsi bus %d\n",
6568 				bus->res_addr.bus);
6569 		}
6570 
6571 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6572 	}
6573 }
6574 
6575 /**
6576  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6577  * @ioa_cfg:	ioa config struct
6578  *
6579  * Looks through the config table for SES devices. If an SES
6580  * device is listed in the SES table with a maximum SCSI bus
6581  * speed, the bus speed is limited accordingly.
6582  *
6583  * Return value:
6584  * 	none
6585  **/
6586 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6587 {
6588 	u32 max_xfer_rate;
6589 	int i;
6590 
6591 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6592 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6593 						       ioa_cfg->bus_attr[i].bus_width);
6594 
6595 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6596 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6597 	}
6598 }
6599 
6600 /**
6601  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6602  * @ioa_cfg:	ioa config struct
6603  * @mode_pages:	mode page 28 buffer
6604  *
6605  * Updates mode page 28 based on driver configuration
6606  *
6607  * Return value:
6608  * 	none
6609  **/
6610 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6611 					  struct ipr_mode_pages *mode_pages)
6612 {
6613 	int i, entry_length;
6614 	struct ipr_dev_bus_entry *bus;
6615 	struct ipr_bus_attributes *bus_attr;
6616 	struct ipr_mode_page28 *mode_page;
6617 
6618 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6619 				      sizeof(struct ipr_mode_page28));
6620 
6621 	entry_length = mode_page->entry_length;
6622 
6623 	/* Loop for each device bus entry */
6624 	for (i = 0, bus = mode_page->bus;
6625 	     i < mode_page->num_entries;
6626 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6627 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6628 			dev_err(&ioa_cfg->pdev->dev,
6629 				"Invalid resource address reported: 0x%08X\n",
6630 				IPR_GET_PHYS_LOC(bus->res_addr));
6631 			continue;
6632 		}
6633 
6634 		bus_attr = &ioa_cfg->bus_attr[i];
6635 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6636 		bus->bus_width = bus_attr->bus_width;
6637 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6638 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6639 		if (bus_attr->qas_enabled)
6640 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6641 		else
6642 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6643 	}
6644 }
6645 
6646 /**
6647  * ipr_build_mode_select - Build a mode select command
6648  * @ipr_cmd:	ipr command struct
6649  * @res_handle:	resource handle to send command to
6650  * @parm:		byte 1 of the Mode Select CDB
6651  * @dma_addr:	DMA buffer address
6652  * @xfer_len:	data transfer length
6653  *
6654  * Return value:
6655  * 	none
6656  **/
6657 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6658 				  __be32 res_handle, u8 parm,
6659 				  dma_addr_t dma_addr, u8 xfer_len)
6660 {
6661 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6662 
6663 	ioarcb->res_handle = res_handle;
6664 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6665 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6666 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
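	/* byte 1: callers pass 0x11 = PF (page format) | SP (save pages) */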
6667 	ioarcb->cmd_pkt.cdb[1] = parm;
6668 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6669 
6670 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6671 }
6672 
6673 /**
6674  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6675  * @ipr_cmd:	ipr command struct
6676  *
6677  * This function sets up the SCSI bus attributes and sends
6678  * a Mode Select for Page 28 to activate them.
6679  *
6680  * Return value:
6681  * 	IPR_RC_JOB_RETURN
6682  **/
6683 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6684 {
6685 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6686 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6687 	int length;
6688 
6689 	ENTER;
6690 	ipr_scsi_bus_speed_limit(ioa_cfg);
6691 	ipr_check_term_power(ioa_cfg, mode_pages);
6692 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
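	/*
	 * Save the total mode data length, then zero the length field;
	 * it is reserved in the MODE SELECT parameter list.
	 */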
6693 	length = mode_pages->hdr.length + 1;
6694 	mode_pages->hdr.length = 0;
6695 
6696 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6697 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6698 			      length);
6699 
6700 	ipr_cmd->job_step = ipr_set_supported_devs;
6701 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6702 				    struct ipr_resource_entry, queue);
6703 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6704 
6705 	LEAVE;
6706 	return IPR_RC_JOB_RETURN;
6707 }
6708 
6709 /**
6710  * ipr_build_mode_sense - Builds a mode sense command
6711  * @ipr_cmd:	ipr command struct
6712  * @res_handle:	resource handle to send command to
6713  * @parm:		Byte 2 of mode sense command
6714  * @dma_addr:	DMA address of mode sense buffer
6715  * @xfer_len:	Size of DMA buffer
6716  *
6717  * Return value:
6718  * 	none
6719  **/
6720 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6721 				 __be32 res_handle,
6722 				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6723 {
6724 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6725 
6726 	ioarcb->res_handle = res_handle;
6727 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6728 	ioarcb->cmd_pkt.cdb[2] = parm;
6729 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6730 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6731 
6732 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6733 }
6734 
6735 /**
6736  * ipr_reset_cmd_failed - Handle failure of IOA reset command
6737  * @ipr_cmd:	ipr command struct
6738  *
6739  * This function handles the failure of an IOA bringup command.
6740  *
6741  * Return value:
6742  * 	IPR_RC_JOB_RETURN
6743  **/
6744 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6745 {
6746 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6747 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6748 
6749 	dev_err(&ioa_cfg->pdev->dev,
6750 		"0x%02X failed with IOASC: 0x%08X\n",
6751 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6752 
6753 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6754 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6755 	return IPR_RC_JOB_RETURN;
6756 }
6757 
6758 /**
6759  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6760  * @ipr_cmd:	ipr command struct
6761  *
6762  * This function handles the failure of a Mode Sense to the IOAFP.
6763  * Some adapters do not handle all mode pages.
6764  *
6765  * Return value:
6766  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6767  **/
6768 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6769 {
6770 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6771 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6772 
6773 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6774 		ipr_cmd->job_step = ipr_set_supported_devs;
6775 		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6776 					    struct ipr_resource_entry, queue);
6777 		return IPR_RC_JOB_CONTINUE;
6778 	}
6779 
6780 	return ipr_reset_cmd_failed(ipr_cmd);
6781 }
6782 
6783 /**
6784  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6785  * @ipr_cmd:	ipr command struct
6786  *
6787  * This function sends a Page 28 mode sense to the IOA to
6788  * retrieve SCSI bus attributes.
6789  *
6790  * Return value:
6791  * 	IPR_RC_JOB_RETURN
6792  **/
6793 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6794 {
6795 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6796 
6797 	ENTER;
6798 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6799 			     0x28, ioa_cfg->vpd_cbs_dma +
6800 			     offsetof(struct ipr_misc_cbs, mode_pages),
6801 			     sizeof(struct ipr_mode_pages));
6802 
6803 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6804 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6805 
6806 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6807 
6808 	LEAVE;
6809 	return IPR_RC_JOB_RETURN;
6810 }
6811 
6812 /**
6813  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6814  * @ipr_cmd:	ipr command struct
6815  *
6816  * This function enables dual IOA RAID support if possible.
6817  *
6818  * Return value:
6819  * 	IPR_RC_JOB_RETURN
6820  **/
6821 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6822 {
6823 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6824 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6825 	struct ipr_mode_page24 *mode_page;
6826 	int length;
6827 
6828 	ENTER;
6829 	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6830 				      sizeof(struct ipr_mode_page24));
6831 
6832 	if (mode_page)
6833 		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6834 
6835 	length = mode_pages->hdr.length + 1;
6836 	mode_pages->hdr.length = 0;
6837 
6838 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6839 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6840 			      length);
6841 
6842 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6843 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6844 
6845 	LEAVE;
6846 	return IPR_RC_JOB_RETURN;
6847 }
6848 
6849 /**
6850  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6851  * @ipr_cmd:	ipr command struct
6852  *
6853  * This function handles the failure of a Mode Sense to the IOAFP.
6854  * Some adapters do not handle all mode pages.
6855  *
6856  * Return value:
6857  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6858  **/
6859 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6860 {
6861 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6862 
6863 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6864 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6865 		return IPR_RC_JOB_CONTINUE;
6866 	}
6867 
6868 	return ipr_reset_cmd_failed(ipr_cmd);
6869 }
6870 
6871 /**
6872  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6873  * @ipr_cmd:	ipr command struct
6874  *
6875  * This function sends a mode sense to the IOA to retrieve
6876  * the IOA Advanced Function Control mode page.
6877  *
6878  * Return value:
6879  * 	IPR_RC_JOB_RETURN
6880  **/
6881 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6882 {
6883 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6884 
6885 	ENTER;
6886 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6887 			     0x24, ioa_cfg->vpd_cbs_dma +
6888 			     offsetof(struct ipr_misc_cbs, mode_pages),
6889 			     sizeof(struct ipr_mode_pages));
6890 
6891 	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6892 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6893 
6894 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6895 
6896 	LEAVE;
6897 	return IPR_RC_JOB_RETURN;
6898 }
6899 
6900 /**
6901  * ipr_init_res_table - Initialize the resource table
6902  * @ipr_cmd:	ipr command struct
6903  *
6904  * This function looks through the existing resource table, comparing
6905  * it with the config table. This function will take care of old/new
6906  * devices and schedule adding/removing them from the mid-layer
6907  * as appropriate.
6908  *
6909  * Return value:
6910  * 	IPR_RC_JOB_CONTINUE
6911  **/
6912 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6913 {
6914 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6915 	struct ipr_resource_entry *res, *temp;
6916 	struct ipr_config_table_entry_wrapper cfgtew;
6917 	int entries, found, flag, i;
6918 	LIST_HEAD(old_res);
6919 
6920 	ENTER;
6921 	if (ioa_cfg->sis64)
6922 		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6923 	else
6924 		flag = ioa_cfg->u.cfg_table->hdr.flags;
6925 
6926 	if (flag & IPR_UCODE_DOWNLOAD_REQ)
6927 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6928 
6929 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6930 		list_move_tail(&res->queue, &old_res);
6931 
6932 	if (ioa_cfg->sis64)
6933 		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6934 	else
6935 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6936 
6937 	for (i = 0; i < entries; i++) {
6938 		if (ioa_cfg->sis64)
6939 			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6940 		else
6941 			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6942 		found = 0;
6943 
6944 		list_for_each_entry_safe(res, temp, &old_res, queue) {
6945 			if (ipr_is_same_device(res, &cfgtew)) {
6946 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6947 				found = 1;
6948 				break;
6949 			}
6950 		}
6951 
6952 		if (!found) {
6953 			if (list_empty(&ioa_cfg->free_res_q)) {
6954 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6955 				break;
6956 			}
6957 
6958 			found = 1;
6959 			res = list_entry(ioa_cfg->free_res_q.next,
6960 					 struct ipr_resource_entry, queue);
6961 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6962 			ipr_init_res_entry(res, &cfgtew);
6963 			res->add_to_ml = 1;
6964 		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
6965 			res->sdev->allow_restart = 1;
6966 
6967 		if (found)
6968 			ipr_update_res_entry(res, &cfgtew);
6969 	}
6970 
6971 	list_for_each_entry_safe(res, temp, &old_res, queue) {
6972 		if (res->sdev) {
6973 			res->del_from_ml = 1;
6974 			res->res_handle = IPR_INVALID_RES_HANDLE;
6975 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6976 		}
6977 	}
6978 
6979 	list_for_each_entry_safe(res, temp, &old_res, queue) {
6980 		ipr_clear_res_target(res);
6981 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6982 	}
6983 
6984 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6985 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6986 	else
6987 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6988 
6989 	LEAVE;
6990 	return IPR_RC_JOB_CONTINUE;
6991 }
6992 
6993 /**
6994  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6995  * @ipr_cmd:	ipr command struct
6996  *
6997  * This function sends a Query IOA Configuration command
6998  * to the adapter to retrieve the IOA configuration table.
6999  *
7000  * Return value:
7001  * 	IPR_RC_JOB_RETURN
7002  **/
7003 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7004 {
7005 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7006 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7007 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7008 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7009 
7010 	ENTER;
7011 	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7012 		ioa_cfg->dual_raid = 1;
7013 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7014 		 ucode_vpd->major_release, ucode_vpd->card_type,
7015 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7016 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7017 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7018 
7019 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
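	/* CDB bytes 6-8: 24-bit big-endian config table size */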
7020 	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7021 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7022 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7023 
7024 	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7025 		       IPR_IOADL_FLAGS_READ_LAST);
7026 
7027 	ipr_cmd->job_step = ipr_init_res_table;
7028 
7029 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7030 
7031 	LEAVE;
7032 	return IPR_RC_JOB_RETURN;
7033 }
7034 
7035 /**
7036  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7037  * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page:	page code
 * @dma_addr:	DMA address of the inquiry buffer
 * @xfer_len:	transfer length
7038  *
7039  * This utility function sends an inquiry to the adapter.
7040  *
7041  * Return value:
7042  * 	none
7043  **/
7044 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7045 			      dma_addr_t dma_addr, u8 xfer_len)
7046 {
7047 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7048 
7049 	ENTER;
7050 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7051 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7052 
7053 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7054 	ioarcb->cmd_pkt.cdb[1] = flags;
7055 	ioarcb->cmd_pkt.cdb[2] = page;
7056 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7057 
7058 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7059 
7060 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7061 	LEAVE;
7062 }
7063 
7064 /**
7065  * ipr_inquiry_page_supported - Is the given inquiry page supported
7066  * @page0:		inquiry page 0 buffer
7067  * @page:		page code.
7068  *
7069  * This function determines if the specified inquiry page is supported.
7070  *
7071  * Return value:
7072  *	1 if page is supported / 0 if not
7073  **/
7074 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7075 {
7076 	int i;
7077 
7078 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7079 		if (page0->page[i] == page)
7080 			return 1;
7081 
7082 	return 0;
7083 }
7084 
7085 /**
7086  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7087  * @ipr_cmd:	ipr command struct
7088  *
7089  * This function sends a Page 0xD0 inquiry to the adapter
7090  * to retrieve adapter capabilities.
7091  *
7092  * Return value:
7093  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7094  **/
7095 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7096 {
7097 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7098 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7099 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7100 
7101 	ENTER;
7102 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7103 	memset(cap, 0, sizeof(*cap));
7104 
7105 	if (ipr_inquiry_page_supported(page0, 0xD0)) {
7106 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7107 				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7108 				  sizeof(struct ipr_inquiry_cap));
7109 		return IPR_RC_JOB_RETURN;
7110 	}
7111 
7112 	LEAVE;
7113 	return IPR_RC_JOB_CONTINUE;
7114 }
7115 
7116 /**
7117  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7118  * @ipr_cmd:	ipr command struct
7119  *
7120  * This function sends a Page 3 inquiry to the adapter
7121  * to retrieve software VPD information.
7122  *
7123  * Return value:
7124  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7125  **/
7126 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7127 {
7128 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7129 
7130 	ENTER;
7131 
7132 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7133 
7134 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7135 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7136 			  sizeof(struct ipr_inquiry_page3));
7137 
7138 	LEAVE;
7139 	return IPR_RC_JOB_RETURN;
7140 }
7141 
7142 /**
7143  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7144  * @ipr_cmd:	ipr command struct
7145  *
7146  * This function sends a Page 0 inquiry to the adapter
7147  * to retrieve supported inquiry pages.
7148  *
7149  * Return value:
7150  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7151  **/
7152 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7153 {
7154 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7155 	char type[5];
7156 
7157 	ENTER;
7158 
7159 	/* Grab the type out of the VPD and store it away */
7160 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7161 	type[4] = '\0';
7162 	ioa_cfg->type = simple_strtoul(type, NULL, 16);
7163 
7164 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7165 
7166 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7167 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7168 			  sizeof(struct ipr_inquiry_page0));
7169 
7170 	LEAVE;
7171 	return IPR_RC_JOB_RETURN;
7172 }
7173 
7174 /**
7175  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7176  * @ipr_cmd:	ipr command struct
7177  *
7178  * This function sends a standard inquiry to the adapter.
7179  *
7180  * Return value:
7181  * 	IPR_RC_JOB_RETURN
7182  **/
7183 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7184 {
7185 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7186 
7187 	ENTER;
7188 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7189 
7190 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7191 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7192 			  sizeof(struct ipr_ioa_vpd));
7193 
7194 	LEAVE;
7195 	return IPR_RC_JOB_RETURN;
7196 }
7197 
7198 /**
7199  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7200  * @ipr_cmd:	ipr command struct
7201  *
7202  * This function sends an Identify Host Request Response Queue
7203  * command to establish the HRRQ with the adapter.
7204  *
7205  * Return value:
7206  * 	IPR_RC_JOB_RETURN
7207  **/
7208 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7209 {
7210 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7211 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7212 
7213 	ENTER;
7214 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7215 
7216 	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7217 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7218 
7219 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
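	/*
	 * Identify HRRQ CDB layout: byte 1 selects the SIS64 format,
	 * bytes 2-5 carry the low 32 bits of the host RRQ DMA address,
	 * bytes 7-8 the queue size, and bytes 10-13 the high 32 bits
	 * of the address on SIS64 adapters.
	 */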
7220 	if (ioa_cfg->sis64)
7221 		ioarcb->cmd_pkt.cdb[1] = 0x1;
7222 	ioarcb->cmd_pkt.cdb[2] =
7223 		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7224 	ioarcb->cmd_pkt.cdb[3] =
7225 		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7226 	ioarcb->cmd_pkt.cdb[4] =
7227 		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7228 	ioarcb->cmd_pkt.cdb[5] =
7229 		((u64) ioa_cfg->host_rrq_dma) & 0xff;
7230 	ioarcb->cmd_pkt.cdb[7] =
7231 		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7232 	ioarcb->cmd_pkt.cdb[8] =
7233 		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7234 
7235 	if (ioa_cfg->sis64) {
7236 		ioarcb->cmd_pkt.cdb[10] =
7237 			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7238 		ioarcb->cmd_pkt.cdb[11] =
7239 			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7240 		ioarcb->cmd_pkt.cdb[12] =
7241 			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7242 		ioarcb->cmd_pkt.cdb[13] =
7243 			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7244 	}
7245 
7246 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7247 
7248 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7249 
7250 	LEAVE;
7251 	return IPR_RC_JOB_RETURN;
7252 }
7253 
7254 /**
7255  * ipr_reset_timer_done - Adapter reset timer function
7256  * @ipr_cmd:	ipr command struct
7257  *
7258  * Description: This function is used in adapter reset processing
7259  * for timing events. If the reset_cmd pointer in the IOA
7260  * config struct no longer points to this command, we are doing
7261  * nested resets and fail_all_ops will take care of freeing the
7262  * command block.
7263  *
7264  * Return value:
7265  * 	none
7266  **/
7267 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7268 {
7269 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7270 	unsigned long lock_flags = 0;
7271 
7272 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7273 
7274 	if (ioa_cfg->reset_cmd == ipr_cmd) {
7275 		list_del(&ipr_cmd->queue);
7276 		ipr_cmd->done(ipr_cmd);
7277 	}
7278 
7279 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7280 }
7281 
7282 /**
7283  * ipr_reset_start_timer - Start a timer for adapter reset job
7284  * @ipr_cmd:	ipr command struct
7285  * @timeout:	timeout value
7286  *
7287  * Description: This function is used in adapter reset processing
7288  * for timing events. If the reset_cmd pointer in the IOA
7289  * config struct no longer points to this command, we are doing
7290  * nested resets and fail_all_ops will take care of freeing the
7291  * command block.
7292  *
7293  * Return value:
7294  * 	none
7295  **/
7296 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7297 				  unsigned long timeout)
7298 {
7299 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7300 	ipr_cmd->done = ipr_reset_ioa_job;
7301 
7302 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7303 	ipr_cmd->timer.expires = jiffies + timeout;
7304 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7305 	add_timer(&ipr_cmd->timer);
7306 }
7307 
7308 /**
7309  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7310  * @ioa_cfg:	ioa cfg struct
7311  *
7312  * Return value:
7313  * 	nothing
7314  **/
7315 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7316 {
7317 	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7318 
7319 	/* Initialize Host RRQ pointers */
7320 	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7321 	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7322 	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7323 	ioa_cfg->toggle_bit = 1;
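	/*
	 * The host RRQ was just zeroed, so start with a toggle bit of 1;
	 * the adapter flips it each pass through the circular queue so
	 * the ISR can tell new entries from stale ones.
	 */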
7324 
7325 	/* Zero out config table */
7326 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7327 }
7328 
7329 /**
7330  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7331  * @ipr_cmd:	ipr command struct
7332  *
7333  * Return value:
7334  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7335  **/
7336 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7337 {
7338 	unsigned long stage, stage_time;
7339 	u32 feedback;
7340 	volatile u32 int_reg;
7341 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7342 	u64 maskval = 0;
7343 
7344 	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7345 	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7346 	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7347 
7348 	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7349 
7350 	/* sanity check the stage_time value */
7351 	if (stage_time == 0)
7352 		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7353 	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7354 		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7355 	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7356 		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7357 
7358 	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7359 		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7360 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7361 		stage_time = ioa_cfg->transop_timeout;
7362 		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7363 	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7364 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7365 		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7366 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7367 			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7368 			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7369 			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7370 			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7371 			return IPR_RC_JOB_CONTINUE;
7372 		}
7373 	}
7374 
7375 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7376 	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7377 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7378 	ipr_cmd->done = ipr_reset_ioa_job;
7379 	add_timer(&ipr_cmd->timer);
7380 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7381 
7382 	return IPR_RC_JOB_RETURN;
7383 }
7384 
7385 /**
7386  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7387  * @ipr_cmd:	ipr command struct
7388  *
7389  * This function reinitializes some control blocks and
7390  * enables destructive diagnostics on the adapter.
7391  *
7392  * Return value:
7393  * 	IPR_RC_JOB_RETURN
7394  **/
7395 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7396 {
7397 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7398 	volatile u32 int_reg;
7399 	volatile u64 maskval;
7400 
7401 	ENTER;
7402 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7403 	ipr_init_ioa_mem(ioa_cfg);
7404 
7405 	ioa_cfg->allow_interrupts = 1;
7406 	if (ioa_cfg->sis64) {
7407 		/* Set the adapter to the correct endian mode. */
7408 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7409 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7410 	}
7411 
7412 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7413 
7414 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7415 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7416 		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7417 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7418 		return IPR_RC_JOB_CONTINUE;
7419 	}
7420 
7421 	/* Enable destructive diagnostics on IOA */
7422 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7423 
7424 	if (ioa_cfg->sis64) {
7425 		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7426 		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7427 		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7428 	} else
7429 		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7430 
7431 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7432 
7433 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7434 
7435 	if (ioa_cfg->sis64) {
7436 		ipr_cmd->job_step = ipr_reset_next_stage;
7437 		return IPR_RC_JOB_CONTINUE;
7438 	}
7439 
7440 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7441 	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7442 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7443 	ipr_cmd->done = ipr_reset_ioa_job;
7444 	add_timer(&ipr_cmd->timer);
7445 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7446 
7447 	LEAVE;
7448 	return IPR_RC_JOB_RETURN;
7449 }
7450 
7451 /**
7452  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7453  * @ipr_cmd:	ipr command struct
7454  *
7455  * This function is invoked when an adapter dump has run out
7456  * of processing time.
7457  *
7458  * Return value:
7459  * 	IPR_RC_JOB_CONTINUE
7460  **/
7461 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7462 {
7463 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7464 
7465 	if (ioa_cfg->sdt_state == GET_DUMP)
7466 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7467 	else if (ioa_cfg->sdt_state == READ_DUMP)
7468 		ioa_cfg->sdt_state = ABORT_DUMP;
7469 
7470 	ioa_cfg->dump_timeout = 1;
7471 	ipr_cmd->job_step = ipr_reset_alert;
7472 
7473 	return IPR_RC_JOB_CONTINUE;
7474 }
7475 
7476 /**
7477  * ipr_unit_check_no_data - Log a unit check/no data error log
7478  * @ioa_cfg:		ioa config struct
7479  *
7480  * Logs an error indicating the adapter unit checked, but for some
7481  * reason, we were unable to fetch the unit check buffer.
7482  *
7483  * Return value:
7484  * 	nothing
7485  **/
7486 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7487 {
7488 	ioa_cfg->errors_logged++;
7489 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7490 }
7491 
7492 /**
7493  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7494  * @ioa_cfg:		ioa config struct
7495  *
7496  * Fetches the unit check buffer from the adapter by clocking the data
7497  * through the mailbox register.
7498  *
7499  * Return value:
7500  * 	nothing
7501  **/
7502 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7503 {
7504 	unsigned long mailbox;
7505 	struct ipr_hostrcb *hostrcb;
7506 	struct ipr_uc_sdt sdt;
7507 	int rc, length;
7508 	u32 ioasc;
7509 
7510 	mailbox = readl(ioa_cfg->ioa_mailbox);
7511 
7512 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7513 		ipr_unit_check_no_data(ioa_cfg);
7514 		return;
7515 	}
7516 
7517 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7518 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7519 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7520 
7521 	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7522 	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7523 	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7524 		ipr_unit_check_no_data(ioa_cfg);
7525 		return;
7526 	}
7527 
7528 	/* Find length of the first sdt entry (UC buffer) */
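	/*
	 * Format 3 SDTs report the length directly in the end token,
	 * while format 2 tokens are addresses, so the length is the
	 * start/end token delta.
	 */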
7529 	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7530 		length = be32_to_cpu(sdt.entry[0].end_token);
7531 	else
7532 		length = (be32_to_cpu(sdt.entry[0].end_token) -
7533 			  be32_to_cpu(sdt.entry[0].start_token)) &
7534 			  IPR_FMT2_MBX_ADDR_MASK;
7535 
7536 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7537 			     struct ipr_hostrcb, queue);
7538 	list_del(&hostrcb->queue);
7539 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7540 
7541 	rc = ipr_get_ldump_data_section(ioa_cfg,
7542 					be32_to_cpu(sdt.entry[0].start_token),
7543 					(__be32 *)&hostrcb->hcam,
7544 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7545 
7546 	if (!rc) {
7547 		ipr_handle_log_data(ioa_cfg, hostrcb);
7548 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7549 		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7550 		    ioa_cfg->sdt_state == GET_DUMP)
7551 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7552 	} else
7553 		ipr_unit_check_no_data(ioa_cfg);
7554 
7555 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7556 }
7557 
7558 /**
7559  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7560  * @ipr_cmd:	ipr command struct
7561  *
7562  * Description: This function retrieves the unit check buffer from the adapter.
7563  *
7564  * Return value:
7565  *	IPR_RC_JOB_RETURN
7566  **/
7567 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7568 {
7569 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7570 
7571 	ENTER;
7572 	ioa_cfg->ioa_unit_checked = 0;
7573 	ipr_get_unit_check_buffer(ioa_cfg);
7574 	ipr_cmd->job_step = ipr_reset_alert;
7575 	ipr_reset_start_timer(ipr_cmd, 0);
7576 
7577 	LEAVE;
7578 	return IPR_RC_JOB_RETURN;
7579 }
7580 
7581 /**
7582  * ipr_reset_restore_cfg_space - Restore PCI config space.
7583  * @ipr_cmd:	ipr command struct
7584  *
7585  * Description: This function restores the saved PCI config space of
7586  * the adapter, fails all outstanding ops back to the callers, and
7587  * fetches the dump/unit check if applicable to this reset.
7588  *
7589  * Return value:
7590  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7591  **/
7592 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7593 {
7594 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7595 	u32 int_reg;
7596 
7597 	ENTER;
7598 	ioa_cfg->pdev->state_saved = true;
7599 	pci_restore_state(ioa_cfg->pdev);
7600 
7601 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7602 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7603 		return IPR_RC_JOB_CONTINUE;
7604 	}
7605 
7606 	ipr_fail_all_ops(ioa_cfg);
7607 
7608 	if (ioa_cfg->sis64) {
7609 		/* Set the adapter to the correct endian mode. */
7610 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7611 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7612 	}
7613 
7614 	if (ioa_cfg->ioa_unit_checked) {
7615 		if (ioa_cfg->sis64) {
7616 			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7617 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7618 			return IPR_RC_JOB_RETURN;
7619 		} else {
7620 			ioa_cfg->ioa_unit_checked = 0;
7621 			ipr_get_unit_check_buffer(ioa_cfg);
7622 			ipr_cmd->job_step = ipr_reset_alert;
7623 			ipr_reset_start_timer(ipr_cmd, 0);
7624 			return IPR_RC_JOB_RETURN;
7625 		}
7626 	}
7627 
7628 	if (ioa_cfg->in_ioa_bringdown) {
7629 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
7630 	} else {
7631 		ipr_cmd->job_step = ipr_reset_enable_ioa;
7632 
7633 		if (GET_DUMP == ioa_cfg->sdt_state) {
7634 			ioa_cfg->sdt_state = READ_DUMP;
7635 			ioa_cfg->dump_timeout = 0;
7636 			if (ioa_cfg->sis64)
7637 				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7638 			else
7639 				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7640 			ipr_cmd->job_step = ipr_reset_wait_for_dump;
7641 			schedule_work(&ioa_cfg->work_q);
7642 			return IPR_RC_JOB_RETURN;
7643 		}
7644 	}
7645 
7646 	LEAVE;
7647 	return IPR_RC_JOB_CONTINUE;
7648 }
7649 
7650 /**
7651  * ipr_reset_bist_done - BIST has completed on the adapter.
7652  * @ipr_cmd:	ipr command struct
7653  *
7654  * Description: Unblock config space and resume the reset process.
7655  *
7656  * Return value:
7657  * 	IPR_RC_JOB_CONTINUE
7658  **/
7659 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7660 {
7661 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7662 
7663 	ENTER;
7664 	if (ioa_cfg->cfg_locked)
7665 		pci_cfg_access_unlock(ioa_cfg->pdev);
7666 	ioa_cfg->cfg_locked = 0;
7667 	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7668 	LEAVE;
7669 	return IPR_RC_JOB_CONTINUE;
7670 }
7671 
7672 /**
7673  * ipr_reset_start_bist - Run BIST on the adapter.
7674  * @ipr_cmd:	ipr command struct
7675  *
7676  * Description: This function runs BIST on the adapter, then delays 2 seconds.
7677  *
7678  * Return value:
7679  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7680  **/
7681 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7682 {
7683 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7684 	int rc = PCIBIOS_SUCCESSFUL;
7685 
7686 	ENTER;
7687 	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7688 		writel(IPR_UPROCI_SIS64_START_BIST,
7689 		       ioa_cfg->regs.set_uproc_interrupt_reg32);
7690 	else
7691 		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7692 
7693 	if (rc == PCIBIOS_SUCCESSFUL) {
7694 		ipr_cmd->job_step = ipr_reset_bist_done;
7695 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7696 		rc = IPR_RC_JOB_RETURN;
7697 	} else {
7698 		if (ioa_cfg->cfg_locked)
7699 			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7700 		ioa_cfg->cfg_locked = 0;
7701 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7702 		rc = IPR_RC_JOB_CONTINUE;
7703 	}
7704 
7705 	LEAVE;
7706 	return rc;
7707 }
7708 
7709 /**
7710  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7711  * @ipr_cmd:	ipr command struct
7712  *
7713  * Description: This clears PCI reset to the adapter and delays two seconds.
7714  *
7715  * Return value:
7716  * 	IPR_RC_JOB_RETURN
7717  **/
7718 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7719 {
7720 	ENTER;
7721 	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7722 	ipr_cmd->job_step = ipr_reset_bist_done;
7723 	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7724 	LEAVE;
7725 	return IPR_RC_JOB_RETURN;
7726 }
7727 
7728 /**
7729  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7730  * @ipr_cmd:	ipr command struct
7731  *
7732  * Description: This asserts PCI reset to the adapter.
7733  *
7734  * Return value:
7735  * 	IPR_RC_JOB_RETURN
7736  **/
7737 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7738 {
7739 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7740 	struct pci_dev *pdev = ioa_cfg->pdev;
7741 
7742 	ENTER;
7743 	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7744 	ipr_cmd->job_step = ipr_reset_slot_reset_done;
7745 	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7746 	LEAVE;
7747 	return IPR_RC_JOB_RETURN;
7748 }
7749 
7750 /**
7751  * ipr_reset_block_config_access_wait - Wait for permission to block config access
7752  * @ipr_cmd:	ipr command struct
7753  *
7754  * Description: This attempts to block config access to the IOA.
7755  *
7756  * Return value:
7757  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7758  **/
7759 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
7760 {
7761 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7762 	int rc = IPR_RC_JOB_CONTINUE;
7763 
7764 	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
7765 		ioa_cfg->cfg_locked = 1;
7766 		ipr_cmd->job_step = ioa_cfg->reset;
7767 	} else {
7768 		if (ipr_cmd->u.time_left) {
7769 			rc = IPR_RC_JOB_RETURN;
7770 			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7771 			ipr_reset_start_timer(ipr_cmd,
7772 					      IPR_CHECK_FOR_RESET_TIMEOUT);
7773 		} else {
7774 			ipr_cmd->job_step = ioa_cfg->reset;
7775 			dev_err(&ioa_cfg->pdev->dev,
7776 				"Timed out waiting to lock config access. Resetting anyway.\n");
7777 		}
7778 	}
7779 
7780 	return rc;
7781 }
7782 
7783 /**
7784  * ipr_reset_block_config_access - Block config access to the IOA
7785  * @ipr_cmd:	ipr command struct
7786  *
7787  * Description: This attempts to block config access to the IOA
7788  *
7789  * Return value:
7790  * 	IPR_RC_JOB_CONTINUE
7791  **/
7792 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
7793 {
7794 	ipr_cmd->ioa_cfg->cfg_locked = 0;
7795 	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
7796 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7797 	return IPR_RC_JOB_CONTINUE;
7798 }
7799 
7800 /**
7801  * ipr_reset_allowed - Query whether or not IOA can be reset
7802  * @ioa_cfg:	ioa config struct
7803  *
7804  * Return value:
7805  * 	0 if reset not allowed / non-zero if reset is allowed
7806  **/
7807 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7808 {
7809 	volatile u32 temp_reg;
7810 
7811 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7812 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7813 }
7814 
7815 /**
7816  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7817  * @ipr_cmd:	ipr command struct
7818  *
7819  * Description: This function waits for adapter permission to run BIST,
7820  * then runs BIST. If the adapter does not give permission after a
7821  * reasonable time, we will reset the adapter anyway. The impact of
7822  * resetting the adapter without warning the adapter is the risk of
7823  * losing the persistent error log on the adapter. If the adapter is
7824  * reset while it is writing to the flash on the adapter, the flash
7825  * segment will have bad ECC and be zeroed.
7826  *
7827  * Return value:
7828  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7829  **/
7830 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7831 {
7832 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7833 	int rc = IPR_RC_JOB_RETURN;
7834 
7835 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7836 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7837 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7838 	} else {
7839 		ipr_cmd->job_step = ipr_reset_block_config_access;
7840 		rc = IPR_RC_JOB_CONTINUE;
7841 	}
7842 
7843 	return rc;
7844 }
7845 
7846 /**
7847  * ipr_reset_alert - Alert the adapter of a pending reset
7848  * @ipr_cmd:	ipr command struct
7849  *
7850  * Description: This function alerts the adapter that it will be reset.
7851  * If memory space is not currently enabled, proceed directly
7852  * to running BIST on the adapter. The timer must always be started
7853  * so we guarantee we do not run BIST from ipr_isr.
7854  *
7855  * Return value:
7856  * 	IPR_RC_JOB_RETURN
7857  **/
7858 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7859 {
7860 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7861 	u16 cmd_reg;
7862 	int rc;
7863 
7864 	ENTER;
7865 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7866 
7867 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7868 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7869 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7870 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7871 	} else {
7872 		ipr_cmd->job_step = ipr_reset_block_config_access;
7873 	}
7874 
7875 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7876 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7877 
7878 	LEAVE;
7879 	return IPR_RC_JOB_RETURN;
7880 }
7881 
7882 /**
7883  * ipr_reset_ucode_download_done - Microcode download completion
7884  * @ipr_cmd:	ipr command struct
7885  *
7886  * Description: This function unmaps the microcode download buffer.
7887  *
7888  * Return value:
7889  * 	IPR_RC_JOB_CONTINUE
7890  **/
7891 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7892 {
7893 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7894 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7895 
7896 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7897 		     sglist->num_sg, DMA_TO_DEVICE);
7898 
7899 	ipr_cmd->job_step = ipr_reset_alert;
7900 	return IPR_RC_JOB_CONTINUE;
7901 }
7902 
7903 /**
7904  * ipr_reset_ucode_download - Download microcode to the adapter
7905  * @ipr_cmd:	ipr command struct
7906  *
7907  * Description: This function checks to see if there is microcode
7908  * to download to the adapter. If there is, a download is performed.
7909  *
7910  * Return value:
7911  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7912  **/
7913 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7914 {
7915 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7916 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7917 
7918 	ENTER;
7919 	ipr_cmd->job_step = ipr_reset_alert;
7920 
7921 	if (!sglist)
7922 		return IPR_RC_JOB_CONTINUE;
7923 
7924 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7925 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7926 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7927 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
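	/* WRITE BUFFER CDB bytes 6-8: 24-bit big-endian parameter list length */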
7928 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7929 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7930 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7931 
7932 	if (ioa_cfg->sis64)
7933 		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7934 	else
7935 		ipr_build_ucode_ioadl(ipr_cmd, sglist);
7936 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
7937 
7938 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7939 		   IPR_WRITE_BUFFER_TIMEOUT);
7940 
7941 	LEAVE;
7942 	return IPR_RC_JOB_RETURN;
7943 }
7944 
7945 /**
7946  * ipr_reset_shutdown_ioa - Shutdown the adapter
7947  * @ipr_cmd:	ipr command struct
7948  *
7949  * Description: This function issues an adapter shutdown of the
7950  * specified type to the specified adapter as part of the
7951  * adapter reset job.
7952  *
7953  * Return value:
7954  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7955  **/
7956 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7957 {
7958 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7959 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7960 	unsigned long timeout;
7961 	int rc = IPR_RC_JOB_CONTINUE;
7962 
7963 	ENTER;
7964 	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7965 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7966 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7967 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7968 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7969 
7970 		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7971 			timeout = IPR_SHUTDOWN_TIMEOUT;
7972 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7973 			timeout = IPR_INTERNAL_TIMEOUT;
7974 		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7975 			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7976 		else
7977 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7978 
7979 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7980 
7981 		rc = IPR_RC_JOB_RETURN;
7982 		ipr_cmd->job_step = ipr_reset_ucode_download;
7983 	} else
7984 		ipr_cmd->job_step = ipr_reset_alert;
7985 
7986 	LEAVE;
7987 	return rc;
7988 }
7989 
7990 /**
7991  * ipr_reset_ioa_job - Adapter reset job
7992  * @ipr_cmd:	ipr command struct
7993  *
7994  * Description: This function is the job router for the adapter reset job.
7995  *
7996  * Return value:
7997  * 	none
7998  **/
7999 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8000 {
8001 	u32 rc, ioasc;
8002 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8003 
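	/*
	 * Each job_step returns IPR_RC_JOB_CONTINUE to run the next step
	 * synchronously in this loop, or IPR_RC_JOB_RETURN once it has
	 * queued asynchronous work that will re-enter this router as the
	 * command's done callback.
	 */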
8004 	do {
8005 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8006 
8007 		if (ioa_cfg->reset_cmd != ipr_cmd) {
8008 			/*
8009 			 * We are doing nested adapter resets and this is
8010 			 * not the current reset job.
8011 			 */
8012 			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8013 			return;
8014 		}
8015 
8016 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
8017 			rc = ipr_cmd->job_step_failed(ipr_cmd);
8018 			if (rc == IPR_RC_JOB_RETURN)
8019 				return;
8020 		}
8021 
8022 		ipr_reinit_ipr_cmnd(ipr_cmd);
8023 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8024 		rc = ipr_cmd->job_step(ipr_cmd);
8025 	} while(rc == IPR_RC_JOB_CONTINUE);
8026 }
8027 
8028 /**
8029  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8030  * @ioa_cfg:		ioa config struct
8031  * @job_step:		first job step of reset job
8032  * @shutdown_type:	shutdown type
8033  *
8034  * Description: This function will initiate the reset of the given adapter
8035  * starting at the selected job step.
8036  * If the caller needs to wait on the completion of the reset,
8037  * the caller must sleep on the reset_wait_q.
8038  *
8039  * Return value:
8040  * 	none
8041  **/
8042 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8043 				    int (*job_step) (struct ipr_cmnd *),
8044 				    enum ipr_shutdown_type shutdown_type)
8045 {
8046 	struct ipr_cmnd *ipr_cmd;
8047 
8048 	ioa_cfg->in_reset_reload = 1;
8049 	ioa_cfg->allow_cmds = 0;
8050 	scsi_block_requests(ioa_cfg->host);
8051 
8052 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8053 	ioa_cfg->reset_cmd = ipr_cmd;
8054 	ipr_cmd->job_step = job_step;
8055 	ipr_cmd->u.shutdown_type = shutdown_type;
8056 
8057 	ipr_reset_ioa_job(ipr_cmd);
8058 }
8059 
8060 /**
8061  * ipr_initiate_ioa_reset - Initiate an adapter reset
8062  * @ioa_cfg:		ioa config struct
8063  * @shutdown_type:	shutdown type
8064  *
8065  * Description: This function will initiate the reset of the given adapter.
8066  * If the caller needs to wait on the completion of the reset,
8067  * the caller must sleep on the reset_wait_q.
8068  *
8069  * Return value:
8070  * 	none
8071  **/
8072 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8073 				   enum ipr_shutdown_type shutdown_type)
8074 {
8075 	if (ioa_cfg->ioa_is_dead)
8076 		return;
8077 
8078 	if (ioa_cfg->in_reset_reload) {
8079 		if (ioa_cfg->sdt_state == GET_DUMP)
8080 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8081 		else if (ioa_cfg->sdt_state == READ_DUMP)
8082 			ioa_cfg->sdt_state = ABORT_DUMP;
8083 	}
8084 
8085 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8086 		dev_err(&ioa_cfg->pdev->dev,
8087 			"IOA taken offline - error recovery failed\n");
8088 
8089 		ioa_cfg->reset_retries = 0;
8090 		ioa_cfg->ioa_is_dead = 1;
8091 
8092 		if (ioa_cfg->in_ioa_bringdown) {
8093 			ioa_cfg->reset_cmd = NULL;
8094 			ioa_cfg->in_reset_reload = 0;
8095 			ipr_fail_all_ops(ioa_cfg);
8096 			wake_up_all(&ioa_cfg->reset_wait_q);
8097 
8098 			spin_unlock_irq(ioa_cfg->host->host_lock);
8099 			scsi_unblock_requests(ioa_cfg->host);
8100 			spin_lock_irq(ioa_cfg->host->host_lock);
8101 			return;
8102 		} else {
8103 			ioa_cfg->in_ioa_bringdown = 1;
8104 			shutdown_type = IPR_SHUTDOWN_NONE;
8105 		}
8106 	}
8107 
8108 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8109 				shutdown_type);
8110 }
8111 
8112 /**
8113  * ipr_reset_freeze - Hold off all I/O activity
8114  * @ipr_cmd:	ipr command struct
8115  *
8116  * Description: If the PCI slot is frozen, hold off all I/O
8117  * activity; then, as soon as the slot is available again,
8118  * initiate an adapter reset.
8119  */
8120 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8121 {
8122 	/* Disallow new interrupts, avoid loop */
8123 	ipr_cmd->ioa_cfg->allow_interrupts = 0;
8124 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
8125 	ipr_cmd->done = ipr_reset_ioa_job;
8126 	return IPR_RC_JOB_RETURN;
8127 }
8128 
8129 /**
8130  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8131  * @pdev:	PCI device struct
8132  *
8133  * Description: This routine is called to tell us that the PCI bus
8134  * is down. Can't do anything here, except put the device driver
8135  * into a holding pattern, waiting for the PCI bus to come back.
8136  */
8137 static void ipr_pci_frozen(struct pci_dev *pdev)
8138 {
8139 	unsigned long flags = 0;
8140 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8141 
8142 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8143 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8144 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8145 }
8146 
8147 /**
8148  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8149  * @pdev:	PCI device struct
8150  *
8151  * Description: This routine is called by the pci error recovery
8152  * code after the PCI slot has been reset, just before we
8153  * should resume normal operations.
8154  */
8155 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8156 {
8157 	unsigned long flags = 0;
8158 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8159 
8160 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8161 	if (ioa_cfg->needs_warm_reset)
8162 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8163 	else
8164 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8165 					IPR_SHUTDOWN_NONE);
8166 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8167 	return PCI_ERS_RESULT_RECOVERED;
8168 }
8169 
8170 /**
8171  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8172  * @pdev:	PCI device struct
8173  *
8174  * Description: This routine is called when the PCI bus has
8175  * permanently failed.
8176  */
8177 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8178 {
8179 	unsigned long flags = 0;
8180 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8181 
8182 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8183 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8184 		ioa_cfg->sdt_state = ABORT_DUMP;
8185 	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8186 	ioa_cfg->in_ioa_bringdown = 1;
8187 	ioa_cfg->allow_cmds = 0;
8188 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8189 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8190 }
8191 
8192 /**
8193  * ipr_pci_error_detected - Called when a PCI error is detected.
8194  * @pdev:	PCI device struct
8195  * @state:	PCI channel state
8196  *
8197  * Description: Called when a PCI error is detected.
8198  *
8199  * Return value:
8200  * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8201  */
8202 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8203 					       pci_channel_state_t state)
8204 {
8205 	switch (state) {
8206 	case pci_channel_io_frozen:
8207 		ipr_pci_frozen(pdev);
8208 		return PCI_ERS_RESULT_NEED_RESET;
8209 	case pci_channel_io_perm_failure:
8210 		ipr_pci_perm_failure(pdev);
8211 		return PCI_ERS_RESULT_DISCONNECT;
8213 	default:
8214 		break;
8215 	}
8216 	return PCI_ERS_RESULT_NEED_RESET;
8217 }
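
/*
 * For reference, the PCI error recovery core drives the callbacks above
 * in a fixed sequence. The sketch below is a simplified illustration of
 * that flow, not the actual core implementation; detect_channel_state()
 * and reset_pci_slot() are placeholder names standing in for
 * platform-specific steps:
 *
 *	state = detect_channel_state(pdev);
 *	rc = driver->err_handler->error_detected(pdev, state);
 *	if (rc == PCI_ERS_RESULT_NEED_RESET) {
 *		reset_pci_slot(pdev);
 *		driver->err_handler->slot_reset(pdev);
 *	}
 *
 * ipr wires these callbacks into the ipr_err_handler structure attached
 * to the ipr_driver pci_driver declaration near the end of this file.
 */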
8218 
8219 /**
8220  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8221  * @ioa_cfg:	ioa cfg struct
8222  *
8223  * Description: This is the second phase of adapter initialization.
8224  * This function takes care of initializing the adapter to the point
8225  * where it can accept new commands.
8226  *
8227  * Return value:
8228  * 	0 on success / -EIO on failure
8229  **/
8230 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8231 {
8232 	int rc = 0;
8233 	unsigned long host_lock_flags = 0;
8234 
8235 	ENTER;
8236 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8237 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8238 	if (ioa_cfg->needs_hard_reset) {
8239 		ioa_cfg->needs_hard_reset = 0;
8240 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8241 	} else
8242 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8243 					IPR_SHUTDOWN_NONE);
8244 
8245 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8246 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8247 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8248 
8249 	if (ioa_cfg->ioa_is_dead) {
8250 		rc = -EIO;
8251 	} else if (ipr_invalid_adapter(ioa_cfg)) {
8252 		if (!ipr_testmode)
8253 			rc = -EIO;
8254 
8255 		dev_err(&ioa_cfg->pdev->dev,
8256 			"Adapter not supported in this hardware configuration.\n");
8257 	}
8258 
8259 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8260 
8261 	LEAVE;
8262 	return rc;
8263 }
8264 
8265 /**
8266  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8267  * @ioa_cfg:	ioa config struct
8268  *
8269  * Return value:
8270  * 	none
8271  **/
8272 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8273 {
8274 	int i;
8275 
8276 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8277 		if (ioa_cfg->ipr_cmnd_list[i])
8278 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
8279 				      ioa_cfg->ipr_cmnd_list[i],
8280 				      ioa_cfg->ipr_cmnd_list_dma[i]);
8281 
8282 		ioa_cfg->ipr_cmnd_list[i] = NULL;
8283 	}
8284 
8285 	if (ioa_cfg->ipr_cmd_pool)
8286 		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8287 
8288 	kfree(ioa_cfg->ipr_cmnd_list);
8289 	kfree(ioa_cfg->ipr_cmnd_list_dma);
8290 	ioa_cfg->ipr_cmnd_list = NULL;
8291 	ioa_cfg->ipr_cmnd_list_dma = NULL;
8292 	ioa_cfg->ipr_cmd_pool = NULL;
8293 }
8294 
8295 /**
8296  * ipr_free_mem - Frees memory allocated for an adapter
8297  * @ioa_cfg:	ioa cfg struct
8298  *
8299  * Return value:
8300  * 	nothing
8301  **/
8302 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8303 {
8304 	int i;
8305 
8306 	kfree(ioa_cfg->res_entries);
	kfree(ioa_cfg->target_ids);
	kfree(ioa_cfg->array_ids);
	kfree(ioa_cfg->vset_ids);
8307 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8308 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8309 	ipr_free_cmd_blks(ioa_cfg);
8310 	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8311 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8312 	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8313 			    ioa_cfg->u.cfg_table,
8314 			    ioa_cfg->cfg_table_dma);
8315 
8316 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8317 		pci_free_consistent(ioa_cfg->pdev,
8318 				    sizeof(struct ipr_hostrcb),
8319 				    ioa_cfg->hostrcb[i],
8320 				    ioa_cfg->hostrcb_dma[i]);
8321 	}
8322 
8323 	ipr_free_dump(ioa_cfg);
8324 	kfree(ioa_cfg->trace);
8325 }
8326 
8327 /**
8328  * ipr_free_all_resources - Free all allocated resources for an adapter.
8329  * @ioa_cfg:	ioa config struct
8330  *
8331  * This function frees all allocated resources for the
8332  * specified adapter.
8333  *
8334  * Return value:
8335  * 	none
8336  **/
8337 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8338 {
8339 	struct pci_dev *pdev = ioa_cfg->pdev;
8340 
8341 	ENTER;
8342 	free_irq(pdev->irq, ioa_cfg);
8343 	pci_disable_msi(pdev);
8344 	iounmap(ioa_cfg->hdw_dma_regs);
8345 	pci_release_regions(pdev);
8346 	ipr_free_mem(ioa_cfg);
8347 	scsi_host_put(ioa_cfg->host);
8348 	pci_disable_device(pdev);
8349 	LEAVE;
8350 }
8351 
8352 /**
8353  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8354  * @ioa_cfg:	ioa config struct
8355  *
8356  * Return value:
8357  * 	0 on success / -ENOMEM on allocation failure
8358  **/
8359 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8360 {
8361 	struct ipr_cmnd *ipr_cmd;
8362 	struct ipr_ioarcb *ioarcb;
8363 	dma_addr_t dma_addr;
8364 	int i;
8365 
8366 	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8367 						 sizeof(struct ipr_cmnd), 512, 0);
8368 
8369 	if (!ioa_cfg->ipr_cmd_pool)
8370 		return -ENOMEM;
8371 
8372 	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8373 	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8374 
8375 	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8376 		ipr_free_cmd_blks(ioa_cfg);
8377 		return -ENOMEM;
8378 	}
8379 
8380 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8381 		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8382 
8383 		if (!ipr_cmd) {
8384 			ipr_free_cmd_blks(ioa_cfg);
8385 			return -ENOMEM;
8386 		}
8387 
8388 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8389 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8390 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8391 
8392 		ioarcb = &ipr_cmd->ioarcb;
8393 		ipr_cmd->dma_addr = dma_addr;
8394 		if (ioa_cfg->sis64)
8395 			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8396 		else
8397 			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8398 
8399 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
8400 		if (ioa_cfg->sis64) {
8401 			ioarcb->u.sis64_addr_data.data_ioadl_addr =
8402 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8403 			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8404 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8405 		} else {
8406 			ioarcb->write_ioadl_addr =
8407 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8408 			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8409 			ioarcb->ioasa_host_pci_addr =
8410 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8411 		}
8412 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8413 		ipr_cmd->cmd_index = i;
8414 		ipr_cmd->ioa_cfg = ioa_cfg;
8415 		ipr_cmd->sense_buffer_dma = dma_addr +
8416 			offsetof(struct ipr_cmnd, sense_buffer);
8417 
8418 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8419 	}
8420 
8421 	return 0;
8422 }
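
/*
 * The command blocks above come from a PCI DMA pool, which hands out
 * fixed-size, alignment-guaranteed, DMA-coherent chunks. A minimal
 * sketch of the idiom, using a made-up pool named "example" with
 * 128-byte objects aligned to 8 bytes:
 *
 *	struct pci_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = pci_pool_create("example", pdev, 128, 8, 0);
 *	vaddr = pci_pool_alloc(pool, GFP_KERNEL, &dma);
 *	...
 *	pci_pool_free(pool, vaddr, dma);
 *	pci_pool_destroy(pool);
 *
 * ipr_alloc_cmd_blks() creates its pool with 512-byte alignment so each
 * struct ipr_cmnd, and thus the IOARCB embedded at its start, lands on
 * a boundary the adapter can address directly.
 */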
8423 
8424 /**
8425  * ipr_alloc_mem - Allocate memory for an adapter
8426  * @ioa_cfg:	ioa config struct
8427  *
8428  * Return value:
8429  * 	0 on success / non-zero for error
8430  **/
8431 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8432 {
8433 	struct pci_dev *pdev = ioa_cfg->pdev;
8434 	int i, rc = -ENOMEM;
8435 
8436 	ENTER;
8437 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8438 				       ioa_cfg->max_devs_supported, GFP_KERNEL);
8439 
8440 	if (!ioa_cfg->res_entries)
8441 		goto out;
8442 
8443 	if (ioa_cfg->sis64) {
8444 		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8445 					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8446 		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8447 					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8448 		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8449 					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);

		if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
		    || !ioa_cfg->vset_ids)
			goto out_free_res_entries;
8450 	}
8451 
8452 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8453 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8454 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8455 	}
8456 
8457 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8458 						sizeof(struct ipr_misc_cbs),
8459 						&ioa_cfg->vpd_cbs_dma);
8460 
8461 	if (!ioa_cfg->vpd_cbs)
8462 		goto out_free_res_entries;
8463 
8464 	if (ipr_alloc_cmd_blks(ioa_cfg))
8465 		goto out_free_vpd_cbs;
8466 
8467 	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8468 						 sizeof(u32) * IPR_NUM_CMD_BLKS,
8469 						 &ioa_cfg->host_rrq_dma);
8470 
8471 	if (!ioa_cfg->host_rrq)
8472 		goto out_ipr_free_cmd_blocks;
8473 
8474 	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8475 						    ioa_cfg->cfg_table_size,
8476 						    &ioa_cfg->cfg_table_dma);
8477 
8478 	if (!ioa_cfg->u.cfg_table)
8479 		goto out_free_host_rrq;
8480 
8481 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8482 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8483 							   sizeof(struct ipr_hostrcb),
8484 							   &ioa_cfg->hostrcb_dma[i]);
8485 
8486 		if (!ioa_cfg->hostrcb[i])
8487 			goto out_free_hostrcb_dma;
8488 
8489 		ioa_cfg->hostrcb[i]->hostrcb_dma =
8490 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8491 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8492 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8493 	}
8494 
8495 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8496 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8497 
8498 	if (!ioa_cfg->trace)
8499 		goto out_free_hostrcb_dma;
8500 
8501 	rc = 0;
8502 out:
8503 	LEAVE;
8504 	return rc;
8505 
8506 out_free_hostrcb_dma:
8507 	while (i-- > 0) {
8508 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8509 				    ioa_cfg->hostrcb[i],
8510 				    ioa_cfg->hostrcb_dma[i]);
8511 	}
8512 	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8513 			    ioa_cfg->u.cfg_table,
8514 			    ioa_cfg->cfg_table_dma);
8515 out_free_host_rrq:
8516 	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8517 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8518 out_ipr_free_cmd_blocks:
8519 	ipr_free_cmd_blks(ioa_cfg);
8520 out_free_vpd_cbs:
8521 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8522 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8523 out_free_res_entries:
8524 	kfree(ioa_cfg->res_entries);
	kfree(ioa_cfg->target_ids);
	kfree(ioa_cfg->array_ids);
	kfree(ioa_cfg->vset_ids);
8525 	goto out;
8526 }
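
/*
 * ipr_alloc_mem() above uses the standard kernel "goto unwind" idiom:
 * each allocation that can fail jumps to a label that releases, in
 * reverse order, only what was already allocated. A minimal sketch of
 * the shape (alloc_a/alloc_b/free_a are illustrative names):
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto out;
 *	b = alloc_b();
 *	if (!b)
 *		goto out_free_a;
 *	return 0;
 * out_free_a:
 *	free_a(a);
 * out:
 *	return -ENOMEM;
 *
 * Note that the out_free_hostrcb_dma path walks i back down so only
 * the HCAM buffers that were actually allocated get freed.
 */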
8527 
8528 /**
8529  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8530  * @ioa_cfg:	ioa config struct
8531  *
8532  * Return value:
8533  * 	none
8534  **/
8535 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8536 {
8537 	int i;
8538 
8539 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8540 		ioa_cfg->bus_attr[i].bus = i;
8541 		ioa_cfg->bus_attr[i].qas_enabled = 0;
8542 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8543 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8544 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8545 		else
8546 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8547 	}
8548 }
8549 
8550 /**
8551  * ipr_init_ioa_cfg - Initialize IOA config struct
8552  * @ioa_cfg:	ioa config struct
8553  * @host:		scsi host struct
8554  * @pdev:		PCI dev struct
8555  *
8556  * Return value:
8557  * 	none
8558  **/
8559 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8560 				       struct Scsi_Host *host, struct pci_dev *pdev)
8561 {
8562 	const struct ipr_interrupt_offsets *p;
8563 	struct ipr_interrupts *t;
8564 	void __iomem *base;
8565 
8566 	ioa_cfg->host = host;
8567 	ioa_cfg->pdev = pdev;
8568 	ioa_cfg->log_level = ipr_log_level;
8569 	ioa_cfg->doorbell = IPR_DOORBELL;
8570 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8571 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8572 	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8573 	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8574 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8575 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8576 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8577 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8578 
8579 	INIT_LIST_HEAD(&ioa_cfg->free_q);
8580 	INIT_LIST_HEAD(&ioa_cfg->pending_q);
8581 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8582 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8583 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8584 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8585 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8586 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
8587 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8588 	ioa_cfg->sdt_state = INACTIVE;
8589 
8590 	ipr_initialize_bus_attr(ioa_cfg);
8591 	ioa_cfg->max_devs_supported = ipr_max_devs;
8592 
8593 	if (ioa_cfg->sis64) {
8594 		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8595 		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8596 		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8597 			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8598 	} else {
8599 		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8600 		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8601 		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8602 			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8603 	}
8604 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
8605 	host->unique_id = host->host_no;
8606 	host->max_cmd_len = IPR_MAX_CDB_LEN;
8607 	host->can_queue = ioa_cfg->max_cmds;
8608 	pci_set_drvdata(pdev, ioa_cfg);
8609 
8610 	p = &ioa_cfg->chip_cfg->regs;
8611 	t = &ioa_cfg->regs;
8612 	base = ioa_cfg->hdw_dma_regs;
8613 
8614 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8615 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8616 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8617 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8618 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8619 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8620 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8621 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8622 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8623 	t->ioarrin_reg = base + p->ioarrin_reg;
8624 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8625 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8626 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8627 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8628 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8629 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8630 
8631 	if (ioa_cfg->sis64) {
8632 		t->init_feedback_reg = base + p->init_feedback_reg;
8633 		t->dump_addr_reg = base + p->dump_addr_reg;
8634 		t->dump_data_reg = base + p->dump_data_reg;
8635 		t->endian_swap_reg = base + p->endian_swap_reg;
8636 	}
8637 }
8638 
8639 /**
8640  * ipr_get_chip_info - Find adapter chip information
8641  * @dev_id:		PCI device id struct
8642  *
8643  * Return value:
8644  * 	ptr to chip information on success / NULL on failure
8645  **/
8646 static const struct ipr_chip_t * __devinit
8647 ipr_get_chip_info(const struct pci_device_id *dev_id)
8648 {
8649 	int i;
8650 
8651 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8652 		if (ipr_chip[i].vendor == dev_id->vendor &&
8653 		    ipr_chip[i].device == dev_id->device)
8654 			return &ipr_chip[i];
8655 	return NULL;
8656 }
8657 
8658 /**
8659  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8660  * @irq:		interrupt number
 * @devp:	pointer to the ioa config struct
8661  *
8662  * Description: Simply set the msi_received flag to 1 indicating that
8663  * Message Signaled Interrupts are supported.
8664  *
8665  * Return value:
8666  * 	0 on success / non-zero on failure
8667  **/
8668 static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8669 {
8670 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8671 	unsigned long lock_flags = 0;
8672 	irqreturn_t rc = IRQ_HANDLED;
8673 
8674 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8675 
8676 	ioa_cfg->msi_received = 1;
8677 	wake_up(&ioa_cfg->msi_wait_q);
8678 
8679 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8680 	return rc;
8681 }
8682 
8683 /**
8684  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8685  * @ioa_cfg:	ioa config struct
 * @pdev:		PCI device struct
8686  *
8687  * Description: The return value from pci_enable_msi() cannot always be
8688  * trusted.  This routine sets up and initiates a test interrupt to determine
8689  * if the interrupt is received via the ipr_test_intr() service routine.
8690  * If the test fails, the driver will fall back to LSI.
8691  *
8692  * Return value:
8693  * 	0 on success / non-zero on failure
8694  **/
8695 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8696 				  struct pci_dev *pdev)
8697 {
8698 	int rc;
8699 	volatile u32 int_reg;
8700 	unsigned long lock_flags = 0;
8701 
8702 	ENTER;
8703 
8704 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8705 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8706 	ioa_cfg->msi_received = 0;
8707 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8708 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8709 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8710 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8711 
8712 	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8713 	if (rc) {
8714 		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8715 		return rc;
8716 	} else if (ipr_debug)
8717 		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8718 
8719 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8720 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8721 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8722 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8723 
8724 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8725 	if (!ioa_cfg->msi_received) {
8726 		/* MSI test failed */
8727 		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
8728 		rc = -EOPNOTSUPP;
8729 	} else if (ipr_debug)
8730 		dev_info(&pdev->dev, "MSI test succeeded.\n");
8731 
8732 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8733 
8734 	free_irq(pdev->irq, ioa_cfg);
8735 
8736 	LEAVE;
8737 
8738 	return rc;
8739 }
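
/*
 * The MSI probe above follows a generic "fire and wait" pattern:
 * install a throwaway handler, poke the hardware to raise exactly one
 * interrupt, then block in wait_event_timeout() on a flag the handler
 * sets, here for up to one second (HZ jiffies). If the flag is still
 * clear when the wait returns, the interrupt never arrived and the
 * caller falls back to legacy (LSI) interrupts. The readl() after each
 * writel() flushes the posted MMIO write so the test interrupt is
 * actually raised before the driver starts waiting.
 */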
8740 
8741 /**
8742  * ipr_probe_ioa - Allocates memory and does first stage of initialization
8743  * @pdev:		PCI device struct
8744  * @dev_id:		PCI device id struct
8745  *
8746  * Return value:
8747  * 	0 on success / non-zero on failure
8748  **/
8749 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8750 				   const struct pci_device_id *dev_id)
8751 {
8752 	struct ipr_ioa_cfg *ioa_cfg;
8753 	struct Scsi_Host *host;
8754 	unsigned long ipr_regs_pci;
8755 	void __iomem *ipr_regs;
8756 	int rc = PCIBIOS_SUCCESSFUL;
8757 	volatile u32 mask, uproc, interrupts;
8758 
8759 	ENTER;
8760 
8761 	if ((rc = pci_enable_device(pdev))) {
8762 		dev_err(&pdev->dev, "Cannot enable adapter\n");
8763 		goto out;
8764 	}
8765 
8766 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8767 
8768 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8769 
8770 	if (!host) {
8771 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8772 		rc = -ENOMEM;
8773 		goto out_disable;
8774 	}
8775 
8776 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8777 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8778 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8779 		      sata_port_info.flags, &ipr_sata_ops);
8780 
8781 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8782 
8783 	if (!ioa_cfg->ipr_chip) {
8784 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8785 			dev_id->vendor, dev_id->device);
8786 		goto out_scsi_host_put;
8787 	}
8788 
8789 	/* set SIS 32 or SIS 64 */
8790 	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8791 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8792 	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
8793 	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
8794 
8795 	if (ipr_transop_timeout)
8796 		ioa_cfg->transop_timeout = ipr_transop_timeout;
8797 	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8798 		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8799 	else
8800 		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8801 
8802 	ioa_cfg->revid = pdev->revision;
8803 
8804 	ipr_regs_pci = pci_resource_start(pdev, 0);
8805 
8806 	rc = pci_request_regions(pdev, IPR_NAME);
8807 	if (rc < 0) {
8808 		dev_err(&pdev->dev,
8809 			"Couldn't register memory range of registers\n");
8810 		goto out_scsi_host_put;
8811 	}
8812 
8813 	ipr_regs = pci_ioremap_bar(pdev, 0);
8814 
8815 	if (!ipr_regs) {
8816 		dev_err(&pdev->dev,
8817 			"Couldn't map memory range of registers\n");
8818 		rc = -ENOMEM;
8819 		goto out_release_regions;
8820 	}
8821 
8822 	ioa_cfg->hdw_dma_regs = ipr_regs;
8823 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8824 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8825 
8826 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8827 
8828 	pci_set_master(pdev);
8829 
8830 	if (ioa_cfg->sis64) {
8831 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8832 		if (rc < 0) {
8833 			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8834 			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8835 		}
8836 
8837 	} else
8838 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8839 
8840 	if (rc < 0) {
8841 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8842 		goto cleanup_nomem;
8843 	}
8844 
8845 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8846 				   ioa_cfg->chip_cfg->cache_line_size);
8847 
8848 	if (rc != PCIBIOS_SUCCESSFUL) {
8849 		dev_err(&pdev->dev, "Write of cache line size failed\n");
8850 		rc = -EIO;
8851 		goto cleanup_nomem;
8852 	}
8853 
8854 	/* Enable MSI style interrupts if they are supported. */
8855 	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8856 		rc = ipr_test_msi(ioa_cfg, pdev);
8857 		if (rc == -EOPNOTSUPP)
8858 			pci_disable_msi(pdev);
8859 		else if (rc)
8860 			goto out_msi_disable;
8861 		else
8862 			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8863 	} else if (ipr_debug)
8864 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
8865 
8866 	/* Save away PCI config space for use following IOA reset */
8867 	rc = pci_save_state(pdev);
8868 
8869 	if (rc != PCIBIOS_SUCCESSFUL) {
8870 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
8871 		rc = -EIO;
8872 		goto out_msi_disable;
8873 	}
8874 
8875 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8876 		goto out_msi_disable;
8877 
8878 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8879 		goto out_msi_disable;
8880 
8881 	if (ioa_cfg->sis64)
8882 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8883 				+ ((sizeof(struct ipr_config_table_entry64)
8884 				* ioa_cfg->max_devs_supported)));
8885 	else
8886 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8887 				+ ((sizeof(struct ipr_config_table_entry)
8888 				* ioa_cfg->max_devs_supported)));
8889 
8890 	rc = ipr_alloc_mem(ioa_cfg);
8891 	if (rc < 0) {
8892 		dev_err(&pdev->dev,
8893 			"Couldn't allocate enough memory for device driver!\n");
8894 		goto out_msi_disable;
8895 	}
8896 
8897 	/*
8898 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
8899 	 * the card is in an unknown state and needs a hard reset
8900 	 */
8901 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8902 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8903 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8904 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8905 		ioa_cfg->needs_hard_reset = 1;
8906 	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
8907 		ioa_cfg->needs_hard_reset = 1;
8908 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8909 		ioa_cfg->ioa_unit_checked = 1;
8910 
8911 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8912 	rc = request_irq(pdev->irq, ipr_isr,
8913 			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8914 			 IPR_NAME, ioa_cfg);
8915 
8916 	if (rc) {
8917 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8918 			pdev->irq, rc);
8919 		goto cleanup_nolog;
8920 	}
8921 
8922 	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8923 	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8924 		ioa_cfg->needs_warm_reset = 1;
8925 		ioa_cfg->reset = ipr_reset_slot_reset;
8926 	} else
8927 		ioa_cfg->reset = ipr_reset_start_bist;
8928 
8929 	spin_lock(&ipr_driver_lock);
8930 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8931 	spin_unlock(&ipr_driver_lock);
8932 
8933 	LEAVE;
8934 out:
8935 	return rc;
8936 
8937 cleanup_nolog:
8938 	ipr_free_mem(ioa_cfg);
8939 out_msi_disable:
8940 	pci_disable_msi(pdev);
8941 cleanup_nomem:
8942 	iounmap(ipr_regs);
8943 out_release_regions:
8944 	pci_release_regions(pdev);
8945 out_scsi_host_put:
8946 	scsi_host_put(host);
8947 out_disable:
8948 	pci_disable_device(pdev);
8949 	goto out;
8950 }
8951 
8952 /**
8953  * ipr_scan_vsets - Scans for VSET devices
8954  * @ioa_cfg:	ioa config struct
8955  *
8956  * Description: Since the VSET resources do not follow SAM in that we can have
8957  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8958  *
8959  * Return value:
8960  * 	none
8961  **/
8962 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8963 {
8964 	int target, lun;
8965 
8966 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8967 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
8968 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8969 }
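
/*
 * scsi_scan_host() only finds LUNs discoverable through REPORT LUNS or
 * a sequential scan starting at LUN 0, so each candidate VSET
 * target/LUN is registered explicitly with scsi_add_device();
 * nonexistent devices simply fail the midlayer's INQUIRY probe and are
 * not added.
 */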
8970 
8971 /**
8972  * ipr_initiate_ioa_bringdown - Bring down an adapter
8973  * @ioa_cfg:		ioa config struct
8974  * @shutdown_type:	shutdown type
8975  *
8976  * Description: This function will initiate bringing down the adapter.
8977  * This consists of issuing an IOA shutdown to the adapter
8978  * to flush the cache, and running BIST.
8979  * If the caller needs to wait on the completion of the reset,
8980  * the caller must sleep on the reset_wait_q.
8981  *
8982  * Return value:
8983  * 	none
8984  **/
8985 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8986 				       enum ipr_shutdown_type shutdown_type)
8987 {
8988 	ENTER;
8989 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8990 		ioa_cfg->sdt_state = ABORT_DUMP;
8991 	ioa_cfg->reset_retries = 0;
8992 	ioa_cfg->in_ioa_bringdown = 1;
8993 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8994 	LEAVE;
8995 }
8996 
8997 /**
8998  * __ipr_remove - Remove a single adapter
8999  * @pdev:	pci device struct
9000  *
9001  * Adapter hot plug remove entry point.
9002  *
9003  * Return value:
9004  * 	none
9005  **/
9006 static void __ipr_remove(struct pci_dev *pdev)
9007 {
9008 	unsigned long host_lock_flags = 0;
9009 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9010 	ENTER;
9011 
9012 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9013 	while (ioa_cfg->in_reset_reload) {
9014 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9015 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9016 		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9017 	}
9018 
9019 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9020 
9021 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9022 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9023 	flush_work_sync(&ioa_cfg->work_q);
9024 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9025 
9026 	spin_lock(&ipr_driver_lock);
9027 	list_del(&ioa_cfg->queue);
9028 	spin_unlock(&ipr_driver_lock);
9029 
9030 	if (ioa_cfg->sdt_state == ABORT_DUMP)
9031 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9032 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9033 
9034 	ipr_free_all_resources(ioa_cfg);
9035 
9036 	LEAVE;
9037 }
9038 
9039 /**
9040  * ipr_remove - IOA hot plug remove entry point
9041  * @pdev:	pci device struct
9042  *
9043  * Adapter hot plug remove entry point.
9044  *
9045  * Return value:
9046  * 	none
9047  **/
9048 static void __devexit ipr_remove(struct pci_dev *pdev)
9049 {
9050 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9051 
9052 	ENTER;
9053 
9054 	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9055 			      &ipr_trace_attr);
9056 	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9057 			     &ipr_dump_attr);
9058 	scsi_remove_host(ioa_cfg->host);
9059 
9060 	__ipr_remove(pdev);
9061 
9062 	LEAVE;
9063 }
9064 
9065 /**
9066  * ipr_probe - Adapter hot plug add entry point
9067  * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
9068  * Return value:
9069  * 	0 on success / non-zero on failure
9070  **/
9071 static int __devinit ipr_probe(struct pci_dev *pdev,
9072 			       const struct pci_device_id *dev_id)
9073 {
9074 	struct ipr_ioa_cfg *ioa_cfg;
9075 	int rc;
9076 
9077 	rc = ipr_probe_ioa(pdev, dev_id);
9078 
9079 	if (rc)
9080 		return rc;
9081 
9082 	ioa_cfg = pci_get_drvdata(pdev);
9083 	rc = ipr_probe_ioa_part2(ioa_cfg);
9084 
9085 	if (rc) {
9086 		__ipr_remove(pdev);
9087 		return rc;
9088 	}
9089 
9090 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9091 
9092 	if (rc) {
9093 		__ipr_remove(pdev);
9094 		return rc;
9095 	}
9096 
9097 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9098 				   &ipr_trace_attr);
9099 
9100 	if (rc) {
9101 		scsi_remove_host(ioa_cfg->host);
9102 		__ipr_remove(pdev);
9103 		return rc;
9104 	}
9105 
9106 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9107 				   &ipr_dump_attr);
9108 
9109 	if (rc) {
9110 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9111 				      &ipr_trace_attr);
9112 		scsi_remove_host(ioa_cfg->host);
9113 		__ipr_remove(pdev);
9114 		return rc;
9115 	}
9116 
9117 	scsi_scan_host(ioa_cfg->host);
9118 	ipr_scan_vsets(ioa_cfg);
9119 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9120 	ioa_cfg->allow_ml_add_del = 1;
9121 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
9122 	schedule_work(&ioa_cfg->work_q);
9123 	return 0;
9124 }
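
/*
 * Probe is deliberately split in two: ipr_probe_ioa() performs the
 * sleepable PCI and memory setup, and ipr_probe_ioa_part2() drives the
 * reset job that brings the IOA operational before the host is exposed
 * to the SCSI midlayer. Every failure path after ipr_probe_ioa()
 * succeeds funnels through __ipr_remove(), keeping teardown identical
 * to hot unplug.
 */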
9125 
9126 /**
9127  * ipr_shutdown - Shutdown handler.
9128  * @pdev:	pci device struct
9129  *
9130  * This function is invoked upon system shutdown/reboot. It will issue
9131  * an adapter shutdown to the adapter to flush the write cache.
9132  *
9133  * Return value:
9134  * 	none
9135  **/
9136 static void ipr_shutdown(struct pci_dev *pdev)
9137 {
9138 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9139 	unsigned long lock_flags = 0;
9140 
9141 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9142 	while (ioa_cfg->in_reset_reload) {
9143 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9144 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9145 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9146 	}
9147 
9148 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9149 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9150 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9151 }
9152 
9153 static struct pci_device_id ipr_pci_table[] __devinitdata = {
9154 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9155 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9156 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9157 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9158 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9159 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9160 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9161 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9162 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9163 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9164 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9165 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9166 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9167 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9168 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9169 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9170 		IPR_USE_LONG_TRANSOP_TIMEOUT },
9171 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9172 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9173 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9174 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9175 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9176 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9177 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9178 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9179 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9180 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9181 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9182 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9183 	      IPR_USE_LONG_TRANSOP_TIMEOUT},
9184 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9185 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9186 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9187 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9188 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9189 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9190 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9191 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9192 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9193 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9194 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9195 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
9196 	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
9197 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
9198 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
9199 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9200 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
9201 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9202 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9203 		IPR_USE_LONG_TRANSOP_TIMEOUT },
9204 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9205 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9206 		IPR_USE_LONG_TRANSOP_TIMEOUT },
9207 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9208 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9209 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9210 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9211 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9212 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
9213 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9214 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
9215 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9216 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9217 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9218 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9219 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9220 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
9221 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9222 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
9223 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9224 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
9225 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9226 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
9227 	{ }
9228 };
9229 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
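
/*
 * MODULE_DEVICE_TABLE() exports the ID table above in the module's
 * .modinfo section; depmod turns it into modules.alias entries so udev
 * can modprobe this driver automatically when a matching PCI function
 * appears. Entries match on vendor/device plus the IBM subsystem IDs,
 * and driver_data carries per-board quirk flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT, consumed in ipr_probe_ioa().
 */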
9230 
9231 static struct pci_error_handlers ipr_err_handler = {
9232 	.error_detected = ipr_pci_error_detected,
9233 	.slot_reset = ipr_pci_slot_reset,
9234 };
9235 
9236 static struct pci_driver ipr_driver = {
9237 	.name = IPR_NAME,
9238 	.id_table = ipr_pci_table,
9239 	.probe = ipr_probe,
9240 	.remove = __devexit_p(ipr_remove),
9241 	.shutdown = ipr_shutdown,
9242 	.err_handler = &ipr_err_handler,
9243 };
9244 
9245 /**
9246  * ipr_halt_done - Shutdown prepare completion
9247  * @ipr_cmd:	ipr command struct
 *
9248  * Return value:
9249  * 	none
9250  **/
9251 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9252 {
9253 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9254 
9255 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9256 }
9257 
9258 /**
9259  * ipr_halt - Issue shutdown prepare to all adapters
9260  * @nb:		notifier block struct
 * @event:	reboot notifier event
 * @buf:	buffer (unused)
 *
 * Return value:
 * 	NOTIFY_OK on handled shutdown events / NOTIFY_DONE otherwise
9263  **/
9264 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9265 {
9266 	struct ipr_cmnd *ipr_cmd;
9267 	struct ipr_ioa_cfg *ioa_cfg;
9268 	unsigned long flags = 0;
9269 
9270 	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9271 		return NOTIFY_DONE;
9272 
9273 	spin_lock(&ipr_driver_lock);
9274 
9275 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9276 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9277 		if (!ioa_cfg->allow_cmds) {
9278 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9279 			continue;
9280 		}
9281 
9282 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9283 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9284 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9285 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9286 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
9287 
9288 		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
9289 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9290 	}
9291 	spin_unlock(&ipr_driver_lock);
9292 
9293 	return NOTIFY_OK;
9294 }
9295 
9296 static struct notifier_block ipr_notifier = {
9297 	ipr_halt, NULL, 0
9298 };
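
/*
 * The reboot notifier gives the driver a last chance to send
 * IPR_IOA_SHUTDOWN (prepare for normal) to every adapter on restart,
 * halt, or power off, so the write cache is flushed even on paths that
 * do not go through ipr_shutdown(). ipr_init() below registers it with
 * register_reboot_notifier().
 */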
9299 
9300 /**
9301  * ipr_init - Module entry point
9302  *
9303  * Return value:
9304  * 	0 on success / negative value on failure
9305  **/
9306 static int __init ipr_init(void)
9307 {
9308 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
9309 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
9310 
9311 	register_reboot_notifier(&ipr_notifier);
9312 	return pci_register_driver(&ipr_driver);
9313 }
9314 
9315 /**
9316  * ipr_exit - Module unload
9317  *
9318  * Module unload entry point.
9319  *
9320  * Return value:
9321  * 	none
9322  **/
9323 static void __exit ipr_exit(void)
9324 {
9325 	unregister_reboot_notifier(&ipr_notifier);
9326 	pci_unregister_driver(&ipr_driver);
9327 }
9328 
9329 module_init(ipr_init);
9330 module_exit(ipr_exit);
9331