1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2 
3 #include <linux/slab.h>
4 
5 #include "qlge.h"
6 
7 /* Read a NIC register from the alternate function. */
ql_read_other_func_reg(struct ql_adapter * qdev,u32 reg)8 static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
9 						u32 reg)
10 {
11 	u32 register_to_read;
12 	u32 reg_val;
13 	unsigned int status = 0;
14 
15 	register_to_read = MPI_NIC_REG_BLOCK
16 				| MPI_NIC_READ
17 				| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
18 				| reg;
19 	status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
20 	if (status != 0)
21 		return 0xffffffff;
22 
23 	return reg_val;
24 }
25 
26 /* Write a NIC register from the alternate function. */
ql_write_other_func_reg(struct ql_adapter * qdev,u32 reg,u32 reg_val)27 static int ql_write_other_func_reg(struct ql_adapter *qdev,
28 					u32 reg, u32 reg_val)
29 {
30 	u32 register_to_read;
31 	int status = 0;
32 
33 	register_to_read = MPI_NIC_REG_BLOCK
34 				| MPI_NIC_READ
35 				| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
36 				| reg;
37 	status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
38 
39 	return status;
40 }
41 
/* Poll a register of the alternate function until @bit asserts.
 * Returns 0 when @bit is set, -1 if @err_bit asserts first or the
 * poll times out (10 tries, 10 ms apart).
 */
static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
					u32 bit, u32 err_bit)
{
	int count;

	for (count = 10; count; count--) {
		u32 temp = ql_read_other_func_reg(qdev, reg);

		/* An error indication takes precedence over ready. */
		if (temp & err_bit)
			return -1;
		if (temp & bit)
			return 0;
		mdelay(10);
	}
	/* Never became ready. */
	return -1;
}
61 
/* Indirectly read a SERDES register of the alternate function.
 * On success *data holds the register value and 0 is returned;
 * otherwise the failing wait status is returned.
 */
static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
							u32 *data)
{
	int status;

	/* The SERDES address register must be idle before starting. */
	status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
						XG_SERDES_ADDR_RDY, 0);
	if (status)
		return status;

	/* Launch the indirect read. */
	ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);

	/* Wait for the read to complete. */
	status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
						XG_SERDES_ADDR_RDY, 0);
	if (status)
		return status;

	/* Fetch the result. */
	*data = ql_read_other_func_reg(qdev, XG_SERDES_DATA / 4);
	return 0;
}
87 
88 /* Read out the SERDES registers */
ql_read_serdes_reg(struct ql_adapter * qdev,u32 reg,u32 * data)89 static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
90 {
91 	int status;
92 
93 	/* wait for reg to come ready */
94 	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
95 	if (status)
96 		goto exit;
97 
98 	/* set up for reg read */
99 	ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
100 
101 	/* wait for reg to come ready */
102 	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
103 	if (status)
104 		goto exit;
105 
106 	/* get the data */
107 	*data = ql_read32(qdev, XG_SERDES_DATA);
108 exit:
109 	return status;
110 }
111 
/* Read one SERDES address from both the local (direct) and alternate
 * (indirect) function.  Invalid lanes and failed reads are dead-filled
 * with 0xDEADBEEF so the coredump layout stays fixed.
 */
static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
			u32 *direct_ptr, u32 *indirect_ptr,
			unsigned int direct_valid, unsigned int indirect_valid)
{
	/* Skip-or-fail both collapse to a dead fill. */
	if (!direct_valid || ql_read_serdes_reg(qdev, addr, direct_ptr))
		*direct_ptr = 0xDEADBEEF;

	if (!indirect_valid ||
	    ql_read_other_func_serdes_reg(qdev, addr, indirect_ptr))
		*indirect_ptr = 0xDEADBEEF;
}
133 
/*
 * Dump all SERDES register blocks for both NIC functions into
 * @mpi_coredump.  Lane power state is probed first; powered-down or
 * unreadable lanes are dead-filled (0xDEADBEEF) by ql_get_both_serdes()
 * so the dump layout is always complete.  Always returns 0 — individual
 * read failures are dead-filled, not reported.
 */
static int ql_get_serdes_regs(struct ql_adapter *qdev,
				struct ql_mpi_coredump *mpi_coredump)
{
	int status;
	/* Nonzero "valid" flag means that lane is powered up and readable. */
	unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
	unsigned int xaui_indirect_valid, i;
	u32 *direct_ptr, temp;
	u32 *indirect_ptr;

	/* XFI assumed down until proven up; XAUI assumed up until proven down. */
	xfi_direct_valid = xfi_indirect_valid = 0;
	xaui_direct_valid = xaui_indirect_valid = 1;

	/* The XAUI needs to be read out per port */
	/*
	 * NOTE(review): the two branches below are line-for-line identical;
	 * only the "We are NIC 1/2" comments differ.  Kept as-is to
	 * preserve behavior.
	 */
	if (qdev->func & 1) {
		/* We are NIC 2	*/
		status = ql_read_other_func_serdes_reg(qdev,
				XG_SERDES_XAUI_HSS_PCS_START, &temp);
		/* Treat a failed read as powered-down. */
		if (status)
			temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
		if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
					XG_SERDES_ADDR_XAUI_PWR_DOWN)
			xaui_indirect_valid = 0;

		status = ql_read_serdes_reg(qdev,
				XG_SERDES_XAUI_HSS_PCS_START, &temp);
		if (status)
			temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;

		if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
					XG_SERDES_ADDR_XAUI_PWR_DOWN)
			xaui_direct_valid = 0;
	} else {
		/* We are NIC 1	*/
		status = ql_read_other_func_serdes_reg(qdev,
				XG_SERDES_XAUI_HSS_PCS_START, &temp);
		if (status)
			temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
		if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
					XG_SERDES_ADDR_XAUI_PWR_DOWN)
			xaui_indirect_valid = 0;

		status = ql_read_serdes_reg(qdev,
				XG_SERDES_XAUI_HSS_PCS_START, &temp);
		if (status)
			temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
		if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
					XG_SERDES_ADDR_XAUI_PWR_DOWN)
			xaui_direct_valid = 0;
	}

	/*
	 * XFI register is shared so only need to read one
	 * functions and then check the bits.
	 */
	status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
	if (status)
		temp = 0;	/* read failed: no XFI power-up bits set */

	if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
					XG_SERDES_ADDR_XFI1_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
			xfi_indirect_valid = 1;
		else
			xfi_direct_valid = 1;
	}
	if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
					XG_SERDES_ADDR_XFI2_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
			xfi_direct_valid = 1;
		else
			xfi_indirect_valid = 1;
	}

	/* Get XAUI_AN register block. */
	/* Each block below: pick direct/indirect destinations by function
	 * parity, then sweep the block's address range 4 bytes at a time.
	 */
	if (qdev->func & 1) {
		/* Function 2 is direct	*/
		direct_ptr = mpi_coredump->serdes2_xaui_an;
		indirect_ptr = mpi_coredump->serdes_xaui_an;
	} else {
		/* Function 1 is direct	*/
		direct_ptr = mpi_coredump->serdes_xaui_an;
		indirect_ptr = mpi_coredump->serdes2_xaui_an;
	}

	/* NOTE(review): 0x000000034 is presumably meant as 0x34. */
	for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xaui_direct_valid, xaui_indirect_valid);

	/* Get XAUI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
	}

	for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xaui_direct_valid, xaui_indirect_valid);

	/* Get XAUI_XFI_AN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_an;
		indirect_ptr = mpi_coredump->serdes_xfi_an;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_an;
		indirect_ptr = mpi_coredump->serdes2_xfi_an;
	}

	for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_TRAIN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes_xfi_train;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_train;
	}

	for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
	}

	for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_TX register block. */
	/* NOTE(review): the remaining three blocks step by 1, not 4, unlike
	 * the blocks above — presumably these are word-indexed addresses;
	 * confirm against the SERDES address map.
	 */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_tx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
	}
	for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_RX register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_rx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
	}

	for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);


	/* Get XAUI_XFI_HSS_PLL register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
	}
	for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);
	return 0;
}
337 
/* Indirectly read an XGMAC register of the alternate function.
 * On success *data holds the register value and 0 is returned;
 * otherwise the failing wait status is returned.
 */
static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
							u32 *data)
{
	int status;

	/* XGMAC address register must be idle (and error-free) first. */
	status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
						XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;

	/* Launch the indirect read. */
	ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);

	/* Wait for completion. */
	status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
						XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;

	/* Fetch the result. */
	*data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
	return 0;
}
363 
364 /* Read the 400 xgmac control/statistics registers
365  * skipping unused locations.
366  */
ql_get_xgmac_regs(struct ql_adapter * qdev,u32 * buf,unsigned int other_function)367 static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
368 					unsigned int other_function)
369 {
370 	int status = 0;
371 	int i;
372 
373 	for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
374 		/* We're reading 400 xgmac registers, but we filter out
375 		 * serveral locations that are non-responsive to reads.
376 		 */
377 		if ((i == 0x00000114) ||
378 			(i == 0x00000118) ||
379 			(i == 0x0000013c) ||
380 			(i == 0x00000140) ||
381 			(i > 0x00000150 && i < 0x000001fc) ||
382 			(i > 0x00000278 && i < 0x000002a0) ||
383 			(i > 0x000002c0 && i < 0x000002cf) ||
384 			(i > 0x000002dc && i < 0x000002f0) ||
385 			(i > 0x000003c8 && i < 0x00000400) ||
386 			(i > 0x00000400 && i < 0x00000410) ||
387 			(i > 0x00000410 && i < 0x00000420) ||
388 			(i > 0x00000420 && i < 0x00000430) ||
389 			(i > 0x00000430 && i < 0x00000440) ||
390 			(i > 0x00000440 && i < 0x00000450) ||
391 			(i > 0x00000450 && i < 0x00000500) ||
392 			(i > 0x0000054c && i < 0x00000568) ||
393 			(i > 0x000005c8 && i < 0x00000600)) {
394 			if (other_function)
395 				status =
396 				ql_read_other_func_xgmac_reg(qdev, i, buf);
397 			else
398 				status = ql_read_xgmac_reg(qdev, i, buf);
399 
400 			if (status)
401 				*buf = 0xdeadbeef;
402 			break;
403 		}
404 	}
405 	return status;
406 }
407 
/* Dump the ETS registers: 8 NIC entries followed by 2 CNA entries.
 * Each entry is selected by writing its index (bits 31:29) plus the
 * read-request bit 0x08000000, then reading the same register back.
 * Always returns 0.
 */
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
{
	int i;

	for (i = 0; i < 8; i++) {
		ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
		*buf++ = ql_read32(qdev, NIC_ETS);
	}

	for (i = 0; i < 2; i++) {
		ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
		*buf++ = ql_read32(qdev, CNA_ETS);
	}

	return 0;
}
425 
/* Capture the INTR_EN state for every rx ring's interrupt context.
 * Writing the context's read-mask selects it; the readback is stored
 * one word per ring.
 */
static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
{
	int ring;

	for (ring = 0; ring < qdev->rx_ring_count; ring++) {
		ql_write32(qdev, INTR_EN,
				qdev->intr_context[ring].intr_read_mask);
		buf[ring] = ql_read32(qdev, INTR_EN);
	}
}
436 
/* Dump the MAC CAM: 16 unicast entries (3 words each: lower MAC,
 * upper MAC, output) then 32 multicast entries (2 words each).
 * Takes and releases the MAC address semaphore; returns 0 on success
 * or the first failing status.
 */
static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
{
	u32 entry[3];
	int status;
	int i;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	for (i = 0; i < 16; i++) {
		status = ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC,
					     i, entry);
		if (status) {
			netif_err(qdev, drv, qdev->ndev,
				  "Failed read of mac index register\n");
			goto err;
		}
		*buf++ = entry[0];	/* lower MAC address */
		*buf++ = entry[1];	/* upper MAC address */
		*buf++ = entry[2];	/* output */
	}

	for (i = 0; i < 32; i++) {
		status = ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_MULTI_MAC,
					     i, entry);
		if (status) {
			netif_err(qdev, drv, qdev->ndev,
				  "Failed read of mac index register\n");
			goto err;
		}
		*buf++ = entry[0];	/* lower Mcast address */
		*buf++ = entry[1];	/* upper Mcast address */
	}
err:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
473 
/* Dump all 16 routing registers, one word each, under the routing
 * index semaphore.  Returns 0 on success or the first failing status.
 */
static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
{
	u32 i, value;
	int status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	for (i = 0; i < 16; i++) {
		status = ql_get_routing_reg(qdev, i, &value);
		if (status) {
			netif_err(qdev, drv, qdev->ndev,
				  "Failed read of routing index register\n");
			goto err;
		}
		*buf++ = value;
	}
err:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
497 
498 /* Read the MPI Processor shadow registers */
ql_get_mpi_shadow_regs(struct ql_adapter * qdev,u32 * buf)499 static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
500 {
501 	u32 i;
502 	int status;
503 
504 	for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
505 		status = ql_write_mpi_reg(qdev, RISC_124,
506 				(SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
507 		if (status)
508 			goto end;
509 		status = ql_read_mpi_reg(qdev, RISC_127, buf);
510 		if (status)
511 			goto end;
512 	}
513 end:
514 	return status;
515 }
516 
517 /* Read the MPI Processor core registers */
ql_get_mpi_regs(struct ql_adapter * qdev,u32 * buf,u32 offset,u32 count)518 static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
519 				u32 offset, u32 count)
520 {
521 	int i, status = 0;
522 	for (i = 0; i < count; i++, buf++) {
523 		status = ql_read_mpi_reg(qdev, offset + i, buf);
524 		if (status)
525 			return status;
526 	}
527 	return status;
528 }
529 
530 /* Read the ASIC probe dump */
ql_get_probe(struct ql_adapter * qdev,u32 clock,u32 valid,u32 * buf)531 static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
532 					u32 valid, u32 *buf)
533 {
534 	u32 module, mux_sel, probe, lo_val, hi_val;
535 
536 	for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
537 		if (!((valid >> module) & 1))
538 			continue;
539 		for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
540 			probe = clock
541 				| PRB_MX_ADDR_ARE
542 				| mux_sel
543 				| (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
544 			ql_write32(qdev, PRB_MX_ADDR, probe);
545 			lo_val = ql_read32(qdev, PRB_MX_DATA);
546 			if (mux_sel == 0) {
547 				*buf = probe;
548 				buf++;
549 			}
550 			probe |= PRB_MX_ADDR_UP;
551 			ql_write32(qdev, PRB_MX_ADDR, probe);
552 			hi_val = ql_read32(qdev, PRB_MX_DATA);
553 			*buf = lo_val;
554 			buf++;
555 			*buf = hi_val;
556 			buf++;
557 		}
558 	}
559 	return buf;
560 }
561 
ql_get_probe_dump(struct ql_adapter * qdev,unsigned int * buf)562 static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
563 {
564 	/* First we have to enable the probe mux */
565 	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
566 	buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
567 			PRB_MX_ADDR_VALID_SYS_MOD, buf);
568 	buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
569 			PRB_MX_ADDR_VALID_PCI_MOD, buf);
570 	buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
571 			PRB_MX_ADDR_VALID_XGM_MOD, buf);
572 	buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
573 			PRB_MX_ADDR_VALID_FC_MOD, buf);
574 	return 0;
575 
576 }
577 
578 /* Read out the routing index registers */
ql_get_routing_index_registers(struct ql_adapter * qdev,u32 * buf)579 static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
580 {
581 	int status;
582 	u32 type, index, index_max;
583 	u32 result_index;
584 	u32 result_data;
585 	u32 val;
586 
587 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
588 	if (status)
589 		return status;
590 
591 	for (type = 0; type < 4; type++) {
592 		if (type < 2)
593 			index_max = 8;
594 		else
595 			index_max = 16;
596 		for (index = 0; index < index_max; index++) {
597 			val = RT_IDX_RS
598 				| (type << RT_IDX_TYPE_SHIFT)
599 				| (index << RT_IDX_IDX_SHIFT);
600 			ql_write32(qdev, RT_IDX, val);
601 			result_index = 0;
602 			while ((result_index & RT_IDX_MR) == 0)
603 				result_index = ql_read32(qdev, RT_IDX);
604 			result_data = ql_read32(qdev, RT_DATA);
605 			*buf = type;
606 			buf++;
607 			*buf = index;
608 			buf++;
609 			*buf = result_index;
610 			buf++;
611 			*buf = result_data;
612 			buf++;
613 		}
614 	}
615 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
616 	return status;
617 }
618 
/* Read out the MAC protocol registers.
 * For every MAC address type, every entry index, and every word offset,
 * issues a read request through MAC_ADDR_IDX, busy-waits for the MR bit,
 * and stores two words per read: {MAC_ADDR_IDX readback, MAC_ADDR_DATA}.
 * NOTE(review): the MR poll has no timeout and can spin indefinitely.
 */
static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
{
	u32 result_index, result_data;
	u32 type;
	u32 index;
	u32 offset;
	u32 val;
	/* MAC_ADDR_RS requests a read of the selected entry. */
	u32 initial_val = MAC_ADDR_RS;
	u32 max_index;
	u32 max_offset;

	for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
		/* Each type has its own entry count and words-per-entry. */
		switch (type) {

		case 0: /* CAM */
			/* NOTE(review): this |= persists for all later
			 * types since initial_val is set outside the loop;
			 * confirm MAC_ADDR_ADR is intended for types > 0.
			 */
			initial_val |= MAC_ADDR_ADR;
			max_index = MAC_ADDR_MAX_CAM_ENTRIES;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 1: /* Multicast MAC Address */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 2: /* VLAN filter mask */
		case 3: /* MC filter mask */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 4: /* FC MAC addresses */
			max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
			break;
		case 5: /* Mgmt MAC addresses */
			max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
			break;
		case 6: /* Mgmt VLAN addresses */
			max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
			break;
		case 7: /* Mgmt IPv4 address */
			max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
			break;
		case 8: /* Mgmt IPv6 address */
			max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
			break;
		case 9: /* Mgmt TCP/UDP Dest port */
			max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
			break;
		default:
			/* Unknown type: zero bounds skip the loops below. */
			pr_err("Bad type!!! 0x%08x\n", type);
			max_index = 0;
			max_offset = 0;
			break;
		}
		for (index = 0; index < max_index; index++) {
			for (offset = 0; offset < max_offset; offset++) {
				/* Compose the read request for this word. */
				val = initial_val
					| (type << MAC_ADDR_TYPE_SHIFT)
					| (index << MAC_ADDR_IDX_SHIFT)
					| (offset);
				ql_write32(qdev, MAC_ADDR_IDX, val);
				/* Busy-wait for the read to complete (MR). */
				result_index = 0;
				while ((result_index & MAC_ADDR_MR) == 0) {
					result_index = ql_read32(qdev,
								MAC_ADDR_IDX);
				}
				result_data = ql_read32(qdev, MAC_ADDR_DATA);
				*buf = result_index;
				buf++;
				*buf = result_data;
				buf++;
			}
		}
	}
}
699 
/* Dump the semaphore register of every function via indirect MPI reads.
 * One word per function; failed reads are dead-filled with 0xdeadbeef.
 */
static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
{
	u32 func_num, reg, reg_val;
	int status;

	for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
		reg = MPI_NIC_REG_BLOCK
			| (func_num << MPI_NIC_FUNCTION_SHIFT)
			| (SEM / 4);
		status = ql_read_mpi_reg(qdev, reg, &reg_val);
		*buf = reg_val;
		/* if the read failed then dead fill the element.
		 * Fix: the old test was "if (!status)", which dead-filled
		 * on SUCCESS (ql_read_mpi_reg returns 0 on success) and
		 * threw away every good read.
		 */
		if (status)
			*buf = 0xdeadbeef;
		buf++;
	}
}
717 
718 /* Create a coredump segment header */
ql_build_coredump_seg_header(struct mpi_coredump_segment_header * seg_hdr,u32 seg_number,u32 seg_size,u8 * desc)719 static void ql_build_coredump_seg_header(
720 		struct mpi_coredump_segment_header *seg_hdr,
721 		u32 seg_number, u32 seg_size, u8 *desc)
722 {
723 	memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
724 	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
725 	seg_hdr->segNum = seg_number;
726 	seg_hdr->segSize = seg_size;
727 	memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
728 }
729 
730 /*
731  * This function should be called when a coredump / probedump
732  * is to be extracted from the HBA. It is assumed there is a
733  * qdev structure that contains the base address of the register
734  * space for this function as well as a coredump structure that
735  * will contain the dump.
736  */
ql_core_dump(struct ql_adapter * qdev,struct ql_mpi_coredump * mpi_coredump)737 int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
738 {
739 	int status;
740 	int i;
741 
742 	if (!mpi_coredump) {
743 		netif_err(qdev, drv, qdev->ndev, "No memory available\n");
744 		return -ENOMEM;
745 	}
746 
747 	/* Try to get the spinlock, but dont worry if
748 	 * it isn't available.  If the firmware died it
749 	 * might be holding the sem.
750 	 */
751 	ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
752 
753 	status = ql_pause_mpi_risc(qdev);
754 	if (status) {
755 		netif_err(qdev, drv, qdev->ndev,
756 			  "Failed RISC pause. Status = 0x%.08x\n", status);
757 		goto err;
758 	}
759 
760 	/* Insert the global header */
761 	memset(&(mpi_coredump->mpi_global_header), 0,
762 		sizeof(struct mpi_coredump_global_header));
763 	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
764 	mpi_coredump->mpi_global_header.headerSize =
765 		sizeof(struct mpi_coredump_global_header);
766 	mpi_coredump->mpi_global_header.imageSize =
767 		sizeof(struct ql_mpi_coredump);
768 	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
769 		sizeof(mpi_coredump->mpi_global_header.idString));
770 
771 	/* Get generic NIC reg dump */
772 	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
773 			NIC1_CONTROL_SEG_NUM,
774 			sizeof(struct mpi_coredump_segment_header) +
775 			sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
776 
777 	ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
778 			NIC2_CONTROL_SEG_NUM,
779 			sizeof(struct mpi_coredump_segment_header) +
780 			sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
781 
782 	/* Get XGMac registers. (Segment 18, Rev C. step 21) */
783 	ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
784 			NIC1_XGMAC_SEG_NUM,
785 			sizeof(struct mpi_coredump_segment_header) +
786 			sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
787 
788 	ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
789 			NIC2_XGMAC_SEG_NUM,
790 			sizeof(struct mpi_coredump_segment_header) +
791 			sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
792 
793 	if (qdev->func & 1) {
794 		/* Odd means our function is NIC 2 */
795 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
796 			mpi_coredump->nic2_regs[i] =
797 					 ql_read32(qdev, i * sizeof(u32));
798 
799 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
800 			mpi_coredump->nic_regs[i] =
801 			ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
802 
803 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
804 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
805 	} else {
806 		/* Even means our function is NIC 1 */
807 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
808 			mpi_coredump->nic_regs[i] =
809 					ql_read32(qdev, i * sizeof(u32));
810 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
811 			mpi_coredump->nic2_regs[i] =
812 			ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
813 
814 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
815 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
816 	}
817 
818 	/* Rev C. Step 20a */
819 	ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
820 			XAUI_AN_SEG_NUM,
821 			sizeof(struct mpi_coredump_segment_header) +
822 			sizeof(mpi_coredump->serdes_xaui_an),
823 			"XAUI AN Registers");
824 
825 	/* Rev C. Step 20b */
826 	ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
827 			XAUI_HSS_PCS_SEG_NUM,
828 			sizeof(struct mpi_coredump_segment_header) +
829 			sizeof(mpi_coredump->serdes_xaui_hss_pcs),
830 			"XAUI HSS PCS Registers");
831 
832 	ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
833 			sizeof(struct mpi_coredump_segment_header) +
834 			sizeof(mpi_coredump->serdes_xfi_an),
835 			"XFI AN Registers");
836 
837 	ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
838 			XFI_TRAIN_SEG_NUM,
839 			sizeof(struct mpi_coredump_segment_header) +
840 			sizeof(mpi_coredump->serdes_xfi_train),
841 			"XFI TRAIN Registers");
842 
843 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
844 			XFI_HSS_PCS_SEG_NUM,
845 			sizeof(struct mpi_coredump_segment_header) +
846 			sizeof(mpi_coredump->serdes_xfi_hss_pcs),
847 			"XFI HSS PCS Registers");
848 
849 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
850 			XFI_HSS_TX_SEG_NUM,
851 			sizeof(struct mpi_coredump_segment_header) +
852 			sizeof(mpi_coredump->serdes_xfi_hss_tx),
853 			"XFI HSS TX Registers");
854 
855 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
856 			XFI_HSS_RX_SEG_NUM,
857 			sizeof(struct mpi_coredump_segment_header) +
858 			sizeof(mpi_coredump->serdes_xfi_hss_rx),
859 			"XFI HSS RX Registers");
860 
861 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
862 			XFI_HSS_PLL_SEG_NUM,
863 			sizeof(struct mpi_coredump_segment_header) +
864 			sizeof(mpi_coredump->serdes_xfi_hss_pll),
865 			"XFI HSS PLL Registers");
866 
867 	ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
868 			XAUI2_AN_SEG_NUM,
869 			sizeof(struct mpi_coredump_segment_header) +
870 			sizeof(mpi_coredump->serdes2_xaui_an),
871 			"XAUI2 AN Registers");
872 
873 	ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
874 			XAUI2_HSS_PCS_SEG_NUM,
875 			sizeof(struct mpi_coredump_segment_header) +
876 			sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
877 			"XAUI2 HSS PCS Registers");
878 
879 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
880 			XFI2_AN_SEG_NUM,
881 			sizeof(struct mpi_coredump_segment_header) +
882 			sizeof(mpi_coredump->serdes2_xfi_an),
883 			"XFI2 AN Registers");
884 
885 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
886 			XFI2_TRAIN_SEG_NUM,
887 			sizeof(struct mpi_coredump_segment_header) +
888 			sizeof(mpi_coredump->serdes2_xfi_train),
889 			"XFI2 TRAIN Registers");
890 
891 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
892 			XFI2_HSS_PCS_SEG_NUM,
893 			sizeof(struct mpi_coredump_segment_header) +
894 			sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
895 			"XFI2 HSS PCS Registers");
896 
897 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
898 			XFI2_HSS_TX_SEG_NUM,
899 			sizeof(struct mpi_coredump_segment_header) +
900 			sizeof(mpi_coredump->serdes2_xfi_hss_tx),
901 			"XFI2 HSS TX Registers");
902 
903 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
904 			XFI2_HSS_RX_SEG_NUM,
905 			sizeof(struct mpi_coredump_segment_header) +
906 			sizeof(mpi_coredump->serdes2_xfi_hss_rx),
907 			"XFI2 HSS RX Registers");
908 
909 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
910 			XFI2_HSS_PLL_SEG_NUM,
911 			sizeof(struct mpi_coredump_segment_header) +
912 			sizeof(mpi_coredump->serdes2_xfi_hss_pll),
913 			"XFI2 HSS PLL Registers");
914 
915 	status = ql_get_serdes_regs(qdev, mpi_coredump);
916 	if (status) {
917 		netif_err(qdev, drv, qdev->ndev,
918 			  "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
919 			  status);
920 		goto err;
921 	}
922 
923 	ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
924 				CORE_SEG_NUM,
925 				sizeof(mpi_coredump->core_regs_seg_hdr) +
926 				sizeof(mpi_coredump->mpi_core_regs) +
927 				sizeof(mpi_coredump->mpi_core_sh_regs),
928 				"Core Registers");
929 
930 	/* Get the MPI Core Registers */
931 	status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
932 				 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
933 	if (status)
934 		goto err;
935 	/* Get the 16 MPI shadow registers */
936 	status = ql_get_mpi_shadow_regs(qdev,
937 					&mpi_coredump->mpi_core_sh_regs[0]);
938 	if (status)
939 		goto err;
940 
941 	/* Get the Test Logic Registers */
942 	ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
943 				TEST_LOGIC_SEG_NUM,
944 				sizeof(struct mpi_coredump_segment_header)
945 				+ sizeof(mpi_coredump->test_logic_regs),
946 				"Test Logic Regs");
947 	status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
948 				 TEST_REGS_ADDR, TEST_REGS_CNT);
949 	if (status)
950 		goto err;
951 
952 	/* Get the RMII Registers */
953 	ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
954 				RMII_SEG_NUM,
955 				sizeof(struct mpi_coredump_segment_header)
956 				+ sizeof(mpi_coredump->rmii_regs),
957 				"RMII Registers");
958 	status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
959 				 RMII_REGS_ADDR, RMII_REGS_CNT);
960 	if (status)
961 		goto err;
962 
963 	/* Get the FCMAC1 Registers */
964 	ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
965 				FCMAC1_SEG_NUM,
966 				sizeof(struct mpi_coredump_segment_header)
967 				+ sizeof(mpi_coredump->fcmac1_regs),
968 				"FCMAC1 Registers");
969 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
970 				 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
971 	if (status)
972 		goto err;
973 
974 	/* Get the FCMAC2 Registers */
975 
976 	ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
977 				FCMAC2_SEG_NUM,
978 				sizeof(struct mpi_coredump_segment_header)
979 				+ sizeof(mpi_coredump->fcmac2_regs),
980 				"FCMAC2 Registers");
981 
982 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
983 				 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
984 	if (status)
985 		goto err;
986 
987 	/* Get the FC1 MBX Registers */
988 	ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
989 				FC1_MBOX_SEG_NUM,
990 				sizeof(struct mpi_coredump_segment_header)
991 				+ sizeof(mpi_coredump->fc1_mbx_regs),
992 				"FC1 MBox Regs");
993 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
994 				 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
995 	if (status)
996 		goto err;
997 
998 	/* Get the IDE Registers */
999 	ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
1000 				IDE_SEG_NUM,
1001 				sizeof(struct mpi_coredump_segment_header)
1002 				+ sizeof(mpi_coredump->ide_regs),
1003 				"IDE Registers");
1004 	status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
1005 				 IDE_REGS_ADDR, IDE_REGS_CNT);
1006 	if (status)
1007 		goto err;
1008 
1009 	/* Get the NIC1 MBX Registers */
1010 	ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
1011 				NIC1_MBOX_SEG_NUM,
1012 				sizeof(struct mpi_coredump_segment_header)
1013 				+ sizeof(mpi_coredump->nic1_mbx_regs),
1014 				"NIC1 MBox Regs");
1015 	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
1016 				 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1017 	if (status)
1018 		goto err;
1019 
1020 	/* Get the SMBus Registers */
1021 	ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
1022 				SMBUS_SEG_NUM,
1023 				sizeof(struct mpi_coredump_segment_header)
1024 				+ sizeof(mpi_coredump->smbus_regs),
1025 				"SMBus Registers");
1026 	status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
1027 				 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
1028 	if (status)
1029 		goto err;
1030 
1031 	/* Get the FC2 MBX Registers */
1032 	ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
1033 				FC2_MBOX_SEG_NUM,
1034 				sizeof(struct mpi_coredump_segment_header)
1035 				+ sizeof(mpi_coredump->fc2_mbx_regs),
1036 				"FC2 MBox Regs");
1037 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
1038 				 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
1039 	if (status)
1040 		goto err;
1041 
1042 	/* Get the NIC2 MBX Registers */
1043 	ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
1044 				NIC2_MBOX_SEG_NUM,
1045 				sizeof(struct mpi_coredump_segment_header)
1046 				+ sizeof(mpi_coredump->nic2_mbx_regs),
1047 				"NIC2 MBox Regs");
1048 	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
1049 				 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1050 	if (status)
1051 		goto err;
1052 
1053 	/* Get the I2C Registers */
1054 	ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
1055 				I2C_SEG_NUM,
1056 				sizeof(struct mpi_coredump_segment_header)
1057 				+ sizeof(mpi_coredump->i2c_regs),
1058 				"I2C Registers");
1059 	status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
1060 				 I2C_REGS_ADDR, I2C_REGS_CNT);
1061 	if (status)
1062 		goto err;
1063 
1064 	/* Get the MEMC Registers */
1065 	ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
1066 				MEMC_SEG_NUM,
1067 				sizeof(struct mpi_coredump_segment_header)
1068 				+ sizeof(mpi_coredump->memc_regs),
1069 				"MEMC Registers");
1070 	status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
1071 				 MEMC_REGS_ADDR, MEMC_REGS_CNT);
1072 	if (status)
1073 		goto err;
1074 
1075 	/* Get the PBus Registers */
1076 	ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
1077 				PBUS_SEG_NUM,
1078 				sizeof(struct mpi_coredump_segment_header)
1079 				+ sizeof(mpi_coredump->pbus_regs),
1080 				"PBUS Registers");
1081 	status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
1082 				 PBUS_REGS_ADDR, PBUS_REGS_CNT);
1083 	if (status)
1084 		goto err;
1085 
1086 	/* Get the MDE Registers */
1087 	ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
1088 				MDE_SEG_NUM,
1089 				sizeof(struct mpi_coredump_segment_header)
1090 				+ sizeof(mpi_coredump->mde_regs),
1091 				"MDE Registers");
1092 	status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
1093 				 MDE_REGS_ADDR, MDE_REGS_CNT);
1094 	if (status)
1095 		goto err;
1096 
1097 	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1098 				MISC_NIC_INFO_SEG_NUM,
1099 				sizeof(struct mpi_coredump_segment_header)
1100 				+ sizeof(mpi_coredump->misc_nic_info),
1101 				"MISC NIC INFO");
1102 	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1103 	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1104 	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1105 	mpi_coredump->misc_nic_info.function = qdev->func;
1106 
1107 	/* Segment 31 */
1108 	/* Get indexed register values. */
1109 	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1110 				INTR_STATES_SEG_NUM,
1111 				sizeof(struct mpi_coredump_segment_header)
1112 				+ sizeof(mpi_coredump->intr_states),
1113 				"INTR States");
1114 	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1115 
1116 	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1117 				CAM_ENTRIES_SEG_NUM,
1118 				sizeof(struct mpi_coredump_segment_header)
1119 				+ sizeof(mpi_coredump->cam_entries),
1120 				"CAM Entries");
1121 	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1122 	if (status)
1123 		goto err;
1124 
1125 	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1126 				ROUTING_WORDS_SEG_NUM,
1127 				sizeof(struct mpi_coredump_segment_header)
1128 				+ sizeof(mpi_coredump->nic_routing_words),
1129 				"Routing Words");
1130 	status = ql_get_routing_entries(qdev,
1131 			 &mpi_coredump->nic_routing_words[0]);
1132 	if (status)
1133 		goto err;
1134 
1135 	/* Segment 34 (Rev C. step 23) */
1136 	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1137 				ETS_SEG_NUM,
1138 				sizeof(struct mpi_coredump_segment_header)
1139 				+ sizeof(mpi_coredump->ets),
1140 				"ETS Registers");
1141 	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1142 	if (status)
1143 		goto err;
1144 
1145 	ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
1146 				PROBE_DUMP_SEG_NUM,
1147 				sizeof(struct mpi_coredump_segment_header)
1148 				+ sizeof(mpi_coredump->probe_dump),
1149 				"Probe Dump");
1150 	ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
1151 
1152 	ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
1153 				ROUTING_INDEX_SEG_NUM,
1154 				sizeof(struct mpi_coredump_segment_header)
1155 				+ sizeof(mpi_coredump->routing_regs),
1156 				"Routing Regs");
1157 	status = ql_get_routing_index_registers(qdev,
1158 					&mpi_coredump->routing_regs[0]);
1159 	if (status)
1160 		goto err;
1161 
1162 	ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
1163 				MAC_PROTOCOL_SEG_NUM,
1164 				sizeof(struct mpi_coredump_segment_header)
1165 				+ sizeof(mpi_coredump->mac_prot_regs),
1166 				"MAC Prot Regs");
1167 	ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
1168 
1169 	/* Get the semaphore registers for all 5 functions */
1170 	ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
1171 			SEM_REGS_SEG_NUM,
1172 			sizeof(struct mpi_coredump_segment_header) +
1173 			sizeof(mpi_coredump->sem_regs),	"Sem Registers");
1174 
1175 	ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1176 
1177 	/* Prevent the mpi restarting while we dump the memory.*/
1178 	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
1179 
1180 	/* clear the pause */
1181 	status = ql_unpause_mpi_risc(qdev);
1182 	if (status) {
1183 		netif_err(qdev, drv, qdev->ndev,
1184 			  "Failed RISC unpause. Status = 0x%.08x\n", status);
1185 		goto err;
1186 	}
1187 
1188 	/* Reset the RISC so we can dump RAM */
1189 	status = ql_hard_reset_mpi_risc(qdev);
1190 	if (status) {
1191 		netif_err(qdev, drv, qdev->ndev,
1192 			  "Failed RISC reset. Status = 0x%.08x\n", status);
1193 		goto err;
1194 	}
1195 
1196 	ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
1197 				WCS_RAM_SEG_NUM,
1198 				sizeof(struct mpi_coredump_segment_header)
1199 				+ sizeof(mpi_coredump->code_ram),
1200 				"WCS RAM");
1201 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
1202 					CODE_RAM_ADDR, CODE_RAM_CNT);
1203 	if (status) {
1204 		netif_err(qdev, drv, qdev->ndev,
1205 			  "Failed Dump of CODE RAM. Status = 0x%.08x\n",
1206 			  status);
1207 		goto err;
1208 	}
1209 
1210 	/* Insert the segment header */
1211 	ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
1212 				MEMC_RAM_SEG_NUM,
1213 				sizeof(struct mpi_coredump_segment_header)
1214 				+ sizeof(mpi_coredump->memc_ram),
1215 				"MEMC RAM");
1216 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
1217 					MEMC_RAM_ADDR, MEMC_RAM_CNT);
1218 	if (status) {
1219 		netif_err(qdev, drv, qdev->ndev,
1220 			  "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
1221 			  status);
1222 		goto err;
1223 	}
1224 err:
1225 	ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
1226 	return status;
1227 
1228 }
1229 
ql_get_core_dump(struct ql_adapter * qdev)1230 static void ql_get_core_dump(struct ql_adapter *qdev)
1231 {
1232 	if (!ql_own_firmware(qdev)) {
1233 		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
1234 		return;
1235 	}
1236 
1237 	if (!netif_running(qdev->ndev)) {
1238 		netif_err(qdev, ifup, qdev->ndev,
1239 			  "Force Coredump can only be done from interface that is up\n");
1240 		return;
1241 	}
1242 	ql_queue_fw_error(qdev);
1243 }
1244 
/*
 * ql_gen_reg_dump - capture a reduced ("general") register dump.
 * @qdev: adapter to capture from
 * @mpi_coredump: caller-supplied buffer laid out as struct ql_reg_dump
 *
 * Fills in the global coredump header, then a fixed sequence of
 * segments: misc NIC info, the first 64 NIC control registers, the
 * interrupt-enable states, CAM entries, routing words and ETS
 * registers.  Returns silently (leaving later segments unfilled) if
 * any of the indexed-register reads fail.
 */
void ql_gen_reg_dump(struct ql_adapter *qdev,
			struct ql_reg_dump *mpi_coredump)
{
	int i, status;


	/* Global header identifying this buffer as an MPI coredump. */
	memset(&(mpi_coredump->mpi_global_header), 0,
		sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.headerSize =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.imageSize =
		sizeof(struct ql_reg_dump);
	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.idString));


	/* segment 16 */
	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
				MISC_NIC_INFO_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->misc_nic_info),
				"MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* Segment 16, Rev C. Step 18
	 * NOTE(review): the segment number in this comment looks stale --
	 * the header is built with NIC1_CONTROL_SEG_NUM; confirm against
	 * the coredump spec.
	 */
	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
				NIC1_CONTROL_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->nic_regs),
				"NIC Registers");
	/* Get generic reg dump */
	for (i = 0; i < 64; i++)
		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));

	/* Segment 31 */
	/* Get indexed register values. */
	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
				INTR_STATES_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->intr_states),
				"INTR States");
	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
				CAM_ENTRIES_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->cam_entries),
				"CAM Entries");
	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		return;

	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
				ROUTING_WORDS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->nic_routing_words),
				"Routing Words");
	status = ql_get_routing_entries(qdev,
			 &mpi_coredump->nic_routing_words[0]);
	if (status)
		return;

	/* Segment 34 (Rev C. step 23) */
	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
				ETS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->ets),
				"ETS Registers");
	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		return;
}
1321 
ql_get_dump(struct ql_adapter * qdev,void * buff)1322 void ql_get_dump(struct ql_adapter *qdev, void *buff)
1323 {
1324 	/*
1325 	 * If the dump has already been taken and is stored
1326 	 * in our internal buffer and if force dump is set then
1327 	 * just start the spool to dump it to the log file
1328 	 * and also, take a snapshot of the general regs to
1329 	 * to the user's buffer or else take complete dump
1330 	 * to the user's buffer if force is not set.
1331 	 */
1332 
1333 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1334 		if (!ql_core_dump(qdev, buff))
1335 			ql_soft_reset_mpi_risc(qdev);
1336 		else
1337 			netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1338 	} else {
1339 		ql_gen_reg_dump(qdev, buff);
1340 		ql_get_core_dump(qdev);
1341 	}
1342 }
1343 
1344 /* Coredump to messages log file using separate worker thread */
ql_mpi_core_to_log(struct work_struct * work)1345 void ql_mpi_core_to_log(struct work_struct *work)
1346 {
1347 	struct ql_adapter *qdev =
1348 		container_of(work, struct ql_adapter, mpi_core_to_log.work);
1349 	u32 *tmp, count;
1350 	int i;
1351 
1352 	count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
1353 	tmp = (u32 *)qdev->mpi_coredump;
1354 	netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1355 		     "Core is dumping to log file!\n");
1356 
1357 	for (i = 0; i < count; i += 8) {
1358 		pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
1359 			"%.08x %.08x %.08x\n", i,
1360 			tmp[i + 0],
1361 			tmp[i + 1],
1362 			tmp[i + 2],
1363 			tmp[i + 3],
1364 			tmp[i + 4],
1365 			tmp[i + 5],
1366 			tmp[i + 6],
1367 			tmp[i + 7]);
1368 		msleep(5);
1369 	}
1370 }
1371 
1372 #ifdef QL_REG_DUMP
ql_dump_intr_states(struct ql_adapter * qdev)1373 static void ql_dump_intr_states(struct ql_adapter *qdev)
1374 {
1375 	int i;
1376 	u32 value;
1377 	for (i = 0; i < qdev->intr_count; i++) {
1378 		ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
1379 		value = ql_read32(qdev, INTR_EN);
1380 		pr_err("%s: Interrupt %d is %s\n",
1381 		       qdev->ndev->name, i,
1382 		       (value & INTR_EN_EN ? "enabled" : "disabled"));
1383 	}
1384 }
1385 
/*
 * Read XGMAC register 'reg' and log its value, using #reg for the name.
 * The return value of ql_read_xgmac_reg() is ignored, so 'data' may be
 * stale/uninitialized if the read fails -- acceptable for debug output.
 * Callers are expected to hold the XGMAC semaphore (see
 * ql_dump_xgmac_control_regs()).  do/while(0) keeps the multi-statement
 * body safe in unbraced if/else.
 */
#define DUMP_XGMAC(qdev, reg)					\
do {								\
	u32 data;						\
	ql_read_xgmac_reg(qdev, reg, &data);			\
	pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
} while (0)
1392 
/*
 * ql_dump_xgmac_control_regs - log the XGMAC control/config registers.
 * @qdev: adapter to query
 *
 * Takes the per-port XGMAC semaphore before reading (bailing out with a
 * message if it cannot be acquired), dumps a fixed list of registers,
 * then releases the semaphore.
 */
void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
		pr_err("%s: Couldn't get xgmac sem\n", __func__);
		return;
	}
	DUMP_XGMAC(qdev, PAUSE_SRC_LO);
	DUMP_XGMAC(qdev, PAUSE_SRC_HI);
	DUMP_XGMAC(qdev, GLOBAL_CFG);
	DUMP_XGMAC(qdev, TX_CFG);
	DUMP_XGMAC(qdev, RX_CFG);
	DUMP_XGMAC(qdev, FLOW_CTL);
	DUMP_XGMAC(qdev, PAUSE_OPCODE);
	DUMP_XGMAC(qdev, PAUSE_TIMER);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
	DUMP_XGMAC(qdev, MAC_TX_PARAMS);
	DUMP_XGMAC(qdev, MAC_RX_PARAMS);
	DUMP_XGMAC(qdev, MAC_SYS_INT);
	DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
	DUMP_XGMAC(qdev, MAC_MGMT_INT);
	DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
	DUMP_XGMAC(qdev, EXT_ARB_MODE);
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
}
1418 
/* Intentionally empty placeholder: ETS register dumping is not
 * implemented, but the stub lets ql_dump_regs() call it unconditionally.
 */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
1422 
ql_dump_cam_entries(struct ql_adapter * qdev)1423 static void ql_dump_cam_entries(struct ql_adapter *qdev)
1424 {
1425 	int i;
1426 	u32 value[3];
1427 
1428 	i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1429 	if (i)
1430 		return;
1431 	for (i = 0; i < 4; i++) {
1432 		if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
1433 			pr_err("%s: Failed read of mac index register\n",
1434 			       __func__);
1435 			return;
1436 		} else {
1437 			if (value[0])
1438 				pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
1439 				       qdev->ndev->name, i, value[1], value[0],
1440 				       value[2]);
1441 		}
1442 	}
1443 	for (i = 0; i < 32; i++) {
1444 		if (ql_get_mac_addr_reg
1445 		    (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
1446 			pr_err("%s: Failed read of mac index register\n",
1447 			       __func__);
1448 			return;
1449 		} else {
1450 			if (value[0])
1451 				pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
1452 				       qdev->ndev->name, i, value[1], value[0]);
1453 		}
1454 	}
1455 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1456 }
1457 
ql_dump_routing_entries(struct ql_adapter * qdev)1458 void ql_dump_routing_entries(struct ql_adapter *qdev)
1459 {
1460 	int i;
1461 	u32 value;
1462 	i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
1463 	if (i)
1464 		return;
1465 	for (i = 0; i < 16; i++) {
1466 		value = 0;
1467 		if (ql_get_routing_reg(qdev, i, &value)) {
1468 			pr_err("%s: Failed read of routing index register\n",
1469 			       __func__);
1470 			return;
1471 		} else {
1472 			if (value)
1473 				pr_err("%s: Routing Mask %d = 0x%.08x\n",
1474 				       qdev->ndev->name, i, value);
1475 		}
1476 	}
1477 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
1478 }
1479 
/* Log one 32-bit NIC control/status register by name.  The expansion is
 * a single expression statement, so no do/while(0) wrapper is needed.
 */
#define DUMP_REG(qdev, reg)			\
	pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
1482 
/*
 * ql_dump_regs - log the full set of NIC control/status registers plus
 * the interrupt, XGMAC, ETS, CAM and routing state for this function.
 * @qdev: adapter to dump
 */
void ql_dump_regs(struct ql_adapter *qdev)
{
	pr_err("reg dump for function #%d\n", qdev->func);
	DUMP_REG(qdev, SYS);
	DUMP_REG(qdev, RST_FO);
	DUMP_REG(qdev, FSC);
	DUMP_REG(qdev, CSR);
	DUMP_REG(qdev, ICB_RID);
	DUMP_REG(qdev, ICB_L);
	DUMP_REG(qdev, ICB_H);
	DUMP_REG(qdev, CFG);
	DUMP_REG(qdev, BIOS_ADDR);
	DUMP_REG(qdev, STS);
	/* Interrupt status/enable registers. */
	DUMP_REG(qdev, INTR_EN);
	DUMP_REG(qdev, INTR_MASK);
	DUMP_REG(qdev, ISR1);
	DUMP_REG(qdev, ISR2);
	DUMP_REG(qdev, ISR3);
	DUMP_REG(qdev, ISR4);
	DUMP_REG(qdev, REV_ID);
	DUMP_REG(qdev, FRC_ECC_ERR);
	DUMP_REG(qdev, ERR_STS);
	DUMP_REG(qdev, RAM_DBG_ADDR);
	DUMP_REG(qdev, RAM_DBG_DATA);
	DUMP_REG(qdev, ECC_ERR_CNT);
	DUMP_REG(qdev, SEM);
	DUMP_REG(qdev, GPIO_1);
	DUMP_REG(qdev, GPIO_2);
	DUMP_REG(qdev, GPIO_3);
	DUMP_REG(qdev, XGMAC_ADDR);
	DUMP_REG(qdev, XGMAC_DATA);
	DUMP_REG(qdev, NIC_ETS);
	DUMP_REG(qdev, CNA_ETS);
	DUMP_REG(qdev, FLASH_ADDR);
	DUMP_REG(qdev, FLASH_DATA);
	DUMP_REG(qdev, CQ_STOP);
	/* Queue page-table registers. */
	DUMP_REG(qdev, PAGE_TBL_RID);
	DUMP_REG(qdev, WQ_PAGE_TBL_LO);
	DUMP_REG(qdev, WQ_PAGE_TBL_HI);
	DUMP_REG(qdev, CQ_PAGE_TBL_LO);
	DUMP_REG(qdev, CQ_PAGE_TBL_HI);
	DUMP_REG(qdev, COS_DFLT_CQ1);
	DUMP_REG(qdev, COS_DFLT_CQ2);
	DUMP_REG(qdev, SPLT_HDR);
	DUMP_REG(qdev, FC_PAUSE_THRES);
	DUMP_REG(qdev, NIC_PAUSE_THRES);
	DUMP_REG(qdev, FC_ETHERTYPE);
	DUMP_REG(qdev, FC_RCV_CFG);
	DUMP_REG(qdev, NIC_RCV_CFG);
	DUMP_REG(qdev, FC_COS_TAGS);
	DUMP_REG(qdev, NIC_COS_TAGS);
	DUMP_REG(qdev, MGMT_RCV_CFG);
	DUMP_REG(qdev, XG_SERDES_ADDR);
	DUMP_REG(qdev, XG_SERDES_DATA);
	DUMP_REG(qdev, PRB_MX_ADDR);
	DUMP_REG(qdev, PRB_MX_DATA);
	/* Indirect/derived state dumped via dedicated helpers. */
	ql_dump_intr_states(qdev);
	ql_dump_xgmac_control_regs(qdev);
	ql_dump_ets_regs(qdev);
	ql_dump_cam_entries(qdev);
	ql_dump_routing_entries(qdev);
}
1545 #endif
1546 
1547 #ifdef QL_STAT_DUMP
1548 
/* Log one member of qdev->nic_stats by name; the cast to unsigned long
 * keeps %ld well-defined regardless of the counter's declared type.
 */
#define DUMP_STAT(qdev, stat)	\
	pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
1551 
ql_dump_stat(struct ql_adapter * qdev)1552 void ql_dump_stat(struct ql_adapter *qdev)
1553 {
1554 	pr_err("%s: Enter\n", __func__);
1555 	DUMP_STAT(qdev, tx_pkts);
1556 	DUMP_STAT(qdev, tx_bytes);
1557 	DUMP_STAT(qdev, tx_mcast_pkts);
1558 	DUMP_STAT(qdev, tx_bcast_pkts);
1559 	DUMP_STAT(qdev, tx_ucast_pkts);
1560 	DUMP_STAT(qdev, tx_ctl_pkts);
1561 	DUMP_STAT(qdev, tx_pause_pkts);
1562 	DUMP_STAT(qdev, tx_64_pkt);
1563 	DUMP_STAT(qdev, tx_65_to_127_pkt);
1564 	DUMP_STAT(qdev, tx_128_to_255_pkt);
1565 	DUMP_STAT(qdev, tx_256_511_pkt);
1566 	DUMP_STAT(qdev, tx_512_to_1023_pkt);
1567 	DUMP_STAT(qdev, tx_1024_to_1518_pkt);
1568 	DUMP_STAT(qdev, tx_1519_to_max_pkt);
1569 	DUMP_STAT(qdev, tx_undersize_pkt);
1570 	DUMP_STAT(qdev, tx_oversize_pkt);
1571 	DUMP_STAT(qdev, rx_bytes);
1572 	DUMP_STAT(qdev, rx_bytes_ok);
1573 	DUMP_STAT(qdev, rx_pkts);
1574 	DUMP_STAT(qdev, rx_pkts_ok);
1575 	DUMP_STAT(qdev, rx_bcast_pkts);
1576 	DUMP_STAT(qdev, rx_mcast_pkts);
1577 	DUMP_STAT(qdev, rx_ucast_pkts);
1578 	DUMP_STAT(qdev, rx_undersize_pkts);
1579 	DUMP_STAT(qdev, rx_oversize_pkts);
1580 	DUMP_STAT(qdev, rx_jabber_pkts);
1581 	DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
1582 	DUMP_STAT(qdev, rx_drop_events);
1583 	DUMP_STAT(qdev, rx_fcerr_pkts);
1584 	DUMP_STAT(qdev, rx_align_err);
1585 	DUMP_STAT(qdev, rx_symbol_err);
1586 	DUMP_STAT(qdev, rx_mac_err);
1587 	DUMP_STAT(qdev, rx_ctl_pkts);
1588 	DUMP_STAT(qdev, rx_pause_pkts);
1589 	DUMP_STAT(qdev, rx_64_pkts);
1590 	DUMP_STAT(qdev, rx_65_to_127_pkts);
1591 	DUMP_STAT(qdev, rx_128_255_pkts);
1592 	DUMP_STAT(qdev, rx_256_511_pkts);
1593 	DUMP_STAT(qdev, rx_512_to_1023_pkts);
1594 	DUMP_STAT(qdev, rx_1024_to_1518_pkts);
1595 	DUMP_STAT(qdev, rx_1519_to_max_pkts);
1596 	DUMP_STAT(qdev, rx_len_err_pkts);
1597 };
1598 #endif
1599 
1600 #ifdef QL_DEV_DUMP
1601 
/* Log a scalar qdev field; 'type' is the printf conversion for it. */
#define DUMP_QDEV_FIELD(qdev, type, field)		\
	pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
/* Log a DMA-address field, widened so one format works on 32/64-bit. */
#define DUMP_QDEV_DMA_FIELD(qdev, field)		\
	pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
/* Log one member of an element of a qdev-embedded array.
 * Fix: dropped the trailing semicolon from the expansion -- callers
 * terminate the statement themselves, and the embedded ';' produced
 * double semicolons and would break an unbraced if/else.
 */
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
	pr_err("%s[%d].%s = " type "\n",		 \
	       #array, index, #field, qdev->array[index].field)
/*
 * ql_dump_qdev - log the adapter's software state: flags, mapped
 * regions, shadow-register areas, MSI-X table (when allocated),
 * per-vector interrupt contexts and ring bookkeeping.
 * @qdev: adapter to dump
 */
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;
	DUMP_QDEV_FIELD(qdev, "%lx", flags);
	DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
	DUMP_QDEV_FIELD(qdev, "%p", pdev);
	DUMP_QDEV_FIELD(qdev, "%p", ndev);
	DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
	DUMP_QDEV_FIELD(qdev, "%p", reg_base);
	DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
	DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
	DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	/* MSI-X table entries exist only when MSI-X was enabled. */
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
		}
	for (i = 0; i < qdev->intr_count; i++) {
		DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
	}
	DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
	DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
	DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
}
1652 #endif
1653 
1654 #ifdef QL_CB_DUMP
/*
 * ql_dump_wqicb - log a work-queue init control block.
 * @wqicb: control block to print (fields are little-endian on the wire,
 *         converted with le*_to_cpu before printing)
 */
void ql_dump_wqicb(struct wqicb *wqicb)
{
	pr_err("Dumping wqicb stuff...\n");
	pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
	pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
	pr_err("wqicb->cq_id_rss = %d\n",
	       le16_to_cpu(wqicb->cq_id_rss));
	pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
	pr_err("wqicb->wq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(wqicb->addr));
	pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
}
1668 
/*
 * ql_dump_tx_ring - log the software state of one TX (work) ring.
 * @tx_ring: ring to print; a NULL pointer is silently ignored
 */
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	if (tx_ring == NULL)
		return;
	pr_err("===================== Dumping tx_ring %d ===============\n",
	       tx_ring->wq_id);
	pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
	pr_err("tx_ring->base_dma = 0x%llx\n",
	       (unsigned long long) tx_ring->wq_base_dma);
	/* Shadow register is only dereferenced when it has been mapped. */
	pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
	       tx_ring->cnsmr_idx_sh_reg,
	       tx_ring->cnsmr_idx_sh_reg
			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
	pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
	pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
	pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
	pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
	pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
	pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
	pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
	pr_err("tx_ring->q = %p\n", tx_ring->q);
	pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
}
1692 
/*
 * ql_dump_ricb - log an RSS init control block: base CQ, flag bits,
 * mask and the hash CQ-id / IPv6 / IPv4 hash key tables.
 * @ricb: control block to print
 */
void ql_dump_ricb(struct ricb *ricb)
{
	int i;
	pr_err("===================== Dumping ricb ===============\n");
	pr_err("Dumping ricb stuff...\n");

	pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
	/* NOTE(review): RSS_L4K is tested against base_cq while the rest
	 * are tested against flags -- presumably intentional per the
	 * hardware field layout; verify against the register spec.
	 */
	pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
	       ricb->flags & RSS_LI ? "RSS_LI " : "",
	       ricb->flags & RSS_LB ? "RSS_LB " : "",
	       ricb->flags & RSS_LM ? "RSS_LM " : "",
	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv4_hash_key[i]));
}
1721 
/*
 * ql_dump_cqicb - log a completion-queue init control block, including
 * the associated large/small buffer queue parameters.
 * @cqicb: control block to print (multi-byte fields are little-endian,
 *         converted with le*_to_cpu before printing)
 */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	pr_err("Dumping cqicb stuff...\n");

	pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
	pr_err("cqicb->flags = %x\n", cqicb->flags);
	pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
	pr_err("cqicb->addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->addr));
	pr_err("cqicb->prod_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
	pr_err("cqicb->pkt_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->pkt_delay));
	pr_err("cqicb->irq_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->irq_delay));
	pr_err("cqicb->lbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
	pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	pr_err("cqicb->lbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_len));
	pr_err("cqicb->sbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
	pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	pr_err("cqicb->sbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_len));
}
1750 
/*
 * ql_dump_rx_ring - log the software state of one RX (completion) ring:
 * the completion queue itself, then its large and small buffer queues.
 * @rx_ring: ring to print; a NULL pointer is silently ignored
 */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (rx_ring == NULL)
		return;
	pr_err("===================== Dumping rx_ring %d ===============\n",
	       rx_ring->cq_id);
	pr_err("Dumping rx_ring %d, type = %s%s%s\n",
	       rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
	/* Completion queue state. */
	pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
	pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
	pr_err("rx_ring->cq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->cq_base_dma);
	pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
	pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
	/* Shadow register is only dereferenced when it has been mapped. */
	pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
	       rx_ring->prod_idx_sh_reg,
	       rx_ring->prod_idx_sh_reg
			? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
	       rx_ring->cnsmr_idx_db_reg);
	pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
	pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
	pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);

	/* Large buffer queue state. */
	pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
	pr_err("rx_ring->lbq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->lbq_base_dma);
	pr_err("rx_ring->lbq_base_indirect = %p\n",
	       rx_ring->lbq_base_indirect);
	pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
	       (unsigned long long) rx_ring->lbq_base_indirect_dma);
	pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
	pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
	pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
	pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
	       rx_ring->lbq_prod_idx_db_reg);
	pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
	pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
	pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
	pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
	pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);

	/* Small buffer queue state. */
	pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
	pr_err("rx_ring->sbq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->sbq_base_dma);
	pr_err("rx_ring->sbq_base_indirect = %p\n",
	       rx_ring->sbq_base_indirect);
	pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
	       (unsigned long long) rx_ring->sbq_base_indirect_dma);
	pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
	pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
	pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
	pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
	       rx_ring->sbq_prod_idx_db_reg);
	pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
	pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
	pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
	pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
	pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
	pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
}
1819 
/*
 * Read back a hardware control block of @size bytes for queue @q_id
 * (selected by @bit) and dump it in human-readable form.  The block is
 * uploaded into a scratch buffer via ql_write_cfg() and decoded by the
 * matching dump helper.
 */
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
{
	void *cb;

	pr_err("%s: Enter\n", __func__);

	/* GFP_ATOMIC: this may run from contexts that cannot sleep. */
	cb = kmalloc(size, GFP_ATOMIC);
	if (!cb)
		return;

	if (ql_write_cfg(qdev, cb, size, bit, q_id)) {
		pr_err("%s: Failed to upload control block!\n", __func__);
	} else {
		switch (bit) {
		case CFG_DRQ:
			ql_dump_wqicb((struct wqicb *)cb);
			break;
		case CFG_DCQ:
			ql_dump_cqicb((struct cqicb *)cb);
			break;
		case CFG_DR:
			ql_dump_ricb((struct ricb *)cb);
			break;
		default:
			pr_err("%s: Invalid bit value = %x\n", __func__, bit);
			break;
		}
	}
	kfree(cb);
}
1851 #endif
1852 
1853 #ifdef QL_OB_DUMP
ql_dump_tx_desc(struct tx_buf_desc * tbd)1854 void ql_dump_tx_desc(struct tx_buf_desc *tbd)
1855 {
1856 	pr_err("tbd->addr  = 0x%llx\n",
1857 	       le64_to_cpu((u64) tbd->addr));
1858 	pr_err("tbd->len   = %d\n",
1859 	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1860 	pr_err("tbd->flags = %s %s\n",
1861 	       tbd->len & TX_DESC_C ? "C" : ".",
1862 	       tbd->len & TX_DESC_E ? "E" : ".");
1863 	tbd++;
1864 	pr_err("tbd->addr  = 0x%llx\n",
1865 	       le64_to_cpu((u64) tbd->addr));
1866 	pr_err("tbd->len   = %d\n",
1867 	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1868 	pr_err("tbd->flags = %s %s\n",
1869 	       tbd->len & TX_DESC_C ? "C" : ".",
1870 	       tbd->len & TX_DESC_E ? "E" : ".");
1871 	tbd++;
1872 	pr_err("tbd->addr  = 0x%llx\n",
1873 	       le64_to_cpu((u64) tbd->addr));
1874 	pr_err("tbd->len   = %d\n",
1875 	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1876 	pr_err("tbd->flags = %s %s\n",
1877 	       tbd->len & TX_DESC_C ? "C" : ".",
1878 	       tbd->len & TX_DESC_E ? "E" : ".");
1879 
1880 }
1881 
ql_dump_ob_mac_iocb(struct ob_mac_iocb_req * ob_mac_iocb)1882 void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
1883 {
1884 	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
1885 	    (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
1886 	struct tx_buf_desc *tbd;
1887 	u16 frame_len;
1888 
1889 	pr_err("%s\n", __func__);
1890 	pr_err("opcode         = %s\n",
1891 	       (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
1892 	pr_err("flags1          = %s %s %s %s %s\n",
1893 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
1894 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
1895 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
1896 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
1897 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
1898 	pr_err("flags2          = %s %s %s\n",
1899 	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
1900 	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
1901 	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
1902 	pr_err("flags3          = %s %s %s\n",
1903 	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
1904 	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
1905 	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
1906 	pr_err("tid = %x\n", ob_mac_iocb->tid);
1907 	pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
1908 	pr_err("vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
1909 	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
1910 		pr_err("frame_len      = %d\n",
1911 		       le32_to_cpu(ob_mac_tso_iocb->frame_len));
1912 		pr_err("mss      = %d\n",
1913 		       le16_to_cpu(ob_mac_tso_iocb->mss));
1914 		pr_err("prot_hdr_len   = %d\n",
1915 		       le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
1916 		pr_err("hdr_offset     = 0x%.04x\n",
1917 		       le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
1918 		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
1919 	} else {
1920 		pr_err("frame_len      = %d\n",
1921 		       le16_to_cpu(ob_mac_iocb->frame_len));
1922 		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
1923 	}
1924 	tbd = &ob_mac_iocb->tbd[0];
1925 	ql_dump_tx_desc(tbd);
1926 }
1927 
ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp * ob_mac_rsp)1928 void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
1929 {
1930 	pr_err("%s\n", __func__);
1931 	pr_err("opcode         = %d\n", ob_mac_rsp->opcode);
1932 	pr_err("flags          = %s %s %s %s %s %s %s\n",
1933 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
1934 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
1935 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
1936 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
1937 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
1938 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
1939 	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
1940 	pr_err("tid = %x\n", ob_mac_rsp->tid);
1941 }
1942 #endif
1943 
1944 #ifdef QL_IB_DUMP
/*
 * Dump an inbound MAC IOCB (receive completion) in human-readable form.
 * Single-bit flags print their symbolic name; multi-bit fields
 * (multicast type, error type, RSS hash type) are isolated with their
 * mask and compared against each encoded value.
 */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode         = 0x%x\n", ib_mac_rsp->opcode);
	pr_err("flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");

	/* Multicast match type, only when any multicast bit is set. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		pr_err("%s%s%s Multicast\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");

	pr_err("flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");

	/* Frame error type, only when an error was reported. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		pr_err("%s%s%s%s%s error\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");

	pr_err("flags3 = %s%s\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");

	/* RSS hash type, only when the frame was RSS-classified. */
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("RSS flags = %s%s%s%s\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");

	/* Payload buffer: length and DMA address of the received data. */
	pr_err("data_len	= %d\n",
	       le32_to_cpu(ib_mac_rsp->data_len));
	pr_err("data_addr    = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("rss    = %x\n",
		       le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		pr_err("vlan_id    = %x\n",
		       le16_to_cpu(ib_mac_rsp->vlan_id));

	pr_err("flags4 = %s%s%s\n",
		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");

	/* Header buffer is only valid when the HV bit is set. */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		pr_err("hdr length	= %d\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		pr_err("hdr addr    = 0x%llx\n",
		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
2024 #endif
2025 
2026 #ifdef QL_ALL_DUMP
ql_dump_all(struct ql_adapter * qdev)2027 void ql_dump_all(struct ql_adapter *qdev)
2028 {
2029 	int i;
2030 
2031 	QL_DUMP_REGS(qdev);
2032 	QL_DUMP_QDEV(qdev);
2033 	for (i = 0; i < qdev->tx_ring_count; i++) {
2034 		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
2035 		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
2036 	}
2037 	for (i = 0; i < qdev->rx_ring_count; i++) {
2038 		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
2039 		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
2040 	}
2041 }
2042 #endif
2043