1 /******************************************************************************
2  * This software may be used and distributed according to the terms of
3  * the GNU General Public License (GPL), incorporated herein by reference.
4  * Drivers based on or derived from this code fall under the GPL and must
5  * retain the authorship, copyright and license notice.  This file is not
6  * a complete program and may only be used when the entire operating
7  * system is licensed under the GPL.
8  * See the file COPYING in this distribution for more information.
9  *
10  * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11  *                 Virtualized Server Adapter.
12  * Copyright(c) 2002-2010 Exar Corp.
13  ******************************************************************************/
14 #include <linux/etherdevice.h>
15 #include <linux/prefetch.h>
16 
17 #include "vxge-traffic.h"
18 #include "vxge-config.h"
19 #include "vxge-main.h"
20 
21 /*
22  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
23  * @vp: Virtual Path handle.
24  *
25  * Enable vpath interrupts. The function is to be executed last in the
26  * vpath initialization sequence.
27  *
28  * See also: vxge_hw_vpath_intr_disable()
29  */
30 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
31 {
32 	u64 val64;
33 
34 	struct __vxge_hw_virtualpath *vpath;
35 	struct vxge_hw_vpath_reg __iomem *vp_reg;
36 	enum vxge_hw_status status = VXGE_HW_OK;
37 	if (vp == NULL) {
38 		status = VXGE_HW_ERR_INVALID_HANDLE;
39 		goto exit;
40 	}
41 
42 	vpath = vp->vpath;
43 
44 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
45 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
46 		goto exit;
47 	}
48 
49 	vp_reg = vpath->vp_reg;
50 
51 	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
52 
53 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
54 			&vp_reg->general_errors_reg);
55 
56 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
57 			&vp_reg->pci_config_errors_reg);
58 
59 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
60 			&vp_reg->mrpcim_to_vpath_alarm_reg);
61 
62 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
63 			&vp_reg->srpcim_to_vpath_alarm_reg);
64 
65 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
66 			&vp_reg->vpath_ppif_int_status);
67 
68 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
69 			&vp_reg->srpcim_msg_to_vpath_reg);
70 
71 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
72 			&vp_reg->vpath_pcipif_int_status);
73 
74 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
75 			&vp_reg->prc_alarm_reg);
76 
77 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
78 			&vp_reg->wrdma_alarm_status);
79 
80 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
81 			&vp_reg->asic_ntwk_vp_err_reg);
82 
83 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
84 			&vp_reg->xgmac_vp_int_status);
85 
86 	val64 = readq(&vp_reg->vpath_general_int_status);
87 
88 	/* Mask unwanted interrupts */
89 
90 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
91 			&vp_reg->vpath_pcipif_int_mask);
92 
93 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
94 			&vp_reg->srpcim_msg_to_vpath_mask);
95 
96 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
97 			&vp_reg->srpcim_to_vpath_alarm_mask);
98 
99 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
100 			&vp_reg->mrpcim_to_vpath_alarm_mask);
101 
102 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
103 			&vp_reg->pci_config_errors_mask);
104 
105 	/* Unmask the individual interrupts */
106 
107 	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
108 		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
109 		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
110 		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
111 		&vp_reg->general_errors_mask);
112 
113 	__vxge_hw_pio_mem_write32_upper(
114 		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
115 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
116 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
117 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
118 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
119 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
120 		&vp_reg->kdfcctl_errors_mask);
121 
122 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
123 
124 	__vxge_hw_pio_mem_write32_upper(
125 		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
126 		&vp_reg->prc_alarm_mask);
127 
128 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
129 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
130 
131 	if (vpath->hldev->first_vp_id != vpath->vp_id)
132 		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
133 			&vp_reg->asic_ntwk_vp_err_mask);
134 	else
135 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
136 		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
137 		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
138 		&vp_reg->asic_ntwk_vp_err_mask);
139 
140 	__vxge_hw_pio_mem_write32_upper(0,
141 		&vp_reg->vpath_general_int_mask);
142 exit:
143 	return status;
144 
145 }
146 
147 /*
148  * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
149  * @vp: Virtual Path handle.
150  *
151  * Disable vpath interrupts. The function is to be executed last in the
152  * vpath teardown sequence.
153  *
154  * See also: vxge_hw_vpath_intr_enable()
155  */
156 enum vxge_hw_status vxge_hw_vpath_intr_disable(
157 			struct __vxge_hw_vpath_handle *vp)
158 {
159 	u64 val64;
160 
161 	struct __vxge_hw_virtualpath *vpath;
162 	enum vxge_hw_status status = VXGE_HW_OK;
163 	struct vxge_hw_vpath_reg __iomem *vp_reg;
164 	if (vp == NULL) {
165 		status = VXGE_HW_ERR_INVALID_HANDLE;
166 		goto exit;
167 	}
168 
169 	vpath = vp->vpath;
170 
171 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
172 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
173 		goto exit;
174 	}
175 	vp_reg = vpath->vp_reg;
176 
177 	__vxge_hw_pio_mem_write32_upper(
178 		(u32)VXGE_HW_INTR_MASK_ALL,
179 		&vp_reg->vpath_general_int_mask);
180 
181 	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
182 
183 	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
184 
185 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
186 			&vp_reg->general_errors_mask);
187 
188 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
189 			&vp_reg->pci_config_errors_mask);
190 
191 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
192 			&vp_reg->mrpcim_to_vpath_alarm_mask);
193 
194 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
195 			&vp_reg->srpcim_to_vpath_alarm_mask);
196 
197 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
198 			&vp_reg->vpath_ppif_int_mask);
199 
200 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
201 			&vp_reg->srpcim_msg_to_vpath_mask);
202 
203 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
204 			&vp_reg->vpath_pcipif_int_mask);
205 
206 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
207 			&vp_reg->wrdma_alarm_mask);
208 
209 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
210 			&vp_reg->prc_alarm_mask);
211 
212 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
213 			&vp_reg->xgmac_vp_int_mask);
214 
215 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
216 			&vp_reg->asic_ntwk_vp_err_mask);
217 
218 exit:
219 	return status;
220 }
221 
222 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
223 {
224 	struct vxge_hw_vpath_reg __iomem *vp_reg;
225 	struct vxge_hw_vp_config *config;
226 	u64 val64;
227 
228 	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
229 		return;
230 
231 	vp_reg = fifo->vp_reg;
232 	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
233 
234 	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
235 		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
236 		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
237 		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
238 		fifo->tim_tti_cfg1_saved = val64;
239 		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
240 	}
241 }
242 
243 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
244 {
245 	u64 val64 = ring->tim_rti_cfg1_saved;
246 
247 	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
248 	ring->tim_rti_cfg1_saved = val64;
249 	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
250 }
251 
252 void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
253 {
254 	u64 val64 = fifo->tim_tti_cfg3_saved;
255 	u64 timer = (fifo->rtimer * 1000) / 272;
256 
257 	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
258 	if (timer)
259 		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
260 			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
261 
262 	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
263 	/* tti_cfg3_saved is not updated again because it is
264 	 * initialized at one place only - init time.
265 	 */
266 }
267 
268 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
269 {
270 	u64 val64 = ring->tim_rti_cfg3_saved;
271 	u64 timer = (ring->rtimer * 1000) / 272;
272 
273 	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
274 	if (timer)
275 		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
276 			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
277 
278 	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
279 	/* rti_cfg3_saved is not updated again because it is
280 	 * initialized at one place only - init time.
281 	 */
282 }
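/*
 * Note on the conversion in the two helpers above: rtimer is assumed to be
 * expressed in microseconds, so multiplying by 1000 gives nanoseconds and
 * the divide by 272 appears to convert to the ~272 ns units of the TIM_CFG3
 * RTIMER_VAL field. A resulting value of 0 leaves the restriction timer
 * cleared.
 */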
283 
284 /**
285  * vxge_hw_channel_msix_mask - Mask MSIX Vector.
286  * @channel: Channel handle (rx or tx)
287  * @msix_id:  MSIX ID
288  *
289  * The function masks the msix interrupt for the given msix_id
290  *
291  * Returns: void
292  */
293 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
294 {
295 
296 	__vxge_hw_pio_mem_write32_upper(
297 		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
298 		&channel->common_reg->set_msix_mask_vect[msix_id%4]);
299 }
300 
301 /**
302  * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
303  * @channel: Channel handle (rx or tx)
304  * @msix_id:  MSIX ID
305  *
306  * The function unmasks the msix interrupt for the given msix_id
307  *
308  * Returns: void
309  */
310 void
311 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
312 {
313 
314 	__vxge_hw_pio_mem_write32_upper(
315 		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
316 		&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
317 }
318 
319 /**
320  * vxge_hw_channel_msix_clear - Clear (unmask) the MSIX Vector in oneshot mode.
321  * @channel: Channel handle (rx or tx)
322  * @msix_id:  MSIX ID
323  *
324  * The function unmasks the msix interrupt for the given msix_id
325  * if configured in MSIX oneshot mode
326  *
327  * Returns: void
328  */
329 void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
330 {
331 	__vxge_hw_pio_mem_write32_upper(
332 		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
333 		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
334 }
335 
336 /**
337  * vxge_hw_device_set_intr_type - Updates the configuration
338  *		with new interrupt type.
339  * @hldev: HW device handle.
340  * @intr_mode: New interrupt type
341  */
342 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
343 {
344 
345 	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
346 	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
347 	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
348 	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
349 		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
350 
351 	hldev->config.intr_mode = intr_mode;
352 	return intr_mode;
353 }
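/*
 * Illustrative sketch (not part of the driver): how a caller might pin the
 * interrupt mode before enabling interrupts. "requested_mode" is a
 * hypothetical variable; unsupported values silently fall back to
 * VXGE_HW_INTR_MODE_IRQLINE, as implemented above.
 *
 *	u32 mode = vxge_hw_device_set_intr_type(hldev, requested_mode);
 *	if (mode != requested_mode)
 *		pr_info("vxge: falling back to INTA\n");
 *	vxge_hw_device_intr_enable(hldev);
 */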
354 
355 /**
356  * vxge_hw_device_intr_enable - Enable interrupts.
357  * @hldev: HW device handle.
358  * @op: One of the enum vxge_hw_device_intr enumerated values specifying
359  *      the type(s) of interrupts to enable.
360  *
361  * Enable Titan interrupts. The function is to be executed last in the
362  * Titan initialization sequence.
363  *
364  * See also: vxge_hw_device_intr_disable()
365  */
366 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
367 {
368 	u32 i;
369 	u64 val64;
370 	u32 val32;
371 
372 	vxge_hw_device_mask_all(hldev);
373 
374 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
375 
376 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
377 			continue;
378 
379 		vxge_hw_vpath_intr_enable(
380 			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
381 	}
382 
383 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
384 		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
385 			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
386 
387 		if (val64 != 0) {
388 			writeq(val64, &hldev->common_reg->tim_int_status0);
389 
390 			writeq(~val64, &hldev->common_reg->tim_int_mask0);
391 		}
392 
393 		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
394 			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
395 
396 		if (val32 != 0) {
397 			__vxge_hw_pio_mem_write32_upper(val32,
398 					&hldev->common_reg->tim_int_status1);
399 
400 			__vxge_hw_pio_mem_write32_upper(~val32,
401 					&hldev->common_reg->tim_int_mask1);
402 		}
403 	}
404 
405 	val64 = readq(&hldev->common_reg->titan_general_int_status);
406 
407 	vxge_hw_device_unmask_all(hldev);
408 }
409 
410 /**
411  * vxge_hw_device_intr_disable - Disable Titan interrupts.
412  * @hldev: HW device handle.
413  * @op: One of the enum vxge_hw_device_intr enumerated values specifying
414  *      the type(s) of interrupts to disable.
415  *
416  * Disable Titan interrupts.
417  *
418  * See also: vxge_hw_device_intr_enable()
419  */
420 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
421 {
422 	u32 i;
423 
424 	vxge_hw_device_mask_all(hldev);
425 
426 	/* mask all the tim interrupts */
427 	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
428 	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
429 		&hldev->common_reg->tim_int_mask1);
430 
431 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
432 
433 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
434 			continue;
435 
436 		vxge_hw_vpath_intr_disable(
437 			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
438 	}
439 }
440 
441 /**
442  * vxge_hw_device_mask_all - Mask all device interrupts.
443  * @hldev: HW device handle.
444  *
445  * Mask	all device interrupts.
446  *
447  * See also: vxge_hw_device_unmask_all()
448  */
449 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
450 {
451 	u64 val64;
452 
453 	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
454 		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
455 
456 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
457 				&hldev->common_reg->titan_mask_all_int);
458 }
459 
460 /**
461  * vxge_hw_device_unmask_all - Unmask all device interrupts.
462  * @hldev: HW device handle.
463  *
464  * Unmask all device interrupts.
465  *
466  * See also: vxge_hw_device_mask_all()
467  */
468 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
469 {
470 	u64 val64 = 0;
471 
472 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
473 		val64 =  VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
474 
475 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
476 			&hldev->common_reg->titan_mask_all_int);
477 }
478 
479 /**
480  * vxge_hw_device_flush_io - Flush io writes.
481  * @hldev: HW device handle.
482  *
483  * The function	performs a read operation to flush io writes.
484  *
485  * Returns: void
486  */
487 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
488 {
489 	u32 val32;
490 
491 	val32 = readl(&hldev->common_reg->titan_general_int_status);
492 }
493 
494 /**
495  * __vxge_hw_device_handle_error - Handle error
496  * @hldev: HW device
497  * @vp_id: Vpath Id
498  * @type: Error type. Please see enum vxge_hw_event{}
499  *
500  * Handle error.
501  */
502 static enum vxge_hw_status
503 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
504 			      enum vxge_hw_event type)
505 {
506 	switch (type) {
507 	case VXGE_HW_EVENT_UNKNOWN:
508 		break;
509 	case VXGE_HW_EVENT_RESET_START:
510 	case VXGE_HW_EVENT_RESET_COMPLETE:
511 	case VXGE_HW_EVENT_LINK_DOWN:
512 	case VXGE_HW_EVENT_LINK_UP:
513 		goto out;
514 	case VXGE_HW_EVENT_ALARM_CLEARED:
515 		goto out;
516 	case VXGE_HW_EVENT_ECCERR:
517 	case VXGE_HW_EVENT_MRPCIM_ECCERR:
518 		goto out;
519 	case VXGE_HW_EVENT_FIFO_ERR:
520 	case VXGE_HW_EVENT_VPATH_ERR:
521 	case VXGE_HW_EVENT_CRITICAL_ERR:
522 	case VXGE_HW_EVENT_SERR:
523 		break;
524 	case VXGE_HW_EVENT_SRPCIM_SERR:
525 	case VXGE_HW_EVENT_MRPCIM_SERR:
526 		goto out;
527 	case VXGE_HW_EVENT_SLOT_FREEZE:
528 		break;
529 	default:
530 		vxge_assert(0);
531 		goto out;
532 	}
533 
534 	/* notify driver */
535 	if (hldev->uld_callbacks->crit_err)
536 		hldev->uld_callbacks->crit_err(
537 			(struct __vxge_hw_device *)hldev,
538 			type, vp_id);
539 out:
540 
541 	return VXGE_HW_OK;
542 }
543 
544 /*
545  * __vxge_hw_device_handle_link_down_ind
546  * @hldev: HW device handle.
547  *
548  * Link down indication handler. The function is invoked by HW when
549  * Titan indicates that the link is down.
550  */
551 static enum vxge_hw_status
552 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
553 {
554 	/*
555 	 * If the link state is already down, return.
556 	 */
557 	if (hldev->link_state == VXGE_HW_LINK_DOWN)
558 		goto exit;
559 
560 	hldev->link_state = VXGE_HW_LINK_DOWN;
561 
562 	/* notify driver */
563 	if (hldev->uld_callbacks->link_down)
564 		hldev->uld_callbacks->link_down(hldev);
565 exit:
566 	return VXGE_HW_OK;
567 }
568 
569 /*
570  * __vxge_hw_device_handle_link_up_ind
571  * @hldev: HW device handle.
572  *
573  * Link up indication handler. The function is invoked by HW when
574  * Titan indicates that the link is up for a programmable amount of time.
575  */
576 static enum vxge_hw_status
577 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
578 {
579 	/*
580 	 * If the link state is already up, return.
581 	 */
582 	if (hldev->link_state == VXGE_HW_LINK_UP)
583 		goto exit;
584 
585 	hldev->link_state = VXGE_HW_LINK_UP;
586 
587 	/* notify driver */
588 	if (hldev->uld_callbacks->link_up)
589 		hldev->uld_callbacks->link_up(hldev);
590 exit:
591 	return VXGE_HW_OK;
592 }
593 
594 /*
595  * __vxge_hw_vpath_alarm_process - Process Alarms.
596  * @vpath: Virtual Path.
597  * @skip_alarms: Do not clear the alarms
598  *
599  * Process vpath alarms.
600  *
601  */
602 static enum vxge_hw_status
603 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
604 			      u32 skip_alarms)
605 {
606 	u64 val64;
607 	u64 alarm_status;
608 	u64 pic_status;
609 	struct __vxge_hw_device *hldev = NULL;
610 	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
611 	u64 mask64;
612 	struct vxge_hw_vpath_stats_sw_info *sw_stats;
613 	struct vxge_hw_vpath_reg __iomem *vp_reg;
614 
615 	if (vpath == NULL) {
616 		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
617 			alarm_event);
618 		goto out2;
619 	}
620 
621 	hldev = vpath->hldev;
622 	vp_reg = vpath->vp_reg;
623 	alarm_status = readq(&vp_reg->vpath_general_int_status);
624 
625 	if (alarm_status == VXGE_HW_ALL_FOXES) {
626 		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
627 			alarm_event);
628 		goto out;
629 	}
630 
631 	sw_stats = vpath->sw_stats;
632 
633 	if (alarm_status & ~(
634 		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
635 		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
636 		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
637 		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
638 		sw_stats->error_stats.unknown_alarms++;
639 
640 		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
641 			alarm_event);
642 		goto out;
643 	}
644 
645 	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
646 
647 		val64 = readq(&vp_reg->xgmac_vp_int_status);
648 
649 		if (val64 &
650 		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
651 
652 			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
653 
654 			if (((val64 &
655 			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
656 			     (!(val64 &
657 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
658 			    ((val64 &
659 			     VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
660 			     (!(val64 &
661 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
662 				     ))) {
663 				sw_stats->error_stats.network_sustained_fault++;
664 
665 				writeq(
666 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
667 					&vp_reg->asic_ntwk_vp_err_mask);
668 
669 				__vxge_hw_device_handle_link_down_ind(hldev);
670 				alarm_event = VXGE_HW_SET_LEVEL(
671 					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
672 			}
673 
674 			if (((val64 &
675 			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
676 			     (!(val64 &
677 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
678 			    ((val64 &
679 			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
680 			     (!(val64 &
681 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
682 				     ))) {
683 
684 				sw_stats->error_stats.network_sustained_ok++;
685 
686 				writeq(
687 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
688 					&vp_reg->asic_ntwk_vp_err_mask);
689 
690 				__vxge_hw_device_handle_link_up_ind(hldev);
691 				alarm_event = VXGE_HW_SET_LEVEL(
692 					VXGE_HW_EVENT_LINK_UP, alarm_event);
693 			}
694 
695 			writeq(VXGE_HW_INTR_MASK_ALL,
696 				&vp_reg->asic_ntwk_vp_err_reg);
697 
698 			alarm_event = VXGE_HW_SET_LEVEL(
699 				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
700 
701 			if (skip_alarms)
702 				return VXGE_HW_OK;
703 		}
704 	}
705 
706 	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
707 
708 		pic_status = readq(&vp_reg->vpath_ppif_int_status);
709 
710 		if (pic_status &
711 		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
712 
713 			val64 = readq(&vp_reg->general_errors_reg);
714 			mask64 = readq(&vp_reg->general_errors_mask);
715 
716 			if ((val64 &
717 				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
718 				~mask64) {
719 				sw_stats->error_stats.ini_serr_det++;
720 
721 				alarm_event = VXGE_HW_SET_LEVEL(
722 					VXGE_HW_EVENT_SERR, alarm_event);
723 			}
724 
725 			if ((val64 &
726 			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
727 				~mask64) {
728 				sw_stats->error_stats.dblgen_fifo0_overflow++;
729 
730 				alarm_event = VXGE_HW_SET_LEVEL(
731 					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
732 			}
733 
734 			if ((val64 &
735 			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
736 				~mask64)
737 				sw_stats->error_stats.statsb_pif_chain_error++;
738 
739 			if ((val64 &
740 			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
741 				~mask64)
742 				sw_stats->error_stats.statsb_drop_timeout++;
743 
744 			if ((val64 &
745 				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
746 				~mask64)
747 				sw_stats->error_stats.target_illegal_access++;
748 
749 			if (!skip_alarms) {
750 				writeq(VXGE_HW_INTR_MASK_ALL,
751 					&vp_reg->general_errors_reg);
752 				alarm_event = VXGE_HW_SET_LEVEL(
753 					VXGE_HW_EVENT_ALARM_CLEARED,
754 					alarm_event);
755 			}
756 		}
757 
758 		if (pic_status &
759 		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
760 
761 			val64 = readq(&vp_reg->kdfcctl_errors_reg);
762 			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
763 
764 			if ((val64 &
765 			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
766 				~mask64) {
767 				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
768 
769 				alarm_event = VXGE_HW_SET_LEVEL(
770 					VXGE_HW_EVENT_FIFO_ERR,
771 					alarm_event);
772 			}
773 
774 			if ((val64 &
775 			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
776 				~mask64) {
777 				sw_stats->error_stats.kdfcctl_fifo0_poison++;
778 
779 				alarm_event = VXGE_HW_SET_LEVEL(
780 					VXGE_HW_EVENT_FIFO_ERR,
781 					alarm_event);
782 			}
783 
784 			if ((val64 &
785 			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
786 				~mask64) {
787 				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
788 
789 				alarm_event = VXGE_HW_SET_LEVEL(
790 					VXGE_HW_EVENT_FIFO_ERR,
791 					alarm_event);
792 			}
793 
794 			if (!skip_alarms) {
795 				writeq(VXGE_HW_INTR_MASK_ALL,
796 					&vp_reg->kdfcctl_errors_reg);
797 				alarm_event = VXGE_HW_SET_LEVEL(
798 					VXGE_HW_EVENT_ALARM_CLEARED,
799 					alarm_event);
800 			}
801 		}
802 
803 	}
804 
805 	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
806 
807 		val64 = readq(&vp_reg->wrdma_alarm_status);
808 
809 		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
810 
811 			val64 = readq(&vp_reg->prc_alarm_reg);
812 			mask64 = readq(&vp_reg->prc_alarm_mask);
813 
814 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
815 				~mask64)
816 				sw_stats->error_stats.prc_ring_bumps++;
817 
818 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
819 				~mask64) {
820 				sw_stats->error_stats.prc_rxdcm_sc_err++;
821 
822 				alarm_event = VXGE_HW_SET_LEVEL(
823 					VXGE_HW_EVENT_VPATH_ERR,
824 					alarm_event);
825 			}
826 
827 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
828 				& ~mask64) {
829 				sw_stats->error_stats.prc_rxdcm_sc_abort++;
830 
831 				alarm_event = VXGE_HW_SET_LEVEL(
832 						VXGE_HW_EVENT_VPATH_ERR,
833 						alarm_event);
834 			}
835 
836 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
837 				 & ~mask64) {
838 				sw_stats->error_stats.prc_quanta_size_err++;
839 
840 				alarm_event = VXGE_HW_SET_LEVEL(
841 					VXGE_HW_EVENT_VPATH_ERR,
842 					alarm_event);
843 			}
844 
845 			if (!skip_alarms) {
846 				writeq(VXGE_HW_INTR_MASK_ALL,
847 					&vp_reg->prc_alarm_reg);
848 				alarm_event = VXGE_HW_SET_LEVEL(
849 						VXGE_HW_EVENT_ALARM_CLEARED,
850 						alarm_event);
851 			}
852 		}
853 	}
854 out:
855 	hldev->stats.sw_dev_err_stats.vpath_alarms++;
856 out2:
857 	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
858 		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
859 		return VXGE_HW_OK;
860 
861 	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
862 
863 	if (alarm_event == VXGE_HW_EVENT_SERR)
864 		return VXGE_HW_ERR_CRITICAL;
865 
866 	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
867 		VXGE_HW_ERR_SLOT_FREEZE :
868 		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
869 		VXGE_HW_ERR_VPATH;
870 }
871 
872 /**
873  * vxge_hw_device_begin_irq - Begin IRQ processing.
874  * @hldev: HW device handle.
875  * @skip_alarms: Do not clear the alarms
876  * @reason: "Reason" for the interrupt, the value of Titan's
877  *	general_int_status register.
878  *
879  * The function performs two actions. It first checks whether the interrupt was
880  * raised by the device (relevant for a shared IRQ) and then masks the device interrupts.
881  *
882  * Note:
883  * vxge_hw_device_begin_irq() does not flush MMIO writes through the
884  * bridge. Therefore, two back-to-back interrupts are potentially possible.
885  *
886  * Returns: 0, if the interrupt is not "ours" (note that in this case the
887  * device remains enabled).
888  * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
889  * status.
890  */
891 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
892 					     u32 skip_alarms, u64 *reason)
893 {
894 	u32 i;
895 	u64 val64;
896 	u64 adapter_status;
897 	u64 vpath_mask;
898 	enum vxge_hw_status ret = VXGE_HW_OK;
899 
900 	val64 = readq(&hldev->common_reg->titan_general_int_status);
901 
902 	if (unlikely(!val64)) {
903 		/* not Titan interrupt	*/
904 		*reason	= 0;
905 		ret = VXGE_HW_ERR_WRONG_IRQ;
906 		goto exit;
907 	}
908 
909 	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
910 
911 		adapter_status = readq(&hldev->common_reg->adapter_status);
912 
913 		if (adapter_status == VXGE_HW_ALL_FOXES) {
914 
915 			__vxge_hw_device_handle_error(hldev,
916 				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
917 			*reason	= 0;
918 			ret = VXGE_HW_ERR_SLOT_FREEZE;
919 			goto exit;
920 		}
921 	}
922 
923 	hldev->stats.sw_dev_info_stats.total_intr_cnt++;
924 
925 	*reason	= val64;
926 
927 	vpath_mask = hldev->vpaths_deployed >>
928 				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
929 
930 	if (val64 &
931 	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
932 		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
933 
934 		return VXGE_HW_OK;
935 	}
936 
937 	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
938 
939 	if (unlikely(val64 &
940 			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
941 
942 		enum vxge_hw_status error_level = VXGE_HW_OK;
943 
944 		hldev->stats.sw_dev_err_stats.vpath_alarms++;
945 
946 		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
947 
948 			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
949 				continue;
950 
951 			ret = __vxge_hw_vpath_alarm_process(
952 				&hldev->virtual_paths[i], skip_alarms);
953 
954 			error_level = VXGE_HW_SET_LEVEL(ret, error_level);
955 
956 			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
957 				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
958 				break;
959 		}
960 
961 		ret = error_level;
962 	}
963 exit:
964 	return ret;
965 }
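/*
 * Illustrative sketch (not part of the driver): one plausible INTA ISR built
 * on the helpers above. "my_isr" is hypothetical and the ordering of mask,
 * processing, clear and unmask is only an assumption of typical usage.
 *
 *	static irqreturn_t my_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		u64 reason;
 *
 *		if (vxge_hw_device_begin_irq(hldev, 0, &reason) ==
 *						VXGE_HW_ERR_WRONG_IRQ)
 *			return IRQ_NONE;	// shared IRQ, not ours
 *
 *		vxge_hw_device_mask_all(hldev);
 *		// ... process the rings/fifos indicated by "reason" ...
 *		vxge_hw_device_clear_tx_rx(hldev);
 *		vxge_hw_device_unmask_all(hldev);
 *		return IRQ_HANDLED;
 *	}
 */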
966 
967 /**
968  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
969  * condition that has caused the Tx and RX interrupt.
970  * @hldev: HW device.
971  *
972  * Acknowledge (that is, clear) the condition that has caused
973  * the Tx and Rx interrupt.
974  * See also: vxge_hw_device_begin_irq(),
975  * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
976  */
977 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
978 {
979 
980 	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
981 	   (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
982 		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
983 				 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
984 				&hldev->common_reg->tim_int_status0);
985 	}
986 
987 	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
988 	   (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
989 		__vxge_hw_pio_mem_write32_upper(
990 				(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
991 				 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
992 				&hldev->common_reg->tim_int_status1);
993 	}
994 }
995 
996 /*
997  * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
998  * @channel: Channel
999  * @dtrh: Buffer to return the DTR pointer
1000  *
1001  * Allocates a dtr from the reserve array. If the reserve array is empty,
1002  * it swaps the reserve and free arrays.
1003  *
1004  */
1005 static enum vxge_hw_status
1006 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1007 {
1008 	void **tmp_arr;
1009 
1010 	if (channel->reserve_ptr - channel->reserve_top > 0) {
1011 _alloc_after_swap:
1012 		*dtrh =	channel->reserve_arr[--channel->reserve_ptr];
1013 
1014 		return VXGE_HW_OK;
1015 	}
1016 
1017 	/* switch between empty	and full arrays	*/
1018 
1019 	/* The idea behind such a design is that by keeping the free and reserve
1020 	 * arrays separate we effectively separate the irq and non-irq parts,
1021 	 * i.e. no additional locking is needed when we free a resource. */
1022 
1023 	if (channel->length - channel->free_ptr > 0) {
1024 
1025 		tmp_arr	= channel->reserve_arr;
1026 		channel->reserve_arr = channel->free_arr;
1027 		channel->free_arr = tmp_arr;
1028 		channel->reserve_ptr = channel->length;
1029 		channel->reserve_top = channel->free_ptr;
1030 		channel->free_ptr = channel->length;
1031 
1032 		channel->stats->reserve_free_swaps_cnt++;
1033 
1034 		goto _alloc_after_swap;
1035 	}
1036 
1037 	channel->stats->full_cnt++;
1038 
1039 	*dtrh =	NULL;
1040 	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1041 }
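/*
 * Worked illustration of the swap above, assuming length == 4: once every
 * reserved slot is consumed (reserve_ptr == reserve_top) and the completion
 * side has freed two descriptors (free_ptr == 2), the arrays are exchanged,
 * reserve_ptr becomes 4, reserve_top becomes 2 and free_ptr is reset to 4,
 * leaving 4 - 2 = 2 descriptors allocatable, all without any locking between
 * the irq (free) and non-irq (reserve) sides.
 */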
1042 
1043 /*
1044  * vxge_hw_channel_dtr_post - Post a dtr to the channel
1045  * @channelh: Channel
1046  * @dtrh: DTR pointer
1047  *
1048  * Posts a dtr to work array.
1049  *
1050  */
1051 static void
1052 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1053 {
1054 	vxge_assert(channel->work_arr[channel->post_index] == NULL);
1055 
1056 	channel->work_arr[channel->post_index++] = dtrh;
1057 
1058 	/* wrap-around */
1059 	if (channel->post_index	== channel->length)
1060 		channel->post_index = 0;
1061 }
1062 
1063 /*
1064  * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1065  * @channel: Channel
1066  * @dtr: Buffer to return the next completed DTR pointer
1067  *
1068  * Returns the next completed dtr without removing it from the work array
1069  *
1070  */
1071 void
1072 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1073 {
1074 	vxge_assert(channel->compl_index < channel->length);
1075 
1076 	*dtrh =	channel->work_arr[channel->compl_index];
1077 	prefetch(*dtrh);
1078 }
1079 
1080 /*
1081  * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1082  * @channel: Channel handle
1083  *
1084  * Removes the next completed dtr from work array
1085  *
1086  */
1087 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1088 {
1089 	channel->work_arr[channel->compl_index]	= NULL;
1090 
1091 	/* wrap-around */
1092 	if (++channel->compl_index == channel->length)
1093 		channel->compl_index = 0;
1094 
1095 	channel->stats->total_compl_cnt++;
1096 }
1097 
1098 /*
1099  * vxge_hw_channel_dtr_free - Frees a dtr
1100  * @channel: Channel handle
1101  * @dtr:  DTR pointer
1102  *
1103  * Returns the dtr to free array
1104  *
1105  */
1106 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1107 {
1108 	channel->free_arr[--channel->free_ptr] = dtrh;
1109 }
1110 
1111 /*
1112  * vxge_hw_channel_dtr_count
1113  * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1114  *
1115  * Retrieve number of DTRs available. This function cannot be called from
1116  * the data path. ring_initial_replenish() is the only user.
1117  */
1118 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1119 {
1120 	return (channel->reserve_ptr - channel->reserve_top) +
1121 		(channel->length - channel->free_ptr);
1122 }
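/*
 * The count above is the sum of descriptors still allocatable from the
 * reserve array (reserve_ptr - reserve_top) and descriptors already returned
 * to the free array but not yet swapped back (length - free_ptr); together
 * they cover every descriptor that is not currently posted in the work array.
 */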
1123 
1124 /**
1125  * vxge_hw_ring_rxd_reserve	- Reserve ring descriptor.
1126  * @ring: Handle to the ring object used for receive
1127  * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1128  * with a valid handle.
1129  *
1130  * Reserve Rx descriptor for subsequent filling-in by the driver
1131  * and posting on the corresponding channel (@channelh)
1132  * via vxge_hw_ring_rxd_post().
1133  *
1134  * Returns: VXGE_HW_OK - success.
1135  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1136  *
1137  */
1138 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1139 	void **rxdh)
1140 {
1141 	enum vxge_hw_status status;
1142 	struct __vxge_hw_channel *channel;
1143 
1144 	channel = &ring->channel;
1145 
1146 	status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1147 
1148 	if (status == VXGE_HW_OK) {
1149 		struct vxge_hw_ring_rxd_1 *rxdp =
1150 			(struct vxge_hw_ring_rxd_1 *)*rxdh;
1151 
1152 		rxdp->control_0	= rxdp->control_1 = 0;
1153 	}
1154 
1155 	return status;
1156 }
1157 
1158 /**
1159  * vxge_hw_ring_rxd_free - Free descriptor.
1160  * @ring: Handle to the ring object used for receive
1161  * @rxdh: Descriptor handle.
1162  *
1163  * Free	the reserved descriptor. This operation is "symmetrical" to
1164  * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1165  * lifecycle.
1166  *
1167  * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1168  * be:
1169  *
1170  * - reserved (vxge_hw_ring_rxd_reserve);
1171  *
1172  * - posted	(vxge_hw_ring_rxd_post);
1173  *
1174  * - completed (vxge_hw_ring_rxd_next_completed);
1175  *
1176  * - and recycled again	(vxge_hw_ring_rxd_free).
1177  *
1178  * For alternative state transitions and more details please refer to
1179  * the design doc.
1180  *
1181  */
1182 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1183 {
1184 	struct __vxge_hw_channel *channel;
1185 
1186 	channel = &ring->channel;
1187 
1188 	vxge_hw_channel_dtr_free(channel, rxdh);
1189 
1190 }
1191 
1192 /**
1193  * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1194  * @ring: Handle to the ring object used for receive
1195  * @rxdh: Descriptor handle.
1196  *
1197  * This routine prepares a rxd and posts
1198  */
1199 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1200 {
1201 	struct __vxge_hw_channel *channel;
1202 
1203 	channel = &ring->channel;
1204 
1205 	vxge_hw_channel_dtr_post(channel, rxdh);
1206 }
1207 
1208 /**
1209  * vxge_hw_ring_rxd_post_post - Process rxd after post.
1210  * @ring: Handle to the ring object used for receive
1211  * @rxdh: Descriptor handle.
1212  *
1213  * Processes rxd after post
1214  */
1215 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1216 {
1217 	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1218 	struct __vxge_hw_channel *channel;
1219 
1220 	channel = &ring->channel;
1221 
1222 	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1223 
1224 	if (ring->stats->common_stats.usage_cnt > 0)
1225 		ring->stats->common_stats.usage_cnt--;
1226 }
1227 
1228 /**
1229  * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1230  * @ring: Handle to the ring object used for receive
1231  * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1232  *
1233  * Post	descriptor on the ring.
1234  * Prior to posting the	descriptor should be filled in accordance with
1235  * Host/Titan interface specification for a given service (LL, etc.).
1236  *
1237  */
1238 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1239 {
1240 	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1241 	struct __vxge_hw_channel *channel;
1242 
1243 	channel = &ring->channel;
1244 
1245 	wmb();
1246 	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1247 
1248 	vxge_hw_channel_dtr_post(channel, rxdh);
1249 
1250 	if (ring->stats->common_stats.usage_cnt > 0)
1251 		ring->stats->common_stats.usage_cnt--;
1252 }
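/*
 * Illustrative sketch (not part of the driver): the typical RX replenish step
 * built on the ring APIs above. Attaching the DMA-mapped receive buffer to
 * the descriptor is driver-specific and only hinted at here.
 *
 *	void *rxdh;
 *
 *	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		// ... set the buffer pointer/size in the reserved rxd ...
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */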
1253 
1254 /**
1255  * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1256  * @ring: Handle to the ring object used for receive
1257  * @rxdh: Descriptor handle.
1258  *
1259  * Processes rxd after post with memory barrier.
1260  */
1261 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1262 {
1263 	wmb();
1264 	vxge_hw_ring_rxd_post_post(ring, rxdh);
1265 }
1266 
1267 /**
1268  * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1269  * @ring: Handle to the ring object used for receive
1270  * @rxdh: Descriptor handle. Returned by HW.
1271  * @t_code:	Transfer code, as per Titan User Guide,
1272  *	 Receive Descriptor Format. Returned by HW.
1273  *
1274  * Retrieve the	_next_ completed descriptor.
1275  * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
1276  * driver of new completed descriptors. After that
1277  * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
1278  * completions (the very first completion is passed by HW via
1279  * vxge_hw_ring_callback_f).
1280  *
1281  * Implementation-wise, the driver is free to call
1282  * vxge_hw_ring_rxd_next_completed either immediately from inside the
1283  * ring callback, or in a deferred fashion and separate (from HW)
1284  * context.
1285  *
1286  * Non-zero @t_code means failure to fill-in receive buffer(s)
1287  * of the descriptor.
1288  * For instance, parity	error detected during the data transfer.
1289  * In this case	Titan will complete the descriptor and indicate
1290  * to the host that the received data is not to be used.
1291  * For details please refer to Titan User Guide.
1292  *
1293  * Returns: VXGE_HW_OK - success.
1294  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1295  * are currently available for processing.
1296  *
1297  * See also: vxge_hw_ring_callback_f{},
1298  * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
1299  */
1300 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1301 	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1302 {
1303 	struct __vxge_hw_channel *channel;
1304 	struct vxge_hw_ring_rxd_1 *rxdp;
1305 	enum vxge_hw_status status = VXGE_HW_OK;
1306 	u64 control_0, own;
1307 
1308 	channel = &ring->channel;
1309 
1310 	vxge_hw_channel_dtr_try_complete(channel, rxdh);
1311 
1312 	rxdp = *rxdh;
1313 	if (rxdp == NULL) {
1314 		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1315 		goto exit;
1316 	}
1317 
1318 	control_0 = rxdp->control_0;
1319 	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1320 	*t_code	= (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1321 
1322 	/* check whether it is not the end */
1323 	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1324 
1325 		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
1326 				0);
1327 
1328 		++ring->cmpl_cnt;
1329 		vxge_hw_channel_dtr_complete(channel);
1330 
1331 		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1332 
1333 		ring->stats->common_stats.usage_cnt++;
1334 		if (ring->stats->common_stats.usage_max <
1335 				ring->stats->common_stats.usage_cnt)
1336 			ring->stats->common_stats.usage_max =
1337 				ring->stats->common_stats.usage_cnt;
1338 
1339 		status = VXGE_HW_OK;
1340 		goto exit;
1341 	}
1342 
1343 	/* reset it. since we don't want to return
1344 	 * garbage to the driver */
1345 	*rxdh =	NULL;
1346 	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1347 exit:
1348 	return status;
1349 }
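/*
 * Illustrative sketch (not part of the driver): draining RX completions with
 * the helpers above. Delivery to the network stack and buffer recycling are
 * driver-specific and elided.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *							VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) != VXGE_HW_OK) {
 *			vxge_hw_ring_rxd_free(ring, rxdh);
 *			continue;
 *		}
 *		// ... hand the buffer to the stack, then re-fill and re-post ...
 *	}
 */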
1350 
1351 /**
1352  * vxge_hw_ring_handle_tcode - Handle transfer code.
1353  * @ring: Handle to the ring object used for receive
1354  * @rxdh: Descriptor handle.
1355  * @t_code: One of the enumerated (and documented in the Titan user guide)
1356  * "transfer codes".
1357  *
1358  * Handle descriptor's transfer code. The latter comes with each completed
1359  * descriptor.
1360  *
1361  * Returns: one of the enum vxge_hw_status{} enumerated types.
1362  * VXGE_HW_OK			- for success.
1363  * VXGE_HW_ERR_CRITICAL         - when encounters critical error.
1364  */
1365 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1366 	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1367 {
1368 	struct __vxge_hw_channel *channel;
1369 	enum vxge_hw_status status = VXGE_HW_OK;
1370 
1371 	channel = &ring->channel;
1372 
1373 	/* If the t_code is not supported and if the
1374 	 * t_code is other than 0x5 (unparseable packet
1375 	 * such as an unknown IPv6 header), drop it !!!
1376 	 */
1377 
1378 	if (t_code ==  VXGE_HW_RING_T_CODE_OK ||
1379 		t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1380 		status = VXGE_HW_OK;
1381 		goto exit;
1382 	}
1383 
1384 	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1385 		status = VXGE_HW_ERR_INVALID_TCODE;
1386 		goto exit;
1387 	}
1388 
1389 	ring->stats->rxd_t_code_err_cnt[t_code]++;
1390 exit:
1391 	return status;
1392 }
1393 
1394 /**
1395  * __vxge_hw_non_offload_db_post - Post non offload doorbell
1396  *
1397  * @fifo: fifohandle
1398  * @txdl_ptr: The starting location of the TxDL in host memory
1399  * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1400  * @no_snoop: No snoop flags
1401  *
1402  * This function posts a non-offload doorbell to doorbell FIFO
1403  *
1404  */
1405 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1406 	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1407 {
1408 	struct __vxge_hw_channel *channel;
1409 
1410 	channel = &fifo->channel;
1411 
1412 	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1413 		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1414 		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1415 		&fifo->nofl_db->control_0);
1416 
1417 	mmiowb();
1418 
1419 	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1420 
1421 	mmiowb();
1422 }
1423 
1424 /**
1425  * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1426  * the fifo
1427  * @fifoh: Handle to the fifo object used for non offload send
1428  */
1429 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1430 {
1431 	return vxge_hw_channel_dtr_count(&fifoh->channel);
1432 }
1433 
1434 /**
1435  * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1436  * @fifoh: Handle to the fifo object used for non offload send
1437  * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1438  *        with a valid handle.
1439  * @txdl_priv: Buffer to return the pointer to per txdl space
1440  *
1441  * Reserve a single TxDL (that is, fifo descriptor)
1442  * for subsequent filling-in by the driver
1443  * and posting on the corresponding channel (@channelh)
1444  * via vxge_hw_fifo_txdl_post().
1445  *
1446  * Note: it is the responsibility of driver to reserve multiple descriptors
1447  * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1448  * carries up to configured number (fifo.max_frags) of contiguous buffers.
1449  *
1450  * Returns: VXGE_HW_OK - success;
1451  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1452  *
1453  */
1454 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1455 	struct __vxge_hw_fifo *fifo,
1456 	void **txdlh, void **txdl_priv)
1457 {
1458 	struct __vxge_hw_channel *channel;
1459 	enum vxge_hw_status status;
1460 	int i;
1461 
1462 	channel = &fifo->channel;
1463 
1464 	status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1465 
1466 	if (status == VXGE_HW_OK) {
1467 		struct vxge_hw_fifo_txd *txdp =
1468 			(struct vxge_hw_fifo_txd *)*txdlh;
1469 		struct __vxge_hw_fifo_txdl_priv *priv;
1470 
1471 		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1472 
1473 		/* reset the TxDL's private */
1474 		priv->align_dma_offset = 0;
1475 		priv->align_vaddr_start = priv->align_vaddr;
1476 		priv->align_used_frags = 0;
1477 		priv->frags = 0;
1478 		priv->alloc_frags = fifo->config->max_frags;
1479 		priv->next_txdl_priv = NULL;
1480 
1481 		*txdl_priv = (void *)(size_t)txdp->host_control;
1482 
1483 		for (i = 0; i < fifo->config->max_frags; i++) {
1484 			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1485 			txdp->control_0 = txdp->control_1 = 0;
1486 		}
1487 	}
1488 
1489 	return status;
1490 }
1491 
1492 /**
1493  * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1494  * descriptor.
1495  * @fifo: Handle to the fifo object used for non offload send
1496  * @txdlh: Descriptor handle.
1497  * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1498  *            (of buffers).
1499  * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1500  * @size: Size of the data buffer (in bytes).
1501  *
1502  * This API is part of the preparation of the transmit descriptor for posting
1503  * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1504  * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1505  * All three APIs fill in the fields of the fifo descriptor,
1506  * in accordance with the Titan specification.
1507  *
1508  */
1509 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1510 				  void *txdlh, u32 frag_idx,
1511 				  dma_addr_t dma_pointer, u32 size)
1512 {
1513 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1514 	struct vxge_hw_fifo_txd *txdp, *txdp_last;
1515 	struct __vxge_hw_channel *channel;
1516 
1517 	channel = &fifo->channel;
1518 
1519 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1520 	txdp = (struct vxge_hw_fifo_txd *)txdlh  +  txdl_priv->frags;
1521 
1522 	if (frag_idx != 0)
1523 		txdp->control_0 = txdp->control_1 = 0;
1524 	else {
1525 		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1526 			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1527 		txdp->control_1 |= fifo->interrupt_type;
1528 		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1529 			fifo->tx_intr_num);
1530 		if (txdl_priv->frags) {
1531 			txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +
1532 			(txdl_priv->frags - 1);
1533 			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1534 				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1535 		}
1536 	}
1537 
1538 	vxge_assert(frag_idx < txdl_priv->alloc_frags);
1539 
1540 	txdp->buffer_pointer = (u64)dma_pointer;
1541 	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1542 	fifo->stats->total_buffers++;
1543 	txdl_priv->frags++;
1544 }
1545 
1546 /**
1547  * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1548  * @fifo: Handle to the fifo object used for non offload send
1549  * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1550  * @frags: Number of contiguous buffers that are part of a single
1551  *         transmit operation.
1552  *
1553  * Post descriptor on the 'fifo' type channel for transmission.
1554  * Prior to posting the descriptor should be filled in accordance with
1555  * Host/Titan interface specification for a given service (LL, etc.).
1556  *
1557  */
1558 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1559 {
1560 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1561 	struct vxge_hw_fifo_txd *txdp_last;
1562 	struct vxge_hw_fifo_txd *txdp_first;
1563 	struct __vxge_hw_channel *channel;
1564 
1565 	channel = &fifo->channel;
1566 
1567 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1568 	txdp_first = txdlh;
1569 
1570 	txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +  (txdl_priv->frags - 1);
1571 	txdp_last->control_0 |=
1572 	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1573 	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1574 
1575 	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1576 
1577 	__vxge_hw_non_offload_db_post(fifo,
1578 		(u64)txdl_priv->dma_addr,
1579 		txdl_priv->frags - 1,
1580 		fifo->no_snoop_bits);
1581 
1582 	fifo->stats->total_posts++;
1583 	fifo->stats->common_stats.usage_cnt++;
1584 	if (fifo->stats->common_stats.usage_max <
1585 		fifo->stats->common_stats.usage_cnt)
1586 		fifo->stats->common_stats.usage_max =
1587 			fifo->stats->common_stats.usage_cnt;
1588 }
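/*
 * Illustrative sketch (not part of the driver): posting a two-fragment
 * transmit with the fifo APIs above. "dma0"/"dma1" and "len0"/"len1" are
 * hypothetical placeholders for a DMA-mapped skb and its fragment.
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) == VXGE_HW_OK) {
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma0, len0);
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 1, dma1, len1);
 *		vxge_hw_fifo_txdl_post(fifo, txdlh);
 *	}
 */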
1589 
1590 /**
1591  * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1592  * @fifo: Handle to the fifo object used for non offload send
1593  * @txdlh: Descriptor handle. Returned by HW.
1594  * @t_code: Transfer code, as per Titan User Guide,
1595  *          Transmit Descriptor Format.
1596  *          Returned by HW.
1597  *
1598  * Retrieve the _next_ completed descriptor.
1599  * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1600  * driver of new completed descriptors. After that
1601  * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1602  * completions (the very first completion is passed by HW via
1603  * vxge_hw_channel_callback_f).
1604  *
1605  * Implementation-wise, the driver is free to call
1606  * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1607  * channel callback, or in a deferred fashion and separate (from HW)
1608  * context.
1609  *
1610  * Non-zero @t_code means failure to process the descriptor.
1611  * The failure could happen, for instance, when the link is
1612  * down, in which case Titan completes the descriptor because it
1613  * is not able to send the data out.
1614  *
1615  * For details please refer to Titan User Guide.
1616  *
1617  * Returns: VXGE_HW_OK - success.
1618  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1619  * are currently available for processing.
1620  *
1621  */
1622 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1623 	struct __vxge_hw_fifo *fifo, void **txdlh,
1624 	enum vxge_hw_fifo_tcode *t_code)
1625 {
1626 	struct __vxge_hw_channel *channel;
1627 	struct vxge_hw_fifo_txd *txdp;
1628 	enum vxge_hw_status status = VXGE_HW_OK;
1629 
1630 	channel = &fifo->channel;
1631 
1632 	vxge_hw_channel_dtr_try_complete(channel, txdlh);
1633 
1634 	txdp = *txdlh;
1635 	if (txdp == NULL) {
1636 		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1637 		goto exit;
1638 	}
1639 
1640 	/* check whether host owns it */
1641 	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1642 
1643 		vxge_assert(txdp->host_control != 0);
1644 
1645 		vxge_hw_channel_dtr_complete(channel);
1646 
1647 		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1648 
1649 		if (fifo->stats->common_stats.usage_cnt > 0)
1650 			fifo->stats->common_stats.usage_cnt--;
1651 
1652 		status = VXGE_HW_OK;
1653 		goto exit;
1654 	}
1655 
1656 	/* no more completions */
1657 	*txdlh = NULL;
1658 	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1659 exit:
1660 	return status;
1661 }
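/*
 * Illustrative sketch (not part of the driver): reclaiming transmitted
 * descriptors with the helpers above, assuming VXGE_HW_FIFO_T_CODE_OK marks
 * a clean completion. Unmapping and skb freeing are driver-specific and
 * elided.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *							VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		// ... unmap fragments and free the skb ...
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */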
1662 
1663 /**
1664  * vxge_hw_fifo_handle_tcode - Handle transfer code.
1665  * @fifo: Handle to the fifo object used for non offload send
1666  * @txdlh: Descriptor handle.
1667  * @t_code: One of the enumerated (and documented in the Titan user guide)
1668  *          "transfer codes".
1669  *
1670  * Handle descriptor's transfer code. The latter comes with each completed
1671  * descriptor.
1672  *
1673  * Returns: one of the enum vxge_hw_status{} enumerated types.
1674  * VXGE_HW_OK - for success.
1675  * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1676  */
1677 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1678 					      void *txdlh,
1679 					      enum vxge_hw_fifo_tcode t_code)
1680 {
1681 	struct __vxge_hw_channel *channel;
1682 
1683 	enum vxge_hw_status status = VXGE_HW_OK;
1684 	channel = &fifo->channel;
1685 
1686 	if ((t_code & 0x7) > 0x4) {
1687 		status = VXGE_HW_ERR_INVALID_TCODE;
1688 		goto exit;
1689 	}
1690 
1691 	fifo->stats->txd_t_code_err_cnt[t_code]++;
1692 exit:
1693 	return status;
1694 }
1695 
1696 /**
1697  * vxge_hw_fifo_txdl_free - Free descriptor.
1698  * @fifo: Handle to the fifo object used for non offload send
1699  * @txdlh: Descriptor handle.
1700  *
1701  * Free the reserved descriptor. This operation is "symmetrical" to
1702  * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1703  * lifecycle.
1704  *
1705  * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor can again
1706  * be:
1707  *
1708  * - reserved (vxge_hw_fifo_txdl_reserve);
1709  *
1710  * - posted (vxge_hw_fifo_txdl_post);
1711  *
1712  * - completed (vxge_hw_fifo_txdl_next_completed);
1713  *
1714  * - and recycled again (vxge_hw_fifo_txdl_free).
1715  *
1716  * For alternative state transitions and more details please refer to
1717  * the design doc.
1718  *
1719  */
vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo * fifo,void * txdlh)1720 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1721 {
1722 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1723 	u32 max_frags;
1724 	struct __vxge_hw_channel *channel;
1725 
1726 	channel = &fifo->channel;
1727 
1728 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1729 			(struct vxge_hw_fifo_txd *)txdlh);
1730 
1731 	max_frags = fifo->config->max_frags;
1732 
1733 	vxge_hw_channel_dtr_free(channel, txdlh);
1734 }
1735 
1736 /**
1737  * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1738  *               to the MAC address table.
1739  * @vp: Vpath handle.
1740  * @macaddr: MAC address to be added for this vpath into the list
1741  * @macaddr_mask: MAC address mask for macaddr
1742  * @duplicate_mode: Duplicate MAC address add mode. Please see
1743  *             enum vxge_hw_vpath_mac_addr_add_mode{}
1744  *
1745  * Adds the given mac address and mac address mask into the list for this
1746  * vpath.
1747  * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1748  * vxge_hw_vpath_mac_addr_get_next
1749  *
1750  */
1751 enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(struct __vxge_hw_vpath_handle * vp,u8 (macaddr)[ETH_ALEN],u8 (macaddr_mask)[ETH_ALEN],enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)1752 vxge_hw_vpath_mac_addr_add(
1753 	struct __vxge_hw_vpath_handle *vp,
1754 	u8 (macaddr)[ETH_ALEN],
1755 	u8 (macaddr_mask)[ETH_ALEN],
1756 	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1757 {
1758 	u32 i;
1759 	u64 data1 = 0ULL;
1760 	u64 data2 = 0ULL;
1761 	enum vxge_hw_status status = VXGE_HW_OK;
1762 
1763 	if (vp == NULL) {
1764 		status = VXGE_HW_ERR_INVALID_HANDLE;
1765 		goto exit;
1766 	}
1767 
1768 	for (i = 0; i < ETH_ALEN; i++) {
1769 		data1 <<= 8;
1770 		data1 |= (u8)macaddr[i];
1771 
1772 		data2 <<= 8;
1773 		data2 |= (u8)macaddr_mask[i];
1774 	}
1775 
1776 	switch (duplicate_mode) {
1777 	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1778 		i = 0;
1779 		break;
1780 	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1781 		i = 1;
1782 		break;
1783 	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1784 		i = 2;
1785 		break;
1786 	default:
1787 		i = 0;
1788 		break;
1789 	}
1790 
1791 	status = __vxge_hw_vpath_rts_table_set(vp,
1792 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1793 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1794 			0,
1795 			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1796 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1797 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1798 exit:
1799 	return status;
1800 }
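
/*
 * Illustrative sketch only: adding one unicast address to a vpath's DA
 * table.  "my_vp" and the all-ones mask are placeholders of this sketch;
 * the exact mask semantics are defined by the Titan User Guide.
 *
 *	u8 addr[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *	u8 mask[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_add(my_vp, addr, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */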
1801 
1802 /**
1803  * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1804  *               from the MAC address table.
1805  * @vp: Vpath handle.
1806  * @macaddr: First MAC address entry for this vpath in the list
1807  * @macaddr_mask: MAC address mask for macaddr
1808  *
1809  * Returns the first mac address and mac address mask in the list for this
1810  * vpath.
1811  * see also: vxge_hw_vpath_mac_addr_get_next
1812  *
1813  */
1814 enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(struct __vxge_hw_vpath_handle * vp,u8 (macaddr)[ETH_ALEN],u8 (macaddr_mask)[ETH_ALEN])1815 vxge_hw_vpath_mac_addr_get(
1816 	struct __vxge_hw_vpath_handle *vp,
1817 	u8 (macaddr)[ETH_ALEN],
1818 	u8 (macaddr_mask)[ETH_ALEN])
1819 {
1820 	u32 i;
1821 	u64 data1 = 0ULL;
1822 	u64 data2 = 0ULL;
1823 	enum vxge_hw_status status = VXGE_HW_OK;
1824 
1825 	if (vp == NULL) {
1826 		status = VXGE_HW_ERR_INVALID_HANDLE;
1827 		goto exit;
1828 	}
1829 
1830 	status = __vxge_hw_vpath_rts_table_get(vp,
1831 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1832 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1833 			0, &data1, &data2);
1834 
1835 	if (status != VXGE_HW_OK)
1836 		goto exit;
1837 
1838 	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1839 
1840 	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1841 
1842 	for (i = ETH_ALEN; i > 0; i--) {
1843 		macaddr[i-1] = (u8)(data1 & 0xFF);
1844 		data1 >>= 8;
1845 
1846 		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1847 		data2 >>= 8;
1848 	}
1849 exit:
1850 	return status;
1851 }
1852 
1853 /**
1854  * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1855  * vpath
1856  *               from the MAC address table.
1857  * @vp: Vpath handle.
1858  * @macaddr: Next MAC address entry for this vpath in the list
1859  * @macaddr_mask: MAC address mask for macaddr
1860  *
1861  * Returns the next mac address and mac address mask in the list for this
1862  * vpath.
1863  * see also: vxge_hw_vpath_mac_addr_get
1864  *
1865  */
1866 enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(struct __vxge_hw_vpath_handle * vp,u8 (macaddr)[ETH_ALEN],u8 (macaddr_mask)[ETH_ALEN])1867 vxge_hw_vpath_mac_addr_get_next(
1868 	struct __vxge_hw_vpath_handle *vp,
1869 	u8 (macaddr)[ETH_ALEN],
1870 	u8 (macaddr_mask)[ETH_ALEN])
1871 {
1872 	u32 i;
1873 	u64 data1 = 0ULL;
1874 	u64 data2 = 0ULL;
1875 	enum vxge_hw_status status = VXGE_HW_OK;
1876 
1877 	if (vp == NULL) {
1878 		status = VXGE_HW_ERR_INVALID_HANDLE;
1879 		goto exit;
1880 	}
1881 
1882 	status = __vxge_hw_vpath_rts_table_get(vp,
1883 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1884 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1885 			0, &data1, &data2);
1886 
1887 	if (status != VXGE_HW_OK)
1888 		goto exit;
1889 
1890 	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1891 
1892 	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1893 
1894 	for (i = ETH_ALEN; i > 0; i--) {
1895 		macaddr[i-1] = (u8)(data1 & 0xFF);
1896 		data1 >>= 8;
1897 
1898 		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1899 		data2 >>= 8;
1900 	}
1901 
1902 exit:
1903 	return status;
1904 }
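
/*
 * Illustrative sketch only: walking every entry of a vpath's MAC address
 * table with the get/get_next pair.  "my_vp" is a placeholder handle.
 *
 *	u8 addr[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(my_vp, addr, mask);
 *	while (status == VXGE_HW_OK) {
 *		...consume addr/mask...
 *		status = vxge_hw_vpath_mac_addr_get_next(my_vp, addr, mask);
 *	}
 */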
1905 
1906 /**
1907  * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1908  *               from the MAC address table.
1909  * @vp: Vpath handle.
1910  * @macaddr: MAC address to be deleted for this vpath from the list
1911  * @macaddr_mask: MAC address mask for macaddr
1912  *
1913  * Deletes the given mac address and mac address mask from the list for this
1914  * vpath.
1915  * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1916  * vxge_hw_vpath_mac_addr_get_next
1917  *
1918  */
1919 enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(struct __vxge_hw_vpath_handle * vp,u8 (macaddr)[ETH_ALEN],u8 (macaddr_mask)[ETH_ALEN])1920 vxge_hw_vpath_mac_addr_delete(
1921 	struct __vxge_hw_vpath_handle *vp,
1922 	u8 (macaddr)[ETH_ALEN],
1923 	u8 (macaddr_mask)[ETH_ALEN])
1924 {
1925 	u32 i;
1926 	u64 data1 = 0ULL;
1927 	u64 data2 = 0ULL;
1928 	enum vxge_hw_status status = VXGE_HW_OK;
1929 
1930 	if (vp == NULL) {
1931 		status = VXGE_HW_ERR_INVALID_HANDLE;
1932 		goto exit;
1933 	}
1934 
1935 	for (i = 0; i < ETH_ALEN; i++) {
1936 		data1 <<= 8;
1937 		data1 |= (u8)macaddr[i];
1938 
1939 		data2 <<= 8;
1940 		data2 |= (u8)macaddr_mask[i];
1941 	}
1942 
1943 	status = __vxge_hw_vpath_rts_table_set(vp,
1944 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1945 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1946 			0,
1947 			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1948 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1949 exit:
1950 	return status;
1951 }
1952 
1953 /**
1954  * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1955  *               to the vlan id table.
1956  * @vp: Vpath handle.
1957  * @vid: vlan id to be added for this vpath into the list
1958  *
1959  * Adds the given vlan id into the list for this vpath.
1960  * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1961  * vxge_hw_vpath_vid_get_next
1962  *
1963  */
1964 enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle * vp,u64 vid)1965 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1966 {
1967 	enum vxge_hw_status status = VXGE_HW_OK;
1968 
1969 	if (vp == NULL) {
1970 		status = VXGE_HW_ERR_INVALID_HANDLE;
1971 		goto exit;
1972 	}
1973 
1974 	status = __vxge_hw_vpath_rts_table_set(vp,
1975 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1976 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1977 			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1978 exit:
1979 	return status;
1980 }
1981 
1982 /**
1983  * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1984  *               from the vlan id table.
1985  * @vp: Vpath handle.
1986  * @vid: Buffer to return vlan id
1987  *
1988  * Returns the first vlan id in the list for this vpath.
1989  * see also: vxge_hw_vpath_vid_get_next
1990  *
1991  */
1992 enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle * vp,u64 * vid)1993 vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1994 {
1995 	u64 data;
1996 	enum vxge_hw_status status = VXGE_HW_OK;
1997 
1998 	if (vp == NULL) {
1999 		status = VXGE_HW_ERR_INVALID_HANDLE;
2000 		goto exit;
2001 	}
2002 
2003 	status = __vxge_hw_vpath_rts_table_get(vp,
2004 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
2005 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
2006 			0, vid, &data);
2007 
2008 	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
2009 exit:
2010 	return status;
2011 }
2012 
2013 /**
2014  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
2015  *               from the vlan id table.
2016  * @vp: Vpath handle.
2017  * @vid: vlan id to be deleted for this vpath from the list
2018  *
2019  * Deletes the given vlan id from the list for this vpath.
2020  * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
2021  * vxge_hw_vpath_vid_get_next
2022  *
2023  */
2024 enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle * vp,u64 vid)2025 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
2026 {
2027 	enum vxge_hw_status status = VXGE_HW_OK;
2028 
2029 	if (vp == NULL) {
2030 		status = VXGE_HW_ERR_INVALID_HANDLE;
2031 		goto exit;
2032 	}
2033 
2034 	status = __vxge_hw_vpath_rts_table_set(vp,
2035 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
2036 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
2037 			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
2038 exit:
2039 	return status;
2040 }
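
/*
 * Illustrative sketch only: mirroring a VLAN filter change onto a vpath.
 * "my_vp", "add" and "vid" are placeholders supplied by the caller.
 *
 *	if (add)
 *		status = vxge_hw_vpath_vid_add(my_vp, vid);
 *	else
 *		status = vxge_hw_vpath_vid_delete(my_vp, vid);
 */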
2041 
2042 /**
2043  * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
2044  * @vp: Vpath handle.
2045  *
2046  * Enable promiscuous mode of Titan-e operation.
2047  *
2048  * See also: vxge_hw_vpath_promisc_disable().
2049  */
vxge_hw_vpath_promisc_enable(struct __vxge_hw_vpath_handle * vp)2050 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2051 			struct __vxge_hw_vpath_handle *vp)
2052 {
2053 	u64 val64;
2054 	struct __vxge_hw_virtualpath *vpath;
2055 	enum vxge_hw_status status = VXGE_HW_OK;
2056 
2057 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2058 		status = VXGE_HW_ERR_INVALID_HANDLE;
2059 		goto exit;
2060 	}
2061 
2062 	vpath = vp->vpath;
2063 
2064 	/* Enable promiscuous mode for function 0 only */
2065 	if (!(vpath->hldev->access_rights &
2066 		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
2067 		return VXGE_HW_OK;
2068 
2069 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2070 
2071 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2072 
2073 		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2074 			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2075 			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2076 			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2077 
2078 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2079 	}
2080 exit:
2081 	return status;
2082 }
2083 
2084 /**
2085  * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2086  * @vp: Vpath handle.
2087  *
2088  * Disable promiscuous mode of Titan-e operation.
2089  *
2090  * See also: vxge_hw_vpath_promisc_enable().
2091  */
vxge_hw_vpath_promisc_disable(struct __vxge_hw_vpath_handle * vp)2092 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2093 			struct __vxge_hw_vpath_handle *vp)
2094 {
2095 	u64 val64;
2096 	struct __vxge_hw_virtualpath *vpath;
2097 	enum vxge_hw_status status = VXGE_HW_OK;
2098 
2099 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2100 		status = VXGE_HW_ERR_INVALID_HANDLE;
2101 		goto exit;
2102 	}
2103 
2104 	vpath = vp->vpath;
2105 
2106 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2107 
2108 	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2109 
2110 		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2111 			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2112 			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2113 
2114 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2115 	}
2116 exit:
2117 	return status;
2118 }
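
/*
 * Illustrative sketch only: following the netdev IFF_PROMISC flag.
 * "my_vp" and "netdev" are placeholders of this sketch.
 *
 *	if (netdev->flags & IFF_PROMISC)
 *		status = vxge_hw_vpath_promisc_enable(my_vp);
 *	else
 *		status = vxge_hw_vpath_promisc_disable(my_vp);
 */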
2119 
2120 /*
2121  * vxge_hw_vpath_bcast_enable - Enable broadcast
2122  * @vp: Vpath handle.
2123  *
2124  * Enable receiving broadcasts.
2125  */
vxge_hw_vpath_bcast_enable(struct __vxge_hw_vpath_handle * vp)2126 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2127 			struct __vxge_hw_vpath_handle *vp)
2128 {
2129 	u64 val64;
2130 	struct __vxge_hw_virtualpath *vpath;
2131 	enum vxge_hw_status status = VXGE_HW_OK;
2132 
2133 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2134 		status = VXGE_HW_ERR_INVALID_HANDLE;
2135 		goto exit;
2136 	}
2137 
2138 	vpath = vp->vpath;
2139 
2140 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2141 
2142 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2143 		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2144 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2145 	}
2146 exit:
2147 	return status;
2148 }
2149 
2150 /**
2151  * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2152  * @vp: Vpath handle.
2153  *
2154  * Enable Titan-e multicast addresses.
2155  * Returns: VXGE_HW_OK on success.
2156  *
2157  */
vxge_hw_vpath_mcast_enable(struct __vxge_hw_vpath_handle * vp)2158 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2159 			struct __vxge_hw_vpath_handle *vp)
2160 {
2161 	u64 val64;
2162 	struct __vxge_hw_virtualpath *vpath;
2163 	enum vxge_hw_status status = VXGE_HW_OK;
2164 
2165 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2166 		status = VXGE_HW_ERR_INVALID_HANDLE;
2167 		goto exit;
2168 	}
2169 
2170 	vpath = vp->vpath;
2171 
2172 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2173 
2174 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2175 		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2176 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2177 	}
2178 exit:
2179 	return status;
2180 }
2181 
2182 /**
2183  * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
2184  * @vp: Vpath handle.
2185  *
2186  * Disable Titan-e multicast addresses.
2187  * Returns: VXGE_HW_OK - success.
2188  * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2189  *
2190  */
2191 enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle * vp)2192 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2193 {
2194 	u64 val64;
2195 	struct __vxge_hw_virtualpath *vpath;
2196 	enum vxge_hw_status status = VXGE_HW_OK;
2197 
2198 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2199 		status = VXGE_HW_ERR_INVALID_HANDLE;
2200 		goto exit;
2201 	}
2202 
2203 	vpath = vp->vpath;
2204 
2205 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2206 
2207 	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2208 		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2209 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2210 	}
2211 exit:
2212 	return status;
2213 }
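
/*
 * Illustrative sketch only: enabling broadcasts unconditionally and
 * following IFF_ALLMULTI for multicasts.  "my_vp" and "netdev" are
 * placeholders of this sketch.
 *
 *	vxge_hw_vpath_bcast_enable(my_vp);
 *	if (netdev->flags & IFF_ALLMULTI)
 *		status = vxge_hw_vpath_mcast_enable(my_vp);
 *	else
 *		status = vxge_hw_vpath_mcast_disable(my_vp);
 */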
2214 
2215 /*
2216  * vxge_hw_vpath_alarm_process - Process Alarms.
2217  * @vpath: Virtual Path.
2218  * @skip_alarms: Do not clear the alarms
2219  *
2220  * Process vpath alarms.
2221  *
2222  */
vxge_hw_vpath_alarm_process(struct __vxge_hw_vpath_handle * vp,u32 skip_alarms)2223 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2224 			struct __vxge_hw_vpath_handle *vp,
2225 			u32 skip_alarms)
2226 {
2227 	enum vxge_hw_status status = VXGE_HW_OK;
2228 
2229 	if (vp == NULL) {
2230 		status = VXGE_HW_ERR_INVALID_HANDLE;
2231 		goto exit;
2232 	}
2233 
2234 	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2235 exit:
2236 	return status;
2237 }
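
/*
 * Illustrative sketch only: processing alarms from an alarm/error
 * interrupt path.  "my_vp" is a placeholder handle; a non-zero
 * skip_alarms leaves the alarm registers uncleared.
 *
 *	status = vxge_hw_vpath_alarm_process(my_vp, 0);
 */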
2238 
2239 /**
2240  * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2241  *                            alarms
2242  * @vp: Virtual Path handle.
2243  * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2244  *             interrupts (can be repeated). If the fifo or ring is not
2245  *             enabled, the MSIX vector for it should be set to 0
2246  * @alarm_msix_id: MSIX vector for alarm.
2247  *
2248  * This API will associate the given MSIX vector numbers with the four TIM
2249  * interrupts and the alarm interrupt.
2250  */
2251 void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle * vp,int * tim_msix_id,int alarm_msix_id)2252 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2253 		       int alarm_msix_id)
2254 {
2255 	u64 val64;
2256 	struct __vxge_hw_virtualpath *vpath = vp->vpath;
2257 	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2258 	u32 vp_id = vp->vpath->vp_id;
2259 
2260 	val64 =  VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2261 		  (vp_id * 4) + tim_msix_id[0]) |
2262 		 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2263 		  (vp_id * 4) + tim_msix_id[1]);
2264 
2265 	writeq(val64, &vp_reg->interrupt_cfg0);
2266 
2267 	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2268 			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2269 			&vp_reg->interrupt_cfg2);
2270 
2271 	if (vpath->hldev->config.intr_mode ==
2272 					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2273 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2274 				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2275 				0, 32), &vp_reg->one_shot_vect0_en);
2276 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2277 				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2278 				0, 32), &vp_reg->one_shot_vect1_en);
2279 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2280 				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2281 				0, 32), &vp_reg->one_shot_vect2_en);
2282 	}
2283 }
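
/*
 * Illustrative sketch only: passing per-vpath offsets 0 and 1 for the Tx
 * and Rx TIM events and 2 for the alarm; the function adds the
 * vpath-relative base itself.  "my_vp" is a placeholder handle and the
 * array length mirrors VXGE_HW_MAX_INTR_PER_VP, with unused entries 0.
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *
 *	vxge_hw_vpath_msix_set(my_vp, tim_msix_id, 2);
 */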
2284 
2285 /**
2286  * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2287  * @vp: Virtual Path handle.
2288  * @msix_id:  MSIX ID
2289  *
2290  * The function masks the msix interrupt for the given msix_id
2291  *
2292  * Returns: none.
2293  *
2294  * See also: vxge_hw_vpath_msix_unmask() and
2295  * vxge_hw_vpath_msix_clear()
2296  */
2297 void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle * vp,int msix_id)2298 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2299 {
2300 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2301 	__vxge_hw_pio_mem_write32_upper(
2302 		(u32) vxge_bVALn(vxge_mBIT(msix_id  >> 2), 0, 32),
2303 		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2304 }
2305 
2306 /**
2307  * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2308  * @vp: Virtual Path handle.
2309  * @msix_id:  MSIX ID
2310  *
2311  * The function clears the msix interrupt for the given msix_id
2312  *
2313  * Returns: none.
2314  *
2315  * See also: vxge_hw_vpath_msix_mask() and
2316  * vxge_hw_vpath_msix_unmask()
2317  */
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle * vp,int msix_id)2318 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2319 {
2320 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2321 
2322 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2323 		__vxge_hw_pio_mem_write32_upper(
2324 			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2325 			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2326 	else
2327 		__vxge_hw_pio_mem_write32_upper(
2328 			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2329 			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2330 }
2331 
2332 /**
2333  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2334  * @vp: Virtual Path handle.
2335  * @msix_id:  MSIX ID
2336  *
2337  * The function unmasks the msix interrupt for the given msix_id
2338  *
2339  * Returns: none.
2340  *
2341  * See also: vxge_hw_vpath_msix_mask() and
2342  * vxge_hw_vpath_msix_clear()
2343  */
2344 void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle * vp,int msix_id)2345 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2346 {
2347 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2348 	__vxge_hw_pio_mem_write32_upper(
2349 			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2350 			&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2351 }
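
/*
 * Illustrative sketch only: one possible per-vector interrupt handler
 * shape - mask, service, clear, then unmask.  "my_vp", "msix_id" and the
 * servicing step are placeholders of this sketch.
 *
 *	vxge_hw_vpath_msix_mask(my_vp, msix_id);
 *	...service the ring or fifo bound to this vector...
 *	vxge_hw_vpath_msix_clear(my_vp, msix_id);
 *	vxge_hw_vpath_msix_unmask(my_vp, msix_id);
 */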
2352 
2353 /**
2354  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2355  * @vp: Virtual Path handle.
2356  *
2357  * Mask Tx and Rx vpath interrupts.
2358  *
2359  * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2360  */
vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle * vp)2361 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2362 {
2363 	u64	tim_int_mask0[4] = {[0 ...3] = 0};
2364 	u32	tim_int_mask1[4] = {[0 ...3] = 0};
2365 	u64	val64;
2366 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2367 
2368 	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2369 		tim_int_mask1, vp->vpath->vp_id);
2370 
2371 	val64 = readq(&hldev->common_reg->tim_int_mask0);
2372 
2373 	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2374 		(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2375 		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2376 			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2377 			&hldev->common_reg->tim_int_mask0);
2378 	}
2379 
2380 	val64 = readl(&hldev->common_reg->tim_int_mask1);
2381 
2382 	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2383 		(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2384 		__vxge_hw_pio_mem_write32_upper(
2385 			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2386 			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2387 			&hldev->common_reg->tim_int_mask1);
2388 	}
2389 }
2390 
2391 /**
2392  * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2393  * @vp: Virtual Path handle.
2394  *
2395  * Unmask Tx and Rx vpath interrupts.
2396  *
2397  * See also: vxge_hw_vpath_inta_mask_tx_rx()
2398  */
vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle * vp)2399 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2400 {
2401 	u64	tim_int_mask0[4] = {[0 ...3] = 0};
2402 	u32	tim_int_mask1[4] = {[0 ...3] = 0};
2403 	u64	val64;
2404 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2405 
2406 	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2407 		tim_int_mask1, vp->vpath->vp_id);
2408 
2409 	val64 = readq(&hldev->common_reg->tim_int_mask0);
2410 
2411 	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2412 	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2413 		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2414 			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2415 			&hldev->common_reg->tim_int_mask0);
2416 	}
2417 
2418 	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2419 	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2420 		__vxge_hw_pio_mem_write32_upper(
2421 			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2422 			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2423 			&hldev->common_reg->tim_int_mask1);
2424 	}
2425 }
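
/*
 * Illustrative sketch only: bracketing deferred (e.g. polled) processing
 * of a vpath while running in INTA mode.  "my_vp" is a placeholder.
 *
 *	vxge_hw_vpath_inta_mask_tx_rx(my_vp);
 *	...poll the vpath's ring and fifo...
 *	vxge_hw_vpath_inta_unmask_tx_rx(my_vp);
 */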
2426 
2427 /**
2428  * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2429  * descriptors and process the same.
2430  * @ring: Handle to the ring object used for receive
2431  *
2432  * The function polls the Rx for the completed descriptors and calls
2433  * the driver via the supplied completion callback.
2434  *
2435  * Returns: VXGE_HW_OK, if the polling is completed successfully.
2436  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2437  * descriptors available which are yet to be processed.
2438  *
2439  * See also: vxge_hw_vpath_poll_tx()
2440  */
vxge_hw_vpath_poll_rx(struct __vxge_hw_ring * ring)2441 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2442 {
2443 	u8 t_code;
2444 	enum vxge_hw_status status = VXGE_HW_OK;
2445 	void *first_rxdh;
2446 	u64 val64 = 0;
2447 	int new_count = 0;
2448 
2449 	ring->cmpl_cnt = 0;
2450 
2451 	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2452 	if (status == VXGE_HW_OK)
2453 		ring->callback(ring, first_rxdh,
2454 			t_code, ring->channel.userdata);
2455 
2456 	if (ring->cmpl_cnt != 0) {
2457 		ring->doorbell_cnt += ring->cmpl_cnt;
2458 		if (ring->doorbell_cnt >= ring->rxds_limit) {
2459 			/*
2460 			 * Each RxD is of 4 qwords, update the number of
2461 			 * qwords replenished
2462 			 */
2463 			new_count = (ring->doorbell_cnt * 4);
2464 
2465 			/* For each block add 4 more qwords */
2466 			ring->total_db_cnt += ring->doorbell_cnt;
2467 			if (ring->total_db_cnt >= ring->rxds_per_block) {
2468 				new_count += 4;
2469 				/* Reset total count */
2470 				ring->total_db_cnt %= ring->rxds_per_block;
2471 			}
2472 			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2473 				&ring->vp_reg->prc_rxd_doorbell);
2474 			val64 =
2475 			  readl(&ring->common_reg->titan_general_int_status);
2476 			ring->doorbell_cnt = 0;
2477 		}
2478 	}
2479 
2480 	return status;
2481 }
2482 
2483 /**
2484  * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2485  * the same.
2486  * @fifo: Handle to the fifo object used for non offload send
2487  *
2488  * The function polls the Tx for the completed descriptors and calls
2489  * the driver via the supplied completion callback.
2490  *
2491  * Returns: VXGE_HW_OK, if the polling is completed successfully.
2492  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2493  * descriptors available which are yet to be processed.
2494  */
vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo * fifo,struct sk_buff *** skb_ptr,int nr_skb,int * more)2495 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2496 					struct sk_buff ***skb_ptr, int nr_skb,
2497 					int *more)
2498 {
2499 	enum vxge_hw_fifo_tcode t_code;
2500 	void *first_txdlh;
2501 	enum vxge_hw_status status = VXGE_HW_OK;
2502 	struct __vxge_hw_channel *channel;
2503 
2504 	channel = &fifo->channel;
2505 
2506 	status = vxge_hw_fifo_txdl_next_completed(fifo,
2507 				&first_txdlh, &t_code);
2508 	if (status == VXGE_HW_OK)
2509 		if (fifo->callback(fifo, first_txdlh, t_code,
2510 			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2511 			status = VXGE_HW_COMPLETIONS_REMAIN;
2512 
2513 	return status;
2514 }
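
/*
 * Illustrative sketch only: servicing one vpath from a poll context.
 * "my_ring", "my_fifo" and the skb array bound NR_SKB_COMPLETED (assumed
 * to come from vxge-main.h) are placeholders of this sketch.
 *
 *	struct sk_buff *completed[NR_SKB_COMPLETED];
 *	struct sk_buff **skb_ptr = &completed[0];
 *	int more = 0;
 *
 *	vxge_hw_vpath_poll_rx(my_ring);
 *	vxge_hw_vpath_poll_tx(my_fifo, &skb_ptr, NR_SKB_COMPLETED, &more);
 *	...free the skbs handed back in completed[]...
 */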
2515