1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2012 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/firmware.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
30 #include "bnx2x_sp.h"
31 
32 
33 
34 /**
35  * bnx2x_move_fp - move content of the fastpath structure.
36  *
37  * @bp:		driver handle
38  * @from:	source FP index
39  * @to:		destination FP index
40  *
41  * Makes sure the contents of bp->fp[to].napi are kept
42  * intact. This is done by first copying the napi struct from
43  * the target to the source, and then memcpy'ing the entire
44  * source onto the target.
45  */
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47 {
48 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 
51 	/* Copy the NAPI object as it has been already initialized */
52 	from_fp->napi = to_fp->napi;
53 
54 	/* Move bnx2x_fastpath contents */
55 	memcpy(to_fp, from_fp, sizeof(*to_fp));
56 	to_fp->index = to;
57 }
58 
59 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
60 
61 /* free skb in the packet ring at pos idx
62  * return idx of last bd freed
63  */
64 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
65 			     u16 idx, unsigned int *pkts_compl,
66 			     unsigned int *bytes_compl)
67 {
68 	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
69 	struct eth_tx_start_bd *tx_start_bd;
70 	struct eth_tx_bd *tx_data_bd;
71 	struct sk_buff *skb = tx_buf->skb;
72 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
73 	int nbd;
74 	u16 split_bd_len = 0;
75 
76 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
77 	prefetch(&skb->end);
78 
79 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
80 	   txdata->txq_index, idx, tx_buf, skb);
81 
82 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
83 
84 
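	/* nbd (taken from the start BD) drives the unmap loop below: the
	 * parse BD (and, for TSO splits, the split header BD) is skipped,
	 * the first BD is unmapped together with any split header length,
	 * and the remaining frag BDs are unmapped one by one.
	 */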
85 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
86 #ifdef BNX2X_STOP_ON_ERROR
87 	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
88 		BNX2X_ERR("BAD nbd!\n");
89 		bnx2x_panic();
90 	}
91 #endif
92 	new_cons = nbd + tx_buf->first_bd;
93 
94 	/* Get the next bd */
95 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
96 
97 	/* Skip a parse bd... */
98 	--nbd;
99 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
100 
101 	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
102 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
103 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
104 		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
105 		--nbd;
106 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
107 	}
108 
109 	/* unmap first bd */
110 	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
111 			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
112 			 DMA_TO_DEVICE);
113 
114 	/* now free frags */
115 	while (nbd > 0) {
116 
117 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
118 		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
119 			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
120 		if (--nbd)
121 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
122 	}
123 
124 	/* release skb */
125 	WARN_ON(!skb);
126 	if (likely(skb)) {
127 		(*pkts_compl)++;
128 		(*bytes_compl) += skb->len;
129 	}
130 
131 	dev_kfree_skb_any(skb);
132 	tx_buf->first_bd = 0;
133 	tx_buf->skb = NULL;
134 
135 	return new_cons;
136 }
137 
138 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
139 {
140 	struct netdev_queue *txq;
141 	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
142 	unsigned int pkts_compl = 0, bytes_compl = 0;
143 
144 #ifdef BNX2X_STOP_ON_ERROR
145 	if (unlikely(bp->panic))
146 		return -1;
147 #endif
148 
149 	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
150 	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
151 	sw_cons = txdata->tx_pkt_cons;
152 
153 	while (sw_cons != hw_cons) {
154 		u16 pkt_cons;
155 
156 		pkt_cons = TX_BD(sw_cons);
157 
158 		DP(NETIF_MSG_TX_DONE,
159 		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
160 		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
161 
162 		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
163 		    &pkts_compl, &bytes_compl);
164 
165 		sw_cons++;
166 	}
167 
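	/* report the completed packets/bytes to BQL (byte queue limits) */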
168 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
169 
170 	txdata->tx_pkt_cons = sw_cons;
171 	txdata->tx_bd_cons = bd_cons;
172 
173 	/* Need to make the tx_bd_cons update visible to start_xmit()
174 	 * before checking for netif_tx_queue_stopped().  Without the
175 	 * memory barrier, there is a small possibility that
176 	 * start_xmit() will miss it and cause the queue to be stopped
177 	 * forever.
178 	 * On the other hand we need an rmb() here to ensure the proper
179 	 * ordering of bit testing in the following
180 	 * netif_tx_queue_stopped(txq) call.
181 	 */
182 	smp_mb();
183 
184 	if (unlikely(netif_tx_queue_stopped(txq))) {
185 		/* Taking tx_lock() is needed to prevent reenabling the queue
186 		 * while it's empty. This could have happened if rx_action() gets
187 		 * suspended in bnx2x_tx_int() after the condition before
188 		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
189 		 *
190 		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
191 		 * sends some packets consuming the whole queue again->
192 		 * stops the queue
193 		 */
194 
195 		__netif_tx_lock(txq, smp_processor_id());
196 
197 		if ((netif_tx_queue_stopped(txq)) &&
198 		    (bp->state == BNX2X_STATE_OPEN) &&
199 		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
200 			netif_tx_wake_queue(txq);
201 
202 		__netif_tx_unlock(txq);
203 	}
204 	return 0;
205 }
206 
207 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
208 					     u16 idx)
209 {
210 	u16 last_max = fp->last_max_sge;
211 
212 	if (SUB_S16(idx, last_max) > 0)
213 		fp->last_max_sge = idx;
214 }
215 
216 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
217 					 u16 sge_len,
218 					 struct eth_end_agg_rx_cqe *cqe)
219 {
220 	struct bnx2x *bp = fp->bp;
221 	u16 last_max, last_elem, first_elem;
222 	u16 delta = 0;
223 	u16 i;
224 
225 	if (!sge_len)
226 		return;
227 
228 	/* First mark all used pages */
229 	for (i = 0; i < sge_len; i++)
230 		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
231 			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
232 
233 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
234 	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
235 
236 	/* Here we assume that the last SGE index is the biggest */
237 	prefetch((void *)(fp->sge_mask));
238 	bnx2x_update_last_max_sge(fp,
239 		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
240 
241 	last_max = RX_SGE(fp->last_max_sge);
242 	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
243 	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
244 
245 	/* If ring is not full */
246 	if (last_elem + 1 != first_elem)
247 		last_elem++;
248 
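	/* sge_mask is a bitmap of 64-bit words; the producer is advanced
	 * only over words whose SGEs have all been consumed (all bits
	 * cleared above), re-arming each such word to all-ones as we go.
	 */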
249 	/* Now update the prod */
250 	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
251 		if (likely(fp->sge_mask[i]))
252 			break;
253 
254 		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
255 		delta += BIT_VEC64_ELEM_SZ;
256 	}
257 
258 	if (delta > 0) {
259 		fp->rx_sge_prod += delta;
260 		/* clear page-end entries */
261 		bnx2x_clear_sge_mask_next_elems(fp);
262 	}
263 
264 	DP(NETIF_MSG_RX_STATUS,
265 	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
266 	   fp->last_max_sge, fp->rx_sge_prod);
267 }
268 
269 /* Set Toeplitz hash value in the skb using the value from the
270  * CQE (calculated by HW).
271  */
272 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
273 			    const struct eth_fast_path_rx_cqe *cqe)
274 {
275 	/* Set Toeplitz hash from CQE */
276 	if ((bp->dev->features & NETIF_F_RXHASH) &&
277 	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
278 		return le32_to_cpu(cqe->rss_hash_result);
279 	return 0;
280 }
281 
282 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
283 			    u16 cons, u16 prod,
284 			    struct eth_fast_path_rx_cqe *cqe)
285 {
286 	struct bnx2x *bp = fp->bp;
287 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
288 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
289 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
290 	dma_addr_t mapping;
291 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
292 	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
293 
294 	/* print error if current state != stop */
295 	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
296 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
297 
298 	/* Try to map an empty data buffer from the aggregation info  */
299 	mapping = dma_map_single(&bp->pdev->dev,
300 				 first_buf->data + NET_SKB_PAD,
301 				 fp->rx_buf_size, DMA_FROM_DEVICE);
302 	/*
303 	 *  ...if it fails - move the skb from the consumer to the producer
304 	 *  and set the current aggregation state as ERROR to drop it
305 	 *  when TPA_STOP arrives.
306 	 */
307 
308 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
309 		/* Move the BD from the consumer to the producer */
310 		bnx2x_reuse_rx_data(fp, cons, prod);
311 		tpa_info->tpa_state = BNX2X_TPA_ERROR;
312 		return;
313 	}
314 
315 	/* move empty data from pool to prod */
316 	prod_rx_buf->data = first_buf->data;
317 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
318 	/* point prod_bd to new data */
319 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
320 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
321 
322 	/* move partial skb from cons to pool (don't unmap yet) */
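	/* the buffer stays DMA-mapped here; bnx2x_tpa_stop() unmaps it
	 * when the aggregation is closed
	 */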
323 	*first_buf = *cons_rx_buf;
324 
325 	/* mark bin state as START */
326 	tpa_info->parsing_flags =
327 		le16_to_cpu(cqe->pars_flags.flags);
328 	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
329 	tpa_info->tpa_state = BNX2X_TPA_START;
330 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
331 	tpa_info->placement_offset = cqe->placement_offset;
332 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
333 	if (fp->mode == TPA_MODE_GRO) {
334 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
335 		tpa_info->full_page =
336 			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
337 		/*
338 		 * FW 7.2.16 BUG workaround:
339 		 * if the SGE size is an exact multiple of gro_size,
340 		 * the FW will place one less frag on the SGE.
341 		 * the calculation is done only for potentially
342 		 * dangerous MTUs.
343 		 */
344 		if (unlikely(bp->gro_check))
345 			if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
346 				tpa_info->full_page -= gro_size;
347 		tpa_info->gro_size = gro_size;
348 	}
349 
350 #ifdef BNX2X_STOP_ON_ERROR
351 	fp->tpa_queue_used |= (1 << queue);
352 #ifdef _ASM_GENERIC_INT_L64_H
353 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
354 #else
355 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
356 #endif
357 	   fp->tpa_queue_used);
358 #endif
359 }
360 
361 /* Timestamp option length allowed for TPA aggregation:
362  *
363  *		nop nop kind length echo val
364  */
365 #define TPA_TSTAMP_OPT_LEN	12
366 /**
367  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
368  *
369  * @bp:			driver handle
370  * @parsing_flags:	parsing flags from the START CQE
371  * @len_on_bd:		total length of the first packet for the
372  *			aggregation.
373  *
374  * Approximate value of the MSS for this aggregation, calculated using
375  * its first packet.
376  */
377 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
378 				    u16 len_on_bd)
379 {
380 	/*
381 	 * TPA aggregation won't have either IP options or TCP options
382 	 * other than timestamp or IPv6 extension headers.
383 	 */
384 	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
385 
386 	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
387 	    PRS_FLAG_OVERETH_IPV6)
388 		hdrs_len += sizeof(struct ipv6hdr);
389 	else /* IPv4 */
390 		hdrs_len += sizeof(struct iphdr);
391 
392 
393 	/* Check if there was a TCP timestamp; if there was, it will
394 	 * always be 12 bytes long: nop nop kind length echo val.
395 	 *
396 	 * Otherwise FW would close the aggregation.
397 	 */
398 	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
399 		hdrs_len += TPA_TSTAMP_OPT_LEN;
400 
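	/* e.g. an IPv4 aggregation with timestamps:
	 * 14 (eth) + 20 (ip) + 20 (tcp) + 12 (tstamp) = 66 bytes of headers,
	 * so the reported MSS is len_on_bd - 66
	 */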
401 	return len_on_bd - hdrs_len;
402 }
403 
404 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
405 			       struct bnx2x_agg_info *tpa_info,
406 			       u16 pages,
407 			       struct sk_buff *skb,
408 			       struct eth_end_agg_rx_cqe *cqe,
409 			       u16 cqe_idx)
410 {
411 	struct sw_rx_page *rx_pg, old_rx_pg;
412 	u32 i, frag_len, frag_size;
413 	int err, j, frag_id = 0;
414 	u16 len_on_bd = tpa_info->len_on_bd;
415 	u16 full_page = 0, gro_size = 0;
416 
417 	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
418 
419 	if (fp->mode == TPA_MODE_GRO) {
420 		gro_size = tpa_info->gro_size;
421 		full_page = tpa_info->full_page;
422 	}
423 
424 	/* This is needed in order to enable forwarding support */
425 	if (frag_size) {
426 		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
427 					tpa_info->parsing_flags, len_on_bd);
428 
429 		/* set for GRO */
430 		if (fp->mode == TPA_MODE_GRO)
431 			skb_shinfo(skb)->gso_type =
432 			    (GET_FLAG(tpa_info->parsing_flags,
433 				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
434 						PRS_FLAG_OVERETH_IPV6) ?
435 				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
436 	}
437 
438 
439 #ifdef BNX2X_STOP_ON_ERROR
440 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
441 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
442 			  pages, cqe_idx);
443 		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
444 		bnx2x_panic();
445 		return -EINVAL;
446 	}
447 #endif
448 
449 	/* Run through the SGL and compose the fragmented skb */
450 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
451 		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
452 
453 		/* FW gives the indices of the SGE as if the ring is an array
454 		   (meaning that "next" element will consume 2 indices) */
455 		if (fp->mode == TPA_MODE_GRO)
456 			frag_len = min_t(u32, frag_size, (u32)full_page);
457 		else /* LRO */
458 			frag_len = min_t(u32, frag_size,
459 					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
460 
461 		rx_pg = &fp->rx_page_ring[sge_idx];
462 		old_rx_pg = *rx_pg;
463 
464 		/* If we fail to allocate a substitute page, we simply stop
465 		   where we are and drop the whole packet */
466 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
467 		if (unlikely(err)) {
468 			fp->eth_q_stats.rx_skb_alloc_failed++;
469 			return err;
470 		}
471 
472 		/* Unmap the page as we are going to pass it to the stack */
473 		dma_unmap_page(&bp->pdev->dev,
474 			       dma_unmap_addr(&old_rx_pg, mapping),
475 			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
476 		/* Add one frag and update the appropriate fields in the skb */
477 		if (fp->mode == TPA_MODE_LRO)
478 			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
479 		else { /* GRO */
480 			int rem;
481 			int offset = 0;
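			/* split this SGE into gro_size-sized frags; each
			 * additional frag that points into the same page
			 * needs its own page reference (get_page() below)
			 */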
482 			for (rem = frag_len; rem > 0; rem -= gro_size) {
483 				int len = rem > gro_size ? gro_size : rem;
484 				skb_fill_page_desc(skb, frag_id++,
485 						   old_rx_pg.page, offset, len);
486 				if (offset)
487 					get_page(old_rx_pg.page);
488 				offset += len;
489 			}
490 		}
491 
492 		skb->data_len += frag_len;
493 		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
494 		skb->len += frag_len;
495 
496 		frag_size -= frag_len;
497 	}
498 
499 	return 0;
500 }
501 
502 static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
503 				  struct bnx2x_agg_info *tpa_info,
504 				  u16 pages,
505 				  struct eth_end_agg_rx_cqe *cqe,
506 				  u16 cqe_idx)
507 {
508 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
509 	u8 pad = tpa_info->placement_offset;
510 	u16 len = tpa_info->len_on_bd;
511 	struct sk_buff *skb = NULL;
512 	u8 *new_data, *data = rx_buf->data;
513 	u8 old_tpa_state = tpa_info->tpa_state;
514 
515 	tpa_info->tpa_state = BNX2X_TPA_STOP;
516 
517 	/* If there was an error during the handling of the TPA_START -
518 	 * drop this aggregation.
519 	 */
520 	if (old_tpa_state == BNX2X_TPA_ERROR)
521 		goto drop;
522 
523 	/* Try to allocate the new data */
524 	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
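	/* on success the old buffer ("data") becomes the skb head via
	 * build_skb() below and new_data takes its place in the bin;
	 * on failure the old buffer is kept and the aggregation is dropped
	 */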
525 
526 	/* Unmap skb in the pool anyway, as we are going to change
527 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
528 	   fails. */
529 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
530 			 fp->rx_buf_size, DMA_FROM_DEVICE);
531 	if (likely(new_data))
532 		skb = build_skb(data);
533 
534 	if (likely(skb)) {
535 #ifdef BNX2X_STOP_ON_ERROR
536 		if (pad + len > fp->rx_buf_size) {
537 			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
538 				  pad, len, fp->rx_buf_size);
539 			bnx2x_panic();
540 			return;
541 		}
542 #endif
543 
544 		skb_reserve(skb, pad + NET_SKB_PAD);
545 		skb_put(skb, len);
546 		skb->rxhash = tpa_info->rxhash;
547 
548 		skb->protocol = eth_type_trans(skb, bp->dev);
549 		skb->ip_summed = CHECKSUM_UNNECESSARY;
550 
551 		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
552 					 skb, cqe, cqe_idx)) {
553 			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
554 				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
555 			skb_record_rx_queue(skb, fp->rx_queue);
556 			napi_gro_receive(&fp->napi, skb);
557 		} else {
558 			DP(NETIF_MSG_RX_STATUS,
559 			   "Failed to allocate new pages - dropping packet!\n");
560 			dev_kfree_skb_any(skb);
561 		}
562 
563 
564 		/* put new data in bin */
565 		rx_buf->data = new_data;
566 
567 		return;
568 	}
569 	kfree(new_data);
570 drop:
571 	/* drop the packet and keep the buffer in the bin */
572 	DP(NETIF_MSG_RX_STATUS,
573 	   "Failed to allocate or map a new skb - dropping packet!\n");
574 	fp->eth_q_stats.rx_skb_alloc_failed++;
575 }
576 
577 static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
578 				struct bnx2x_fastpath *fp)
579 {
580 	/* Do nothing if no L4 csum validation was done.
581 	 * We do not check whether IP csum was validated. For IPv4 we assume
582 	 * that if the card got as far as validating the L4 csum, it also
583 	 * validated the IP csum. IPv6 has no IP csum.
584 	 */
585 	if (cqe->fast_path_cqe.status_flags &
586 	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
587 		return;
588 
589 	/* If L4 validation was done, check if an error was found. */
590 
591 	if (cqe->fast_path_cqe.type_error_flags &
592 	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
593 	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
594 		fp->eth_q_stats.hw_csum_err++;
595 	else
596 		skb->ip_summed = CHECKSUM_UNNECESSARY;
597 }
598 
599 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
600 {
601 	struct bnx2x *bp = fp->bp;
602 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
603 	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
604 	int rx_pkt = 0;
605 
606 #ifdef BNX2X_STOP_ON_ERROR
607 	if (unlikely(bp->panic))
608 		return 0;
609 #endif
610 
611 	/* The CQ "next element" is of the same size as a regular element,
612 	   that's why it's ok here */
613 	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
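	/* the last slot of each RCQ page holds the "next page" element -
	 * step over it if the completion index landed on it
	 */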
614 	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
615 		hw_comp_cons++;
616 
617 	bd_cons = fp->rx_bd_cons;
618 	bd_prod = fp->rx_bd_prod;
619 	bd_prod_fw = bd_prod;
620 	sw_comp_cons = fp->rx_comp_cons;
621 	sw_comp_prod = fp->rx_comp_prod;
622 
623 	/* Memory barrier necessary as speculative reads of the rx
624 	 * buffer can be ahead of the index in the status block
625 	 */
626 	rmb();
627 
628 	DP(NETIF_MSG_RX_STATUS,
629 	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
630 	   fp->index, hw_comp_cons, sw_comp_cons);
631 
632 	while (sw_comp_cons != hw_comp_cons) {
633 		struct sw_rx_bd *rx_buf = NULL;
634 		struct sk_buff *skb;
635 		union eth_rx_cqe *cqe;
636 		struct eth_fast_path_rx_cqe *cqe_fp;
637 		u8 cqe_fp_flags;
638 		enum eth_rx_cqe_type cqe_fp_type;
639 		u16 len, pad, queue;
640 		u8 *data;
641 
642 #ifdef BNX2X_STOP_ON_ERROR
643 		if (unlikely(bp->panic))
644 			return 0;
645 #endif
646 
647 		comp_ring_cons = RCQ_BD(sw_comp_cons);
648 		bd_prod = RX_BD(bd_prod);
649 		bd_cons = RX_BD(bd_cons);
650 
651 		cqe = &fp->rx_comp_ring[comp_ring_cons];
652 		cqe_fp = &cqe->fast_path_cqe;
653 		cqe_fp_flags = cqe_fp->type_error_flags;
654 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
655 
656 		DP(NETIF_MSG_RX_STATUS,
657 		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
658 		   CQE_TYPE(cqe_fp_flags),
659 		   cqe_fp_flags, cqe_fp->status_flags,
660 		   le32_to_cpu(cqe_fp->rss_hash_result),
661 		   le16_to_cpu(cqe_fp->vlan_tag),
662 		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
663 
664 		/* is this a slowpath msg? */
665 		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
666 			bnx2x_sp_event(fp, cqe);
667 			goto next_cqe;
668 		}
669 
670 		rx_buf = &fp->rx_buf_ring[bd_cons];
671 		data = rx_buf->data;
672 
673 		if (!CQE_TYPE_FAST(cqe_fp_type)) {
674 			struct bnx2x_agg_info *tpa_info;
675 			u16 frag_size, pages;
676 #ifdef BNX2X_STOP_ON_ERROR
677 			/* sanity check */
678 			if (fp->disable_tpa &&
679 			    (CQE_TYPE_START(cqe_fp_type) ||
680 			     CQE_TYPE_STOP(cqe_fp_type)))
681 				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
682 					  CQE_TYPE(cqe_fp_type));
683 #endif
684 
685 			if (CQE_TYPE_START(cqe_fp_type)) {
686 				u16 queue = cqe_fp->queue_index;
687 				DP(NETIF_MSG_RX_STATUS,
688 				   "calling tpa_start on queue %d\n",
689 				   queue);
690 
691 				bnx2x_tpa_start(fp, queue,
692 						bd_cons, bd_prod,
693 						cqe_fp);
694 
695 				goto next_rx;
696 
697 			}
698 			queue = cqe->end_agg_cqe.queue_index;
699 			tpa_info = &fp->tpa_info[queue];
700 			DP(NETIF_MSG_RX_STATUS,
701 			   "calling tpa_stop on queue %d\n",
702 			   queue);
703 
704 			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
705 				    tpa_info->len_on_bd;
706 
707 			if (fp->mode == TPA_MODE_GRO)
708 				pages = (frag_size + tpa_info->full_page - 1) /
709 					 tpa_info->full_page;
710 			else
711 				pages = SGE_PAGE_ALIGN(frag_size) >>
712 					SGE_PAGE_SHIFT;
713 
714 			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
715 				       &cqe->end_agg_cqe, comp_ring_cons);
716 #ifdef BNX2X_STOP_ON_ERROR
717 			if (bp->panic)
718 				return 0;
719 #endif
720 
721 			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
722 			goto next_cqe;
723 		}
724 		/* non TPA */
725 		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
726 		pad = cqe_fp->placement_offset;
727 		dma_sync_single_for_cpu(&bp->pdev->dev,
728 					dma_unmap_addr(rx_buf, mapping),
729 					pad + RX_COPY_THRESH,
730 					DMA_FROM_DEVICE);
731 		pad += NET_SKB_PAD;
732 		prefetch(data + pad); /* speedup eth_type_trans() */
733 		/* is this an error packet? */
734 		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
735 			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
736 			   "ERROR  flags %x  rx packet %u\n",
737 			   cqe_fp_flags, sw_comp_cons);
738 			fp->eth_q_stats.rx_err_discard_pkt++;
739 			goto reuse_rx;
740 		}
741 
742 		/* Since we don't have a jumbo ring
743 		 * copy small packets if mtu > 1500
744 		 */
745 		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
746 		    (len <= RX_COPY_THRESH)) {
747 			skb = netdev_alloc_skb_ip_align(bp->dev, len);
748 			if (skb == NULL) {
749 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
750 				   "ERROR  packet dropped because of alloc failure\n");
751 				fp->eth_q_stats.rx_skb_alloc_failed++;
752 				goto reuse_rx;
753 			}
754 			memcpy(skb->data, data + pad, len);
755 			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
756 		} else {
757 			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
758 				dma_unmap_single(&bp->pdev->dev,
759 						 dma_unmap_addr(rx_buf, mapping),
760 						 fp->rx_buf_size,
761 						 DMA_FROM_DEVICE);
762 				skb = build_skb(data);
763 				if (unlikely(!skb)) {
764 					kfree(data);
765 					fp->eth_q_stats.rx_skb_alloc_failed++;
766 					goto next_rx;
767 				}
768 				skb_reserve(skb, pad);
769 			} else {
770 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
771 				   "ERROR  packet dropped because of alloc failure\n");
772 				fp->eth_q_stats.rx_skb_alloc_failed++;
773 reuse_rx:
774 				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
775 				goto next_rx;
776 			}
777 		}
778 
779 		skb_put(skb, len);
780 		skb->protocol = eth_type_trans(skb, bp->dev);
781 
782 		/* Set Toeplitz hash for a non-LRO skb */
783 		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
784 
785 		skb_checksum_none_assert(skb);
786 
787 		if (bp->dev->features & NETIF_F_RXCSUM)
788 			bnx2x_csum_validate(skb, cqe, fp);
789 
790 
791 		skb_record_rx_queue(skb, fp->rx_queue);
792 
793 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
794 		    PARSING_FLAGS_VLAN)
795 			__vlan_hwaccel_put_tag(skb,
796 					       le16_to_cpu(cqe_fp->vlan_tag));
797 		napi_gro_receive(&fp->napi, skb);
798 
799 
800 next_rx:
801 		rx_buf->data = NULL;
802 
803 		bd_cons = NEXT_RX_IDX(bd_cons);
804 		bd_prod = NEXT_RX_IDX(bd_prod);
805 		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
806 		rx_pkt++;
807 next_cqe:
808 		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
809 		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
810 
811 		if (rx_pkt == budget)
812 			break;
813 	} /* while */
814 
815 	fp->rx_bd_cons = bd_cons;
816 	fp->rx_bd_prod = bd_prod_fw;
817 	fp->rx_comp_cons = sw_comp_cons;
818 	fp->rx_comp_prod = sw_comp_prod;
819 
820 	/* Update producers */
821 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
822 			     fp->rx_sge_prod);
823 
824 	fp->rx_pkt += rx_pkt;
825 	fp->rx_calls++;
826 
827 	return rx_pkt;
828 }
829 
830 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
831 {
832 	struct bnx2x_fastpath *fp = fp_cookie;
833 	struct bnx2x *bp = fp->bp;
834 	u8 cos;
835 
836 	DP(NETIF_MSG_INTR,
837 	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
838 	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
839 	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
840 
841 #ifdef BNX2X_STOP_ON_ERROR
842 	if (unlikely(bp->panic))
843 		return IRQ_HANDLED;
844 #endif
845 
846 	/* Handle Rx and Tx according to MSI-X vector */
847 	prefetch(fp->rx_cons_sb);
848 
849 	for_each_cos_in_tx_queue(fp, cos)
850 		prefetch(fp->txdata[cos].tx_cons_sb);
851 
852 	prefetch(&fp->sb_running_index[SM_RX_ID]);
853 	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
854 
855 	return IRQ_HANDLED;
856 }
857 
858 /* HW Lock for shared dual port PHYs */
859 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
860 {
861 	mutex_lock(&bp->port.phy_mutex);
862 
863 	if (bp->port.need_hw_lock)
864 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
865 }
866 
867 void bnx2x_release_phy_lock(struct bnx2x *bp)
868 {
869 	if (bp->port.need_hw_lock)
870 		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
871 
872 	mutex_unlock(&bp->port.phy_mutex);
873 }
874 
875 /* calculates MF speed according to current linespeed and MF configuration */
876 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
877 {
878 	u16 line_speed = bp->link_vars.line_speed;
879 	if (IS_MF(bp)) {
880 		u16 maxCfg = bnx2x_extract_max_cfg(bp,
881 						   bp->mf_config[BP_VN(bp)]);
882 
883 		/* Calculate the current MAX line speed limit for the MF
884 		 * devices
885 		 */
886 		if (IS_MF_SI(bp))
887 			line_speed = (line_speed * maxCfg) / 100;
888 		else { /* SD mode */
889 			u16 vn_max_rate = maxCfg * 100;
890 
891 			if (vn_max_rate < line_speed)
892 				line_speed = vn_max_rate;
893 		}
894 	}
895 
896 	return line_speed;
897 }
898 
899 /**
900  * bnx2x_fill_report_data - fill link report data to report
901  *
902  * @bp:		driver handle
903  * @data:	link state to update
904  *
905  * It uses non-atomic bit operations because it is called under the mutex.
906  */
907 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
908 					  struct bnx2x_link_report_data *data)
909 {
910 	u16 line_speed = bnx2x_get_mf_speed(bp);
911 
912 	memset(data, 0, sizeof(*data));
913 
914 	/* Fill the report data: effective line speed */
915 	data->line_speed = line_speed;
916 
917 	/* Link is down */
918 	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
919 		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
920 			  &data->link_report_flags);
921 
922 	/* Full DUPLEX */
923 	if (bp->link_vars.duplex == DUPLEX_FULL)
924 		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
925 
926 	/* Rx Flow Control is ON */
927 	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
928 		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
929 
930 	/* Tx Flow Control is ON */
931 	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
932 		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
933 }
934 
935 /**
936  * bnx2x_link_report - report link status to OS.
937  *
938  * @bp:		driver handle
939  *
940  * Calls the __bnx2x_link_report() under the same locking scheme
941  * as the link/PHY state managing code to ensure consistent link
942  * reporting.
943  */
944 
945 void bnx2x_link_report(struct bnx2x *bp)
946 {
947 	bnx2x_acquire_phy_lock(bp);
948 	__bnx2x_link_report(bp);
949 	bnx2x_release_phy_lock(bp);
950 }
951 
952 /**
953  * __bnx2x_link_report - report link status to OS.
954  *
955  * @bp:		driver handle
956  *
957  * Non-atomic implementation.
958  * Should be called under the phy_lock.
959  */
960 void __bnx2x_link_report(struct bnx2x *bp)
961 {
962 	struct bnx2x_link_report_data cur_data;
963 
964 	/* reread mf_cfg */
965 	if (!CHIP_IS_E1(bp))
966 		bnx2x_read_mf_cfg(bp);
967 
968 	/* Read the current link report info */
969 	bnx2x_fill_report_data(bp, &cur_data);
970 
971 	/* Don't report link down or exactly the same link status twice */
972 	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
973 	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
974 		      &bp->last_reported_link.link_report_flags) &&
975 	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
976 		      &cur_data.link_report_flags)))
977 		return;
978 
979 	bp->link_cnt++;
980 
981 	/* We are going to report new link parameters now -
982 	 * remember the current data for the next time.
983 	 */
984 	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
985 
986 	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
987 		     &cur_data.link_report_flags)) {
988 		netif_carrier_off(bp->dev);
989 		netdev_err(bp->dev, "NIC Link is Down\n");
990 		return;
991 	} else {
992 		const char *duplex;
993 		const char *flow;
994 
995 		netif_carrier_on(bp->dev);
996 
997 		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
998 				       &cur_data.link_report_flags))
999 			duplex = "full";
1000 		else
1001 			duplex = "half";
1002 
1003 		/* Handle the FC at the end so that only these flags would be
1004 		 * possibly set. This way we may easily check if there is no FC
1005 		 * enabled.
1006 		 */
1007 		if (cur_data.link_report_flags) {
1008 			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1009 				     &cur_data.link_report_flags)) {
1010 				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1011 				     &cur_data.link_report_flags))
1012 					flow = "ON - receive & transmit";
1013 				else
1014 					flow = "ON - receive";
1015 			} else {
1016 				flow = "ON - transmit";
1017 			}
1018 		} else {
1019 			flow = "none";
1020 		}
1021 		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1022 			    cur_data.line_speed, duplex, flow);
1023 	}
1024 }
1025 
1026 void bnx2x_init_rx_rings(struct bnx2x *bp)
1027 {
1028 	int func = BP_FUNC(bp);
1029 	u16 ring_prod;
1030 	int i, j;
1031 
1032 	/* Allocate TPA resources */
1033 	for_each_rx_queue(bp, j) {
1034 		struct bnx2x_fastpath *fp = &bp->fp[j];
1035 
1036 		DP(NETIF_MSG_IFUP,
1037 		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1038 
1039 		if (!fp->disable_tpa) {
1040 			/* Fill the per-aggregation pool */
1041 			for (i = 0; i < MAX_AGG_QS(bp); i++) {
1042 				struct bnx2x_agg_info *tpa_info =
1043 					&fp->tpa_info[i];
1044 				struct sw_rx_bd *first_buf =
1045 					&tpa_info->first_buf;
1046 
1047 				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1048 							  GFP_ATOMIC);
1049 				if (!first_buf->data) {
1050 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1051 						  j);
1052 					bnx2x_free_tpa_pool(bp, fp, i);
1053 					fp->disable_tpa = 1;
1054 					break;
1055 				}
1056 				dma_unmap_addr_set(first_buf, mapping, 0);
1057 				tpa_info->tpa_state = BNX2X_TPA_STOP;
1058 			}
1059 
1060 			/* "next page" elements initialization */
1061 			bnx2x_set_next_page_sgl(fp);
1062 
1063 			/* set SGEs bit mask */
1064 			bnx2x_init_sge_ring_bit_mask(fp);
1065 
1066 			/* Allocate SGEs and initialize the ring elements */
1067 			for (i = 0, ring_prod = 0;
1068 			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1069 
1070 				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1071 					BNX2X_ERR("was only able to allocate %d rx sges\n",
1072 						  i);
1073 					BNX2X_ERR("disabling TPA for queue[%d]\n",
1074 						  j);
1075 					/* Cleanup already allocated elements */
1076 					bnx2x_free_rx_sge_range(bp, fp,
1077 								ring_prod);
1078 					bnx2x_free_tpa_pool(bp, fp,
1079 							    MAX_AGG_QS(bp));
1080 					fp->disable_tpa = 1;
1081 					ring_prod = 0;
1082 					break;
1083 				}
1084 				ring_prod = NEXT_SGE_IDX(ring_prod);
1085 			}
1086 
1087 			fp->rx_sge_prod = ring_prod;
1088 		}
1089 	}
1090 
1091 	for_each_rx_queue(bp, j) {
1092 		struct bnx2x_fastpath *fp = &bp->fp[j];
1093 
1094 		fp->rx_bd_cons = 0;
1095 
1096 		/* Activate BD ring */
1097 		/* Warning!
1098 		 * this will generate an interrupt (to the TSTORM)
1099 		 * must only be done after chip is initialized
1100 		 */
1101 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1102 				     fp->rx_sge_prod);
1103 
1104 		if (j != 0)
1105 			continue;
1106 
1107 		if (CHIP_IS_E1(bp)) {
1108 			REG_WR(bp, BAR_USTRORM_INTMEM +
1109 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1110 			       U64_LO(fp->rx_comp_mapping));
1111 			REG_WR(bp, BAR_USTRORM_INTMEM +
1112 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1113 			       U64_HI(fp->rx_comp_mapping));
1114 		}
1115 	}
1116 }
1117 
1118 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1119 {
1120 	int i;
1121 	u8 cos;
1122 
1123 	for_each_tx_queue(bp, i) {
1124 		struct bnx2x_fastpath *fp = &bp->fp[i];
1125 		for_each_cos_in_tx_queue(fp, cos) {
1126 			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1127 			unsigned pkts_compl = 0, bytes_compl = 0;
1128 
1129 			u16 sw_prod = txdata->tx_pkt_prod;
1130 			u16 sw_cons = txdata->tx_pkt_cons;
1131 
1132 			while (sw_cons != sw_prod) {
1133 				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1134 				    &pkts_compl, &bytes_compl);
1135 				sw_cons++;
1136 			}
1137 			netdev_tx_reset_queue(
1138 			    netdev_get_tx_queue(bp->dev, txdata->txq_index));
1139 		}
1140 	}
1141 }
1142 
1143 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1144 {
1145 	struct bnx2x *bp = fp->bp;
1146 	int i;
1147 
1148 	/* ring wasn't allocated */
1149 	if (fp->rx_buf_ring == NULL)
1150 		return;
1151 
1152 	for (i = 0; i < NUM_RX_BD; i++) {
1153 		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1154 		u8 *data = rx_buf->data;
1155 
1156 		if (data == NULL)
1157 			continue;
1158 		dma_unmap_single(&bp->pdev->dev,
1159 				 dma_unmap_addr(rx_buf, mapping),
1160 				 fp->rx_buf_size, DMA_FROM_DEVICE);
1161 
1162 		rx_buf->data = NULL;
1163 		kfree(data);
1164 	}
1165 }
1166 
1167 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1168 {
1169 	int j;
1170 
1171 	for_each_rx_queue(bp, j) {
1172 		struct bnx2x_fastpath *fp = &bp->fp[j];
1173 
1174 		bnx2x_free_rx_bds(fp);
1175 
1176 		if (!fp->disable_tpa)
1177 			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1178 	}
1179 }
1180 
1181 void bnx2x_free_skbs(struct bnx2x *bp)
1182 {
1183 	bnx2x_free_tx_skbs(bp);
1184 	bnx2x_free_rx_skbs(bp);
1185 }
1186 
1187 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1188 {
1189 	/* load old values */
1190 	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1191 
1192 	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1193 		/* leave all but MAX value */
1194 		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1195 
1196 		/* set new MAX value */
1197 		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1198 				& FUNC_MF_CFG_MAX_BW_MASK;
1199 
1200 		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1201 	}
1202 }
1203 
1204 /**
1205  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1206  *
1207  * @bp:		driver handle
1208  * @nvecs:	number of vectors to be released
1209  */
1210 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1211 {
1212 	int i, offset = 0;
1213 
1214 	if (nvecs == offset)
1215 		return;
1216 	free_irq(bp->msix_table[offset].vector, bp->dev);
1217 	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1218 	   bp->msix_table[offset].vector);
1219 	offset++;
1220 #ifdef BCM_CNIC
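	/* the CNIC vector slot is accounted for but not freed here;
	 * bnx2x_req_msix_irqs() skips it the same way
	 */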
1221 	if (nvecs == offset)
1222 		return;
1223 	offset++;
1224 #endif
1225 
1226 	for_each_eth_queue(bp, i) {
1227 		if (nvecs == offset)
1228 			return;
1229 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1230 		   i, bp->msix_table[offset].vector);
1231 
1232 		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1233 	}
1234 }
1235 
1236 void bnx2x_free_irq(struct bnx2x *bp)
1237 {
1238 	if (bp->flags & USING_MSIX_FLAG)
1239 		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1240 				     CNIC_PRESENT + 1);
1241 	else if (bp->flags & USING_MSI_FLAG)
1242 		free_irq(bp->pdev->irq, bp->dev);
1243 	else
1244 		free_irq(bp->pdev->irq, bp->dev);
1245 }
1246 
1247 int bnx2x_enable_msix(struct bnx2x *bp)
1248 {
1249 	int msix_vec = 0, i, rc, req_cnt;
1250 
1251 	bp->msix_table[msix_vec].entry = msix_vec;
1252 	BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1253 	   bp->msix_table[0].entry);
1254 	msix_vec++;
1255 
1256 #ifdef BCM_CNIC
1257 	bp->msix_table[msix_vec].entry = msix_vec;
1258 	BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1259 	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1260 	msix_vec++;
1261 #endif
1262 	/* We need separate vectors for ETH queues only (not FCoE) */
1263 	for_each_eth_queue(bp, i) {
1264 		bp->msix_table[msix_vec].entry = msix_vec;
1265 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1266 			       msix_vec, msix_vec, i);
1267 		msix_vec++;
1268 	}
1269 
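	/* total: one slowpath vector, one CNIC vector (if built in) and one
	 * per ETH fastpath queue
	 */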
1270 	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1271 
1272 	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1273 
1274 	/*
1275 	 * reconfigure number of tx/rx queues according to available
1276 	 * MSI-X vectors
1277 	 */
1278 	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1279 		/* how many fewer vectors will we have? */
1280 		int diff = req_cnt - rc;
1281 
1282 		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1283 
1284 		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1285 
1286 		if (rc) {
1287 			BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1288 			return rc;
1289 		}
1290 		/*
1291 		 * decrease number of queues by number of unallocated entries
1292 		 */
1293 		bp->num_queues -= diff;
1294 
1295 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
1296 				  bp->num_queues);
1297 	} else if (rc) {
1298 		/* fall back to INTx if not enough memory */
1299 		if (rc == -ENOMEM)
1300 			bp->flags |= DISABLE_MSI_FLAG;
1301 		BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1302 		return rc;
1303 	}
1304 
1305 	bp->flags |= USING_MSIX_FLAG;
1306 
1307 	return 0;
1308 }
1309 
1310 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1311 {
1312 	int i, rc, offset = 0;
1313 
1314 	rc = request_irq(bp->msix_table[offset++].vector,
1315 			 bnx2x_msix_sp_int, 0,
1316 			 bp->dev->name, bp->dev);
1317 	if (rc) {
1318 		BNX2X_ERR("request sp irq failed\n");
1319 		return -EBUSY;
1320 	}
1321 
1322 #ifdef BCM_CNIC
1323 	offset++;
1324 #endif
1325 	for_each_eth_queue(bp, i) {
1326 		struct bnx2x_fastpath *fp = &bp->fp[i];
1327 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1328 			 bp->dev->name, i);
1329 
1330 		rc = request_irq(bp->msix_table[offset].vector,
1331 				 bnx2x_msix_fp_int, 0, fp->name, fp);
1332 		if (rc) {
1333 			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1334 			      bp->msix_table[offset].vector, rc);
1335 			bnx2x_free_msix_irqs(bp, offset);
1336 			return -EBUSY;
1337 		}
1338 
1339 		offset++;
1340 	}
1341 
1342 	i = BNX2X_NUM_ETH_QUEUES(bp);
1343 	offset = 1 + CNIC_PRESENT;
1344 	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1345 	       bp->msix_table[0].vector,
1346 	       0, bp->msix_table[offset].vector,
1347 	       i - 1, bp->msix_table[offset + i - 1].vector);
1348 
1349 	return 0;
1350 }
1351 
1352 int bnx2x_enable_msi(struct bnx2x *bp)
1353 {
1354 	int rc;
1355 
1356 	rc = pci_enable_msi(bp->pdev);
1357 	if (rc) {
1358 		BNX2X_DEV_INFO("MSI is not attainable\n");
1359 		return -1;
1360 	}
1361 	bp->flags |= USING_MSI_FLAG;
1362 
1363 	return 0;
1364 }
1365 
1366 static int bnx2x_req_irq(struct bnx2x *bp)
1367 {
1368 	unsigned long flags;
1369 	int rc;
1370 
1371 	if (bp->flags & USING_MSI_FLAG)
1372 		flags = 0;
1373 	else
1374 		flags = IRQF_SHARED;
1375 
1376 	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1377 			 bp->dev->name, bp->dev);
1378 	return rc;
1379 }
1380 
1381 static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1382 {
1383 	int rc = 0;
1384 	if (bp->flags & USING_MSIX_FLAG) {
1385 		rc = bnx2x_req_msix_irqs(bp);
1386 		if (rc)
1387 			return rc;
1388 	} else {
1389 		bnx2x_ack_int(bp);
1390 		rc = bnx2x_req_irq(bp);
1391 		if (rc) {
1392 			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1393 			return rc;
1394 		}
1395 		if (bp->flags & USING_MSI_FLAG) {
1396 			bp->dev->irq = bp->pdev->irq;
1397 			netdev_info(bp->dev, "using MSI  IRQ %d\n",
1398 			       bp->pdev->irq);
1399 		}
1400 	}
1401 
1402 	return 0;
1403 }
1404 
1405 static inline void bnx2x_napi_enable(struct bnx2x *bp)
1406 {
1407 	int i;
1408 
1409 	for_each_rx_queue(bp, i)
1410 		napi_enable(&bnx2x_fp(bp, i, napi));
1411 }
1412 
1413 static inline void bnx2x_napi_disable(struct bnx2x *bp)
1414 {
1415 	int i;
1416 
1417 	for_each_rx_queue(bp, i)
1418 		napi_disable(&bnx2x_fp(bp, i, napi));
1419 }
1420 
1421 void bnx2x_netif_start(struct bnx2x *bp)
1422 {
1423 	if (netif_running(bp->dev)) {
1424 		bnx2x_napi_enable(bp);
1425 		bnx2x_int_enable(bp);
1426 		if (bp->state == BNX2X_STATE_OPEN)
1427 			netif_tx_wake_all_queues(bp->dev);
1428 	}
1429 }
1430 
1431 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1432 {
1433 	bnx2x_int_disable_sync(bp, disable_hw);
1434 	bnx2x_napi_disable(bp);
1435 }
1436 
1437 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1438 {
1439 	struct bnx2x *bp = netdev_priv(dev);
1440 
1441 #ifdef BCM_CNIC
1442 	if (!NO_FCOE(bp)) {
1443 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
1444 		u16 ether_type = ntohs(hdr->h_proto);
1445 
1446 		/* Skip VLAN tag if present */
1447 		if (ether_type == ETH_P_8021Q) {
1448 			struct vlan_ethhdr *vhdr =
1449 				(struct vlan_ethhdr *)skb->data;
1450 
1451 			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1452 		}
1453 
1454 		/* If ethertype is FCoE or FIP - use FCoE ring */
1455 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1456 			return bnx2x_fcoe_tx(bp, txq_index);
1457 	}
1458 #endif
1459 	/* select a non-FCoE queue */
1460 	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1461 }
1462 
1463 void bnx2x_set_num_queues(struct bnx2x *bp)
1464 {
1465 	switch (bp->multi_mode) {
1466 	case ETH_RSS_MODE_DISABLED:
1467 		bp->num_queues = 1;
1468 		break;
1469 	case ETH_RSS_MODE_REGULAR:
1470 		bp->num_queues = bnx2x_calc_num_queues(bp);
1471 		break;
1472 
1473 	default:
1474 		bp->num_queues = 1;
1475 		break;
1476 	}
1477 
1478 #ifdef BCM_CNIC
1479 	/* override in STORAGE SD mode */
1480 	if (IS_MF_STORAGE_SD(bp))
1481 		bp->num_queues = 1;
1482 #endif
1483 	/* Add special queues */
1484 	bp->num_queues += NON_ETH_CONTEXT_USE;
1485 }
1486 
1487 /**
1488  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1489  *
1490  * @bp:		Driver handle
1491  *
1492  * We currently support at most 16 Tx queues for each CoS, thus we will
1493  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1494  * bp->max_cos.
1495  *
1496  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1497  * index after all ETH L2 indices.
1498  *
1499  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1500  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1501  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1502  *
1503  * The proper configuration of skb->queue_mapping is handled by
1504  * bnx2x_select_queue() and __skb_tx_hash().
1505  *
1506  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1507  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1508  */
1509 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1510 {
1511 	int rc, tx, rx;
1512 
1513 	tx = MAX_TXQS_PER_COS * bp->max_cos;
1514 	rx = BNX2X_NUM_ETH_QUEUES(bp);
1515 
1516 /* account for fcoe queue */
1517 #ifdef BCM_CNIC
1518 	if (!NO_FCOE(bp)) {
1519 		rx += FCOE_PRESENT;
1520 		tx += FCOE_PRESENT;
1521 	}
1522 #endif
1523 
1524 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
1525 	if (rc) {
1526 		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1527 		return rc;
1528 	}
1529 	rc = netif_set_real_num_rx_queues(bp->dev, rx);
1530 	if (rc) {
1531 		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1532 		return rc;
1533 	}
1534 
1535 	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1536 			  tx, rx);
1537 
1538 	return rc;
1539 }
1540 
1541 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1542 {
1543 	int i;
1544 
1545 	for_each_queue(bp, i) {
1546 		struct bnx2x_fastpath *fp = &bp->fp[i];
1547 		u32 mtu;
1548 
1549 		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
1550 		if (IS_FCOE_IDX(i))
1551 			/*
1552 			 * Although there are no IP frames expected to arrive on
1553 			 * this ring we still want to add an
1554 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1555 			 * overrun attack.
1556 			 */
1557 			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1558 		else
1559 			mtu = bp->dev->mtu;
1560 		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1561 				  IP_HEADER_ALIGNMENT_PADDING +
1562 				  ETH_OVREHEAD +
1563 				  mtu +
1564 				  BNX2X_FW_RX_ALIGN_END;
1565 		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
1566 	}
1567 }
1568 
1569 static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1570 {
1571 	int i;
1572 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1573 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1574 
1575 	/*
1576 	 * Prepare the initial contents of the indirection table if RSS is
1577 	 * enabled
1578 	 */
1579 	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
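		/* each entry maps an RSS hash bucket to the client ID of one
		 * of the ETH queues, spread with the default ethtool
		 * distribution
		 */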
1580 		for (i = 0; i < sizeof(ind_table); i++)
1581 			ind_table[i] =
1582 				bp->fp->cl_id +
1583 				ethtool_rxfh_indir_default(i, num_eth_queues);
1584 	}
1585 
1586 	/*
1587 	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1588 	 * per-port, so if explicit configuration is needed, do it only
1589 	 * for a PMF.
1590 	 *
1591 	 * For 57712 and newer on the other hand it's a per-function
1592 	 * configuration.
1593 	 */
1594 	return bnx2x_config_rss_pf(bp, ind_table,
1595 				   bp->port.pmf || !CHIP_IS_E1x(bp));
1596 }
1597 
1598 int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1599 {
1600 	struct bnx2x_config_rss_params params = {NULL};
1601 	int i;
1602 
1603 	/* Although RSS is meaningless when there is a single HW queue we
1604 	 * still need it enabled in order to have HW Rx hash generated.
1605 	 *
1606 	 * if (!is_eth_multi(bp))
1607 	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1608 	 */
1609 
1610 	params.rss_obj = &bp->rss_conf_obj;
1611 
1612 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1613 
1614 	/* RSS mode */
1615 	switch (bp->multi_mode) {
1616 	case ETH_RSS_MODE_DISABLED:
1617 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1618 		break;
1619 	case ETH_RSS_MODE_REGULAR:
1620 		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1621 		break;
1622 	case ETH_RSS_MODE_VLAN_PRI:
1623 		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1624 		break;
1625 	case ETH_RSS_MODE_E1HOV_PRI:
1626 		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1627 		break;
1628 	case ETH_RSS_MODE_IP_DSCP:
1629 		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1630 		break;
1631 	default:
1632 		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1633 		return -EINVAL;
1634 	}
1635 
1636 	/* If RSS is enabled */
1637 	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1638 		/* RSS configuration */
1639 		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1640 		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1641 		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1642 		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1643 
1644 		/* Hash bits */
1645 		params.rss_result_mask = MULTI_MASK;
1646 
1647 		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1648 
1649 		if (config_hash) {
1650 			/* RSS keys */
1651 			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1652 				params.rss_key[i] = random32();
1653 
1654 			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1655 		}
1656 	}
1657 
1658 	return bnx2x_config_rss(bp, &params);
1659 }
1660 
1661 static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1662 {
1663 	struct bnx2x_func_state_params func_params = {NULL};
1664 
1665 	/* Prepare parameters for function state transitions */
1666 	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1667 
1668 	func_params.f_obj = &bp->func_obj;
1669 	func_params.cmd = BNX2X_F_CMD_HW_INIT;
1670 
1671 	func_params.params.hw_init.load_phase = load_code;
1672 
1673 	return bnx2x_func_state_change(bp, &func_params);
1674 }
1675 
1676 /*
1677  * Cleans the objects that have internal lists without sending
1678  * ramrods. Should be run when interrupts are disabled.
1679  */
1680 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1681 {
1682 	int rc;
1683 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1684 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
1685 	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1686 
1687 	/***************** Cleanup MACs' object first *************************/
1688 
1689 	/* Wait for completion of the requested commands */
1690 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1691 	/* Perform a dry cleanup */
1692 	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
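	/* i.e. only the driver's internal lists are cleaned; no ramrods are
	 * sent to the FW (see the comment above this function)
	 */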
1693 
1694 	/* Clean ETH primary MAC */
1695 	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1696 	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1697 				 &ramrod_flags);
1698 	if (rc != 0)
1699 		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1700 
1701 	/* Cleanup UC list */
1702 	vlan_mac_flags = 0;
1703 	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1704 	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1705 				 &ramrod_flags);
1706 	if (rc != 0)
1707 		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1708 
1709 	/***************** Now clean mcast object *****************************/
1710 	rparam.mcast_obj = &bp->mcast_obj;
1711 	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1712 
1713 	/* Add a DEL command... */
1714 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1715 	if (rc < 0)
1716 		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1717 			  rc);
1718 
1719 	/* ...and wait until all pending commands are cleared */
1720 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1721 	while (rc != 0) {
1722 		if (rc < 0) {
1723 			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1724 				  rc);
1725 			return;
1726 		}
1727 
1728 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1729 	}
1730 }
1731 
1732 #ifndef BNX2X_STOP_ON_ERROR
1733 #define LOAD_ERROR_EXIT(bp, label) \
1734 	do { \
1735 		(bp)->state = BNX2X_STATE_ERROR; \
1736 		goto label; \
1737 	} while (0)
1738 #else
1739 #define LOAD_ERROR_EXIT(bp, label) \
1740 	do { \
1741 		(bp)->state = BNX2X_STATE_ERROR; \
1742 		(bp)->panic = 1; \
1743 		return -EBUSY; \
1744 	} while (0)
1745 #endif
1746 
1747 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1748 {
1749 	/* build FW version dword */
1750 	u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1751 		    (BCM_5710_FW_MINOR_VERSION << 8) +
1752 		    (BCM_5710_FW_REVISION_VERSION << 16) +
1753 		    (BCM_5710_FW_ENGINEERING_VERSION << 24);
1754 
1755 	/* read loaded FW from chip */
1756 	u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1757 
1758 	DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1759 
1760 	if (loaded_fw != my_fw) {
1761 		if (is_err)
1762 			BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1763 				  loaded_fw, my_fw);
1764 		return false;
1765 	}
1766 
1767 	return true;
1768 }
1769 
1770 /* must be called with rtnl_lock */
1771 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1772 {
1773 	int port = BP_PORT(bp);
1774 	u32 load_code;
1775 	int i, rc;
1776 
1777 #ifdef BNX2X_STOP_ON_ERROR
1778 	if (unlikely(bp->panic)) {
1779 		BNX2X_ERR("Can't load NIC when there is panic\n");
1780 		return -EPERM;
1781 	}
1782 #endif
1783 
1784 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1785 
1786 	/* Set the initial link reported state to link down */
1787 	bnx2x_acquire_phy_lock(bp);
1788 	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1789 	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1790 		&bp->last_reported_link.link_report_flags);
1791 	bnx2x_release_phy_lock(bp);
1792 
1793 	/* must be called before memory allocation and HW init */
1794 	bnx2x_ilt_set_info(bp);
1795 
1796 	/*
1797 	 * Zero fastpath structures preserving invariants like napi, which are
1798 	 * allocated only once, fp index, max_cos, bp pointer.
1799 	 * Also set fp->disable_tpa.
1800 	 */
1801 	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
1802 	for_each_queue(bp, i)
1803 		bnx2x_bz_fp(bp, i);
1804 
1805 
1806 	/* Set the receive queues buffer size */
1807 	bnx2x_set_rx_buf_size(bp);
1808 
1809 	if (bnx2x_alloc_mem(bp))
1810 		return -ENOMEM;
1811 
1812 	/* As long as bnx2x_alloc_mem() may possibly update
1813 	 * bp->num_queues, bnx2x_set_real_num_queues() should always
1814 	 * come after it.
1815 	 */
1816 	rc = bnx2x_set_real_num_queues(bp);
1817 	if (rc) {
1818 		BNX2X_ERR("Unable to set real_num_queues\n");
1819 		LOAD_ERROR_EXIT(bp, load_error0);
1820 	}
1821 
1822 	/* Configure multi-CoS mappings in the kernel.
1823 	 * This configuration may be overridden by a multi-class queue
1824 	 * discipline or by a DCBX negotiation result.
1825 	 */
1826 	bnx2x_setup_tc(bp->dev, bp->max_cos);
1827 
1828 	bnx2x_napi_enable(bp);
1829 
1830 	/* set pf load just before approaching the MCP */
1831 	bnx2x_set_pf_load(bp);
1832 
1833 	/* Send the LOAD_REQUEST command to the MCP.
1834 	 * The response tells which type of LOAD to perform:
1835 	 * if this is the first port to be initialized,
1836 	 * the common blocks should be initialized as well, otherwise not.
1837 	 */
1838 	if (!BP_NOMCP(bp)) {
1839 		/* init fw_seq */
1840 		bp->fw_seq =
1841 			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1842 			 DRV_MSG_SEQ_NUMBER_MASK);
1843 		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1844 
1845 		/* Get current FW pulse sequence */
1846 		bp->fw_drv_pulse_wr_seq =
1847 			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
1848 			 DRV_PULSE_SEQ_MASK);
1849 		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
1850 
1851 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1852 		if (!load_code) {
1853 			BNX2X_ERR("MCP response failure, aborting\n");
1854 			rc = -EBUSY;
1855 			LOAD_ERROR_EXIT(bp, load_error1);
1856 		}
1857 		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1858 			BNX2X_ERR("Driver load refused\n");
1859 			rc = -EBUSY; /* other port in diagnostic mode */
1860 			LOAD_ERROR_EXIT(bp, load_error1);
1861 		}
1862 		if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
1863 		    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
1864 			/* abort nic load if version mismatch */
1865 			if (!bnx2x_test_firmware_version(bp, true)) {
1866 				rc = -EBUSY;
1867 				LOAD_ERROR_EXIT(bp, load_error2);
1868 			}
1869 		}
1870 
1871 	} else {
1872 		int path = BP_PATH(bp);
1873 
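		/* No management FW to arbitrate the load - emulate the MCP
		 * decision in software with per-path load counters: the first
		 * function on the path performs COMMON init, the first one on
		 * a port performs PORT init, everyone else only FUNCTION init.
		 */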
1874 		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1875 		   path, load_count[path][0], load_count[path][1],
1876 		   load_count[path][2]);
1877 		load_count[path][0]++;
1878 		load_count[path][1 + port]++;
1879 		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1880 		   path, load_count[path][0], load_count[path][1],
1881 		   load_count[path][2]);
1882 		if (load_count[path][0] == 1)
1883 			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1884 		else if (load_count[path][1 + port] == 1)
1885 			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1886 		else
1887 			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1888 	}
1889 
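	/* A function that was granted COMMON or PORT initialization becomes
	 * the port management function (PMF) and takes over the per-port
	 * housekeeping (e.g. link handling in bnx2x_periodic_task()).
	 */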
1890 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1891 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1892 	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
1893 		bp->port.pmf = 1;
1894 		/*
1895 		 * We need the barrier to ensure the ordering between the
1896 		 * writing to bp->port.pmf here and reading it from the
1897 		 * bnx2x_periodic_task().
1898 		 */
1899 		smp_mb();
1900 	} else
1901 		bp->port.pmf = 0;
1902 
1903 	DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
1904 
1905 	/* Init Function state controlling object */
1906 	bnx2x__init_func_obj(bp);
1907 
1908 	/* Initialize HW */
1909 	rc = bnx2x_init_hw(bp, load_code);
1910 	if (rc) {
1911 		BNX2X_ERR("HW init failed, aborting\n");
1912 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1913 		LOAD_ERROR_EXIT(bp, load_error2);
1914 	}
1915 
1916 	/* Connect to IRQs */
1917 	rc = bnx2x_setup_irqs(bp);
1918 	if (rc) {
1919 		BNX2X_ERR("IRQs setup failed\n");
1920 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1921 		LOAD_ERROR_EXIT(bp, load_error2);
1922 	}
1923 
1924 	/* Setup NIC internals and enable interrupts */
1925 	bnx2x_nic_init(bp, load_code);
1926 
1927 	/* Init per-function objects */
1928 	bnx2x_init_bp_objs(bp);
1929 
1930 	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1931 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1932 	    (bp->common.shmem2_base)) {
1933 		if (SHMEM2_HAS(bp, dcc_support))
1934 			SHMEM2_WR(bp, dcc_support,
1935 				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1936 				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1937 	}
1938 
1939 	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1940 	rc = bnx2x_func_start(bp);
1941 	if (rc) {
1942 		BNX2X_ERR("Function start failed!\n");
1943 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1944 		LOAD_ERROR_EXIT(bp, load_error3);
1945 	}
1946 
1947 	/* Send LOAD_DONE command to MCP */
1948 	if (!BP_NOMCP(bp)) {
1949 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1950 		if (!load_code) {
1951 			BNX2X_ERR("MCP response failure, aborting\n");
1952 			rc = -EBUSY;
1953 			LOAD_ERROR_EXIT(bp, load_error3);
1954 		}
1955 	}
1956 
1957 	rc = bnx2x_setup_leading(bp);
1958 	if (rc) {
1959 		BNX2X_ERR("Setup leading failed!\n");
1960 		LOAD_ERROR_EXIT(bp, load_error3);
1961 	}
1962 
1963 #ifdef BCM_CNIC
1964 	/* Enable Timer scan */
1965 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
1966 #endif
1967 
1968 	for_each_nondefault_queue(bp, i) {
1969 		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
1970 		if (rc) {
1971 			BNX2X_ERR("Queue setup failed\n");
1972 			LOAD_ERROR_EXIT(bp, load_error4);
1973 		}
1974 	}
1975 
1976 	rc = bnx2x_init_rss_pf(bp);
1977 	if (rc) {
1978 		BNX2X_ERR("PF RSS init failed\n");
1979 		LOAD_ERROR_EXIT(bp, load_error4);
1980 	}
1981 
1982 	/* Now that the clients are configured we are ready to work */
1983 	bp->state = BNX2X_STATE_OPEN;
1984 
1985 	/* Configure a ucast MAC */
1986 	rc = bnx2x_set_eth_mac(bp, true);
1987 	if (rc) {
1988 		BNX2X_ERR("Setting Ethernet MAC failed\n");
1989 		LOAD_ERROR_EXIT(bp, load_error4);
1990 	}
1991 
1992 	if (bp->pending_max) {
1993 		bnx2x_update_max_mf_config(bp, bp->pending_max);
1994 		bp->pending_max = 0;
1995 	}
1996 
1997 	if (bp->port.pmf)
1998 		bnx2x_initial_phy_init(bp, load_mode);
1999 
2000 	/* Start fast path */
2001 
2002 	/* Initialize Rx filter. */
2003 	netif_addr_lock_bh(bp->dev);
2004 	bnx2x_set_rx_mode(bp->dev);
2005 	netif_addr_unlock_bh(bp->dev);
2006 
2007 	/* Start the Tx */
2008 	switch (load_mode) {
2009 	case LOAD_NORMAL:
2010 		/* Tx queues should only be re-enabled */
2011 		netif_tx_wake_all_queues(bp->dev);
2012 		break;
2013 
2014 	case LOAD_OPEN:
2015 		netif_tx_start_all_queues(bp->dev);
2016 		smp_mb__after_clear_bit();
2017 		break;
2018 
2019 	case LOAD_DIAG:
2020 		bp->state = BNX2X_STATE_DIAG;
2021 		break;
2022 
2023 	default:
2024 		break;
2025 	}
2026 
2027 	if (bp->port.pmf)
2028 		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
2029 	else
2030 		bnx2x__link_status_update(bp);
2031 
2032 	/* start the timer */
2033 	mod_timer(&bp->timer, jiffies + bp->current_interval);
2034 
2035 #ifdef BCM_CNIC
2036 	/* re-read iscsi info */
2037 	bnx2x_get_iscsi_info(bp);
2038 	bnx2x_setup_cnic_irq_info(bp);
2039 	if (bp->state == BNX2X_STATE_OPEN)
2040 		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2041 #endif
2042 
2043 	/* mark driver is loaded in shmem2 */
2044 	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2045 		u32 val;
2046 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2047 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2048 			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2049 			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
2050 	}
2051 
2052 	/* Wait for all pending SP commands to complete */
2053 	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2054 		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2055 		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2056 		return -EBUSY;
2057 	}
2058 
2059 	bnx2x_dcbx_init(bp);
2060 	return 0;
2061 
2062 #ifndef BNX2X_STOP_ON_ERROR
2063 load_error4:
2064 #ifdef BCM_CNIC
2065 	/* Disable Timer scan */
2066 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2067 #endif
2068 load_error3:
2069 	bnx2x_int_disable_sync(bp, 1);
2070 
2071 	/* Clean queueable objects */
2072 	bnx2x_squeeze_objects(bp);
2073 
2074 	/* Free SKBs, SGEs, TPA pool and driver internals */
2075 	bnx2x_free_skbs(bp);
2076 	for_each_rx_queue(bp, i)
2077 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2078 
2079 	/* Release IRQs */
2080 	bnx2x_free_irq(bp);
2081 load_error2:
2082 	if (!BP_NOMCP(bp)) {
2083 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2084 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2085 	}
2086 
2087 	bp->port.pmf = 0;
2088 load_error1:
2089 	bnx2x_napi_disable(bp);
2090 	/* clear pf_load status, as it was already set */
2091 	bnx2x_clear_pf_load(bp);
2092 load_error0:
2093 	bnx2x_free_mem(bp);
2094 
2095 	return rc;
2096 #endif /* ! BNX2X_STOP_ON_ERROR */
2097 }
2098 
2099 /* must be called with rtnl_lock */
2100 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2101 {
2102 	int i;
2103 	bool global = false;
2104 
2105 	/* mark driver is unloaded in shmem2 */
2106 	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2107 		u32 val;
2108 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2109 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2110 			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2111 	}
2112 
2113 	if ((bp->state == BNX2X_STATE_CLOSED) ||
2114 	    (bp->state == BNX2X_STATE_ERROR)) {
2115 		/* We can get here if the driver has been unloaded
2116 		 * during parity error recovery and is either waiting for a
2117 		 * leader to complete or for other functions to unload and
2118 		 * then ifdown has been issued. In this case we want to
2119 		 * unload and let the other functions complete the recovery
2120 		 * process.
2121 		 */
2122 		bp->recovery_state = BNX2X_RECOVERY_DONE;
2123 		bp->is_leader = 0;
2124 		bnx2x_release_leader_lock(bp);
2125 		smp_mb();
2126 
2127 		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2128 		BNX2X_ERR("Can't unload in closed or error state\n");
2129 		return -EINVAL;
2130 	}
2131 
2132 	/*
2133 	 * It's important to set bp->state to a value different from
2134 	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2135 	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2136 	 */
2137 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2138 	smp_mb();
2139 
2140 	/* Stop Tx */
2141 	bnx2x_tx_disable(bp);
2142 
2143 #ifdef BCM_CNIC
2144 	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2145 #endif
2146 
2147 	bp->rx_mode = BNX2X_RX_MODE_NONE;
2148 
2149 	del_timer_sync(&bp->timer);
2150 
2151 	/* Set ALWAYS_ALIVE bit in shmem */
2152 	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2153 
2154 	bnx2x_drv_pulse(bp);
2155 
2156 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2157 	bnx2x_save_statistics(bp);
2158 
2159 	/* Cleanup the chip if needed */
2160 	if (unload_mode != UNLOAD_RECOVERY)
2161 		bnx2x_chip_cleanup(bp, unload_mode);
2162 	else {
2163 		/* Send the UNLOAD_REQUEST to the MCP */
2164 		bnx2x_send_unload_req(bp, unload_mode);
2165 
2166 		/*
2167 		 * Prevent transactions to host from the functions on the
2168 		 * engine that doesn't reset global blocks in case of global
2169 		 * attention once global blocks are reset and gates are opened
2170 		 * (the engine whose leader will perform the recovery
2171 		 * last).
2172 		 */
2173 		if (!CHIP_IS_E1x(bp))
2174 			bnx2x_pf_disable(bp);
2175 
2176 		/* Disable HW interrupts, NAPI */
2177 		bnx2x_netif_stop(bp, 1);
2178 
2179 		/* Release IRQs */
2180 		bnx2x_free_irq(bp);
2181 
2182 		/* Report UNLOAD_DONE to MCP */
2183 		bnx2x_send_unload_done(bp);
2184 	}
2185 
2186 	/*
2187 	 * At this stage no more interrupts will arrive, so we may safely clean
2188 	 * the queueable objects here in case they failed to get cleaned so far.
2189 	 */
2190 	bnx2x_squeeze_objects(bp);
2191 
2192 	/* There should be no more pending SP commands at this stage */
2193 	bp->sp_state = 0;
2194 
2195 	bp->port.pmf = 0;
2196 
2197 	/* Free SKBs, SGEs, TPA pool and driver internals */
2198 	bnx2x_free_skbs(bp);
2199 	for_each_rx_queue(bp, i)
2200 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2201 
2202 	bnx2x_free_mem(bp);
2203 
2204 	bp->state = BNX2X_STATE_CLOSED;
2205 
2206 	/* Check if there are pending parity attentions. If there are - set
2207 	 * RECOVERY_IN_PROGRESS.
2208 	 */
2209 	if (bnx2x_chk_parity_attn(bp, &global, false)) {
2210 		bnx2x_set_reset_in_progress(bp);
2211 
2212 		/* Set RESET_IS_GLOBAL if needed */
2213 		if (global)
2214 			bnx2x_set_reset_global(bp);
2215 	}
2216 
2217 
2218 	/* The last driver must disable the "close the gate" functionality if
2219 	 * there is no parity attention or "process kill" pending.
2220 	 */
2221 	if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2222 		bnx2x_disable_close_the_gate(bp);
2223 
2224 	return 0;
2225 }
2226 
2227 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2228 {
2229 	u16 pmcsr;
2230 
2231 	/* If there is no power capability, silently succeed */
2232 	if (!bp->pm_cap) {
2233 		BNX2X_DEV_INFO("No power capability. Breaking.\n");
2234 		return 0;
2235 	}
2236 
2237 	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2238 
2239 	switch (state) {
2240 	case PCI_D0:
2241 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2242 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2243 				       PCI_PM_CTRL_PME_STATUS));
2244 
2245 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2246 			/* delay required during transition out of D3hot */
2247 			msleep(20);
2248 		break;
2249 
2250 	case PCI_D3hot:
2251 		/* If there are other clients above, don't
2252 		   shut down the power */
2253 		if (atomic_read(&bp->pdev->enable_cnt) != 1)
2254 			return 0;
2255 		/* Don't shut down the power for emulation and FPGA */
2256 		if (CHIP_REV_IS_SLOW(bp))
2257 			return 0;
2258 
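		/* Request D3hot: the PCI PM_CTRL PowerState field encodes
		 * D3hot as 3.
		 */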
2259 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2260 		pmcsr |= 3;
2261 
2262 		if (bp->wol)
2263 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2264 
2265 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2266 				      pmcsr);
2267 
2268 		/* No more memory access after this point until
2269 		 * device is brought back to D0.
2270 		 */
2271 		break;
2272 
2273 	default:
2274 		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2275 		return -EINVAL;
2276 	}
2277 	return 0;
2278 }
2279 
2280 /*
2281  * net_device service functions
2282  */
2283 int bnx2x_poll(struct napi_struct *napi, int budget)
2284 {
2285 	int work_done = 0;
2286 	u8 cos;
2287 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2288 						 napi);
2289 	struct bnx2x *bp = fp->bp;
2290 
2291 	while (1) {
2292 #ifdef BNX2X_STOP_ON_ERROR
2293 		if (unlikely(bp->panic)) {
2294 			napi_complete(napi);
2295 			return 0;
2296 		}
2297 #endif
2298 
2299 		for_each_cos_in_tx_queue(fp, cos)
2300 			if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2301 				bnx2x_tx_int(bp, &fp->txdata[cos]);
2302 
2303 
2304 		if (bnx2x_has_rx_work(fp)) {
2305 			work_done += bnx2x_rx_int(fp, budget - work_done);
2306 
2307 			/* must not complete if we consumed full budget */
2308 			if (work_done >= budget)
2309 				break;
2310 		}
2311 
2312 		/* Fall out from the NAPI loop if needed */
2313 		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2314 #ifdef BCM_CNIC
2315 			/* No need to update SB for FCoE L2 ring as long as
2316 			 * it's connected to the default SB and the SB
2317 			 * has been updated when NAPI was scheduled.
2318 			 */
2319 			if (IS_FCOE_FP(fp)) {
2320 				napi_complete(napi);
2321 				break;
2322 			}
2323 #endif
2324 
2325 			bnx2x_update_fpsb_idx(fp);
2326 			/* bnx2x_has_rx_work() reads the status block,
2327 			 * thus we need to ensure that status block indices
2328 			 * have been actually read (bnx2x_update_fpsb_idx)
2329 			 * prior to this check (bnx2x_has_rx_work) so that
2330 			 * we won't write the "newer" value of the status block
2331 			 * to IGU (if there was a DMA right after
2332 			 * bnx2x_has_rx_work and if there is no rmb, the memory
2333 			 * reading (bnx2x_update_fpsb_idx) may be postponed
2334 			 * to right before bnx2x_ack_sb). In this case there
2335 			 * will never be another interrupt until there is
2336 			 * another update of the status block, while there
2337 			 * is still unhandled work.
2338 			 */
2339 			rmb();
2340 
2341 			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2342 				napi_complete(napi);
2343 				/* Re-enable interrupts */
2344 				DP(NETIF_MSG_RX_STATUS,
2345 				   "Update index to %d\n", fp->fp_hc_idx);
2346 				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2347 					     le16_to_cpu(fp->fp_hc_idx),
2348 					     IGU_INT_ENABLE, 1);
2349 				break;
2350 			}
2351 		}
2352 	}
2353 
2354 	return work_done;
2355 }
2356 
2357 /* We split the first BD into header and data BDs
2358  * to ease the pain of our fellow microcode engineers;
2359  * we use one mapping for both BDs.
2360  */
2361 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2362 				   struct bnx2x_fp_txdata *txdata,
2363 				   struct sw_tx_bd *tx_buf,
2364 				   struct eth_tx_start_bd **tx_bd, u16 hlen,
2365 				   u16 bd_prod, int nbd)
2366 {
2367 	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2368 	struct eth_tx_bd *d_tx_bd;
2369 	dma_addr_t mapping;
2370 	int old_len = le16_to_cpu(h_tx_bd->nbytes);
2371 
2372 	/* first fix first BD */
2373 	h_tx_bd->nbd = cpu_to_le16(nbd);
2374 	h_tx_bd->nbytes = cpu_to_le16(hlen);
2375 
2376 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x) nbd %d\n",
2377 	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2378 
2379 	/* now get a new data BD
2380 	 * (after the pbd) and fill it */
2381 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2382 	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2383 
2384 	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2385 			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2386 
2387 	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2388 	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2389 	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2390 
2391 	/* this marks the BD as one that has no individual mapping */
2392 	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2393 
2394 	DP(NETIF_MSG_TX_QUEUED,
2395 	   "TSO split data size is %d (%x:%x)\n",
2396 	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2397 
2398 	/* update tx_bd */
2399 	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2400 
2401 	return bd_prod;
2402 }
2403 
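/* Fix up a HW-computed checksum whose start offset is 'fix' bytes away from
 * the transport header: fold out the extra leading bytes (fix > 0) or fold in
 * the missing ones (fix < 0), and return the folded result byte-swapped as
 * the parsing BD expects it.
 */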
2404 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2405 {
2406 	if (fix > 0)
2407 		csum = (u16) ~csum_fold(csum_sub(csum,
2408 				csum_partial(t_header - fix, fix, 0)));
2409 
2410 	else if (fix < 0)
2411 		csum = (u16) ~csum_fold(csum_add(csum,
2412 				csum_partial(t_header, -fix, 0)));
2413 
2414 	return swab16(csum);
2415 }
2416 
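/* Classify an outgoing skb into a bitmask of XMIT_* flags (plain, IPv4/IPv6
 * checksum, TCP checksum, GSO) that the rest of the xmit path uses to decide
 * how to fill the start and parsing BDs.
 */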
2417 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2418 {
2419 	u32 rc;
2420 
2421 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2422 		rc = XMIT_PLAIN;
2423 
2424 	else {
2425 		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2426 			rc = XMIT_CSUM_V6;
2427 			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2428 				rc |= XMIT_CSUM_TCP;
2429 
2430 		} else {
2431 			rc = XMIT_CSUM_V4;
2432 			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2433 				rc |= XMIT_CSUM_TCP;
2434 		}
2435 	}
2436 
2437 	if (skb_is_gso_v6(skb))
2438 		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2439 	else if (skb_is_gso(skb))
2440 		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2441 
2442 	return rc;
2443 }
2444 
2445 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2446 /* check if the packet requires linearization (packet is too fragmented);
2447    no need to check fragmentation if the page size > 8K (there will be no
2448    violation of FW restrictions) */
2449 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2450 			     u32 xmit_type)
2451 {
2452 	int to_copy = 0;
2453 	int hlen = 0;
2454 	int first_bd_sz = 0;
2455 
2456 	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2457 	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2458 
2459 		if (xmit_type & XMIT_GSO) {
2460 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2461 			/* Check if LSO packet needs to be copied:
2462 			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2463 			int wnd_size = MAX_FETCH_BD - 3;
2464 			/* Number of windows to check */
2465 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2466 			int wnd_idx = 0;
2467 			int frag_idx = 0;
2468 			u32 wnd_sum = 0;
2469 
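			/* The FW requires that any window of wnd_size
			 * consecutive BDs carries at least one MSS worth of
			 * data; slide such a window across the frags and
			 * request linearization if any window falls short.
			 */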
2470 			/* Headers length */
2471 			hlen = (int)(skb_transport_header(skb) - skb->data) +
2472 				tcp_hdrlen(skb);
2473 
2474 			/* Amount of data (w/o headers) on linear part of SKB*/
2475 			first_bd_sz = skb_headlen(skb) - hlen;
2476 
2477 			wnd_sum  = first_bd_sz;
2478 
2479 			/* Calculate the first sum - it's special */
2480 			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2481 				wnd_sum +=
2482 					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2483 
2484 			/* If there was data on linear skb data - check it */
2485 			if (first_bd_sz > 0) {
2486 				if (unlikely(wnd_sum < lso_mss)) {
2487 					to_copy = 1;
2488 					goto exit_lbl;
2489 				}
2490 
2491 				wnd_sum -= first_bd_sz;
2492 			}
2493 
2494 			/* Others are easier: run through the frag list and
2495 			   check all windows */
2496 			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2497 				wnd_sum +=
2498 			  skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2499 
2500 				if (unlikely(wnd_sum < lso_mss)) {
2501 					to_copy = 1;
2502 					break;
2503 				}
2504 				wnd_sum -=
2505 					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2506 			}
2507 		} else {
2508 			/* in the non-LSO case, a too-fragmented packet
2509 			   should always be linearized */
2510 			to_copy = 1;
2511 		}
2512 	}
2513 
2514 exit_lbl:
2515 	if (unlikely(to_copy))
2516 		DP(NETIF_MSG_TX_QUEUED,
2517 		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
2518 		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2519 		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2520 
2521 	return to_copy;
2522 }
2523 #endif
2524 
2525 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2526 					u32 xmit_type)
2527 {
2528 	*parsing_data |= (skb_shinfo(skb)->gso_size <<
2529 			      ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2530 			      ETH_TX_PARSE_BD_E2_LSO_MSS;
2531 	if ((xmit_type & XMIT_GSO_V6) &&
2532 	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2533 		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2534 }
2535 
2536 /**
2537  * bnx2x_set_pbd_gso - update PBD in GSO case.
2538  *
2539  * @skb:	packet skb
2540  * @pbd:	parse BD
2541  * @xmit_type:	xmit flags
2542  */
2543 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2544 				     struct eth_tx_parse_bd_e1x *pbd,
2545 				     u32 xmit_type)
2546 {
2547 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2548 	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2549 	pbd->tcp_flags = pbd_tcp_flags(skb);
2550 
2551 	if (xmit_type & XMIT_GSO_V4) {
2552 		pbd->ip_id = swab16(ip_hdr(skb)->id);
2553 		pbd->tcp_pseudo_csum =
2554 			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2555 						  ip_hdr(skb)->daddr,
2556 						  0, IPPROTO_TCP, 0));
2557 
2558 	} else
2559 		pbd->tcp_pseudo_csum =
2560 			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2561 						&ipv6_hdr(skb)->daddr,
2562 						0, IPPROTO_TCP, 0));
2563 
2564 	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2565 }
2566 
2567 /**
2568  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2569  *
2570  * @bp:			driver handle
2571  * @skb:		packet skb
2572  * @parsing_data:	data to be updated
2573  * @xmit_type:		xmit flags
2574  *
2575  * 57712 related
2576  */
2577 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2578 	u32 *parsing_data, u32 xmit_type)
2579 {
2580 	*parsing_data |=
2581 			((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2582 			ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2583 			ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2584 
2585 	if (xmit_type & XMIT_CSUM_TCP) {
2586 		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2587 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2588 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2589 
2590 		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2591 	} else
2592 		/* We support checksum offload for TCP and UDP only.
2593 		 * No need to pass the UDP header length - it's a constant.
2594 		 */
2595 		return skb_transport_header(skb) +
2596 				sizeof(struct udphdr) - skb->data;
2597 }
2598 
2599 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2600 	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2601 {
2602 	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2603 
2604 	if (xmit_type & XMIT_CSUM_V4)
2605 		tx_start_bd->bd_flags.as_bitfield |=
2606 					ETH_TX_BD_FLAGS_IP_CSUM;
2607 	else
2608 		tx_start_bd->bd_flags.as_bitfield |=
2609 					ETH_TX_BD_FLAGS_IPV6;
2610 
2611 	if (!(xmit_type & XMIT_CSUM_TCP))
2612 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2613 }
2614 
2615 /**
2616  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2617  *
2618  * @bp:		driver handle
2619  * @skb:	packet skb
2620  * @pbd:	parse BD to be updated
2621  * @xmit_type:	xmit flags
2622  */
2623 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2624 	struct eth_tx_parse_bd_e1x *pbd,
2625 	u32 xmit_type)
2626 {
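	/* Header lengths are accumulated in 16-bit words here, since that is
	 * how the E1x parsing BD stores them (total_hlen_w); the value is
	 * converted back to bytes right before returning.
	 */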
2627 	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2628 
2629 	/* for now NS flag is not used in Linux */
2630 	pbd->global_data =
2631 		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2632 			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2633 
2634 	pbd->ip_hlen_w = (skb_transport_header(skb) -
2635 			skb_network_header(skb)) >> 1;
2636 
2637 	hlen += pbd->ip_hlen_w;
2638 
2639 	/* We support checksum offload for TCP and UDP only */
2640 	if (xmit_type & XMIT_CSUM_TCP)
2641 		hlen += tcp_hdrlen(skb) / 2;
2642 	else
2643 		hlen += sizeof(struct udphdr) / 2;
2644 
2645 	pbd->total_hlen_w = cpu_to_le16(hlen);
2646 	hlen = hlen*2;
2647 
2648 	if (xmit_type & XMIT_CSUM_TCP) {
2649 		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2650 
2651 	} else {
2652 		s8 fix = SKB_CS_OFF(skb); /* signed! */
2653 
2654 		DP(NETIF_MSG_TX_QUEUED,
2655 		   "hlen %d  fix %d  csum before fix %x\n",
2656 		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2657 
2658 		/* HW bug: fixup the CSUM */
2659 		pbd->tcp_pseudo_csum =
2660 			bnx2x_csum_fix(skb_transport_header(skb),
2661 				       SKB_CS(skb), fix);
2662 
2663 		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2664 		   pbd->tcp_pseudo_csum);
2665 	}
2666 
2667 	return hlen;
2668 }
2669 
2670 /* called with netif_tx_lock
2671  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2672  * netif_wake_queue()
2673  */
2674 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2675 {
2676 	struct bnx2x *bp = netdev_priv(dev);
2677 
2678 	struct bnx2x_fastpath *fp;
2679 	struct netdev_queue *txq;
2680 	struct bnx2x_fp_txdata *txdata;
2681 	struct sw_tx_bd *tx_buf;
2682 	struct eth_tx_start_bd *tx_start_bd, *first_bd;
2683 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2684 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2685 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2686 	u32 pbd_e2_parsing_data = 0;
2687 	u16 pkt_prod, bd_prod;
2688 	int nbd, txq_index, fp_index, txdata_index;
2689 	dma_addr_t mapping;
2690 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
2691 	int i;
2692 	u8 hlen = 0;
2693 	__le16 pkt_size = 0;
2694 	struct ethhdr *eth;
2695 	u8 mac_type = UNICAST_ADDRESS;
2696 
2697 #ifdef BNX2X_STOP_ON_ERROR
2698 	if (unlikely(bp->panic))
2699 		return NETDEV_TX_BUSY;
2700 #endif
2701 
2702 	txq_index = skb_get_queue_mapping(skb);
2703 	txq = netdev_get_tx_queue(dev, txq_index);
2704 
2705 	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2706 
2707 	/* decode the fastpath index and the cos index from the txq */
2708 	fp_index = TXQ_TO_FP(txq_index);
2709 	txdata_index = TXQ_TO_COS(txq_index);
2710 
2711 #ifdef BCM_CNIC
2712 	/*
2713 	 * Override the above for the FCoE queue:
2714 	 *   - FCoE fp entry is right after the ETH entries.
2715 	 *   - FCoE L2 queue uses bp->txdata[0] only.
2716 	 */
2717 	if (unlikely(!NO_FCOE(bp) && (txq_index ==
2718 				      bnx2x_fcoe_tx(bp, txq_index)))) {
2719 		fp_index = FCOE_IDX;
2720 		txdata_index = 0;
2721 	}
2722 #endif
2723 
2724 	/* enable this debug print to view the transmission queue being used
2725 	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2726 	   txq_index, fp_index, txdata_index); */
2727 
2728 	/* locate the fastpath and the txdata */
2729 	fp = &bp->fp[fp_index];
2730 	txdata = &fp->txdata[txdata_index];
2731 
2732 	/* enable this debug print to view the transmission details
2733 	DP(NETIF_MSG_TX_QUEUED,
2734 	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
2735 	   txdata->cid, fp_index, txdata_index, txdata, fp); */
2736 
2737 	if (unlikely(bnx2x_tx_avail(bp, txdata) <
2738 		     (skb_shinfo(skb)->nr_frags + 3))) {
2739 		fp->eth_q_stats.driver_xoff++;
2740 		netif_tx_stop_queue(txq);
2741 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2742 		return NETDEV_TX_BUSY;
2743 	}
2744 
2745 	DP(NETIF_MSG_TX_QUEUED,
2746 	   "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x\n",
2747 	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2748 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2749 
2750 	eth = (struct ethhdr *)skb->data;
2751 
2752 	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
2753 	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2754 		if (is_broadcast_ether_addr(eth->h_dest))
2755 			mac_type = BROADCAST_ADDRESS;
2756 		else
2757 			mac_type = MULTICAST_ADDRESS;
2758 	}
2759 
2760 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2761 	/* First, check if we need to linearize the skb (due to FW
2762 	   restrictions). No need to check fragmentation if page size > 8K
2763 	   (there will be no violation to FW restrictions) */
2764 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2765 		/* Statistics of linearization */
2766 		bp->lin_cnt++;
2767 		if (skb_linearize(skb) != 0) {
2768 			DP(NETIF_MSG_TX_QUEUED,
2769 			   "SKB linearization failed - silently dropping this SKB\n");
2770 			dev_kfree_skb_any(skb);
2771 			return NETDEV_TX_OK;
2772 		}
2773 	}
2774 #endif
2775 	/* Map skb linear data for DMA */
2776 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
2777 				 skb_headlen(skb), DMA_TO_DEVICE);
2778 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2779 		DP(NETIF_MSG_TX_QUEUED,
2780 		   "SKB mapping failed - silently dropping this SKB\n");
2781 		dev_kfree_skb_any(skb);
2782 		return NETDEV_TX_OK;
2783 	}
2784 	/*
2785 	Please read carefully. First we use one BD which we mark as start,
2786 	then we have a parsing info BD (used for TSO or xsum),
2787 	and only then we have the rest of the TSO BDs.
2788 	(don't forget to mark the last one as last,
2789 	and to unmap only AFTER you write to the BD ...)
2790 	And above all, all PBD sizes are in words - NOT DWORDS!
2791 	*/
2792 
2793 	/* get current pkt produced now - advance it just before sending packet
2794 	 * since mapping of pages may fail and cause packet to be dropped
2795 	 */
2796 	pkt_prod = txdata->tx_pkt_prod;
2797 	bd_prod = TX_BD(txdata->tx_bd_prod);
2798 
2799 	/* get a tx_buf and first BD
2800 	 * tx_start_bd may be changed during SPLIT,
2801 	 * but first_bd will always stay first
2802 	 */
2803 	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2804 	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2805 	first_bd = tx_start_bd;
2806 
2807 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2808 	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2809 		 mac_type);
2810 
2811 	/* header nbd */
2812 	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2813 
2814 	/* remember the first BD of the packet */
2815 	tx_buf->first_bd = txdata->tx_bd_prod;
2816 	tx_buf->skb = skb;
2817 	tx_buf->flags = 0;
2818 
2819 	DP(NETIF_MSG_TX_QUEUED,
2820 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2821 	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2822 
2823 	if (vlan_tx_tag_present(skb)) {
2824 		tx_start_bd->vlan_or_ethertype =
2825 		    cpu_to_le16(vlan_tx_tag_get(skb));
2826 		tx_start_bd->bd_flags.as_bitfield |=
2827 		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2828 	} else
2829 		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2830 
2831 	/* turn on parsing and get a BD */
2832 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2833 
2834 	if (xmit_type & XMIT_CSUM)
2835 		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2836 
2837 	if (!CHIP_IS_E1x(bp)) {
2838 		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2839 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2840 		/* Set PBD in checksum offload case */
2841 		if (xmit_type & XMIT_CSUM)
2842 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2843 						     &pbd_e2_parsing_data,
2844 						     xmit_type);
2845 		if (IS_MF_SI(bp)) {
2846 			/*
2847 			 * fill in the MAC addresses in the PBD - for local
2848 			 * switching
2849 			 */
2850 			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2851 					      &pbd_e2->src_mac_addr_mid,
2852 					      &pbd_e2->src_mac_addr_lo,
2853 					      eth->h_source);
2854 			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2855 					      &pbd_e2->dst_mac_addr_mid,
2856 					      &pbd_e2->dst_mac_addr_lo,
2857 					      eth->h_dest);
2858 		}
2859 	} else {
2860 		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2861 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2862 		/* Set PBD in checksum offload case */
2863 		if (xmit_type & XMIT_CSUM)
2864 			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2865 
2866 	}
2867 
2868 	/* Setup the data pointer of the first BD of the packet */
2869 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2870 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2871 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
2872 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2873 	pkt_size = tx_start_bd->nbytes;
2874 
2875 	DP(NETIF_MSG_TX_QUEUED,
2876 	   "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
2877 	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2878 	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2879 	   tx_start_bd->bd_flags.as_bitfield,
2880 	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2881 
2882 	if (xmit_type & XMIT_GSO) {
2883 
2884 		DP(NETIF_MSG_TX_QUEUED,
2885 		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2886 		   skb->len, hlen, skb_headlen(skb),
2887 		   skb_shinfo(skb)->gso_size);
2888 
2889 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2890 
2891 		if (unlikely(skb_headlen(skb) > hlen))
2892 			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2893 						 &tx_start_bd, hlen,
2894 						 bd_prod, ++nbd);
2895 		if (!CHIP_IS_E1x(bp))
2896 			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2897 					     xmit_type);
2898 		else
2899 			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2900 	}
2901 
2902 	/* Set the PBD's parsing_data field if not zero
2903 	 * (for the chips newer than 57711).
2904 	 */
2905 	if (pbd_e2_parsing_data)
2906 		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2907 
2908 	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2909 
2910 	/* Handle fragmented skb */
2911 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2912 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2913 
2914 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2915 					   skb_frag_size(frag), DMA_TO_DEVICE);
2916 		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2917 			unsigned int pkts_compl = 0, bytes_compl = 0;
2918 
2919 			DP(NETIF_MSG_TX_QUEUED,
2920 			   "Unable to map page - dropping packet...\n");
2921 
2922 			/* we need to unmap all buffers already mapped
2923 			 * for this SKB;
2924 			 * first_bd->nbd needs to be properly updated
2925 			 * before the call to bnx2x_free_tx_pkt
2926 			 */
2927 			first_bd->nbd = cpu_to_le16(nbd);
2928 			bnx2x_free_tx_pkt(bp, txdata,
2929 					  TX_BD(txdata->tx_pkt_prod),
2930 					  &pkts_compl, &bytes_compl);
2931 			return NETDEV_TX_OK;
2932 		}
2933 
2934 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2935 		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2936 		if (total_pkt_bd == NULL)
2937 			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2938 
2939 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2940 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2941 		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2942 		le16_add_cpu(&pkt_size, skb_frag_size(frag));
2943 		nbd++;
2944 
2945 		DP(NETIF_MSG_TX_QUEUED,
2946 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2947 		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2948 		   le16_to_cpu(tx_data_bd->nbytes));
2949 	}
2950 
2951 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2952 
2953 	/* update with actual num BDs */
2954 	first_bd->nbd = cpu_to_le16(nbd);
2955 
2956 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2957 
2958 	/* now send a tx doorbell, counting the next-page BD
2959 	 * if the packet contains or ends with it
2960 	 */
2961 	if (TX_BD_POFF(bd_prod) < nbd)
2962 		nbd++;
2963 
2964 	/* total_pkt_bytes should be set on the first data BD if
2965 	 * it's not an LSO packet and there is more than one
2966 	 * data BD. In this case pkt_size is limited by an MTU value.
2967 	 * However we prefer to set it for an LSO packet (while we don't
2968 	 * have to) in order to save some CPU cycles in the non-LSO
2969 	 * case, which we care about much more.
2970 	 */
2971 	if (total_pkt_bd != NULL)
2972 		total_pkt_bd->total_pkt_bytes = pkt_size;
2973 
2974 	if (pbd_e1x)
2975 		DP(NETIF_MSG_TX_QUEUED,
2976 		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2977 		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2978 		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2979 		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2980 		    le16_to_cpu(pbd_e1x->total_hlen_w));
2981 	if (pbd_e2)
2982 		DP(NETIF_MSG_TX_QUEUED,
2983 		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2984 		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2985 		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2986 		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2987 		   pbd_e2->parsing_data);
2988 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2989 
2990 	netdev_tx_sent_queue(txq, skb->len);
2991 
2992 	txdata->tx_pkt_prod++;
2993 	/*
2994 	 * Make sure that the BD data is updated before updating the producer
2995 	 * since FW might read the BD right after the producer is updated.
2996 	 * This is only applicable for weak-ordered memory model archs such
2997 	 * as IA-64. The following barrier is also mandatory since the FW
2998 	 * assumes packets must have BDs.
2999 	 */
3000 	wmb();
3001 
3002 	txdata->tx_db.data.prod += nbd;
3003 	barrier();
3004 
3005 	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3006 
3007 	mmiowb();
3008 
3009 	txdata->tx_bd_prod += nbd;
3010 
3011 	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
3012 		netif_tx_stop_queue(txq);
3013 
3014 		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
3015 		 * ordering of set_bit() in netif_tx_stop_queue() and read of
3016 		 * fp->bd_tx_cons */
3017 		smp_mb();
3018 
3019 		fp->eth_q_stats.driver_xoff++;
3020 		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
3021 			netif_tx_wake_queue(txq);
3022 	}
3023 	txdata->tx_pkt++;
3024 
3025 	return NETDEV_TX_OK;
3026 }
3027 
3028 /**
3029  * bnx2x_setup_tc - routine to configure net_device for multi tc
3030  *
3031  * @netdev: net device to configure
3032  * @tc: number of traffic classes to enable
3033  *
3034  * callback connected to the ndo_setup_tc function pointer
3035  */
3036 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3037 {
3038 	int cos, prio, count, offset;
3039 	struct bnx2x *bp = netdev_priv(dev);
3040 
3041 	/* setup tc must be called under rtnl lock */
3042 	ASSERT_RTNL();
3043 
3044 	/* no traffic classes requested. aborting */
3045 	if (!num_tc) {
3046 		netdev_reset_tc(dev);
3047 		return 0;
3048 	}
3049 
3050 	/* requested to support too many traffic classes */
3051 	if (num_tc > bp->max_cos) {
3052 		BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3053 			  num_tc, bp->max_cos);
3054 		return -EINVAL;
3055 	}
3056 
3057 	/* declare amount of supported traffic classes */
3058 	if (netdev_set_num_tc(dev, num_tc)) {
3059 		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3060 		return -EINVAL;
3061 	}
3062 
3063 	/* configure priority to traffic class mapping */
3064 	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3065 		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3066 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3067 		   "mapping priority %d to tc %d\n",
3068 		   prio, bp->prio_to_cos[prio]);
3069 	}
3070 
3071 
3072 	/* Use this configuration to differentiate tc0 from other COSes.
3073 	   This can be used for ets or pfc, and save the effort of setting
3074 	   up a multi-class queue disc or negotiating DCBX with a switch
3075 	netdev_set_prio_tc_map(dev, 0, 0);
3076 	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3077 	for (prio = 1; prio < 16; prio++) {
3078 		netdev_set_prio_tc_map(dev, prio, 1);
3079 		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3080 	} */
3081 
3082 	/* configure traffic class to transmission queue mapping */
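	/* Each traffic class is backed by BNX2X_NUM_ETH_QUEUES(bp) Tx queues;
	 * class 'cos' starts at netdev queue offset cos * MAX_TXQS_PER_COS.
	 */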
3083 	for (cos = 0; cos < bp->max_cos; cos++) {
3084 		count = BNX2X_NUM_ETH_QUEUES(bp);
3085 		offset = cos * MAX_TXQS_PER_COS;
3086 		netdev_set_tc_queue(dev, cos, count, offset);
3087 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3088 		   "mapping tc %d to offset %d count %d\n",
3089 		   cos, offset, count);
3090 	}
3091 
3092 	return 0;
3093 }
3094 
3095 /* called with rtnl_lock */
3096 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3097 {
3098 	struct sockaddr *addr = p;
3099 	struct bnx2x *bp = netdev_priv(dev);
3100 	int rc = 0;
3101 
3102 	if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3103 		BNX2X_ERR("Requested MAC address is not valid\n");
3104 		return -EINVAL;
3105 	}
3106 
3107 #ifdef BCM_CNIC
3108 	if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) {
3109 		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3110 		return -EINVAL;
3111 	}
3112 #endif
3113 
3114 	if (netif_running(dev))  {
3115 		rc = bnx2x_set_eth_mac(bp, false);
3116 		if (rc)
3117 			return rc;
3118 	}
3119 
3120 	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3121 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3122 
3123 	if (netif_running(dev))
3124 		rc = bnx2x_set_eth_mac(bp, true);
3125 
3126 	return rc;
3127 }
3128 
3129 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3130 {
3131 	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3132 	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3133 	u8 cos;
3134 
3135 	/* Common */
3136 #ifdef BCM_CNIC
3137 	if (IS_FCOE_IDX(fp_index)) {
3138 		memset(sb, 0, sizeof(union host_hc_status_block));
3139 		fp->status_blk_mapping = 0;
3140 
3141 	} else {
3142 #endif
3143 		/* status blocks */
3144 		if (!CHIP_IS_E1x(bp))
3145 			BNX2X_PCI_FREE(sb->e2_sb,
3146 				       bnx2x_fp(bp, fp_index,
3147 						status_blk_mapping),
3148 				       sizeof(struct host_hc_status_block_e2));
3149 		else
3150 			BNX2X_PCI_FREE(sb->e1x_sb,
3151 				       bnx2x_fp(bp, fp_index,
3152 						status_blk_mapping),
3153 				       sizeof(struct host_hc_status_block_e1x));
3154 #ifdef BCM_CNIC
3155 	}
3156 #endif
3157 	/* Rx */
3158 	if (!skip_rx_queue(bp, fp_index)) {
3159 		bnx2x_free_rx_bds(fp);
3160 
3161 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
3162 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3163 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3164 			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
3165 			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
3166 
3167 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3168 			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
3169 			       sizeof(struct eth_fast_path_rx_cqe) *
3170 			       NUM_RCQ_BD);
3171 
3172 		/* SGE ring */
3173 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3174 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3175 			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
3176 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3177 	}
3178 
3179 	/* Tx */
3180 	if (!skip_tx_queue(bp, fp_index)) {
3181 		/* fastpath tx rings: tx_buf tx_desc */
3182 		for_each_cos_in_tx_queue(fp, cos) {
3183 			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3184 
3185 			DP(NETIF_MSG_IFDOWN,
3186 			   "freeing tx memory of fp %d cos %d cid %d\n",
3187 			   fp_index, cos, txdata->cid);
3188 
3189 			BNX2X_FREE(txdata->tx_buf_ring);
3190 			BNX2X_PCI_FREE(txdata->tx_desc_ring,
3191 				txdata->tx_desc_mapping,
3192 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3193 		}
3194 	}
3195 	/* end of fastpath */
3196 }
3197 
3198 void bnx2x_free_fp_mem(struct bnx2x *bp)
3199 {
3200 	int i;
3201 	for_each_queue(bp, i)
3202 		bnx2x_free_fp_mem_at(bp, i);
3203 }
3204 
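/* Cache pointers to the status block's index_values and running_index arrays
 * in the fastpath structure, so the hot path does not have to go through the
 * e1x/e2 status block union on every access.
 */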
3205 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3206 {
3207 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3208 	if (!CHIP_IS_E1x(bp)) {
3209 		bnx2x_fp(bp, index, sb_index_values) =
3210 			(__le16 *)status_blk.e2_sb->sb.index_values;
3211 		bnx2x_fp(bp, index, sb_running_index) =
3212 			(__le16 *)status_blk.e2_sb->sb.running_index;
3213 	} else {
3214 		bnx2x_fp(bp, index, sb_index_values) =
3215 			(__le16 *)status_blk.e1x_sb->sb.index_values;
3216 		bnx2x_fp(bp, index, sb_running_index) =
3217 			(__le16 *)status_blk.e1x_sb->sb.running_index;
3218 	}
3219 }
3220 
3221 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3222 {
3223 	union host_hc_status_block *sb;
3224 	struct bnx2x_fastpath *fp = &bp->fp[index];
3225 	int ring_size = 0;
3226 	u8 cos;
3227 	int rx_ring_size = 0;
3228 
3229 #ifdef BCM_CNIC
3230 	if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) {
3231 		rx_ring_size = MIN_RX_SIZE_NONTPA;
3232 		bp->rx_ring_size = rx_ring_size;
3233 	} else
3234 #endif
3235 	if (!bp->rx_ring_size) {
3236 		u32 cfg = SHMEM_RD(bp,
3237 			     dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
3238 
3239 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3240 
3241 		/* Decrease ring size for 1G functions */
3242 		if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3243 		    PORT_HW_CFG_NET_SERDES_IF_SGMII)
3244 			rx_ring_size /= 10;
3245 
3246 		/* allocate at least number of buffers required by FW */
3247 		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3248 				     MIN_RX_SIZE_TPA, rx_ring_size);
3249 
3250 		bp->rx_ring_size = rx_ring_size;
3251 	} else /* if rx_ring_size specified - use it */
3252 		rx_ring_size = bp->rx_ring_size;
3253 
3254 	/* Common */
3255 	sb = &bnx2x_fp(bp, index, status_blk);
3256 #ifdef BCM_CNIC
3257 	if (!IS_FCOE_IDX(index)) {
3258 #endif
3259 		/* status blocks */
3260 		if (!CHIP_IS_E1x(bp))
3261 			BNX2X_PCI_ALLOC(sb->e2_sb,
3262 				&bnx2x_fp(bp, index, status_blk_mapping),
3263 				sizeof(struct host_hc_status_block_e2));
3264 		else
3265 			BNX2X_PCI_ALLOC(sb->e1x_sb,
3266 				&bnx2x_fp(bp, index, status_blk_mapping),
3267 			    sizeof(struct host_hc_status_block_e1x));
3268 #ifdef BCM_CNIC
3269 	}
3270 #endif
3271 
3272 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3273 	 * set shortcuts for it.
3274 	 */
3275 	if (!IS_FCOE_IDX(index))
3276 		set_sb_shortcuts(bp, index);
3277 
3278 	/* Tx */
3279 	if (!skip_tx_queue(bp, index)) {
3280 		/* fastpath tx rings: tx_buf tx_desc */
3281 		for_each_cos_in_tx_queue(fp, cos) {
3282 			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3283 
3284 			DP(NETIF_MSG_IFUP,
3285 			   "allocating tx memory of fp %d cos %d\n",
3286 			   index, cos);
3287 
3288 			BNX2X_ALLOC(txdata->tx_buf_ring,
3289 				sizeof(struct sw_tx_bd) * NUM_TX_BD);
3290 			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3291 				&txdata->tx_desc_mapping,
3292 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3293 		}
3294 	}
3295 
3296 	/* Rx */
3297 	if (!skip_rx_queue(bp, index)) {
3298 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
3299 		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3300 				sizeof(struct sw_rx_bd) * NUM_RX_BD);
3301 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3302 				&bnx2x_fp(bp, index, rx_desc_mapping),
3303 				sizeof(struct eth_rx_bd) * NUM_RX_BD);
3304 
3305 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3306 				&bnx2x_fp(bp, index, rx_comp_mapping),
3307 				sizeof(struct eth_fast_path_rx_cqe) *
3308 				NUM_RCQ_BD);
3309 
3310 		/* SGE ring */
3311 		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3312 				sizeof(struct sw_rx_page) * NUM_RX_SGE);
3313 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3314 				&bnx2x_fp(bp, index, rx_sge_mapping),
3315 				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3316 		/* RX BD ring */
3317 		bnx2x_set_next_page_rx_bd(fp);
3318 
3319 		/* CQ ring */
3320 		bnx2x_set_next_page_rx_cq(fp);
3321 
3322 		/* BDs */
3323 		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3324 		if (ring_size < rx_ring_size)
3325 			goto alloc_mem_err;
3326 	}
3327 
3328 	return 0;
3329 
3330 /* handles low memory cases */
3331 alloc_mem_err:
3332 	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3333 						index, ring_size);
3334 	/* FW will drop all packets if the queue is not big enough;
3335 	 * in these cases we disable the queue.
3336 	 * Min size is different for OOO, TPA and non-TPA queues.
3337 	 */
3338 	if (ring_size < (fp->disable_tpa ?
3339 				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3340 			/* release memory allocated for this queue */
3341 			bnx2x_free_fp_mem_at(bp, index);
3342 			return -ENOMEM;
3343 	}
3344 	return 0;
3345 }
3346 
3347 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3348 {
3349 	int i;
3350 
3351 	/*
3352 	 * 1. Allocate FP for leading - fatal if error
3353 	 * 2. {CNIC} Allocate FCoE FP - fatal if error
3354 	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3355 	 * 4. Allocate RSS - fix number of queues if error
3356 	 */
3357 
3358 	/* leading */
3359 	if (bnx2x_alloc_fp_mem_at(bp, 0))
3360 		return -ENOMEM;
3361 
3362 #ifdef BCM_CNIC
3363 	if (!NO_FCOE(bp))
3364 		/* FCoE */
3365 		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3366 			/* we will fail load process instead of mark
3367 			 * NO_FCOE_FLAG
3368 			 */
3369 			return -ENOMEM;
3370 #endif
3371 
3372 	/* RSS */
3373 	for_each_nondefault_eth_queue(bp, i)
3374 		if (bnx2x_alloc_fp_mem_at(bp, i))
3375 			break;
3376 
3377 	/* handle memory failures */
3378 	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3379 		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3380 
3381 		WARN_ON(delta < 0);
3382 #ifdef BCM_CNIC
3383 		/*
3384 		 * move non-eth FPs next to the last eth FP;
3385 		 * this must be done in that order:
3386 		 * FCOE_IDX < FWD_IDX < OOO_IDX
3387 		 */
3388 
3389 		/* move FCoE fp even if NO_FCOE_FLAG is on */
3390 		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3391 #endif
3392 		bp->num_queues -= delta;
3393 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3394 			  bp->num_queues + delta, bp->num_queues);
3395 	}
3396 
3397 	return 0;
3398 }
3399 
3400 void bnx2x_free_mem_bp(struct bnx2x *bp)
3401 {
3402 	kfree(bp->fp);
3403 	kfree(bp->msix_table);
3404 	kfree(bp->ilt);
3405 }
3406 
3407 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3408 {
3409 	struct bnx2x_fastpath *fp;
3410 	struct msix_entry *tbl;
3411 	struct bnx2x_ilt *ilt;
3412 	int msix_table_size = 0;
3413 
3414 	/*
3415 	 * The biggest MSI-X table we might need is as a maximum number of fast
3416 	 * path IGU SBs plus default SB (for PF).
3417 	 */
3418 	msix_table_size = bp->igu_sb_cnt + 1;
3419 
3420 	/* fp array: RSS plus CNIC related L2 queues */
3421 	fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
3422 		     sizeof(*fp), GFP_KERNEL);
3423 	if (!fp)
3424 		goto alloc_err;
3425 	bp->fp = fp;
3426 
3427 	/* msix table */
3428 	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3429 	if (!tbl)
3430 		goto alloc_err;
3431 	bp->msix_table = tbl;
3432 
3433 	/* ilt */
3434 	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3435 	if (!ilt)
3436 		goto alloc_err;
3437 	bp->ilt = ilt;
3438 
3439 	return 0;
3440 alloc_err:
3441 	bnx2x_free_mem_bp(bp);
3442 	return -ENOMEM;
3443 
3444 }
3445 
3446 int bnx2x_reload_if_running(struct net_device *dev)
3447 {
3448 	struct bnx2x *bp = netdev_priv(dev);
3449 
3450 	if (unlikely(!netif_running(dev)))
3451 		return 0;
3452 
3453 	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3454 	return bnx2x_nic_load(bp, LOAD_NORMAL);
3455 }
3456 
3457 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3458 {
3459 	u32 sel_phy_idx = 0;
3460 	if (bp->link_params.num_phys <= 1)
3461 		return INT_PHY;
3462 
3463 	if (bp->link_vars.link_up) {
3464 		sel_phy_idx = EXT_PHY1;
3465 		/* In case link is SERDES, check if the EXT_PHY2 is the one */
3466 		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3467 		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3468 			sel_phy_idx = EXT_PHY2;
3469 	} else {
3470 
3471 		switch (bnx2x_phy_selection(&bp->link_params)) {
3472 		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3473 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3474 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3475 		       sel_phy_idx = EXT_PHY1;
3476 		       break;
3477 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3478 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3479 		       sel_phy_idx = EXT_PHY2;
3480 		       break;
3481 		}
3482 	}
3483 
3484 	return sel_phy_idx;
3485 
3486 }
3487 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3488 {
3489 	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3490 	/*
3491 	 * The selected active PHY index is always taken after swapping (in
3492 	 * case PHY swapping is enabled), so when swapping is enabled we need
3493 	 * to reverse the configuration index.
3494 	 */
3495 
3496 	if (bp->link_params.multi_phy_config &
3497 	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3498 		if (sel_phy_idx == EXT_PHY1)
3499 			sel_phy_idx = EXT_PHY2;
3500 		else if (sel_phy_idx == EXT_PHY2)
3501 			sel_phy_idx = EXT_PHY1;
3502 	}
3503 	return LINK_CONFIG_IDX(sel_phy_idx);
3504 }
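
/* Illustrative example of the mapping above: with PHY swapping enabled, an
 * active EXT_PHY1 yields LINK_CONFIG_IDX(EXT_PHY2) and vice versa, i.e. the
 * returned index always refers to the pre-swap configuration slot of the
 * currently active PHY.
 */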
3505 
3506 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3507 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3508 {
3509 	struct bnx2x *bp = netdev_priv(dev);
3510 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3511 
3512 	switch (type) {
3513 	case NETDEV_FCOE_WWNN:
3514 		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3515 				cp->fcoe_wwn_node_name_lo);
3516 		break;
3517 	case NETDEV_FCOE_WWPN:
3518 		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3519 				cp->fcoe_wwn_port_name_lo);
3520 		break;
3521 	default:
3522 		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3523 		return -EINVAL;
3524 	}
3525 
3526 	return 0;
3527 }
3528 #endif
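
/* The WWNs above are assembled from two 32-bit shmem words.  Assuming
 * HILO_U64(hi, lo) expands to (((u64)hi << 32) | lo), illustrative values of
 * hi = 0x10000000 and lo = 0xc9a0b0c0 would be reported to the FCoE stack as
 * the 64-bit WWN 0x10000000c9a0b0c0.
 */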
3529 
3530 /* called with rtnl_lock */
3531 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3532 {
3533 	struct bnx2x *bp = netdev_priv(dev);
3534 
3535 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3536 		BNX2X_ERR("Can't change MTU during parity recovery\n");
3537 		return -EAGAIN;
3538 	}
3539 
3540 	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3541 	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3542 		BNX2X_ERR("Can't support requested MTU size\n");
3543 		return -EINVAL;
3544 	}
3545 
3546 	/* This does not race with packet allocation
3547 	 * because the actual allocation size is
3548 	 * only updated as part of the load process.
3549 	 */
3550 	dev->mtu = new_mtu;
3551 
3552 	bp->gro_check = bnx2x_need_gro_check(new_mtu);
3553 
3554 	return bnx2x_reload_if_running(dev);
3555 }
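
/* Example of the bounds check above (the exact constants are taken as
 * assumptions from the bnx2x headers): with ETH_MAX_JUMBO_PACKET_SIZE == 9600
 * and ETH_MIN_PACKET_SIZE == 60, new_mtu == 9000 is accepted, while
 * new_mtu == 9700 (too large) or new_mtu == 40 (40 + ETH_HLEN < 60) is
 * rejected with -EINVAL.
 */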
3556 
3557 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3558 				     netdev_features_t features)
3559 {
3560 	struct bnx2x *bp = netdev_priv(dev);
3561 
3562 	/* TPA requires Rx CSUM offloading */
3563 	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3564 		features &= ~NETIF_F_LRO;
3565 		features &= ~NETIF_F_GRO;
3566 	}
3567 
3568 	return features;
3569 }
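
/* Practical effect of the dependency above: if the user disables RX checksum
 * offload (e.g. something like "ethtool -K eth0 rx off"), this hook also
 * drops LRO and GRO from the requested features, because TPA aggregation
 * relies on hardware RX checksumming.
 */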
3570 
3571 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3572 {
3573 	struct bnx2x *bp = netdev_priv(dev);
3574 	u32 flags = bp->flags;
3575 	bool bnx2x_reload = false;
3576 
3577 	if (features & NETIF_F_LRO)
3578 		flags |= TPA_ENABLE_FLAG;
3579 	else
3580 		flags &= ~TPA_ENABLE_FLAG;
3581 
3582 	if (features & NETIF_F_GRO)
3583 		flags |= GRO_ENABLE_FLAG;
3584 	else
3585 		flags &= ~GRO_ENABLE_FLAG;
3586 
3587 	if (features & NETIF_F_LOOPBACK) {
3588 		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3589 			bp->link_params.loopback_mode = LOOPBACK_BMAC;
3590 			bnx2x_reload = true;
3591 		}
3592 	} else {
3593 		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3594 			bp->link_params.loopback_mode = LOOPBACK_NONE;
3595 			bnx2x_reload = true;
3596 		}
3597 	}
3598 
3599 	if (flags ^ bp->flags) {
3600 		bp->flags = flags;
3601 		bnx2x_reload = true;
3602 	}
3603 
3604 	if (bnx2x_reload) {
3605 		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3606 			return bnx2x_reload_if_running(dev);
3607 		/* else: bnx2x_nic_load() will be called at end of recovery */
3608 	}
3609 
3610 	return 0;
3611 }
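
/* Flow sketch for the handler above: toggling LRO/GRO updates the TPA/GRO
 * flags, toggling loopback switches link_params.loopback_mode, and the
 * "flags ^ bp->flags" test catches any real flag change.  The NIC is only
 * reloaded when something actually changed and no parity recovery is in
 * progress; otherwise the new state is applied by the load at the end of
 * recovery.
 */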
3612 
3613 void bnx2x_tx_timeout(struct net_device *dev)
3614 {
3615 	struct bnx2x *bp = netdev_priv(dev);
3616 
3617 #ifdef BNX2X_STOP_ON_ERROR
3618 	if (!bp->panic)
3619 		bnx2x_panic();
3620 #endif
3621 
3622 	smp_mb__before_clear_bit();
3623 	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3624 	smp_mb__after_clear_bit();
3625 
3626 	/* This allows the netif to be shut down gracefully before resetting */
3627 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
3628 }
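
/* The timeout handler above only records the event: the bit is set between
 * memory barriers so the slow-path worker sees it, and the actual reset is
 * performed later by sp_rtnl_task in process context (presumably under the
 * rtnl lock, as its name suggests), which lets the netif be shut down
 * gracefully first.
 */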
3629 
3630 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3631 {
3632 	struct net_device *dev = pci_get_drvdata(pdev);
3633 	struct bnx2x *bp;
3634 
3635 	if (!dev) {
3636 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3637 		return -ENODEV;
3638 	}
3639 	bp = netdev_priv(dev);
3640 
3641 	rtnl_lock();
3642 
3643 	pci_save_state(pdev);
3644 
3645 	if (!netif_running(dev)) {
3646 		rtnl_unlock();
3647 		return 0;
3648 	}
3649 
3650 	netif_device_detach(dev);
3651 
3652 	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3653 
3654 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3655 
3656 	rtnl_unlock();
3657 
3658 	return 0;
3659 }
3660 
3661 int bnx2x_resume(struct pci_dev *pdev)
3662 {
3663 	struct net_device *dev = pci_get_drvdata(pdev);
3664 	struct bnx2x *bp;
3665 	int rc;
3666 
3667 	if (!dev) {
3668 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3669 		return -ENODEV;
3670 	}
3671 	bp = netdev_priv(dev);
3672 
3673 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3674 		BNX2X_ERR("Handling parity error recovery. Try again later\n");
3675 		return -EAGAIN;
3676 	}
3677 
3678 	rtnl_lock();
3679 
3680 	pci_restore_state(pdev);
3681 
3682 	if (!netif_running(dev)) {
3683 		rtnl_unlock();
3684 		return 0;
3685 	}
3686 
3687 	bnx2x_set_power_state(bp, PCI_D0);
3688 	netif_device_attach(dev);
3689 
3690 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
3691 
3692 	rtnl_unlock();
3693 
3694 	return rc;
3695 }
3696 
3698 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3699 			      u32 cid)
3700 {
3701 	/* ustorm cxt validation */
3702 	cxt->ustorm_ag_context.cdu_usage =
3703 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3704 			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3705 	/* xcontext validation */
3706 	cxt->xstorm_ag_context.cdu_reserved =
3707 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3708 			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3709 }
3710 
3711 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3712 					     u8 fw_sb_id, u8 sb_index,
3713 					     u8 ticks)
3714 {
3716 	u32 addr = BAR_CSTRORM_INTMEM +
3717 		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3718 	REG_WR8(bp, addr, ticks);
3719 	DP(NETIF_MSG_IFUP,
3720 	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
3721 	   port, fw_sb_id, sb_index, ticks);
3722 }
3723 
3724 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3725 					     u16 fw_sb_id, u8 sb_index,
3726 					     u8 disable)
3727 {
3728 	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3729 	u32 addr = BAR_CSTRORM_INTMEM +
3730 		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3731 	u16 flags = REG_RD16(bp, addr);
3732 	/* clear and set */
3733 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
3734 	flags |= enable_flag;
3735 	REG_WR16(bp, addr, flags);
3736 	DP(NETIF_MSG_IFUP,
3737 	   "port %x fw_sb_id %d sb_index %d disable %d\n",
3738 	   port, fw_sb_id, sb_index, disable);
3739 }
3740 
3741 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3742 				    u8 sb_index, u8 disable, u16 usec)
3743 {
3744 	int port = BP_PORT(bp);
3745 	u8 ticks = usec / BNX2X_BTR;
3746 
3747 	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3748 
3749 	disable = disable ? 1 : (usec ? 0 : 1);
3750 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3751 }
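
/* Worked example for the coalescing update above (the BNX2X_BTR value is an
 * assumption): with BNX2X_BTR == 4, usec == 48 programs 12 timeout ticks.
 * Note that usec == 0 forces the disable flag regardless of the caller's
 * 'disable' argument, so a zero coalescing time turns status-block
 * coalescing off for that index.
 */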
3752