/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#ifndef __BNAD_H__
#define __BNAD_H__

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/ipv6.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>

/* Fix for IA64 */
#include <asm/checksum.h>
#include <net/ip6_checksum.h>

#include <net/ip.h>
#include <net/tcp.h>

#include "bna.h"

#define BNAD_TXQ_DEPTH		2048
#define BNAD_RXQ_DEPTH		2048

#define BNAD_MAX_TX		1
#define BNAD_MAX_TXQ_PER_TX	8	/* 8 priority queues */
#define BNAD_TXQ_NUM		1

#define BNAD_MAX_RX		1
#define BNAD_MAX_RXP_PER_RX	16
#define BNAD_MAX_RXQ_PER_RXP	2

/*
 * Control structure pointed to by ccb->ctrl, which
 * determines the NAPI / LRO behavior of the CCB.
 * There is a 1:1 correspondence between ccb & ctrl.
 */
struct bnad_rx_ctrl {
	struct bna_ccb		*ccb;
	struct bnad		*bnad;
	unsigned long		flags;
	struct napi_struct	napi;
	u64			rx_intr_ctr;
	u64			rx_poll_ctr;
	u64			rx_schedule;
	u64			rx_keep_poll;
	u64			rx_complete;
};
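
/*
 * Illustrative sketch (not part of the driver): because the NAPI context
 * is embedded in struct bnad_rx_ctrl, a poll handler can recover the
 * control structure from the napi_struct pointer it is handed. The
 * handler name and budget handling below are hypothetical.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct bnad_rx_ctrl *rx_ctrl =
 *			container_of(napi, struct bnad_rx_ctrl, napi);
 *
 *		rx_ctrl->rx_poll_ctr++;
 *		... process up to 'budget' completions on rx_ctrl->ccb ...
 *		return 0;
 *	}
 */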

#define BNAD_RXMODE_PROMISC_DEFAULT	BNA_RXMODE_PROMISC

/*
 * GLOBAL #defines (CONSTANTS)
 */
#define BNAD_NAME			"bna"
#define BNAD_NAME_LEN			64

#define BNAD_MAILBOX_MSIX_INDEX		0
#define BNAD_MAILBOX_MSIX_VECTORS	1
#define BNAD_INTX_TX_IB_BITMASK		0x1
#define BNAD_INTX_RX_IB_BITMASK		0x2

#define BNAD_STATS_TIMER_FREQ		1000	/* in msecs */
#define BNAD_DIM_TIMER_FREQ		1000	/* in msecs */

#define BNAD_IOCETH_TIMEOUT		10000

#define BNAD_MIN_Q_DEPTH		512
#define BNAD_MAX_RXQ_DEPTH		16384
#define BNAD_MAX_TXQ_DEPTH		2048

#define BNAD_JUMBO_MTU			9000

#define BNAD_NETIF_WAKE_THRESHOLD	8

#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3

/* Bit positions for tcb->flags */
#define BNAD_TXQ_FREE_SENT		0
#define BNAD_TXQ_TX_STARTED		1

/* Bit positions for rcb->flags */
#define BNAD_RXQ_STARTED		0
#define BNAD_RXQ_POST_OK		1

/* Resource limits */
#define BNAD_NUM_TXQ	(bnad->num_tx * bnad->num_txq_per_tx)
#define BNAD_NUM_RXP	(bnad->num_rx * bnad->num_rxp_per_rx)

#define BNAD_FRAME_SIZE(_mtu) \
	(ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)

/*
 * DATA STRUCTURES
 */

/* enums */
enum bnad_intr_source {
	BNAD_INTR_TX		= 1,
	BNAD_INTR_RX		= 2
};

enum bnad_link_state {
	BNAD_LS_DOWN		= 0,
	BNAD_LS_UP		= 1
};

struct bnad_iocmd_comp {
	struct bnad		*bnad;
	struct completion	comp;
	int			comp_status;
};

struct bnad_completion {
	struct completion	ioc_comp;
	struct completion	ucast_comp;
	struct completion	mcast_comp;
	struct completion	tx_comp;
	struct completion	rx_comp;
	struct completion	stats_comp;
	struct completion	enet_comp;
	struct completion	mtu_comp;

	u8			ioc_comp_status;
	u8			ucast_comp_status;
	u8			mcast_comp_status;
	u8			tx_comp_status;
	u8			rx_comp_status;
	u8			stats_comp_status;
	u8			port_comp_status;
	u8			mtu_comp_status;
};

/* Tx Rx Control Stats */
struct bnad_drv_stats {
	u64		netif_queue_stop;
	u64		netif_queue_wakeup;
	u64		netif_queue_stopped;
	u64		tso4;
	u64		tso6;
	u64		tso_err;
	u64		tcpcsum_offload;
	u64		udpcsum_offload;
	u64		csum_help;
	u64		tx_skb_too_short;
	u64		tx_skb_stopping;
	u64		tx_skb_max_vectors;
	u64		tx_skb_mss_too_long;
	u64		tx_skb_tso_too_short;
	u64		tx_skb_tso_prepare;
	u64		tx_skb_non_tso_too_long;
	u64		tx_skb_tcp_hdr;
	u64		tx_skb_udp_hdr;
	u64		tx_skb_csum_err;
	u64		tx_skb_headlen_too_long;
	u64		tx_skb_headlen_zero;
	u64		tx_skb_frag_zero;
	u64		tx_skb_len_mismatch;
	u64		tx_skb_map_failed;

	u64		hw_stats_updates;
	u64		netif_rx_dropped;

	u64		link_toggle;
	u64		cee_toggle;

	u64		rxp_info_alloc_failed;
	u64		mbox_intr_disabled;
	u64		mbox_intr_enabled;
	u64		tx_unmap_q_alloc_failed;
	u64		rx_unmap_q_alloc_failed;

	u64		rxbuf_alloc_failed;
	u64		rxbuf_map_failed;
};

/* Complete driver stats */
struct bnad_stats {
	struct bnad_drv_stats	drv_stats;
	struct bna_stats	*bna_stats;
};

/* Tx / Rx Resources */
struct bnad_tx_res_info {
	struct bna_res_info res_info[BNA_TX_RES_T_MAX];
};

struct bnad_rx_res_info {
	struct bna_res_info res_info[BNA_RX_RES_T_MAX];
};

struct bnad_tx_info {
	struct bna_tx *tx;	/* 1:1 between tx_info & tx */
	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
	u32 tx_id;
	struct delayed_work tx_cleanup_work;
} ____cacheline_aligned;

struct bnad_rx_info {
	struct bna_rx *rx;	/* 1:1 between rx_info & rx */

	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
	u32 rx_id;
	struct work_struct rx_cleanup_work;
} ____cacheline_aligned;

struct bnad_tx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct bnad_tx_unmap {
	struct sk_buff		*skb;
	u32			nvecs;
	struct bnad_tx_vector	vectors[BFI_TX_MAX_VECTORS_PER_WI];
};

struct bnad_rx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	u32			len;
};

struct bnad_rx_unmap {
	struct page		*page;
	struct sk_buff		*skb;
	struct bnad_rx_vector	vector;
	u32			page_offset;
};

enum bnad_rxbuf_type {
	BNAD_RXBUF_NONE		= 0,
	BNAD_RXBUF_SK_BUFF	= 1,
	BNAD_RXBUF_PAGE		= 2,
	BNAD_RXBUF_MULTI_BUFF	= 3
};

#define BNAD_RXBUF_IS_SK_BUFF(_type)	((_type) == BNAD_RXBUF_SK_BUFF)
#define BNAD_RXBUF_IS_MULTI_BUFF(_type)	((_type) == BNAD_RXBUF_MULTI_BUFF)

struct bnad_rx_unmap_q {
	int			reuse_pi;
	int			alloc_order;
	u32			map_size;
	enum bnad_rxbuf_type	type;
	struct bnad_rx_unmap	unmap[] ____cacheline_aligned;
};
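
/*
 * Illustrative sketch (not part of the driver): struct bnad_rx_unmap_q
 * ends in a flexible array member, so an unmap queue tracking 'depth'
 * receive buffers would be sized roughly as below. struct_size() from
 * <linux/overflow.h> keeps the multiplication overflow-safe; the variable
 * names are hypothetical.
 *
 *	struct bnad_rx_unmap_q *unmap_q;
 *	u32 depth = BNAD_RXQ_DEPTH;
 *
 *	unmap_q = kzalloc(struct_size(unmap_q, unmap, depth), GFP_KERNEL);
 *	if (!unmap_q)
 *		return -ENOMEM;
 */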

#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
	((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)

/* Bit mask values for bnad->cfg_flags */
#define	BNAD_CF_DIM_ENABLED		0x01	/* DIM */
#define	BNAD_CF_PROMISC			0x02
#define BNAD_CF_ALLMULTI		0x04
#define	BNAD_CF_DEFAULT			0x08
#define	BNAD_CF_MSIX			0x10	/* If in MSIx mode */

/* Defines for run_flags bit-mask */
/* Set, tested & cleared using xxx_bit() functions */
/* Values indicate bit positions */
#define BNAD_RF_CEE_RUNNING		0
#define BNAD_RF_MTU_SET			1
#define BNAD_RF_MBOX_IRQ_DISABLED	2
#define BNAD_RF_NETDEV_REGISTERED	3
#define BNAD_RF_DIM_TIMER_RUNNING	4
#define BNAD_RF_STATS_TIMER_RUNNING	5
#define BNAD_RF_TX_PRIO_SET		6

struct bnad {
	struct net_device	*netdev;
	u32			id;

	/* Data path */
	struct bnad_tx_info tx_info[BNAD_MAX_TX];
	struct bnad_rx_info rx_info[BNAD_MAX_RX];

	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/*
	 * These queue numbers are global only because
	 * they are used to calculate MSI-X vectors.
	 * The actual number of queues is per Tx/Rx
	 * object.
	 */
	u32		num_tx;
	u32		num_rx;
	u32		num_txq_per_tx;
	u32		num_rxp_per_rx;

	u32		txq_depth;
	u32		rxq_depth;

	u8		tx_coalescing_timeo;
	u8		rx_coalescing_timeo;

	struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned;
	struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned;

	void __iomem	*bar0;	/* BAR0 address */

	struct bna	bna;

	u32		cfg_flags;
	unsigned long	run_flags;

	struct pci_dev	*pcidev;
	u64		mmio_start;
	u64		mmio_len;

	u32		msix_num;
	struct msix_entry	*msix_table;

	struct mutex	conf_mutex;
	spinlock_t	bna_lock ____cacheline_aligned;

	/* Timers */
	struct timer_list	ioc_timer;
	struct timer_list	dim_timer;
	struct timer_list	stats_timer;

	/* Control path resources, memory & irq */
	struct bna_res_info res_info[BNA_RES_T_MAX];
	struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
	struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX];
	struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX];

	struct bnad_completion bnad_completions;

	/* Burnt-in MAC address */
	u8		perm_addr[ETH_ALEN];

	struct workqueue_struct *work_q;

	/* Statistics */
	struct bnad_stats stats;

	struct bnad_diag *diag;

	char		adapter_name[BNAD_NAME_LEN];
	char		port_name[BNAD_NAME_LEN];
	char		mbox_irq_name[BNAD_NAME_LEN];
	char		wq_name[BNAD_NAME_LEN];

	/* debugfs specific data */
	char	*regdata;
	u32	reglen;
	struct dentry *bnad_dentry_files[5];
	struct dentry *port_debugfs_root;
};

struct bnad_drvinfo {
	struct bfa_ioc_attr	ioc_attr;
	struct bfa_cee_attr	cee_attr;
	struct bfa_flash_attr	flash_attr;
	u32	cee_status;
	u32	flash_status;
};

/*
 * EXTERN VARIABLES
 */
extern const struct firmware *bfi_fw;

/*
 * EXTERN PROTOTYPES
 */
u32 *cna_get_firmware_buf(struct pci_dev *pdev);
/* Netdev entry point prototypes */
void bnad_set_rx_mode(struct net_device *netdev);
struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
int bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr);
int bnad_enable_default_bcast(struct bnad *bnad);
void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
void bnad_set_ethtool_ops(struct net_device *netdev);
void bnad_cb_completion(void *arg, enum bfa_status status);

/* Configuration & setup */
void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
void bnad_rx_coalescing_timeo_set(struct bnad *bnad);

int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);

/* Timer start/stop protos */
void bnad_dim_timer_start(struct bnad *bnad);

/* Statistics */
void bnad_netdev_qstats_fill(struct bnad *bnad,
			     struct rtnl_link_stats64 *stats);
void bnad_netdev_hwstats_fill(struct bnad *bnad,
			      struct rtnl_link_stats64 *stats);

/* Debugfs */
void bnad_debugfs_init(struct bnad *bnad);
void bnad_debugfs_uninit(struct bnad *bnad);

/* MACROS */
/* To set & get the stats counters */
#define BNAD_UPDATE_CTR(_bnad, _ctr)	\
	(((_bnad)->stats.drv_stats._ctr)++)

#define BNAD_GET_CTR(_bnad, _ctr)	((_bnad)->stats.drv_stats._ctr)
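
/*
 * Illustrative usage (not part of the driver): the counter macros above
 * update and read a named field of bnad->stats.drv_stats, e.g.
 *
 *	BNAD_UPDATE_CTR(bnad, tso4);
 *	u64 tso4_done = BNAD_GET_CTR(bnad, tso4);
 *
 * The first line expands to ((bnad)->stats.drv_stats.tso4)++ and the
 * second to a plain read of the same field.
 */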

#define bnad_enable_rx_irq_unsafe(_ccb)			\
{							\
	if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
		bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
			(_ccb)->rx_coalescing_timeo);		\
		bna_ib_ack((_ccb)->i_dbell, 0);			\
	}							\
}

#endif /* __BNAD_H__ */