/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H

#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>

#include <linux/atomic.h>

#define MAX_MSIX_P_PORT		17
#define MAX_MSIX		64
#define MSIX_LEGACY_SZ		4
#define MIN_MSIX_P_PORT		5

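/* Device state flags kept in mlx4_dev->flags. */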
enum {
	MLX4_FLAG_MSI_X		= 1 << 0,
	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
	MLX4_FLAG_MASTER	= 1 << 2,
	MLX4_FLAG_SLAVE		= 1 << 3,
	MLX4_FLAG_SRIOV		= 1 << 4,
};

enum {
	MLX4_MAX_PORTS		= 2
};

enum {
	MLX4_BOARD_ID_LEN = 64
};

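/*
 * Multi-function (SR-IOV) limits: MLX4_MFUNC_MAX covers every function,
 * i.e. MLX4_MAX_NUM_PF physical plus MLX4_MAX_NUM_VF virtual functions.
 */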
enum {
	MLX4_MAX_NUM_PF		= 16,
	MLX4_MAX_NUM_VF		= 64,
	MLX4_MFUNC_MAX		= 80,
	MLX4_MFUNC_EQ_NUM	= 4,
	MLX4_MFUNC_MAX_EQES     = 8,
	MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
};

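/* Device capability flags, reported by firmware and kept in mlx4_caps.flags. */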
enum {
	MLX4_DEV_CAP_FLAG_RC		= 1LL <<  0,
	MLX4_DEV_CAP_FLAG_UC		= 1LL <<  1,
	MLX4_DEV_CAP_FLAG_UD		= 1LL <<  2,
	MLX4_DEV_CAP_FLAG_XRC		= 1LL <<  3,
	MLX4_DEV_CAP_FLAG_SRQ		= 1LL <<  6,
	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1LL <<  7,
	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
	MLX4_DEV_CAP_FLAG_DPDP		= 1LL << 12,
	MLX4_DEV_CAP_FLAG_BLH		= 1LL << 15,
	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1LL << 16,
	MLX4_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX4_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1LL << 19,
	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1LL << 20,
	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1LL << 21,
	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30,
	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
	MLX4_DEV_CAP_FLAG_FCS_KEEP	= 1LL << 34,
	MLX4_DEV_CAP_FLAG_WOL_PORT1	= 1LL << 37,
	MLX4_DEV_CAP_FLAG_WOL_PORT2	= 1LL << 38,
	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55
};

#define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6,
	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7,
	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9,
	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
};

enum mlx4_event {
	MLX4_EVENT_TYPE_COMP		   = 0x00,
	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
	MLX4_EVENT_TYPE_CMD		   = 0x0a,
	MLX4_EVENT_TYPE_VEP_UPDATE	   = 0x19,
	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18,
	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
	MLX4_EVENT_TYPE_NONE		   = 0xff,
};

enum {
	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
};

enum {
	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
};

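/* Memory region access flags, passed as "access" to mlx4_mr_alloc() and mlx4_fmr_alloc(). */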
enum {
	MLX4_PERM_LOCAL_READ	= 1 << 10,
	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
	MLX4_PERM_REMOTE_READ	= 1 << 12,
	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
	MLX4_PERM_ATOMIC	= 1 << 14
};

enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};

enum {
	MLX4_STAT_RATE_OFFSET	= 5
};

enum mlx4_protocol {
	MLX4_PROT_IB_IPV6 = 0,
	MLX4_PROT_ETH,
	MLX4_PROT_IB_IPV4,
	MLX4_PROT_FCOE
};

enum {
	MLX4_MTT_FLAG_PRESENT		= 1
};

enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,
	MLX4_QP_REGION_ETH_ADDR,
	MLX4_QP_REGION_FC_ADDR,
	MLX4_QP_REGION_FC_EXCH,
	MLX4_NUM_QP_REGION
};

enum mlx4_port_type {
	MLX4_PORT_TYPE_NONE	= 0,
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3
};

enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX        = 0,
	MLX4_VLAN_MISS_IDX,
	MLX4_VLAN_REGULAR
};

enum mlx4_steer_type {
	MLX4_MC_STEER = 0,
	MLX4_UC_STEER,
	MLX4_NUM_STEERS
};

enum {
	MLX4_NUM_FEXCH          = 64 * 1024,
};

enum {
	MLX4_MAX_FAST_REG_PAGES = 511,
};

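/*
 * Pack a firmware version in the format used by mlx4_caps.fw_ver:
 * major in bits 63:32, minor in bits 31:16, subminor in bits 15:0,
 * so e.g. mlx4_fw_ver(2, 9, 1000) encodes firmware 2.9.1000.
 */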
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
	return (major << 32) | (minor << 16) | subminor;
}

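/*
 * Device and per-port capabilities, filled in from the firmware when
 * mlx4_core initializes the device.  Per-port arrays are indexed from
 * 1 to num_ports (index 0 is unused).
 */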
struct mlx4_caps {
	u64			fw_ver;
	u32			function;
	int			num_ports;
	int			vl_cap[MLX4_MAX_PORTS + 1];
	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64			def_mac[MLX4_MAX_PORTS + 1];
	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int			gid_table_len[MLX4_MAX_PORTS + 1];
	int			pkey_table_len[MLX4_MAX_PORTS + 1];
	int			trans_type[MLX4_MAX_PORTS + 1];
	int			vendor_oui[MLX4_MAX_PORTS + 1];
	int			wavelength[MLX4_MAX_PORTS + 1];
	u64			trans_code[MLX4_MAX_PORTS + 1];
	int			local_ca_ack_delay;
	int			num_uars;
	u32			uar_page_size;
	int			bf_reg_size;
	int			bf_regs_per_page;
	int			max_sq_sg;
	int			max_rq_sg;
	int			num_qps;
	int			max_wqes;
	int			max_sq_desc_sz;
	int			max_rq_desc_sz;
	int			max_qp_init_rdma;
	int			max_qp_dest_rdma;
	int			sqp_start;
	int			num_srqs;
	int			max_srq_wqes;
	int			max_srq_sge;
	int			reserved_srqs;
	int			num_cqs;
	int			max_cqes;
	int			reserved_cqs;
	int			num_eqs;
	int			reserved_eqs;
	int			num_comp_vectors;
	int			comp_pool;
	int			num_mpts;
	int			max_fmr_maps;
	int			num_mtts;
	int			fmr_reserved_mtts;
	int			reserved_mtts;
	int			reserved_mrws;
	int			reserved_uars;
	int			num_mgms;
	int			num_amgms;
	int			reserved_mcgs;
	int			num_qp_per_mgm;
	int			num_pds;
	int			reserved_pds;
	int			max_xrcds;
	int			reserved_xrcds;
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
	u64			flags;
	u32			bmme_flags;
	u32			reserved_lkey;
	u16			stat_rate_support;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int			reserved_qps;
	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
	int                     log_num_macs;
	int                     log_num_vlans;
	int                     log_num_prios;
	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u8                      suggested_type[MLX4_MAX_PORTS + 1];
	u8                      default_sense[MLX4_MAX_PORTS + 1];
	u32			port_mask[MLX4_MAX_PORTS + 1];
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
	u32			max_counters;
	u8			port_ib_mtu[MLX4_MAX_PORTS + 1];
};

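/*
 * Kernel buffer descriptor: either a single physically contiguous
 * ("direct") allocation or a list of page-sized chunks; use
 * mlx4_buf_offset() to get a kernel pointer at a byte offset.
 */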
struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list   *page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};

struct mlx4_mtt {
	u32			offset;
	int			order;
	int			page_shift;
};

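/*
 * Doorbell records are 32-bit entries packed MLX4_DB_PER_PAGE to a page.
 * A struct mlx4_db_pgdir tracks the free entries of one such page, with
 * separate bitmaps for order-0 (single record) and order-1 (consecutive
 * pair) allocations.
 */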
enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};

struct mlx4_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
	unsigned long	       *bits[2];
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

struct mlx4_ib_user_db_page;

struct mlx4_db {
	__be32			*db;
	union {
		struct mlx4_db_pgdir		*pgdir;
		struct mlx4_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
	int			order;
};

struct mlx4_hwq_resources {
	struct mlx4_db		db;
	struct mlx4_mtt		mtt;
	struct mlx4_buf		buf;
};

struct mlx4_mr {
	struct mlx4_mtt		mtt;
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			access;
	int			enabled;
};

struct mlx4_fmr {
	struct mlx4_mr		mr;
	struct mlx4_mpt_entry  *mpt;
	__be64		       *mtts;
	dma_addr_t		dma_handle;
	int			max_pages;
	int			max_maps;
	int			maps;
	u8			page_shift;
};

struct mlx4_uar {
	unsigned long		pfn;
	int			index;
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem	       *map;
	void __iomem	       *bf_map;
};

struct mlx4_bf {
	unsigned long		offset;
	int			buf_size;
	struct mlx4_uar	       *uar;
	void __iomem	       *reg;
};

struct mlx4_cq {
	void (*comp)		(struct mlx4_cq *);
	void (*event)		(struct mlx4_cq *, enum mlx4_event);

	struct mlx4_uar	       *uar;

	u32			cons_index;

	__be32		       *set_ci_db;
	__be32		       *arm_db;
	int			arm_sn;

	int			cqn;
	unsigned		vector;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_qp {
	void (*event)		(struct mlx4_qp *, enum mlx4_event);

	int			qpn;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_srq {
	void (*event)		(struct mlx4_srq *, enum mlx4_event);

	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;

	atomic_t		refcount;
	struct completion	free;
};

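/*
 * Address vector layouts: mlx4_av for IB ports and mlx4_eth_av for
 * Ethernet (IBoE) ports; union mlx4_ext_av covers both.
 */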
struct mlx4_av {
	__be32			port_pd;
	u8			reserved1;
	u8			g_slid;
	__be16			dlid;
	u8			reserved2;
	u8			gid_index;
	u8			stat_rate;
	u8			hop_limit;
	__be32			sl_tclass_flowlabel;
	u8			dgid[16];
};

struct mlx4_eth_av {
	__be32		port_pd;
	u8		reserved1;
	u8		smac_idx;
	u16		reserved2;
	u8		reserved3;
	u8		gid_index;
	u8		stat_rate;
	u8		hop_limit;
	__be32		sl_tclass_flowlabel;
	u8		dgid[16];
	u32		reserved4[2];
	__be16		vlan;
	u8		mac[6];
};

union mlx4_ext_av {
	struct mlx4_av		ib;
	struct mlx4_eth_av	eth;
};

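/* Basic per-counter statistics as read from the device (big-endian fields). */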
struct mlx4_counter {
	u8	reserved1[3];
	u8	counter_mode;
	__be32	num_ifc;
	u32	reserved2[2];
	__be64	rx_frames;
	__be64	rx_bytes;
	__be64	tx_frames;
	__be64	tx_bytes;
};

struct mlx4_dev {
	struct pci_dev	       *pdev;
	unsigned long		flags;
	unsigned long		num_slaves;
	struct mlx4_caps	caps;
	struct radix_tree_root	qp_table_tree;
	u8			rev_id;
	char			board_id[MLX4_BOARD_ID_LEN];
	int			num_vfs;
};

struct mlx4_init_port_param {
	int			set_guid0;
	int			set_node_guid;
	int			set_si_guid;
	u16			mtu;
	int			port_width_cap;
	u16			vl_cap;
	u16			max_gid;
	u16			max_pkey;
	u64			guid0;
	u64			node_guid;
	u64			si_guid;
};

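/*
 * Iterate over the ports of "dev" whose configured type equals "type", e.g.:
 *
 *	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
 *		printk(KERN_INFO "port %d is Ethernet\n", port);
 */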
#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if ((type) == (dev)->caps.port_mask[(port)])

#define mlx4_foreach_ib_transport_port(port, dev)                         \
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
		if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
			((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))

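/*
 * Multi-function helpers: with SR-IOV enabled the PF runs as the master
 * and VFs run as slaves; mlx4_is_mfunc() is true in either role.
 */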
static inline int mlx4_is_master(struct mlx4_dev *dev)
{
	return dev->flags & MLX4_FLAG_MASTER;
}

static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
	return (qpn < dev->caps.sqp_start + 8);
}

static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
{
	return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
}

static inline int mlx4_is_slave(struct mlx4_dev *dev)
{
	return dev->flags & MLX4_FLAG_SLAVE;
}

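/* Resource management entry points implemented by the mlx4_core driver. */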
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}

int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);

int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf);

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);

int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);

int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			enum mlx4_protocol prot);
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol protocol);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol protocol);
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);

int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);

#endif /* MLX4_DEVICE_H */