1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38 
39 #if !defined(IB_VERBS_H)
40 #define IB_VERBS_H
41 
42 #include <linux/types.h>
43 #include <linux/device.h>
44 #include <linux/mm.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kref.h>
47 #include <linux/list.h>
48 #include <linux/rwsem.h>
49 #include <linux/scatterlist.h>
50 #include <linux/workqueue.h>
51 
52 #include <asm/atomic.h>
53 #include <asm/uaccess.h>
54 
55 extern struct workqueue_struct *ib_wq;
56 
57 union ib_gid {
58 	u8	raw[16];
59 	struct {
60 		__be64	subnet_prefix;
61 		__be64	interface_id;
62 	} global;
63 };
64 
65 enum rdma_node_type {
66 	/* IB values map to NodeInfo:NodeType. */
67 	RDMA_NODE_IB_CA 	= 1,
68 	RDMA_NODE_IB_SWITCH,
69 	RDMA_NODE_IB_ROUTER,
70 	RDMA_NODE_RNIC
71 };
72 
73 enum rdma_transport_type {
74 	RDMA_TRANSPORT_IB,
75 	RDMA_TRANSPORT_IWARP
76 };
77 
78 enum rdma_transport_type
79 rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
80 
81 enum rdma_link_layer {
82 	IB_LINK_LAYER_UNSPECIFIED,
83 	IB_LINK_LAYER_INFINIBAND,
84 	IB_LINK_LAYER_ETHERNET,
85 };
86 
87 enum ib_device_cap_flags {
88 	IB_DEVICE_RESIZE_MAX_WR		= 1,
89 	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
90 	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
91 	IB_DEVICE_RAW_MULTI		= (1<<3),
92 	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
93 	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
94 	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
95 	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
96 	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
97 	IB_DEVICE_INIT_TYPE		= (1<<9),
98 	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
99 	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
100 	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
101 	IB_DEVICE_SRQ_RESIZE		= (1<<13),
102 	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
103 	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
104 	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
105 	IB_DEVICE_MEM_WINDOW		= (1<<17),
106 	/*
107 	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
108 	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
109 	 * messages and can verify the validity of checksum for
110 	 * incoming messages.  Setting this flag implies that the
111 	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
112 	 */
113 	IB_DEVICE_UD_IP_CSUM		= (1<<18),
114 	IB_DEVICE_UD_TSO		= (1<<19),
115 	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
116 	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
117 };
118 
119 enum ib_atomic_cap {
120 	IB_ATOMIC_NONE,
121 	IB_ATOMIC_HCA,
122 	IB_ATOMIC_GLOB
123 };
124 
125 struct ib_device_attr {
126 	u64			fw_ver;
127 	__be64			sys_image_guid;
128 	u64			max_mr_size;
129 	u64			page_size_cap;
130 	u32			vendor_id;
131 	u32			vendor_part_id;
132 	u32			hw_ver;
133 	int			max_qp;
134 	int			max_qp_wr;
135 	int			device_cap_flags;
136 	int			max_sge;
137 	int			max_sge_rd;
138 	int			max_cq;
139 	int			max_cqe;
140 	int			max_mr;
141 	int			max_pd;
142 	int			max_qp_rd_atom;
143 	int			max_ee_rd_atom;
144 	int			max_res_rd_atom;
145 	int			max_qp_init_rd_atom;
146 	int			max_ee_init_rd_atom;
147 	enum ib_atomic_cap	atomic_cap;
148 	enum ib_atomic_cap	masked_atomic_cap;
149 	int			max_ee;
150 	int			max_rdd;
151 	int			max_mw;
152 	int			max_raw_ipv6_qp;
153 	int			max_raw_ethy_qp;
154 	int			max_mcast_grp;
155 	int			max_mcast_qp_attach;
156 	int			max_total_mcast_qp_attach;
157 	int			max_ah;
158 	int			max_fmr;
159 	int			max_map_per_fmr;
160 	int			max_srq;
161 	int			max_srq_wr;
162 	int			max_srq_sge;
163 	unsigned int		max_fast_reg_page_list_len;
164 	u16			max_pkeys;
165 	u8			local_ca_ack_delay;
166 };
167 
168 enum ib_mtu {
169 	IB_MTU_256  = 1,
170 	IB_MTU_512  = 2,
171 	IB_MTU_1024 = 3,
172 	IB_MTU_2048 = 4,
173 	IB_MTU_4096 = 5
174 };
175 
176 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
177 {
178 	switch (mtu) {
179 	case IB_MTU_256:  return  256;
180 	case IB_MTU_512:  return  512;
181 	case IB_MTU_1024: return 1024;
182 	case IB_MTU_2048: return 2048;
183 	case IB_MTU_4096: return 4096;
184 	default: 	  return -1;
185 	}
186 }
187 
188 enum ib_port_state {
189 	IB_PORT_NOP		= 0,
190 	IB_PORT_DOWN		= 1,
191 	IB_PORT_INIT		= 2,
192 	IB_PORT_ARMED		= 3,
193 	IB_PORT_ACTIVE		= 4,
194 	IB_PORT_ACTIVE_DEFER	= 5
195 };
196 
197 enum ib_port_cap_flags {
198 	IB_PORT_SM				= 1 <<  1,
199 	IB_PORT_NOTICE_SUP			= 1 <<  2,
200 	IB_PORT_TRAP_SUP			= 1 <<  3,
201 	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
202 	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
203 	IB_PORT_SL_MAP_SUP			= 1 <<  6,
204 	IB_PORT_MKEY_NVRAM			= 1 <<  7,
205 	IB_PORT_PKEY_NVRAM			= 1 <<  8,
206 	IB_PORT_LED_INFO_SUP			= 1 <<  9,
207 	IB_PORT_SM_DISABLED			= 1 << 10,
208 	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
209 	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
210 	IB_PORT_CM_SUP				= 1 << 16,
211 	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
212 	IB_PORT_REINIT_SUP			= 1 << 18,
213 	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
214 	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
215 	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
216 	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
217 	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
218 	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
219 	IB_PORT_CLIENT_REG_SUP			= 1 << 25
220 };
221 
222 enum ib_port_width {
223 	IB_WIDTH_1X	= 1,
224 	IB_WIDTH_4X	= 2,
225 	IB_WIDTH_8X	= 4,
226 	IB_WIDTH_12X	= 8
227 };
228 
229 static inline int ib_width_enum_to_int(enum ib_port_width width)
230 {
231 	switch (width) {
232 	case IB_WIDTH_1X:  return  1;
233 	case IB_WIDTH_4X:  return  4;
234 	case IB_WIDTH_8X:  return  8;
235 	case IB_WIDTH_12X: return 12;
236 	default: 	  return -1;
237 	}
238 }
239 
240 struct ib_protocol_stats {
241 	/* TBD... */
242 };
243 
244 struct iw_protocol_stats {
245 	u64	ipInReceives;
246 	u64	ipInHdrErrors;
247 	u64	ipInTooBigErrors;
248 	u64	ipInNoRoutes;
249 	u64	ipInAddrErrors;
250 	u64	ipInUnknownProtos;
251 	u64	ipInTruncatedPkts;
252 	u64	ipInDiscards;
253 	u64	ipInDelivers;
254 	u64	ipOutForwDatagrams;
255 	u64	ipOutRequests;
256 	u64	ipOutDiscards;
257 	u64	ipOutNoRoutes;
258 	u64	ipReasmTimeout;
259 	u64	ipReasmReqds;
260 	u64	ipReasmOKs;
261 	u64	ipReasmFails;
262 	u64	ipFragOKs;
263 	u64	ipFragFails;
264 	u64	ipFragCreates;
265 	u64	ipInMcastPkts;
266 	u64	ipOutMcastPkts;
267 	u64	ipInBcastPkts;
268 	u64	ipOutBcastPkts;
269 
270 	u64	tcpRtoAlgorithm;
271 	u64	tcpRtoMin;
272 	u64	tcpRtoMax;
273 	u64	tcpMaxConn;
274 	u64	tcpActiveOpens;
275 	u64	tcpPassiveOpens;
276 	u64	tcpAttemptFails;
277 	u64	tcpEstabResets;
278 	u64	tcpCurrEstab;
279 	u64	tcpInSegs;
280 	u64	tcpOutSegs;
281 	u64	tcpRetransSegs;
282 	u64	tcpInErrs;
283 	u64	tcpOutRsts;
284 };
285 
286 union rdma_protocol_stats {
287 	struct ib_protocol_stats	ib;
288 	struct iw_protocol_stats	iw;
289 };
290 
291 struct ib_port_attr {
292 	enum ib_port_state	state;
293 	enum ib_mtu		max_mtu;
294 	enum ib_mtu		active_mtu;
295 	int			gid_tbl_len;
296 	u32			port_cap_flags;
297 	u32			max_msg_sz;
298 	u32			bad_pkey_cntr;
299 	u32			qkey_viol_cntr;
300 	u16			pkey_tbl_len;
301 	u16			lid;
302 	u16			sm_lid;
303 	u8			lmc;
304 	u8			max_vl_num;
305 	u8			sm_sl;
306 	u8			subnet_timeout;
307 	u8			init_type_reply;
308 	u8			active_width;
309 	u8			active_speed;
310 	u8                      phys_state;
311 };
312 
313 enum ib_device_modify_flags {
314 	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
315 	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
316 };
317 
318 struct ib_device_modify {
319 	u64	sys_image_guid;
320 	char	node_desc[64];
321 };
322 
323 enum ib_port_modify_flags {
324 	IB_PORT_SHUTDOWN		= 1,
325 	IB_PORT_INIT_TYPE		= (1<<2),
326 	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
327 };
328 
329 struct ib_port_modify {
330 	u32	set_port_cap_mask;
331 	u32	clr_port_cap_mask;
332 	u8	init_type;
333 };
334 
335 enum ib_event_type {
336 	IB_EVENT_CQ_ERR,
337 	IB_EVENT_QP_FATAL,
338 	IB_EVENT_QP_REQ_ERR,
339 	IB_EVENT_QP_ACCESS_ERR,
340 	IB_EVENT_COMM_EST,
341 	IB_EVENT_SQ_DRAINED,
342 	IB_EVENT_PATH_MIG,
343 	IB_EVENT_PATH_MIG_ERR,
344 	IB_EVENT_DEVICE_FATAL,
345 	IB_EVENT_PORT_ACTIVE,
346 	IB_EVENT_PORT_ERR,
347 	IB_EVENT_LID_CHANGE,
348 	IB_EVENT_PKEY_CHANGE,
349 	IB_EVENT_SM_CHANGE,
350 	IB_EVENT_SRQ_ERR,
351 	IB_EVENT_SRQ_LIMIT_REACHED,
352 	IB_EVENT_QP_LAST_WQE_REACHED,
353 	IB_EVENT_CLIENT_REREGISTER
354 };
355 
356 struct ib_event {
357 	struct ib_device	*device;
358 	union {
359 		struct ib_cq	*cq;
360 		struct ib_qp	*qp;
361 		struct ib_srq	*srq;
362 		u8		port_num;
363 	} element;
364 	enum ib_event_type	event;
365 };
366 
367 struct ib_event_handler {
368 	struct ib_device *device;
369 	void            (*handler)(struct ib_event_handler *, struct ib_event *);
370 	struct list_head  list;
371 };
372 
373 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
374 	do {							\
375 		(_ptr)->device  = _device;			\
376 		(_ptr)->handler = _handler;			\
377 		INIT_LIST_HEAD(&(_ptr)->list);			\
378 	} while (0)
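
/*
 * Example (illustrative sketch, not part of this header): a consumer could
 * register for asynchronous events roughly as follows; "my_event_handler",
 * "my_handler" and "my_device" are hypothetical names.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			printk(KERN_INFO "%s: port %d is active\n",
 *			       event->device->name, event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler my_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, my_device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */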
379 
380 struct ib_global_route {
381 	union ib_gid	dgid;
382 	u32		flow_label;
383 	u8		sgid_index;
384 	u8		hop_limit;
385 	u8		traffic_class;
386 };
387 
388 struct ib_grh {
389 	__be32		version_tclass_flow;
390 	__be16		paylen;
391 	u8		next_hdr;
392 	u8		hop_limit;
393 	union ib_gid	sgid;
394 	union ib_gid	dgid;
395 };
396 
397 enum {
398 	IB_MULTICAST_QPN = 0xffffff
399 };
400 
401 #define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
402 
403 enum ib_ah_flags {
404 	IB_AH_GRH	= 1
405 };
406 
407 enum ib_rate {
408 	IB_RATE_PORT_CURRENT = 0,
409 	IB_RATE_2_5_GBPS = 2,
410 	IB_RATE_5_GBPS   = 5,
411 	IB_RATE_10_GBPS  = 3,
412 	IB_RATE_20_GBPS  = 6,
413 	IB_RATE_30_GBPS  = 4,
414 	IB_RATE_40_GBPS  = 7,
415 	IB_RATE_60_GBPS  = 8,
416 	IB_RATE_80_GBPS  = 9,
417 	IB_RATE_120_GBPS = 10
418 };
419 
420 /**
421  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
422  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
423  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
424  * @rate: rate to convert.
425  */
426 int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
427 
428 /**
429  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
430  * enum.
431  * @mult: multiple to convert.
432  */
433 enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
434 
435 struct ib_ah_attr {
436 	struct ib_global_route	grh;
437 	u16			dlid;
438 	u8			sl;
439 	u8			src_path_bits;
440 	u8			static_rate;
441 	u8			ah_flags;
442 	u8			port_num;
443 };
444 
445 enum ib_wc_status {
446 	IB_WC_SUCCESS,
447 	IB_WC_LOC_LEN_ERR,
448 	IB_WC_LOC_QP_OP_ERR,
449 	IB_WC_LOC_EEC_OP_ERR,
450 	IB_WC_LOC_PROT_ERR,
451 	IB_WC_WR_FLUSH_ERR,
452 	IB_WC_MW_BIND_ERR,
453 	IB_WC_BAD_RESP_ERR,
454 	IB_WC_LOC_ACCESS_ERR,
455 	IB_WC_REM_INV_REQ_ERR,
456 	IB_WC_REM_ACCESS_ERR,
457 	IB_WC_REM_OP_ERR,
458 	IB_WC_RETRY_EXC_ERR,
459 	IB_WC_RNR_RETRY_EXC_ERR,
460 	IB_WC_LOC_RDD_VIOL_ERR,
461 	IB_WC_REM_INV_RD_REQ_ERR,
462 	IB_WC_REM_ABORT_ERR,
463 	IB_WC_INV_EECN_ERR,
464 	IB_WC_INV_EEC_STATE_ERR,
465 	IB_WC_FATAL_ERR,
466 	IB_WC_RESP_TIMEOUT_ERR,
467 	IB_WC_GENERAL_ERR
468 };
469 
470 enum ib_wc_opcode {
471 	IB_WC_SEND,
472 	IB_WC_RDMA_WRITE,
473 	IB_WC_RDMA_READ,
474 	IB_WC_COMP_SWAP,
475 	IB_WC_FETCH_ADD,
476 	IB_WC_BIND_MW,
477 	IB_WC_LSO,
478 	IB_WC_LOCAL_INV,
479 	IB_WC_FAST_REG_MR,
480 	IB_WC_MASKED_COMP_SWAP,
481 	IB_WC_MASKED_FETCH_ADD,
482 /*
483  * Set value of IB_WC_RECV so consumers can test if a completion is a
484  * receive by testing (opcode & IB_WC_RECV).
485  */
486 	IB_WC_RECV			= 1 << 7,
487 	IB_WC_RECV_RDMA_WITH_IMM
488 };
489 
490 enum ib_wc_flags {
491 	IB_WC_GRH		= 1,
492 	IB_WC_WITH_IMM		= (1<<1),
493 	IB_WC_WITH_INVALIDATE	= (1<<2),
494 };
495 
496 struct ib_wc {
497 	u64			wr_id;
498 	enum ib_wc_status	status;
499 	enum ib_wc_opcode	opcode;
500 	u32			vendor_err;
501 	u32			byte_len;
502 	struct ib_qp	       *qp;
503 	union {
504 		__be32		imm_data;
505 		u32		invalidate_rkey;
506 	} ex;
507 	u32			src_qp;
508 	int			wc_flags;
509 	u16			pkey_index;
510 	u16			slid;
511 	u8			sl;
512 	u8			dlid_path_bits;
513 	u8			port_num;	/* valid only for DR SMPs on switches */
514 	int			csum_ok;
515 };
516 
517 enum ib_cq_notify_flags {
518 	IB_CQ_SOLICITED			= 1 << 0,
519 	IB_CQ_NEXT_COMP			= 1 << 1,
520 	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
521 	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
522 };
523 
524 enum ib_srq_attr_mask {
525 	IB_SRQ_MAX_WR	= 1 << 0,
526 	IB_SRQ_LIMIT	= 1 << 1,
527 };
528 
529 struct ib_srq_attr {
530 	u32	max_wr;
531 	u32	max_sge;
532 	u32	srq_limit;
533 };
534 
535 struct ib_srq_init_attr {
536 	void		      (*event_handler)(struct ib_event *, void *);
537 	void		       *srq_context;
538 	struct ib_srq_attr	attr;
539 };
540 
541 struct ib_qp_cap {
542 	u32	max_send_wr;
543 	u32	max_recv_wr;
544 	u32	max_send_sge;
545 	u32	max_recv_sge;
546 	u32	max_inline_data;
547 };
548 
549 enum ib_sig_type {
550 	IB_SIGNAL_ALL_WR,
551 	IB_SIGNAL_REQ_WR
552 };
553 
554 enum ib_qp_type {
555 	/*
556 	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
557 	 * here (and in that order) since the MAD layer uses them as
558 	 * indices into a 2-entry table.
559 	 */
560 	IB_QPT_SMI,
561 	IB_QPT_GSI,
562 
563 	IB_QPT_RC,
564 	IB_QPT_UC,
565 	IB_QPT_UD,
566 	IB_QPT_RAW_IPV6,
567 	IB_QPT_RAW_ETHERTYPE
568 };
569 
570 enum ib_qp_create_flags {
571 	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
572 	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
573 };
574 
575 struct ib_qp_init_attr {
576 	void                  (*event_handler)(struct ib_event *, void *);
577 	void		       *qp_context;
578 	struct ib_cq	       *send_cq;
579 	struct ib_cq	       *recv_cq;
580 	struct ib_srq	       *srq;
581 	struct ib_qp_cap	cap;
582 	enum ib_sig_type	sq_sig_type;
583 	enum ib_qp_type		qp_type;
584 	enum ib_qp_create_flags	create_flags;
585 	u8			port_num; /* special QP types only */
586 };
587 
588 enum ib_rnr_timeout {
589 	IB_RNR_TIMER_655_36 =  0,
590 	IB_RNR_TIMER_000_01 =  1,
591 	IB_RNR_TIMER_000_02 =  2,
592 	IB_RNR_TIMER_000_03 =  3,
593 	IB_RNR_TIMER_000_04 =  4,
594 	IB_RNR_TIMER_000_06 =  5,
595 	IB_RNR_TIMER_000_08 =  6,
596 	IB_RNR_TIMER_000_12 =  7,
597 	IB_RNR_TIMER_000_16 =  8,
598 	IB_RNR_TIMER_000_24 =  9,
599 	IB_RNR_TIMER_000_32 = 10,
600 	IB_RNR_TIMER_000_48 = 11,
601 	IB_RNR_TIMER_000_64 = 12,
602 	IB_RNR_TIMER_000_96 = 13,
603 	IB_RNR_TIMER_001_28 = 14,
604 	IB_RNR_TIMER_001_92 = 15,
605 	IB_RNR_TIMER_002_56 = 16,
606 	IB_RNR_TIMER_003_84 = 17,
607 	IB_RNR_TIMER_005_12 = 18,
608 	IB_RNR_TIMER_007_68 = 19,
609 	IB_RNR_TIMER_010_24 = 20,
610 	IB_RNR_TIMER_015_36 = 21,
611 	IB_RNR_TIMER_020_48 = 22,
612 	IB_RNR_TIMER_030_72 = 23,
613 	IB_RNR_TIMER_040_96 = 24,
614 	IB_RNR_TIMER_061_44 = 25,
615 	IB_RNR_TIMER_081_92 = 26,
616 	IB_RNR_TIMER_122_88 = 27,
617 	IB_RNR_TIMER_163_84 = 28,
618 	IB_RNR_TIMER_245_76 = 29,
619 	IB_RNR_TIMER_327_68 = 30,
620 	IB_RNR_TIMER_491_52 = 31
621 };
622 
623 enum ib_qp_attr_mask {
624 	IB_QP_STATE			= 1,
625 	IB_QP_CUR_STATE			= (1<<1),
626 	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
627 	IB_QP_ACCESS_FLAGS		= (1<<3),
628 	IB_QP_PKEY_INDEX		= (1<<4),
629 	IB_QP_PORT			= (1<<5),
630 	IB_QP_QKEY			= (1<<6),
631 	IB_QP_AV			= (1<<7),
632 	IB_QP_PATH_MTU			= (1<<8),
633 	IB_QP_TIMEOUT			= (1<<9),
634 	IB_QP_RETRY_CNT			= (1<<10),
635 	IB_QP_RNR_RETRY			= (1<<11),
636 	IB_QP_RQ_PSN			= (1<<12),
637 	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
638 	IB_QP_ALT_PATH			= (1<<14),
639 	IB_QP_MIN_RNR_TIMER		= (1<<15),
640 	IB_QP_SQ_PSN			= (1<<16),
641 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
642 	IB_QP_PATH_MIG_STATE		= (1<<18),
643 	IB_QP_CAP			= (1<<19),
644 	IB_QP_DEST_QPN			= (1<<20)
645 };
646 
647 enum ib_qp_state {
648 	IB_QPS_RESET,
649 	IB_QPS_INIT,
650 	IB_QPS_RTR,
651 	IB_QPS_RTS,
652 	IB_QPS_SQD,
653 	IB_QPS_SQE,
654 	IB_QPS_ERR
655 };
656 
657 enum ib_mig_state {
658 	IB_MIG_MIGRATED,
659 	IB_MIG_REARM,
660 	IB_MIG_ARMED
661 };
662 
663 struct ib_qp_attr {
664 	enum ib_qp_state	qp_state;
665 	enum ib_qp_state	cur_qp_state;
666 	enum ib_mtu		path_mtu;
667 	enum ib_mig_state	path_mig_state;
668 	u32			qkey;
669 	u32			rq_psn;
670 	u32			sq_psn;
671 	u32			dest_qp_num;
672 	int			qp_access_flags;
673 	struct ib_qp_cap	cap;
674 	struct ib_ah_attr	ah_attr;
675 	struct ib_ah_attr	alt_ah_attr;
676 	u16			pkey_index;
677 	u16			alt_pkey_index;
678 	u8			en_sqd_async_notify;
679 	u8			sq_draining;
680 	u8			max_rd_atomic;
681 	u8			max_dest_rd_atomic;
682 	u8			min_rnr_timer;
683 	u8			port_num;
684 	u8			timeout;
685 	u8			retry_cnt;
686 	u8			rnr_retry;
687 	u8			alt_port_num;
688 	u8			alt_timeout;
689 };
690 
691 enum ib_wr_opcode {
692 	IB_WR_RDMA_WRITE,
693 	IB_WR_RDMA_WRITE_WITH_IMM,
694 	IB_WR_SEND,
695 	IB_WR_SEND_WITH_IMM,
696 	IB_WR_RDMA_READ,
697 	IB_WR_ATOMIC_CMP_AND_SWP,
698 	IB_WR_ATOMIC_FETCH_AND_ADD,
699 	IB_WR_LSO,
700 	IB_WR_SEND_WITH_INV,
701 	IB_WR_RDMA_READ_WITH_INV,
702 	IB_WR_LOCAL_INV,
703 	IB_WR_FAST_REG_MR,
704 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
705 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
706 };
707 
708 enum ib_send_flags {
709 	IB_SEND_FENCE		= 1,
710 	IB_SEND_SIGNALED	= (1<<1),
711 	IB_SEND_SOLICITED	= (1<<2),
712 	IB_SEND_INLINE		= (1<<3),
713 	IB_SEND_IP_CSUM		= (1<<4)
714 };
715 
716 struct ib_sge {
717 	u64	addr;
718 	u32	length;
719 	u32	lkey;
720 };
721 
722 struct ib_fast_reg_page_list {
723 	struct ib_device       *device;
724 	u64		       *page_list;
725 	unsigned int		max_page_list_len;
726 };
727 
728 struct ib_send_wr {
729 	struct ib_send_wr      *next;
730 	u64			wr_id;
731 	struct ib_sge	       *sg_list;
732 	int			num_sge;
733 	enum ib_wr_opcode	opcode;
734 	int			send_flags;
735 	union {
736 		__be32		imm_data;
737 		u32		invalidate_rkey;
738 	} ex;
739 	union {
740 		struct {
741 			u64	remote_addr;
742 			u32	rkey;
743 		} rdma;
744 		struct {
745 			u64	remote_addr;
746 			u64	compare_add;
747 			u64	swap;
748 			u64	compare_add_mask;
749 			u64	swap_mask;
750 			u32	rkey;
751 		} atomic;
752 		struct {
753 			struct ib_ah *ah;
754 			void   *header;
755 			int     hlen;
756 			int     mss;
757 			u32	remote_qpn;
758 			u32	remote_qkey;
759 			u16	pkey_index; /* valid for GSI only */
760 			u8	port_num;   /* valid for DR SMPs on switch only */
761 		} ud;
762 		struct {
763 			u64				iova_start;
764 			struct ib_fast_reg_page_list   *page_list;
765 			unsigned int			page_shift;
766 			unsigned int			page_list_len;
767 			u32				length;
768 			int				access_flags;
769 			u32				rkey;
770 		} fast_reg;
771 	} wr;
772 };
773 
774 struct ib_recv_wr {
775 	struct ib_recv_wr      *next;
776 	u64			wr_id;
777 	struct ib_sge	       *sg_list;
778 	int			num_sge;
779 };
780 
781 enum ib_access_flags {
782 	IB_ACCESS_LOCAL_WRITE	= 1,
783 	IB_ACCESS_REMOTE_WRITE	= (1<<1),
784 	IB_ACCESS_REMOTE_READ	= (1<<2),
785 	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
786 	IB_ACCESS_MW_BIND	= (1<<4)
787 };
788 
789 struct ib_phys_buf {
790 	u64      addr;
791 	u64      size;
792 };
793 
794 struct ib_mr_attr {
795 	struct ib_pd	*pd;
796 	u64		device_virt_addr;
797 	u64		size;
798 	int		mr_access_flags;
799 	u32		lkey;
800 	u32		rkey;
801 };
802 
803 enum ib_mr_rereg_flags {
804 	IB_MR_REREG_TRANS	= 1,
805 	IB_MR_REREG_PD		= (1<<1),
806 	IB_MR_REREG_ACCESS	= (1<<2)
807 };
808 
809 struct ib_mw_bind {
810 	struct ib_mr   *mr;
811 	u64		wr_id;
812 	u64		addr;
813 	u32		length;
814 	int		send_flags;
815 	int		mw_access_flags;
816 };
817 
818 struct ib_fmr_attr {
819 	int	max_pages;
820 	int	max_maps;
821 	u8	page_shift;
822 };
823 
824 struct ib_ucontext {
825 	struct ib_device       *device;
826 	struct list_head	pd_list;
827 	struct list_head	mr_list;
828 	struct list_head	mw_list;
829 	struct list_head	cq_list;
830 	struct list_head	qp_list;
831 	struct list_head	srq_list;
832 	struct list_head	ah_list;
833 	int			closing;
834 };
835 
836 struct ib_uobject {
837 	u64			user_handle;	/* handle given to us by userspace */
838 	struct ib_ucontext     *context;	/* associated user context */
839 	void		       *object;		/* containing object */
840 	struct list_head	list;		/* link to context's list */
841 	int			id;		/* index into kernel idr */
842 	struct kref		ref;
843 	struct rw_semaphore	mutex;		/* protects .live */
844 	int			live;
845 };
846 
847 struct ib_udata {
848 	void __user *inbuf;
849 	void __user *outbuf;
850 	size_t       inlen;
851 	size_t       outlen;
852 };
853 
854 struct ib_pd {
855 	struct ib_device       *device;
856 	struct ib_uobject      *uobject;
857 	atomic_t          	usecnt; /* count all resources */
858 };
859 
860 struct ib_ah {
861 	struct ib_device	*device;
862 	struct ib_pd		*pd;
863 	struct ib_uobject	*uobject;
864 };
865 
866 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
867 
868 struct ib_cq {
869 	struct ib_device       *device;
870 	struct ib_uobject      *uobject;
871 	ib_comp_handler   	comp_handler;
872 	void                  (*event_handler)(struct ib_event *, void *);
873 	void                   *cq_context;
874 	int               	cqe;
875 	atomic_t          	usecnt; /* count number of work queues */
876 };
877 
878 struct ib_srq {
879 	struct ib_device       *device;
880 	struct ib_pd	       *pd;
881 	struct ib_uobject      *uobject;
882 	void		      (*event_handler)(struct ib_event *, void *);
883 	void		       *srq_context;
884 	atomic_t		usecnt;
885 };
886 
887 struct ib_qp {
888 	struct ib_device       *device;
889 	struct ib_pd	       *pd;
890 	struct ib_cq	       *send_cq;
891 	struct ib_cq	       *recv_cq;
892 	struct ib_srq	       *srq;
893 	struct ib_uobject      *uobject;
894 	void                  (*event_handler)(struct ib_event *, void *);
895 	void		       *qp_context;
896 	u32			qp_num;
897 	enum ib_qp_type		qp_type;
898 };
899 
900 struct ib_mr {
901 	struct ib_device  *device;
902 	struct ib_pd	  *pd;
903 	struct ib_uobject *uobject;
904 	u32		   lkey;
905 	u32		   rkey;
906 	atomic_t	   usecnt; /* count number of MWs */
907 };
908 
909 struct ib_mw {
910 	struct ib_device	*device;
911 	struct ib_pd		*pd;
912 	struct ib_uobject	*uobject;
913 	u32			rkey;
914 };
915 
916 struct ib_fmr {
917 	struct ib_device	*device;
918 	struct ib_pd		*pd;
919 	struct list_head	list;
920 	u32			lkey;
921 	u32			rkey;
922 };
923 
924 struct ib_mad;
925 struct ib_grh;
926 
927 enum ib_process_mad_flags {
928 	IB_MAD_IGNORE_MKEY	= 1,
929 	IB_MAD_IGNORE_BKEY	= 2,
930 	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
931 };
932 
933 enum ib_mad_result {
934 	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
935 	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
936 	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
937 	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
938 };
939 
940 #define IB_DEVICE_NAME_MAX 64
941 
942 struct ib_cache {
943 	rwlock_t                lock;
944 	struct ib_event_handler event_handler;
945 	struct ib_pkey_cache  **pkey_cache;
946 	struct ib_gid_cache   **gid_cache;
947 	u8                     *lmc_cache;
948 };
949 
950 struct ib_dma_mapping_ops {
951 	int		(*mapping_error)(struct ib_device *dev,
952 					 u64 dma_addr);
953 	u64		(*map_single)(struct ib_device *dev,
954 				      void *ptr, size_t size,
955 				      enum dma_data_direction direction);
956 	void		(*unmap_single)(struct ib_device *dev,
957 					u64 addr, size_t size,
958 					enum dma_data_direction direction);
959 	u64		(*map_page)(struct ib_device *dev,
960 				    struct page *page, unsigned long offset,
961 				    size_t size,
962 				    enum dma_data_direction direction);
963 	void		(*unmap_page)(struct ib_device *dev,
964 				      u64 addr, size_t size,
965 				      enum dma_data_direction direction);
966 	int		(*map_sg)(struct ib_device *dev,
967 				  struct scatterlist *sg, int nents,
968 				  enum dma_data_direction direction);
969 	void		(*unmap_sg)(struct ib_device *dev,
970 				    struct scatterlist *sg, int nents,
971 				    enum dma_data_direction direction);
972 	u64		(*dma_address)(struct ib_device *dev,
973 				       struct scatterlist *sg);
974 	unsigned int	(*dma_len)(struct ib_device *dev,
975 				   struct scatterlist *sg);
976 	void		(*sync_single_for_cpu)(struct ib_device *dev,
977 					       u64 dma_handle,
978 					       size_t size,
979 					       enum dma_data_direction dir);
980 	void		(*sync_single_for_device)(struct ib_device *dev,
981 						  u64 dma_handle,
982 						  size_t size,
983 						  enum dma_data_direction dir);
984 	void		*(*alloc_coherent)(struct ib_device *dev,
985 					   size_t size,
986 					   u64 *dma_handle,
987 					   gfp_t flag);
988 	void		(*free_coherent)(struct ib_device *dev,
989 					 size_t size, void *cpu_addr,
990 					 u64 dma_handle);
991 };
992 
993 struct iw_cm_verbs;
994 
995 struct ib_device {
996 	struct device                *dma_device;
997 
998 	char                          name[IB_DEVICE_NAME_MAX];
999 
1000 	struct list_head              event_handler_list;
1001 	spinlock_t                    event_handler_lock;
1002 
1003 	spinlock_t                    client_data_lock;
1004 	struct list_head              core_list;
1005 	struct list_head              client_data_list;
1006 
1007 	struct ib_cache               cache;
1008 	int                          *pkey_tbl_len;
1009 	int                          *gid_tbl_len;
1010 
1011 	int			      num_comp_vectors;
1012 
1013 	struct iw_cm_verbs	     *iwcm;
1014 
1015 	int		           (*get_protocol_stats)(struct ib_device *device,
1016 							 union rdma_protocol_stats *stats);
1017 	int		           (*query_device)(struct ib_device *device,
1018 						   struct ib_device_attr *device_attr);
1019 	int		           (*query_port)(struct ib_device *device,
1020 						 u8 port_num,
1021 						 struct ib_port_attr *port_attr);
1022 	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
1023 						     u8 port_num);
1024 	int		           (*query_gid)(struct ib_device *device,
1025 						u8 port_num, int index,
1026 						union ib_gid *gid);
1027 	int		           (*query_pkey)(struct ib_device *device,
1028 						 u8 port_num, u16 index, u16 *pkey);
1029 	int		           (*modify_device)(struct ib_device *device,
1030 						    int device_modify_mask,
1031 						    struct ib_device_modify *device_modify);
1032 	int		           (*modify_port)(struct ib_device *device,
1033 						  u8 port_num, int port_modify_mask,
1034 						  struct ib_port_modify *port_modify);
1035 	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
1036 						     struct ib_udata *udata);
1037 	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
1038 	int                        (*mmap)(struct ib_ucontext *context,
1039 					   struct vm_area_struct *vma);
1040 	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
1041 					       struct ib_ucontext *context,
1042 					       struct ib_udata *udata);
1043 	int                        (*dealloc_pd)(struct ib_pd *pd);
1044 	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
1045 						struct ib_ah_attr *ah_attr);
1046 	int                        (*modify_ah)(struct ib_ah *ah,
1047 						struct ib_ah_attr *ah_attr);
1048 	int                        (*query_ah)(struct ib_ah *ah,
1049 					       struct ib_ah_attr *ah_attr);
1050 	int                        (*destroy_ah)(struct ib_ah *ah);
1051 	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
1052 						 struct ib_srq_init_attr *srq_init_attr,
1053 						 struct ib_udata *udata);
1054 	int                        (*modify_srq)(struct ib_srq *srq,
1055 						 struct ib_srq_attr *srq_attr,
1056 						 enum ib_srq_attr_mask srq_attr_mask,
1057 						 struct ib_udata *udata);
1058 	int                        (*query_srq)(struct ib_srq *srq,
1059 						struct ib_srq_attr *srq_attr);
1060 	int                        (*destroy_srq)(struct ib_srq *srq);
1061 	int                        (*post_srq_recv)(struct ib_srq *srq,
1062 						    struct ib_recv_wr *recv_wr,
1063 						    struct ib_recv_wr **bad_recv_wr);
1064 	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
1065 						struct ib_qp_init_attr *qp_init_attr,
1066 						struct ib_udata *udata);
1067 	int                        (*modify_qp)(struct ib_qp *qp,
1068 						struct ib_qp_attr *qp_attr,
1069 						int qp_attr_mask,
1070 						struct ib_udata *udata);
1071 	int                        (*query_qp)(struct ib_qp *qp,
1072 					       struct ib_qp_attr *qp_attr,
1073 					       int qp_attr_mask,
1074 					       struct ib_qp_init_attr *qp_init_attr);
1075 	int                        (*destroy_qp)(struct ib_qp *qp);
1076 	int                        (*post_send)(struct ib_qp *qp,
1077 						struct ib_send_wr *send_wr,
1078 						struct ib_send_wr **bad_send_wr);
1079 	int                        (*post_recv)(struct ib_qp *qp,
1080 						struct ib_recv_wr *recv_wr,
1081 						struct ib_recv_wr **bad_recv_wr);
1082 	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
1083 						int comp_vector,
1084 						struct ib_ucontext *context,
1085 						struct ib_udata *udata);
1086 	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1087 						u16 cq_period);
1088 	int                        (*destroy_cq)(struct ib_cq *cq);
1089 	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
1090 						struct ib_udata *udata);
1091 	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
1092 					      struct ib_wc *wc);
1093 	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1094 	int                        (*req_notify_cq)(struct ib_cq *cq,
1095 						    enum ib_cq_notify_flags flags);
1096 	int                        (*req_ncomp_notif)(struct ib_cq *cq,
1097 						      int wc_cnt);
1098 	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
1099 						 int mr_access_flags);
1100 	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
1101 						  struct ib_phys_buf *phys_buf_array,
1102 						  int num_phys_buf,
1103 						  int mr_access_flags,
1104 						  u64 *iova_start);
1105 	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
1106 						  u64 start, u64 length,
1107 						  u64 virt_addr,
1108 						  int mr_access_flags,
1109 						  struct ib_udata *udata);
1110 	int                        (*query_mr)(struct ib_mr *mr,
1111 					       struct ib_mr_attr *mr_attr);
1112 	int                        (*dereg_mr)(struct ib_mr *mr);
1113 	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
1114 					       int max_page_list_len);
1115 	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1116 								   int page_list_len);
1117 	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1118 	int                        (*rereg_phys_mr)(struct ib_mr *mr,
1119 						    int mr_rereg_mask,
1120 						    struct ib_pd *pd,
1121 						    struct ib_phys_buf *phys_buf_array,
1122 						    int num_phys_buf,
1123 						    int mr_access_flags,
1124 						    u64 *iova_start);
1125 	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
1126 	int                        (*bind_mw)(struct ib_qp *qp,
1127 					      struct ib_mw *mw,
1128 					      struct ib_mw_bind *mw_bind);
1129 	int                        (*dealloc_mw)(struct ib_mw *mw);
1130 	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
1131 						int mr_access_flags,
1132 						struct ib_fmr_attr *fmr_attr);
1133 	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
1134 						   u64 *page_list, int list_len,
1135 						   u64 iova);
1136 	int		           (*unmap_fmr)(struct list_head *fmr_list);
1137 	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
1138 	int                        (*attach_mcast)(struct ib_qp *qp,
1139 						   union ib_gid *gid,
1140 						   u16 lid);
1141 	int                        (*detach_mcast)(struct ib_qp *qp,
1142 						   union ib_gid *gid,
1143 						   u16 lid);
1144 	int                        (*process_mad)(struct ib_device *device,
1145 						  int process_mad_flags,
1146 						  u8 port_num,
1147 						  struct ib_wc *in_wc,
1148 						  struct ib_grh *in_grh,
1149 						  struct ib_mad *in_mad,
1150 						  struct ib_mad *out_mad);
1151 
1152 	struct ib_dma_mapping_ops   *dma_ops;
1153 
1154 	struct module               *owner;
1155 	struct device                dev;
1156 	struct kobject               *ports_parent;
1157 	struct list_head             port_list;
1158 
1159 	enum {
1160 		IB_DEV_UNINITIALIZED,
1161 		IB_DEV_REGISTERED,
1162 		IB_DEV_UNREGISTERED
1163 	}                            reg_state;
1164 
1165 	int			     uverbs_abi_ver;
1166 	u64			     uverbs_cmd_mask;
1167 
1168 	char			     node_desc[64];
1169 	__be64			     node_guid;
1170 	u32			     local_dma_lkey;
1171 	u8                           node_type;
1172 	u8                           phys_port_cnt;
1173 };
1174 
1175 struct ib_client {
1176 	char  *name;
1177 	void (*add)   (struct ib_device *);
1178 	void (*remove)(struct ib_device *);
1179 
1180 	struct list_head list;
1181 };
1182 
1183 struct ib_device *ib_alloc_device(size_t size);
1184 void ib_dealloc_device(struct ib_device *device);
1185 
1186 int ib_register_device(struct ib_device *device,
1187 		       int (*port_callback)(struct ib_device *,
1188 					    u8, struct kobject *));
1189 void ib_unregister_device(struct ib_device *device);
1190 
1191 int ib_register_client   (struct ib_client *client);
1192 void ib_unregister_client(struct ib_client *client);
1193 
1194 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1195 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
1196 			 void *data);
1197 
1198 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1199 {
1200 	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1201 }
1202 
1203 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1204 {
1205 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1206 }
1207 
1208 /**
1209  * ib_modify_qp_is_ok - Check that the supplied attribute mask
1210  * contains all required attributes and no attributes not allowed for
1211  * the given QP state transition.
1212  * @cur_state: Current QP state
1213  * @next_state: Next QP state
1214  * @type: QP type
1215  * @mask: Mask of supplied QP attributes
1216  *
1217  * This function is a helper function that a low-level driver's
1218  * modify_qp method can use to validate the consumer's input.  It
1219  * checks that cur_state and next_state are valid QP states, that a
1220  * transition from cur_state to next_state is allowed by the IB spec,
1221  * and that the attribute mask supplied is allowed for the transition.
1222  */
1223 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1224 		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
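
/*
 * Example (hedged sketch): how a low-level driver's modify_qp method might
 * use this helper.  "my_qp_state" and "my_program_hardware" are purely
 * hypothetical stand-ins for however the driver tracks QP state and
 * programs its hardware.
 *
 *	static int my_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 *				int attr_mask, struct ib_udata *udata)
 *	{
 *		enum ib_qp_state cur, next;
 *
 *		cur  = (attr_mask & IB_QP_CUR_STATE) ?
 *			attr->cur_qp_state : my_qp_state(qp);
 *		next = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur;
 *
 *		if (!ib_modify_qp_is_ok(cur, next, qp->qp_type, attr_mask))
 *			return -EINVAL;
 *
 *		return my_program_hardware(qp, attr, attr_mask);
 *	}
 */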
1225 
1226 int ib_register_event_handler  (struct ib_event_handler *event_handler);
1227 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1228 void ib_dispatch_event(struct ib_event *event);
1229 
1230 int ib_query_device(struct ib_device *device,
1231 		    struct ib_device_attr *device_attr);
1232 
1233 int ib_query_port(struct ib_device *device,
1234 		  u8 port_num, struct ib_port_attr *port_attr);
1235 
1236 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1237 					       u8 port_num);
1238 
1239 int ib_query_gid(struct ib_device *device,
1240 		 u8 port_num, int index, union ib_gid *gid);
1241 
1242 int ib_query_pkey(struct ib_device *device,
1243 		  u8 port_num, u16 index, u16 *pkey);
1244 
1245 int ib_modify_device(struct ib_device *device,
1246 		     int device_modify_mask,
1247 		     struct ib_device_modify *device_modify);
1248 
1249 int ib_modify_port(struct ib_device *device,
1250 		   u8 port_num, int port_modify_mask,
1251 		   struct ib_port_modify *port_modify);
1252 
1253 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1254 		u8 *port_num, u16 *index);
1255 
1256 int ib_find_pkey(struct ib_device *device,
1257 		 u8 port_num, u16 pkey, u16 *index);
1258 
1259 /**
1260  * ib_alloc_pd - Allocates an unused protection domain.
1261  * @device: The device on which to allocate the protection domain.
1262  *
1263  * A protection domain object provides an association between QPs, shared
1264  * receive queues, address handles, memory regions, and memory windows.
1265  */
1266 struct ib_pd *ib_alloc_pd(struct ib_device *device);
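
/*
 * Example (illustrative sketch): allocating and later releasing a PD.
 * IS_ERR() and PTR_ERR() come from <linux/err.h>.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */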
1267 
1268 /**
1269  * ib_dealloc_pd - Deallocates a protection domain.
1270  * @pd: The protection domain to deallocate.
1271  */
1272 int ib_dealloc_pd(struct ib_pd *pd);
1273 
1274 /**
1275  * ib_create_ah - Creates an address handle for the given address vector.
1276  * @pd: The protection domain associated with the address handle.
1277  * @ah_attr: The attributes of the address vector.
1278  *
1279  * The address handle is used to reference a local or global destination
1280  * in all UD QP post sends.
1281  */
1282 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1283 
1284 /**
1285  * ib_init_ah_from_wc - Initializes address handle attributes from a
1286  *   work completion.
1287  * @device: Device on which the received message arrived.
1288  * @port_num: Port on which the received message arrived.
1289  * @wc: Work completion associated with the received message.
1290  * @grh: References the received global route header.  This parameter is
1291  *   ignored unless the work completion indicates that the GRH is valid.
1292  * @ah_attr: Returned attributes that can be used when creating an address
1293  *   handle for replying to the message.
1294  */
1295 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1296 		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1297 
1298 /**
1299  * ib_create_ah_from_wc - Creates an address handle associated with the
1300  *   sender of the specified work completion.
1301  * @pd: The protection domain associated with the address handle.
1302  * @wc: Work completion information associated with a received message.
1303  * @grh: References the received global route header.  This parameter is
1304  *   ignored unless the work completion indicates that the GRH is valid.
1305  * @port_num: The outbound port number to associate with the address.
1306  *
1307  * The address handle is used to reference a local or global destination
1308  * in all UD QP post sends.
1309  */
1310 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1311 				   struct ib_grh *grh, u8 port_num);
1312 
1313 /**
1314  * ib_modify_ah - Modifies the address vector associated with an address
1315  *   handle.
1316  * @ah: The address handle to modify.
1317  * @ah_attr: The new address vector attributes to associate with the
1318  *   address handle.
1319  */
1320 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1321 
1322 /**
1323  * ib_query_ah - Queries the address vector associated with an address
1324  *   handle.
1325  * @ah: The address handle to query.
1326  * @ah_attr: The address vector attributes associated with the address
1327  *   handle.
1328  */
1329 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1330 
1331 /**
1332  * ib_destroy_ah - Destroys an address handle.
1333  * @ah: The address handle to destroy.
1334  */
1335 int ib_destroy_ah(struct ib_ah *ah);
1336 
1337 /**
1338  * ib_create_srq - Creates a SRQ associated with the specified protection
1339  *   domain.
1340  * @pd: The protection domain associated with the SRQ.
1341  * @srq_init_attr: A list of initial attributes required to create the
1342  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1343  *   the actual capabilities of the created SRQ.
1344  *
1345  * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1346  * requested size of the SRQ, and set to the actual values allocated
1347  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1348  * will always be at least as large as the requested values.
1349  */
1350 struct ib_srq *ib_create_srq(struct ib_pd *pd,
1351 			     struct ib_srq_init_attr *srq_init_attr);
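
/*
 * Example (hedged sketch): requesting an SRQ and reading back the actual
 * capabilities granted by the device; the sizes used here are illustrative.
 * On success, srq_attr.attr.max_wr and srq_attr.attr.max_sge hold the
 * values actually allocated.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = {
 *			.max_wr	 = 256,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */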
1352 
1353 /**
1354  * ib_modify_srq - Modifies the attributes for the specified SRQ.
1355  * @srq: The SRQ to modify.
1356  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
1357  *   the current values of selected SRQ attributes are returned.
1358  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1359  *   are being modified.
1360  *
1361  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1362  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1363  * the number of receives queued drops below the limit.
1364  */
1365 int ib_modify_srq(struct ib_srq *srq,
1366 		  struct ib_srq_attr *srq_attr,
1367 		  enum ib_srq_attr_mask srq_attr_mask);
1368 
1369 /**
1370  * ib_query_srq - Returns the attribute list and current values for the
1371  *   specified SRQ.
1372  * @srq: The SRQ to query.
1373  * @srq_attr: The attributes of the specified SRQ.
1374  */
1375 int ib_query_srq(struct ib_srq *srq,
1376 		 struct ib_srq_attr *srq_attr);
1377 
1378 /**
1379  * ib_destroy_srq - Destroys the specified SRQ.
1380  * @srq: The SRQ to destroy.
1381  */
1382 int ib_destroy_srq(struct ib_srq *srq);
1383 
1384 /**
1385  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1386  * @srq: The SRQ to post the work request on.
1387  * @recv_wr: A list of work requests to post on the receive queue.
1388  * @bad_recv_wr: On an immediate failure, this parameter will reference
1389  *   the work request that failed to be posted on the QP.
1390  */
1391 static inline int ib_post_srq_recv(struct ib_srq *srq,
1392 				   struct ib_recv_wr *recv_wr,
1393 				   struct ib_recv_wr **bad_recv_wr)
1394 {
1395 	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1396 }
1397 
1398 /**
1399  * ib_create_qp - Creates a QP associated with the specified protection
1400  *   domain.
1401  * @pd: The protection domain associated with the QP.
1402  * @qp_init_attr: A list of initial attributes required to create the
1403  *   QP.  If QP creation succeeds, then the attributes are updated to
1404  *   the actual capabilities of the created QP.
1405  */
1406 struct ib_qp *ib_create_qp(struct ib_pd *pd,
1407 			   struct ib_qp_init_attr *qp_init_attr);
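
/*
 * Example (illustrative sketch): creating an RC QP; "send_cq" and "recv_cq"
 * are assumed to have been created with ib_create_cq(), and the capacities
 * shown are illustrative.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap	     = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */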
1408 
1409 /**
1410  * ib_modify_qp - Modifies the attributes for the specified QP and then
1411  *   transitions the QP to the given state.
1412  * @qp: The QP to modify.
1413  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
1414  *   the current values of selected QP attributes are returned.
1415  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1416  *   are being modified.
1417  */
1418 int ib_modify_qp(struct ib_qp *qp,
1419 		 struct ib_qp_attr *qp_attr,
1420 		 int qp_attr_mask);
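
/*
 * Example (hedged sketch): moving a newly created RC QP from RESET to INIT.
 * The P_Key index, port number and access flags used here are illustrative.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE |
 *				   IB_ACCESS_REMOTE_READ,
 *	};
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *			   IB_QP_ACCESS_FLAGS);
 */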
1421 
1422 /**
1423  * ib_query_qp - Returns the attribute list and current values for the
1424  *   specified QP.
1425  * @qp: The QP to query.
1426  * @qp_attr: The attributes of the specified QP.
1427  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1428  * @qp_init_attr: Additional attributes of the selected QP.
1429  *
1430  * The qp_attr_mask may be used to limit the query to gathering only the
1431  * selected attributes.
1432  */
1433 int ib_query_qp(struct ib_qp *qp,
1434 		struct ib_qp_attr *qp_attr,
1435 		int qp_attr_mask,
1436 		struct ib_qp_init_attr *qp_init_attr);
1437 
1438 /**
1439  * ib_destroy_qp - Destroys the specified QP.
1440  * @qp: The QP to destroy.
1441  */
1442 int ib_destroy_qp(struct ib_qp *qp);
1443 
1444 /**
1445  * ib_post_send - Posts a list of work requests to the send queue of
1446  *   the specified QP.
1447  * @qp: The QP to post the work request on.
1448  * @send_wr: A list of work requests to post on the send queue.
1449  * @bad_send_wr: On an immediate failure, this parameter will reference
1450  *   the work request that failed to be posted on the QP.
1451  *
1452  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
1453  * error is returned, the QP state shall not be affected,
1454  * ib_post_send() will return an immediate error after queueing any
1455  * earlier work requests in the list.
1456  */
1457 static inline int ib_post_send(struct ib_qp *qp,
1458 			       struct ib_send_wr *send_wr,
1459 			       struct ib_send_wr **bad_send_wr)
1460 {
1461 	return qp->device->post_send(qp, send_wr, bad_send_wr);
1462 }
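
/*
 * Example (illustrative sketch): posting a single signaled send.  "dma_addr",
 * "len" and "lkey" are assumed to come from the consumer's memory
 * registration, and "wr_id_cookie" is a hypothetical completion cookie.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id	    = wr_id_cookie,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode	    = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */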
1463 
1464 /**
1465  * ib_post_recv - Posts a list of work requests to the receive queue of
1466  *   the specified QP.
1467  * @qp: The QP to post the work request on.
1468  * @recv_wr: A list of work requests to post on the receive queue.
1469  * @bad_recv_wr: On an immediate failure, this parameter will reference
1470  *   the work request that failed to be posted on the QP.
1471  */
1472 static inline int ib_post_recv(struct ib_qp *qp,
1473 			       struct ib_recv_wr *recv_wr,
1474 			       struct ib_recv_wr **bad_recv_wr)
1475 {
1476 	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1477 }
1478 
1479 /**
1480  * ib_create_cq - Creates a CQ on the specified device.
1481  * @device: The device on which to create the CQ.
1482  * @comp_handler: A user-specified callback that is invoked when a
1483  *   completion event occurs on the CQ.
1484  * @event_handler: A user-specified callback that is invoked when an
1485  *   asynchronous event not associated with a completion occurs on the CQ.
1486  * @cq_context: Context associated with the CQ returned to the user via
1487  *   the associated completion and event handlers.
1488  * @cqe: The minimum size of the CQ.
1489  * @comp_vector: Completion vector used to signal completion events.
1490  *     Must be >= 0 and < device->num_comp_vectors.
1491  *
1492  * Users can examine the cq structure to determine the actual CQ size.
1493  */
1494 struct ib_cq *ib_create_cq(struct ib_device *device,
1495 			   ib_comp_handler comp_handler,
1496 			   void (*event_handler)(struct ib_event *, void *),
1497 			   void *cq_context, int cqe, int comp_vector);
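
/*
 * Example (hedged sketch): creating a CQ with at least 256 entries on
 * completion vector 0.  "my_comp_handler" stands in for the consumer's
 * ib_comp_handler callback and "my_context" for its private cookie; the
 * handler would typically re-arm the CQ with ib_req_notify_cq() and poll.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_context, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */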
1498 
1499 /**
1500  * ib_resize_cq - Modifies the capacity of the CQ.
1501  * @cq: The CQ to resize.
1502  * @cqe: The minimum size of the CQ.
1503  *
1504  * Users can examine the cq structure to determine the actual CQ size.
1505  */
1506 int ib_resize_cq(struct ib_cq *cq, int cqe);
1507 
1508 /**
1509  * ib_modify_cq - Modifies moderation params of the CQ
1510  * @cq: The CQ to modify.
1511  * @cq_count: number of CQEs that will trigger an event
1512  * @cq_period: max period of time in usec before triggering an event
1513  *
1514  */
1515 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1516 
1517 /**
1518  * ib_destroy_cq - Destroys the specified CQ.
1519  * @cq: The CQ to destroy.
1520  */
1521 int ib_destroy_cq(struct ib_cq *cq);
1522 
1523 /**
1524  * ib_poll_cq - poll a CQ for completion(s)
1525  * @cq:the CQ being polled
1526  * @num_entries:maximum number of completions to return
1527  * @wc:array of at least @num_entries &struct ib_wc where completions
1528  *   will be returned
1529  *
1530  * Poll a CQ for (possibly multiple) completions.  If the return value
1531  * is < 0, an error occurred.  If the return value is >= 0, it is the
1532  * number of completions returned.  If the return value is
1533  * non-negative and < num_entries, then the CQ was emptied.
1534  */
1535 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1536 			     struct ib_wc *wc)
1537 {
1538 	return cq->device->poll_cq(cq, num_entries, wc);
1539 }
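
/*
 * Example (illustrative sketch): draining a CQ in batches; "process_wc" is
 * a hypothetical per-completion handler supplied by the consumer.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 16, wc)) > 0)
 *		for (i = 0; i < n; ++i)
 *			process_wc(&wc[i]);
 *
 *	if (n < 0)
 *		printk(KERN_ERR "ib_poll_cq() failed: %d\n", n);
 */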
1540 
1541 /**
1542  * ib_peek_cq - Returns the number of unreaped completions currently
1543  *   on the specified CQ.
1544  * @cq: The CQ to peek.
1545  * @wc_cnt: A minimum number of unreaped completions to check for.
1546  *
1547  * If the number of unreaped completions is greater than or equal to wc_cnt,
1548  * this function returns wc_cnt, otherwise, it returns the actual number of
1549  * unreaped completions.
1550  */
1551 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1552 
1553 /**
1554  * ib_req_notify_cq - Request completion notification on a CQ.
1555  * @cq: The CQ to generate an event for.
1556  * @flags:
1557  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1558  *   to request an event on the next solicited event or next work
1559  *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1560  *   may also be |ed in to request a hint about missed events, as
1561  *   described below.
1562  *
1563  * Return Value:
1564  *    < 0 means an error occurred while requesting notification
1565  *   == 0 means notification was requested successfully, and if
1566  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1567  *        were missed and it is safe to wait for another event.  In
1568  *        this case is it guaranteed that any work completions added
1569  *        to the CQ since the last CQ poll will trigger a completion
1570  *        notification event.
1571  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1572  *        in.  It means that the consumer must poll the CQ again to
1573  *        make sure it is empty to avoid missing an event because of a
1574  *        race between requesting notification and an entry being
1575  *        added to the CQ.  This return value means it is possible
1576  *        (but not guaranteed) that a work completion has been added
1577  *        to the CQ since the last poll without triggering a
1578  *        completion notification event.
1579  */
1580 static inline int ib_req_notify_cq(struct ib_cq *cq,
1581 				   enum ib_cq_notify_flags flags)
1582 {
1583 	return cq->device->req_notify_cq(cq, flags);
1584 }
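
/*
 * Example (hedged sketch): the re-arm pattern described above, re-polling
 * whenever IB_CQ_REPORT_MISSED_EVENTS signals a possible race.  "drain_cq"
 * is a hypothetical helper that polls the CQ until it is empty.
 *
 *	do {
 *		drain_cq(cq);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */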
1585 
1586 /**
1587  * ib_req_ncomp_notif - Request completion notification when there are
1588  *   at least the specified number of unreaped completions on the CQ.
1589  * @cq: The CQ to generate an event for.
1590  * @wc_cnt: The number of unreaped completions that should be on the
1591  *   CQ before an event is generated.
1592  */
1593 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1594 {
1595 	return cq->device->req_ncomp_notif ?
1596 		cq->device->req_ncomp_notif(cq, wc_cnt) :
1597 		-ENOSYS;
1598 }
1599 
1600 /**
1601  * ib_get_dma_mr - Returns a memory region for system memory that is
1602  *   usable for DMA.
1603  * @pd: The protection domain associated with the memory region.
1604  * @mr_access_flags: Specifies the memory access rights.
1605  *
1606  * Note that the ib_dma_*() functions defined below must be used
1607  * to create/destroy addresses used with the Lkey or Rkey returned
1608  * by ib_get_dma_mr().
1609  */
1610 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1611 
1612 /**
1613  * ib_dma_mapping_error - check a DMA addr for error
1614  * @dev: The device for which the dma_addr was created
1615  * @dma_addr: The DMA address to check
1616  */
1617 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1618 {
1619 	if (dev->dma_ops)
1620 		return dev->dma_ops->mapping_error(dev, dma_addr);
1621 	return dma_mapping_error(dev->dma_device, dma_addr);
1622 }
1623 
1624 /**
1625  * ib_dma_map_single - Map a kernel virtual address to DMA address
1626  * @dev: The device for which the dma_addr is to be created
1627  * @cpu_addr: The kernel virtual address
1628  * @size: The size of the region in bytes
1629  * @direction: The direction of the DMA
1630  */
1631 static inline u64 ib_dma_map_single(struct ib_device *dev,
1632 				    void *cpu_addr, size_t size,
1633 				    enum dma_data_direction direction)
1634 {
1635 	if (dev->dma_ops)
1636 		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1637 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1638 }
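
/*
 * Example (illustrative sketch): mapping a kernel buffer for an outgoing
 * transfer and checking the result; "buf" and "len" are the consumer's
 * buffer and size.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(device, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(device, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(device, dma_addr, len, DMA_TO_DEVICE);
 */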
1639 
1640 /**
1641  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1642  * @dev: The device for which the DMA address was created
1643  * @addr: The DMA address
1644  * @size: The size of the region in bytes
1645  * @direction: The direction of the DMA
1646  */
1647 static inline void ib_dma_unmap_single(struct ib_device *dev,
1648 				       u64 addr, size_t size,
1649 				       enum dma_data_direction direction)
1650 {
1651 	if (dev->dma_ops)
1652 		dev->dma_ops->unmap_single(dev, addr, size, direction);
1653 	else
1654 		dma_unmap_single(dev->dma_device, addr, size, direction);
1655 }
1656 
1657 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
1658 					  void *cpu_addr, size_t size,
1659 					  enum dma_data_direction direction,
1660 					  struct dma_attrs *attrs)
1661 {
1662 	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
1663 				    direction, attrs);
1664 }
1665 
1666 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1667 					     u64 addr, size_t size,
1668 					     enum dma_data_direction direction,
1669 					     struct dma_attrs *attrs)
1670 {
1671 	return dma_unmap_single_attrs(dev->dma_device, addr, size,
1672 				      direction, attrs);
1673 }
1674 
1675 /**
1676  * ib_dma_map_page - Map a physical page to DMA address
1677  * @dev: The device for which the dma_addr is to be created
1678  * @page: The page to be mapped
1679  * @offset: The offset within the page
1680  * @size: The size of the region in bytes
1681  * @direction: The direction of the DMA
1682  */
1683 static inline u64 ib_dma_map_page(struct ib_device *dev,
1684 				  struct page *page,
1685 				  unsigned long offset,
1686 				  size_t size,
1687 				  enum dma_data_direction direction)
1688 {
1689 	if (dev->dma_ops)
1690 		return dev->dma_ops->map_page(dev, page, offset, size, direction);
1691 	return dma_map_page(dev->dma_device, page, offset, size, direction);
1692 }
1693 
1694 /**
1695  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1696  * @dev: The device for which the DMA address was created
1697  * @addr: The DMA address
1698  * @size: The size of the region in bytes
1699  * @direction: The direction of the DMA
1700  */
1701 static inline void ib_dma_unmap_page(struct ib_device *dev,
1702 				     u64 addr, size_t size,
1703 				     enum dma_data_direction direction)
1704 {
1705 	if (dev->dma_ops)
1706 		dev->dma_ops->unmap_page(dev, addr, size, direction);
1707 	else
1708 		dma_unmap_page(dev->dma_device, addr, size, direction);
1709 }
1710 
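/*
 * Example (editorial sketch): mapping a whole page for receive; "page"
 * is a hypothetical struct page the caller holds a reference on.
 *
 *	u64 dma;
 *
 *	dma = ib_dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 *	(post a receive work request that references dma)
 *
 *	ib_dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 */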
1711 /**
1712  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1713  * @dev: The device for which the DMA addresses are to be created
1714  * @sg: The array of scatter/gather entries
1715  * @nents: The number of scatter/gather entries
1716  * @direction: The direction of the DMA
1717  */
1718 static inline int ib_dma_map_sg(struct ib_device *dev,
1719 				struct scatterlist *sg, int nents,
1720 				enum dma_data_direction direction)
1721 {
1722 	if (dev->dma_ops)
1723 		return dev->dma_ops->map_sg(dev, sg, nents, direction);
1724 	return dma_map_sg(dev->dma_device, sg, nents, direction);
1725 }
1726 
1727 /**
1728  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1729  * @dev: The device for which the DMA addresses were created
1730  * @sg: The array of scatter/gather entries
1731  * @nents: The number of scatter/gather entries
1732  * @direction: The direction of the DMA
1733  */
1734 static inline void ib_dma_unmap_sg(struct ib_device *dev,
1735 				   struct scatterlist *sg, int nents,
1736 				   enum dma_data_direction direction)
1737 {
1738 	if (dev->dma_ops)
1739 		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1740 	else
1741 		dma_unmap_sg(dev->dma_device, sg, nents, direction);
1742 }
1743 
1744 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1745 				      struct scatterlist *sg, int nents,
1746 				      enum dma_data_direction direction,
1747 				      struct dma_attrs *attrs)
1748 {
1749 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1750 }
1751 
1752 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1753 					 struct scatterlist *sg, int nents,
1754 					 enum dma_data_direction direction,
1755 					 struct dma_attrs *attrs)
1756 {
1757 	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1758 }
1759 /**
1760  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1761  * @dev: The device for which the DMA addresses were created
1762  * @sg: The scatter/gather entry
1763  */
1764 static inline u64 ib_sg_dma_address(struct ib_device *dev,
1765 				    struct scatterlist *sg)
1766 {
1767 	if (dev->dma_ops)
1768 		return dev->dma_ops->dma_address(dev, sg);
1769 	return sg_dma_address(sg);
1770 }
1771 
1772 /**
1773  * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1774  * @dev: The device for which the DMA addresses were created
1775  * @sg: The scatter/gather entry
1776  */
1777 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1778 					 struct scatterlist *sg)
1779 {
1780 	if (dev->dma_ops)
1781 		return dev->dma_ops->dma_len(dev, sg);
1782 	return sg_dma_len(sg);
1783 }
1784 
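/*
 * Example (editorial sketch): mapping a scatterlist and building an
 * ib_sge array from it with the accessors above.  "sg_list", "nents",
 * "sge" and "mr" are hypothetical.  Only the mapped prefix of the list
 * (the count returned by ib_dma_map_sg()) may be walked.
 *
 *	struct scatterlist *sg;
 *	int i, mapped;
 *
 *	mapped = ib_dma_map_sg(dev, sg_list, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *
 *	for_each_sg(sg_list, sg, mapped, i) {
 *		sge[i].addr   = ib_sg_dma_address(dev, sg);
 *		sge[i].length = ib_sg_dma_len(dev, sg);
 *		sge[i].lkey   = mr->lkey;
 *	}
 *
 *	(post a work request using sge, then unmap with the original nents)
 *
 *	ib_dma_unmap_sg(dev, sg_list, nents, DMA_TO_DEVICE);
 */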
1785 /**
1786  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1787  * @dev: The device for which the DMA address was created
1788  * @addr: The DMA address
1789  * @size: The size of the region in bytes
1790  * @dir: The direction of the DMA
1791  */
1792 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1793 					      u64 addr,
1794 					      size_t size,
1795 					      enum dma_data_direction dir)
1796 {
1797 	if (dev->dma_ops)
1798 		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1799 	else
1800 		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1801 }
1802 
1803 /**
1804  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1805  * @dev: The device for which the DMA address was created
1806  * @addr: The DMA address
1807  * @size: The size of the region in bytes
1808  * @dir: The direction of the DMA
1809  */
1810 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1811 						 u64 addr,
1812 						 size_t size,
1813 						 enum dma_data_direction dir)
1814 {
1815 	if (dev->dma_ops)
1816 		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1817 	else
1818 		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1819 }
1820 
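/*
 * Example (editorial sketch): giving the CPU a consistent view of a
 * streaming DMA_FROM_DEVICE buffer, then handing it back to the device.
 * "dma" and "len" are hypothetical.
 *
 *	ib_dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	(CPU examines or copies the received data)
 *	ib_dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */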
1821 /**
1822  * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1823  * @dev: The device for which the DMA address is requested
1824  * @size: The size of the region to allocate in bytes
1825  * @dma_handle: A pointer for returning the DMA address of the region
1826  * @flag: memory allocator flags
1827  */
1828 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1829 					   size_t size,
1830 					   u64 *dma_handle,
1831 					   gfp_t flag)
1832 {
1833 	if (dev->dma_ops)
1834 		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1835 	else {
1836 		dma_addr_t handle;
1837 		void *ret;
1838 
1839 		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1840 		*dma_handle = handle;
1841 		return ret;
1842 	}
1843 }
1844 
1845 /**
1846  * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1847  * @dev: The device for which the DMA addresses were allocated
1848  * @size: The size of the region
1849  * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1850  * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1851  */
1852 static inline void ib_dma_free_coherent(struct ib_device *dev,
1853 					size_t size, void *cpu_addr,
1854 					u64 dma_handle)
1855 {
1856 	if (dev->dma_ops)
1857 		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1858 	else
1859 		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1860 }
1861 
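/*
 * Example (editorial sketch): a long-lived, coherently mapped buffer,
 * e.g. a descriptor ring shared with the HCA.  "ring_size" is
 * hypothetical.
 *
 *	u64 dma;
 *	void *ring;
 *
 *	ring = ib_dma_alloc_coherent(dev, ring_size, &dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *
 *	(use ring and dma for the life of the object)
 *
 *	ib_dma_free_coherent(dev, ring_size, ring, dma);
 */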
1862 /**
1863  * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1864  *   by an HCA.
1865  * @pd: The protection domain assigned to the registered region.
1866  * @phys_buf_array: Specifies a list of physical buffers to use in the
1867  *   memory region.
1868  * @num_phys_buf: Specifies the size of the phys_buf_array.
1869  * @mr_access_flags: Specifies the memory access rights.
1870  * @iova_start: The offset of the region's starting I/O virtual address.
1871  */
1872 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1873 			     struct ib_phys_buf *phys_buf_array,
1874 			     int num_phys_buf,
1875 			     int mr_access_flags,
1876 			     u64 *iova_start);
1877 
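/*
 * Example (editorial sketch): registering two buffers as one region.
 * The addresses and sizes are hypothetical and must already be valid
 * DMA addresses for the device.
 *
 *	struct ib_phys_buf bufs[2] = {
 *		{ .addr = buf0_dma, .size = buf0_size },
 *		{ .addr = buf1_dma, .size = buf1_size },
 *	};
 *	u64 iova = buf0_dma;
 *	struct ib_mr *mr;
 *
 *	mr = ib_reg_phys_mr(pd, bufs, 2,
 *			    IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *			    &iova);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */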
1878 /**
1879  * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1880  *   Conceptually, this call deregisters the memory region and then
1881  *   registers it again with the new attributes.  Where possible,
1882  *   resources are reused rather than deallocated and reallocated.
1883  * @mr: The memory region to modify.
1884  * @mr_rereg_mask: A bit-mask used to indicate which of the following
1885  *   properties of the memory region are being modified.
1886  * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1887  *   the new protection domain to associate with the memory region,
1888  *   otherwise, this parameter is ignored.
1889  * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1890  *   field specifies a list of physical buffers to use in the new
1891  *   translation, otherwise, this parameter is ignored.
1892  * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1893  *   field specifies the size of the phys_buf_array, otherwise, this
1894  *   parameter is ignored.
1895  * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1896  *   field specifies the new memory access rights, otherwise, this
1897  *   parameter is ignored.
1898  * @iova_start: The offset of the region's starting I/O virtual address.
1899  */
1900 int ib_rereg_phys_mr(struct ib_mr *mr,
1901 		     int mr_rereg_mask,
1902 		     struct ib_pd *pd,
1903 		     struct ib_phys_buf *phys_buf_array,
1904 		     int num_phys_buf,
1905 		     int mr_access_flags,
1906 		     u64 *iova_start);
1907 
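/*
 * Example (editorial sketch): changing only the access rights of an
 * existing region.  Because only IB_MR_REREG_ACCESS is set in the mask,
 * the pd, phys_buf_array, num_phys_buf and iova_start arguments are
 * ignored.
 *
 *	ret = ib_rereg_phys_mr(mr, IB_MR_REREG_ACCESS, NULL, NULL, 0,
 *			       IB_ACCESS_LOCAL_WRITE |
 *			       IB_ACCESS_REMOTE_WRITE,
 *			       NULL);
 */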
1908 /**
1909  * ib_query_mr - Retrieves information about a specific memory region.
1910  * @mr: The memory region to retrieve information about.
1911  * @mr_attr: The attributes of the specified memory region.
1912  */
1913 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1914 
1915 /**
1916  * ib_dereg_mr - Deregisters a memory region and removes it from the
1917  *   HCA translation table.
1918  * @mr: The memory region to deregister.
1919  */
1920 int ib_dereg_mr(struct ib_mr *mr);
1921 
1922 /**
1923  * ib_alloc_fast_reg_mr - Allocates memory region usable with the
1924  *   IB_WR_FAST_REG_MR send work request.
1925  * @pd: The protection domain associated with the region.
1926  * @max_page_list_len: requested max physical buffer list length to be
1927  *   used with fast register work requests for this MR.
1928  */
1929 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
1930 
1931 /**
1932  * ib_alloc_fast_reg_page_list - Allocates a page list array
1933  * @device: ib device pointer.
1934  * @page_list_len: size of the page list array to be allocated.
1935  *
1936  * This allocates and returns a struct ib_fast_reg_page_list * and a
1937  * page_list array that is at least page_list_len in size.  The actual
1938  * size is returned in max_page_list_len.  The caller is responsible
1939  * for initializing the contents of the page_list array before posting
1940  * a send work request with the IB_WR_FAST_REG_MR opcode.
1941  *
1942  * The page_list array entries must be translated using one of the
1943  * ib_dma_*() functions just like the addresses passed to
1944  * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
1945  * ib_fast_reg_page_list must not be modified by the caller until the
1946  * IB_WR_FAST_REG_MR work request completes.
1947  */
1948 struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
1949 				struct ib_device *device, int page_list_len);
1950 
1951 /**
1952  * ib_free_fast_reg_page_list - Deallocates a previously allocated
1953  *   page list array.
1954  * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
1955  */
1956 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
1957 
1958 /**
1959  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
1960  *   R_Key and L_Key.
1961  * @mr: struct ib_mr pointer to be updated.
1962  * @newkey: new key to be used.
1963  */
1964 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
1965 {
1966 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
1967 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
1968 }
1969 
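/*
 * Example (editorial sketch): the usual fast register sequence.  The
 * wr.fast_reg field names follow struct ib_send_wr in this tree; the
 * page addresses, counts, key and QP are hypothetical, and error
 * handling is omitted.
 *
 *	struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, npages);
 *	struct ib_fast_reg_page_list *pl =
 *		ib_alloc_fast_reg_page_list(dev, npages);
 *	struct ib_send_wr wr, *bad_wr;
 *	int i;
 *
 *	for (i = 0; i < npages; i++)
 *		pl->page_list[i] = page_dma_addr[i];
 *
 *	ib_update_fast_reg_key(mr, next_key);
 *
 *	memset(&wr, 0, sizeof wr);
 *	wr.opcode = IB_WR_FAST_REG_MR;
 *	wr.wr.fast_reg.iova_start = iova;
 *	wr.wr.fast_reg.page_list = pl;
 *	wr.wr.fast_reg.page_list_len = npages;
 *	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 *	wr.wr.fast_reg.length = npages * PAGE_SIZE;
 *	wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 *	wr.wr.fast_reg.rkey = mr->rkey;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */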
1970 /**
1971  * ib_alloc_mw - Allocates a memory window.
1972  * @pd: The protection domain associated with the memory window.
1973  */
1974 struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1975 
1976 /**
1977  * ib_bind_mw - Posts a work request to the send queue of the specified
1978  *   QP, which binds the memory window to the given address range and
1979  *   remote access attributes.
1980  * @qp: QP to post the bind work request on.
1981  * @mw: The memory window to bind.
1982  * @mw_bind: Specifies information about the memory window, including
1983  *   its address range, remote access rights, and associated memory region.
1984  */
1985 static inline int ib_bind_mw(struct ib_qp *qp,
1986 			     struct ib_mw *mw,
1987 			     struct ib_mw_bind *mw_bind)
1988 {
1989 	/* XXX reference counting in corresponding MR? */
1990 	return mw->device->bind_mw ?
1991 		mw->device->bind_mw(qp, mw, mw_bind) :
1992 		-ENOSYS;
1993 }
1994 
1995 /**
1996  * ib_dealloc_mw - Deallocates a memory window.
1997  * @mw: The memory window to deallocate.
1998  */
1999 int ib_dealloc_mw(struct ib_mw *mw);
2000 
2001 /**
2002  * ib_alloc_fmr - Allocates an unmapped fast memory region.
2003  * @pd: The protection domain associated with the unmapped region.
2004  * @mr_access_flags: Specifies the memory access rights.
2005  * @fmr_attr: Attributes of the unmapped region.
2006  *
2007  * A fast memory region must be mapped before it can be used as part of
2008  * a work request.
2009  */
2010 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2011 			    int mr_access_flags,
2012 			    struct ib_fmr_attr *fmr_attr);
2013 
2014 /**
2015  * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2016  * @fmr: The fast memory region to associate with the pages.
2017  * @page_list: An array of physical pages to map to the fast memory region.
2018  * @list_len: The number of pages in page_list.
2019  * @iova: The I/O virtual address to use with the mapped region.
2020  */
2021 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2022 				  u64 *page_list, int list_len,
2023 				  u64 iova)
2024 {
2025 	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2026 }
2027 
2028 /**
2029  * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2030  * @fmr_list: A linked list of fast memory regions to unmap.
2031  */
2032 int ib_unmap_fmr(struct list_head *fmr_list);
2033 
2034 /**
2035  * ib_dealloc_fmr - Deallocates a fast memory region.
2036  * @fmr: The fast memory region to deallocate.
2037  */
2038 int ib_dealloc_fmr(struct ib_fmr *fmr);
2039 
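/*
 * Example (editorial sketch): the FMR life cycle.  "pages" holds DMA
 * addresses aligned to the FMR page size; the names and sizes are
 * hypothetical.
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr;
 *	LIST_HEAD(fmr_list);
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *
 *	ret = ib_map_phys_fmr(fmr, pages, npages, iova);
 *
 *	(use fmr->lkey and fmr->rkey in work requests)
 *
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */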
2040 /**
2041  * ib_attach_mcast - Attaches the specified QP to a multicast group.
2042  * @qp: QP to attach to the multicast group.  The QP must be type
2043  *   IB_QPT_UD.
2044  * @gid: Multicast group GID.
2045  * @lid: Multicast group LID in host byte order.
2046  *
2047  * In order to send and receive multicast packets, subnet
2048  * administration must have created the multicast group and configured
2049  * the fabric appropriately.  The port associated with the specified
2050  * QP must also be a member of the multicast group.
2051  */
2052 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2053 
2054 /**
2055  * ib_detach_mcast - Detaches the specified QP from a multicast group.
2056  * @qp: QP to detach from the multicast group.
2057  * @gid: Multicast group GID.
2058  * @lid: Multicast group LID in host byte order.
2059  */
2060 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2061 
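/*
 * Example (editorial sketch): joining and leaving a multicast group on
 * a UD QP.  The GID and LID would normally come from a subnet
 * administration join; "mgid" and "mlid" are hypothetical.
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *
 *	(send and receive multicast traffic)
 *
 *	ib_detach_mcast(qp, &mgid, mlid);
 */
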
2062 #endif /* IB_VERBS_H */
2063