/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_SOC_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};
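
/* Illustrative sketch (an addition for exposition, not part of the driver):
 * allocating one index from a gdma_resource under its lock with the standard
 * kernel bitmap helpers. The -ENOSPC convention is an assumption; the real
 * allocators live in the .c files.
 */
static inline int example_gdma_resource_alloc(struct gdma_resource *r)
{
	unsigned long flags;
	u32 index;

	spin_lock_irqsave(&r->lock, flags);
	index = find_first_zero_bit(r->map, r->size);
	if (index < r->size)
		set_bit(index, r->map);
	spin_unlock_irqrestore(&r->lock, flags);

	return index < r->size ? (int)index : -ENOSPC;
}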

union gdma_doorbell_entry {
	u64	as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */
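
/* Example (an illustrative addition): composing the 64-bit CQ doorbell value
 * from the bit-field layout above. How the hardware interprets 'tail_ptr'
 * (entries vs. bytes) is not specified here; treat this purely as a
 * demonstration of the union.
 */
static inline u64 example_make_cq_doorbell(u32 cq_id, u64 tail, bool arm)
{
	union gdma_doorbell_entry e = {};

	e.cq.id = cq_id;
	e.cq.tail_ptr = tail;
	e.cq.arm = arm;

	return e.as_uint64;
}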

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
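
/* Usage sketch (an illustrative addition): initializing the headers of a
 * request/response pair before an HWC exchange (see mana_gd_send_request()
 * below). The message type chosen here is only an example.
 */
static inline void example_init_general_req(struct gdma_general_req *req)
{
	mana_gd_init_req_hdr(&req->hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(*req),
			     sizeof(struct gdma_general_resp));
}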

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};
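
/* Illustrative addition: describing one DMA-mapped buffer with a single SGE
 * and wrapping it in a gdma_wqe_request for mana_gd_post_work_request()
 * (declared below). 'dma_addr', 'len' and 'mkey' are hypothetical inputs; a
 * real caller would use the mem key from its gdma_dev (gpa_mkey).
 */
static inline void example_fill_wqe_req(struct gdma_wqe_request *wqe_req,
					struct gdma_sge *sge,
					dma_addr_t dma_addr, u32 len, u32 mkey)
{
	sge->address = dma_addr;
	sge->mem_key = mkey;
	sge->size = len;

	memset(wqe_req, 0, sizeof(*wqe_req));
	wqe_req->sgl = sge;
	wqe_req->num_sge = 1;
	wqe_req->flags = GDMA_WR_NONE;
}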

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX  UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8  type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
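
/* Illustration (an addition, not driver code) of the head/tail convention
 * documented above: on an SQ/RQ the producer index advances in 32-byte BUs
 * when a WQE is posted, and the consumer index catches up once the matching
 * completion has been processed. Wraparound handling is omitted.
 */
static inline void example_account_wqe(struct gdma_queue *wq,
				       u32 wqe_size_in_bu, bool completed)
{
	if (completed)
		wq->tail += wqe_size_in_bu;	/* WQE consumed by the HW */
	else
		wq->head += wqe_size_in_bu;	/* WQE posted by the driver */
}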

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	void *arg;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device		*dev;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct gdma_resource	msix_resource;
	struct gdma_irq_context	*irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex		eq_test_event_mutex;
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;
	phys_addr_t		bar0_pa;
	void __iomem		*bar0_va;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	phys_addr_t		phys_db_page_base;
	u32 db_page_size;
	int			numa_node;
	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;
};

#define MAX_NUM_GDMA_DEVICES	4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */
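
/* Worked example (an addition): per the GDMA_WQE_BU_SIZE comment above, a
 * WQE's total size -- the 8-byte header above, the inline OOB area and the
 * SGL -- is rounded up to whole 32-byte Basic Units. The exact composition
 * of the total is an assumption drawn from the structures in this header.
 */
static inline u32 example_wqe_size_in_bu(u32 inline_oob_size, u32 num_sge)
{
	u32 size = sizeof(struct gdma_wqe) + inline_oob_size +
		   num_sge * sizeof(struct gdma_sge);

	return ALIGN(size, GDMA_WQE_BU_SIZE) / GDMA_WQE_BU_SIZE;
}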

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
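
/* With the constants above (16-byte SGE, 512-byte max SQE, 256-byte max RQE,
 * 8-byte small inline OOB) these evaluate to (512 - 16 - 8) / 16 = 30 TX
 * and (256 - 16) / 16 = 15 RX SGL entries.
 */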

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1
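
/* Typical CQ service loop (a sketch, not the driver's code): drain
 * completions with mana_gd_poll_cq() and re-arm the CQ afterwards.
 * EXAMPLE_CQE_BATCH is a hypothetical batch size; real callers size their
 * own buffers.
 */
#define EXAMPLE_CQE_BATCH 8

static inline void example_service_cq(struct gdma_queue *cq)
{
	struct gdma_comp comps[EXAMPLE_CQE_BATCH];
	int i, n;

	do {
		n = mana_gd_poll_cq(cq, comps, EXAMPLE_CQE_BATCH);
		for (i = 0; i < n; i++) {
			/* Dispatch comps[i] based on wq_num and is_sq. */
		}
	} while (n == EXAMPLE_CQE_BATCH);

	mana_gd_ring_cq(cq, SET_ARM_BIT);
}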

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))
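
/* Sketch of the owner-bits emptiness test mentioned in the head/tail comment
 * earlier: an entry is new only if its owner bits match the value expected
 * for the current pass over the ring. This mirrors, but is not, the driver's
 * actual EQE processing; 'num_entries' is the EQ depth.
 */
static inline bool example_eqe_is_new(u32 head, u32 num_entries,
				      u32 owner_bits)
{
	u32 expected = (head / num_entries) & GDMA_EQE_OWNER_MASK;

	return owner_bits == expected;
}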

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doorbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8  rq_drop_on_overrun;
	u8  rq_err_on_wqe_overflow;
	u8  rq_chain_rec_wqes;
	u8  sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
}; /* HW DATA */

struct gdma_destroy_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);
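
/* End-to-end sketch of an HWC exchange using the declarations above: build
 * the request header, send the request, then check both the transport error
 * code and the response status. The message choice and the -EPROTO mapping
 * are illustrative assumptions, not the driver's exact code.
 */
static inline int example_query_max_resources(struct gdma_context *gc)
{
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);
	if (err || resp.hdr.status)
		return err ? err : -EPROTO;

	return 0;
}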

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);

#endif /* _GDMA_H */