1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2
3 /* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
4 /* Kai Shen <kaishen@linux.alibaba.com> */
5 /* Copyright (c) 2020-2022, Alibaba Group. */
6
7 #ifndef __ERDMA_H__
8 #define __ERDMA_H__
9
10 #include <linux/bitfield.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/xarray.h>
14 #include <rdma/ib_verbs.h>
15
16 #include "erdma_hw.h"
17
18 #define DRV_MODULE_NAME "erdma"
19 #define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
20
/*
 * Event queue (EQ): a circular buffer filled by hardware and consumed
 * by the driver. Shared by the AEQ, the CEQs and the command queue's
 * completion EQ.
 */
struct erdma_eq {
	void *qbuf;			/* queue buffer, kernel virtual address */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf for the device */

	spinlock_t lock;		/* serializes consumer-side access */

	u32 depth;			/* number of entries in the queue */

	u16 ci;				/* consumer index (driver side) */
	u16 rsvd;			/* explicit padding */

	atomic64_t event_num;		/* stats: events handled */
	atomic64_t notify_num;		/* stats: notify/arm operations */

	void __iomem *db;		/* doorbell register for this EQ */
	u64 *db_record;			/* doorbell write-back record; presumably read by HW — confirm against erdma_hw.h */
};
38
/* Command queue submission queue: driver posts command WQEs here. */
struct erdma_cmdq_sq {
	void *qbuf;			/* queue buffer, kernel virtual address */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf for the device */

	spinlock_t lock;		/* serializes command submission */

	u32 depth;			/* number of WQE building blocks in the queue */
	u16 ci;				/* consumer index */
	u16 pi;				/* producer index */

	u16 wqebb_cnt;			/* WQE building blocks per command — TODO confirm unit */

	u64 *db_record;			/* doorbell write-back record */
};
53
/* Command queue completion queue: hardware reports command results here. */
struct erdma_cmdq_cq {
	void *qbuf;			/* queue buffer, kernel virtual address */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf for the device */

	spinlock_t lock;		/* serializes completion processing */

	u32 depth;			/* number of CQEs in the queue */
	u32 ci;				/* consumer index */
	u32 cmdsn;			/* command sequence number — presumably used for CQE validity/owner check; verify in cmdq code */

	u64 *db_record;			/* doorbell write-back record */

	atomic64_t armed_num;		/* stats: times the CQ was armed */
};
68
/* Lifecycle states of one outstanding command (erdma_comp_wait.cmd_status). */
enum {
	ERDMA_CMD_STATUS_INIT,		/* slot allocated, not yet posted */
	ERDMA_CMD_STATUS_ISSUED,	/* posted to SQ, awaiting completion */
	ERDMA_CMD_STATUS_FINISHED,	/* completion received */
	ERDMA_CMD_STATUS_TIMEOUT	/* no completion within the timeout */
};
75
/*
 * Per-command wait context: one slot in erdma_cmdq.wait_pool, matched
 * to a completion by ctx_id.
 */
struct erdma_comp_wait {
	struct completion wait_event;	/* signaled when the completion arrives */
	u32 cmd_status;			/* ERDMA_CMD_STATUS_* state */
	u32 ctx_id;			/* slot id carried in the command/CQE */
	u16 sq_pi;			/* SQ producer index when issued */
	u8 comp_status;			/* hardware completion status */
	u8 rsvd;			/* explicit padding */
	u32 comp_data[4];		/* raw completion payload returned by HW */
};
85
/* Bit numbers for erdma_cmdq.state (manipulated with set_bit/test_bit). */
enum {
	ERDMA_CMDQ_STATE_OK_BIT = 0,		/* cmdq operational */
	ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,	/* a command timed out */
	ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,	/* completion with bad ctx_id */
};

#define ERDMA_CMDQ_TIMEOUT_MS 15000	/* max wait for one command completion */
#define ERDMA_REG_ACCESS_WAIT_MS 20	/* poll interval for register access */
#define ERDMA_WAIT_DEV_DONE_CNT 500	/* max polls waiting for device ready */
95
/*
 * Command queue: SQ + CQ + EQ triple used to issue admin commands to
 * the device and wait for their completions.
 */
struct erdma_cmdq {
	unsigned long *comp_wait_bitmap;	/* allocation bitmap over wait_pool slots */
	struct erdma_comp_wait *wait_pool;	/* per-command wait contexts */
	spinlock_t lock;			/* protects bitmap/pool allocation */

	bool use_event;			/* true: interrupt-driven; false: polling mode */

	struct erdma_cmdq_sq sq;	/* submission queue */
	struct erdma_cmdq_cq cq;	/* completion queue */
	struct erdma_eq eq;		/* event queue notifying CQ activity */

	unsigned long state;		/* ERDMA_CMDQ_STATE_* bit flags */

	struct semaphore credits;	/* limits commands in flight */
	u16 max_outstandings;		/* initial credit count */
};
112
/* Fallback congestion-control algorithm when the preferred one is unusable. */
#define COMPROMISE_CC ERDMA_CC_CUBIC

/* Congestion-control algorithms supported by the device. */
enum erdma_cc_alg {
	ERDMA_CC_NEWRENO = 0,
	ERDMA_CC_CUBIC,
	ERDMA_CC_HPCC_RTT,
	ERDMA_CC_HPCC_ECN,
	ERDMA_CC_HPCC_INT,
	ERDMA_CC_METHODS_NUM	/* count sentinel, not a real algorithm */
};
122
/* Device attributes and capability limits reported by firmware. */
struct erdma_devattr {
	u32 fw_version;			/* firmware version number */

	unsigned char peer_addr[ETH_ALEN];	/* MAC address — presumably the device/port MAC; confirm against query code */
	unsigned long cap_flags;	/* capability bit flags */

	int numa_node;			/* NUMA node of the PCI device */
	enum erdma_cc_alg cc;		/* active congestion-control algorithm */
	u32 irq_num;			/* number of interrupt vectors */

	/* verbs resource limits */
	u32 max_qp;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_ord;			/* max outbound read depth */
	u32 max_ird;			/* max inbound read depth */

	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u64 max_mr_size;
	u32 max_mr;
	u32 max_pd;
	u32 max_mw;
	u32 local_dma_key;		/* key for local DMA access */
};
150
#define ERDMA_IRQNAME_SIZE 50

/* One MSI-X interrupt: its name, vector and preferred CPU affinity. */
struct erdma_irq {
	char name[ERDMA_IRQNAME_SIZE];	/* name shown in /proc/interrupts */
	u32 msix_vector;		/* MSI-X vector number */
	cpumask_t affinity_hint_mask;	/* CPU affinity hint for the vector */
};
158
/* Per-CEQ control block: EQ plus its interrupt and bottom-half handler. */
struct erdma_eq_cb {
	bool ready;			/* true once the EQ is initialized */
	void *dev;			/* all EQs use this field to get the erdma_dev struct */
	struct erdma_irq irq;		/* MSI-X interrupt bound to this EQ */
	struct erdma_eq eq;		/* the event queue itself */
	struct tasklet_struct tasklet;	/* bottom half draining the EQ */
};
166
/* Bitmap-based allocator for a class of device resource indices. */
struct erdma_resource_cb {
	unsigned long *bitmap;		/* one bit per allocatable index */
	spinlock_t lock;		/* protects bitmap and next_alloc_idx */
	u32 next_alloc_idx;		/* round-robin search start position */
	u32 max_cap;			/* total number of indices */
};
173
/* Indices into erdma_dev.res_cb[], one allocator per resource class. */
enum {
	ERDMA_RES_TYPE_PD = 0,		/* protection domains */
	ERDMA_RES_TYPE_STAG_IDX = 1,	/* STag (memory key) indices */
	ERDMA_RES_CNT = 2,		/* count sentinel */
};
179
/* Extra space appended to queue buffers for the doorbell record. */
#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
/* Queue buffer size including the trailing doorbell record. */
#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)

/* Per-device state: one instance per ERDMA PCI function. */
struct erdma_dev {
	struct ib_device ibdev;		/* embedded RDMA core device; must allow container_of() */
	struct net_device *netdev;	/* companion Ethernet device */
	struct pci_dev *pdev;		/* underlying PCI function */
	struct notifier_block netdev_nb;	/* netdev event notifier */
	struct workqueue_struct *reflush_wq;	/* workqueue — presumably for QP reflush work; confirm in verbs code */

	resource_size_t func_bar_addr;	/* physical address of the function BAR */
	resource_size_t func_bar_len;	/* length of the function BAR */
	u8 __iomem *func_bar;		/* mapped function BAR */

	struct erdma_devattr attrs;	/* firmware-reported capabilities */
	/* physical port state (only one port per device) */
	enum ib_port_state state;
	u32 mtu;			/* current port MTU */

	/* cmdq and aeq use the same msix vector */
	struct erdma_irq comm_irq;
	struct erdma_cmdq cmdq;		/* admin command queue */
	struct erdma_eq aeq;		/* asynchronous event queue */
	struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];	/* completion EQs */

	spinlock_t lock;		/* protects next_alloc_qpn/next_alloc_cqn */
	struct erdma_resource_cb res_cb[ERDMA_RES_CNT];	/* PD/STag allocators */
	struct xarray qp_xa;		/* QPN -> QP lookup */
	struct xarray cq_xa;		/* CQN -> CQ lookup */

	u32 next_alloc_qpn;		/* round-robin QPN search start */
	u32 next_alloc_cqn;		/* round-robin CQN search start */

	atomic_t num_ctx;		/* count of user contexts */
	struct list_head cep_list;	/* connection endpoints (iWARP CM) */
};
216
/*
 * Return a pointer to entry @idx of a circular queue buffer.
 * @depth must be a power of two; @shift is log2 of the entry size,
 * so the byte offset is (idx mod depth) * entry_size.
 */
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
{
	u32 offset = (idx & (depth - 1)) << shift;

	return qbuf + offset;
}
223
/* Convert an embedded ib_device pointer back to its containing erdma_dev. */
static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct erdma_dev, ibdev);
}
228
/* Read a 32-bit MMIO register at byte offset @reg in the function BAR. */
static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
{
	u8 __iomem *addr = dev->func_bar + reg;

	return readl(addr);
}
233
erdma_reg_read64(struct erdma_dev * dev,u32 reg)234 static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
235 {
236 return readq(dev->func_bar + reg);
237 }
238
/* Write @value to the 32-bit MMIO register at byte offset @reg. */
static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
{
	u8 __iomem *addr = dev->func_bar + reg;

	writel(value, addr);
}
243
/* Write @value to the 64-bit MMIO register at byte offset @reg. */
static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
{
	u8 __iomem *addr = dev->func_bar + reg;

	writeq(value, addr);
}
248
/*
 * Read a 32-bit register and extract the field selected by @filed_mask.
 * NOTE(review): "filed" is a typo for "field", but the name is part of
 * the driver's API and is kept for compatibility with existing callers.
 */
static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
					 u32 filed_mask)
{
	return FIELD_GET(filed_mask, erdma_reg_read32(dev, reg));
}
256
/* Extract command response field 'name' from @val using its HW mask. */
#define ERDMA_GET(val, name) FIELD_GET(ERDMA_CMD_##name##_MASK, val)

/* Command queue setup/teardown. */
int erdma_cmdq_init(struct erdma_dev *dev);
void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);

/* Command submission: build a request header, post it and wait for the
 * completion; optional response payload is returned via resp0/resp1.
 */
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
			u64 *resp0, u64 *resp1);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);

/* Completion event queues (CEQs). */
int erdma_ceqs_init(struct erdma_dev *dev);
void erdma_ceqs_uninit(struct erdma_dev *dev);
void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);

/* Asynchronous event queue (AEQ). */
int erdma_aeq_init(struct erdma_dev *dev);
void erdma_aeq_destroy(struct erdma_dev *dev);

/* Interrupt bottom-half handlers. */
void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
278
279 #endif
280