/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

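/*
 * Worker for the deferred CQE flush: it moves the QP to the error state
 * through hns_roce_modify_qp() (a mailbox command that may sleep, hence
 * the workqueue context) and then drops the reference taken when the
 * work was queued, so that QP destruction can proceed.
 */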
static void flush_work_handle(struct work_struct *work)
{
        struct hns_roce_work *flush_work = container_of(work,
                                        struct hns_roce_work, work);
        struct hns_roce_qp *hr_qp = container_of(flush_work,
                                        struct hns_roce_qp, flush_work);
        struct device *dev = flush_work->hr_dev->dev;
        struct ib_qp_attr attr;
        int attr_mask;
        int ret;

        attr_mask = IB_QP_STATE;
        attr.qp_state = IB_QPS_ERR;

        if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
                ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
                if (ret)
                        dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
                                ret);
        }

        /*
         * Signal the QP destroy leg that the flush has completed, so that
         * it can safely proceed and destroy the QP.
         */
        if (refcount_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
}

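/*
 * Queue the deferred flush work for a QP. A reference on the QP is held
 * for the lifetime of the work item and released in flush_work_handle().
 */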
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_work *flush_work = &hr_qp->flush_work;

        flush_work->hr_dev = hr_dev;
        INIT_WORK(&flush_work->work, flush_work_handle);
        refcount_inc(&hr_qp->refcount);
        queue_work(hr_dev->irq_workq, &flush_work->work);
}

void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
{
        /*
         * Hip08 hardware cannot flush the WQEs in SQ/RQ when the QP enters
         * the error state, so as a workaround to this hardware limitation
         * the driver assists in flushing. But the flush operation uses a
         * mailbox to convey the QP state to the hardware, and the mailbox
         * calls can sleep due to the mutex protecting them. Hence, defer
         * the flush to a workqueue for now.
         */
        if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
                init_flush_work(dev, qp);
}

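/*
 * Dispatch an asynchronous hardware event to the QP that triggered it.
 * Fatal WQ/XRC errors force the QP into the error state and start a CQE
 * flush before the event is forwarded to the ULP's event handler.
 */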
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_qp *qp;

        xa_lock(&hr_dev->qp_table_xa);
        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                refcount_inc(&qp->refcount);
        xa_unlock(&hr_dev->qp_table_xa);

        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
            event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
            event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
            event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
            event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
                qp->state = IB_QPS_ERR;

                flush_cqe(hr_dev, qp);
        }

        qp->event(qp, (enum hns_roce_event)event_type);

        if (refcount_dec_and_test(&qp->refcount))
                complete(&qp->free);
}

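/* Translate a hns_roce async event into the corresponding IB event. */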
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                 enum hns_roce_event type)
{
        struct ib_qp *ibqp = &hr_qp->ibqp;
        struct ib_event event;

        if (ibqp->event_handler) {
                event.device = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
                case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
                                type, hr_qp->qpn);
                        return;
                }
                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

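/*
 * QPNs are spread across HNS_ROCE_QP_BANK_NUM banks; pick the bank with
 * the fewest QPNs in use to balance the load across banks.
 */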
static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
{
        u32 least_load = bank[0].inuse;
        u8 bankid = 0;
        u32 bankcnt;
        u8 i;

        for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
                bankcnt = bank[i].inuse;
                if (bankcnt < least_load) {
                        least_load = bankcnt;
                        bankid = i;
                }
        }

        return bankid;
}

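/*
 * Allocate an ID within a bank, preferring IDs at or above bank->next so
 * that QPNs are not reused immediately, and falling back to the start of
 * the range once it is exhausted. E.g. id 5 in bank 2 yields QPN
 * (5 << 3) | 2 = 0x2a.
 */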
static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
                                 unsigned long *qpn)
{
        int id;

        id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
        if (id < 0) {
                id = ida_alloc_range(&bank->ida, bank->min, bank->max,
                                     GFP_KERNEL);
                if (id < 0)
                        return id;
        }

        /* the QPN should keep increasing until the max value is reached. */
        bank->next = (id + 1) > bank->max ? bank->min : id + 1;

        /* the lower 3 bits are the bankid */
        *qpn = (id << 3) | bankid;

        return 0;
}
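
/* Allocate a QPN: the GSI QP always uses QPN 1; others come from the banks. */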
static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        unsigned long num = 0;
        u8 bankid;
        int ret;

        if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
                num = 1;
                hr_qp->doorbell_qpn = 1;
        } else {
                mutex_lock(&qp_table->bank_mutex);
                bankid = get_least_load_bankid_for_qp(qp_table->bank);

                ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
                                            &num);
                if (ret) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "failed to alloc QPN, ret = %d\n", ret);
                        mutex_unlock(&qp_table->bank_mutex);
                        return ret;
                }

                qp_table->bank[bankid].inuse++;
                mutex_unlock(&qp_table->bank_mutex);

                hr_qp->doorbell_qpn = (u32)num;
        }

        hr_qp->qpn = num;

        return 0;
}

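/*
 * Link the QP onto the device-wide QP list and onto its send/recv CQ
 * lists so that software can find it when flushing CQEs.
 */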
static void add_qp_to_list(struct hns_roce_dev *hr_dev,
                           struct hns_roce_qp *hr_qp,
                           struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
        struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
        unsigned long flags;

        hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
        hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

        spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
        hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

        list_add_tail(&hr_qp->node, &hr_dev->qp_list);
        if (hr_send_cq)
                list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
        if (hr_recv_cq)
                list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

        hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
        spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
                             struct hns_roce_qp *hr_qp,
                             struct ib_qp_init_attr *init_attr)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        int ret;

        if (!hr_qp->qpn)
                return -EINVAL;

        ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
        if (ret)
                dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
        else
                /* add QP to device's QP list for software CQE flush */
                add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
                               init_attr->recv_cq);

        return ret;
}

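/*
 * Reserve all per-QP context memory in HEM: the QP context itself, the
 * IRRL, and, when supported, the TRRL and SCC context.
 */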
static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = hr_dev->dev;
        int ret;

        if (!hr_qp->qpn)
                return -EINVAL;

        /* Alloc memory for QPC */
        ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "Failed to get QPC table\n");
                goto err_out;
        }

        /* Alloc memory for IRRL */
        ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "Failed to get IRRL table\n");
                goto err_put_qp;
        }

        if (hr_dev->caps.trrl_entry_sz) {
                /* Alloc memory for TRRL */
                ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "Failed to get TRRL table\n");
                        goto err_put_irrl;
                }
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
                /* Alloc memory for SCC CTX */
                ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "Failed to get SCC CTX table\n");
                        goto err_put_trrl;
                }
        }

        return 0;

err_put_trrl:
        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
        hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
        return ret;
}

static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
{
        rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        unsigned long flags;

        list_del(&hr_qp->node);

        if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
                list_del(&hr_qp->sq_node);

        if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
            hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
                list_del(&hr_qp->rq_node);

        xa_lock_irqsave(xa, flags);
        __xa_erase(xa, hr_qp->qpn);
        xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static inline u8 get_qp_bankid(unsigned long qpn)
{
        /* The lower 3 bits of QPN are used to hash to different banks */
        return (u8)(qpn & GENMASK(2, 0));
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        u8 bankid;

        if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
                return;

        if (hr_qp->qpn < hr_dev->caps.reserved_qps)
                return;

        bankid = get_qp_bankid(hr_qp->qpn);

        ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);

        mutex_lock(&hr_dev->qp_table.bank_mutex);
        hr_dev->qp_table.bank[bankid].inuse--;
        mutex_unlock(&hr_dev->qp_table.bank_mutex);
}

static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
                       bool user)
{
        u32 max_sge = dev->caps.max_rq_sg;

        if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                return max_sge;

        /* Reserve SGEs only for HIP08 in kernel; the userspace driver will
         * calculate the number of max_sge with reserved SGEs when allocating
         * the wqe buf, so there is no need to do this again in the kernel.
         * But the number may exceed the capacity of SGEs recorded in the
         * firmware, so the kernel driver should just adapt the value
         * accordingly.
         */
        if (user)
                max_sge = roundup_pow_of_two(max_sge + 1);
        else
                hr_qp->rq.rsv_sge = 1;

        return max_sge;
}

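/*
 * Work out the RQ depth and per-WQE SGE count from the requested
 * capabilities, rounding both up to powers of two, and write the actual
 * values back into @cap.
 */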
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
                       struct hns_roce_qp *hr_qp, int has_rq, bool user)
{
        u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
        u32 cnt;

        /* If an SRQ exists, set the RQ-related numbers to zero */
        if (!has_rq) {
                hr_qp->rq.wqe_cnt = 0;
                hr_qp->rq.max_gs = 0;
                hr_qp->rq_inl_buf.wqe_cnt = 0;
                cap->max_recv_wr = 0;
                cap->max_recv_sge = 0;

                return 0;
        }

        /* Check the validity of the QP support capacity */
        if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
            cap->max_recv_sge > max_sge) {
                ibdev_err(&hr_dev->ib_dev,
                          "RQ config error, depth = %u, sge = %u\n",
                          cap->max_recv_wr, cap->max_recv_sge);
                return -EINVAL;
        }

        cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
        if (cnt > hr_dev->caps.max_wqes) {
                ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
                          cap->max_recv_wr);
                return -EINVAL;
        }

        hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
                                              hr_qp->rq.rsv_sge);

        hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
                                    hr_qp->rq.max_gs);

        hr_qp->rq.wqe_cnt = cnt;
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
            hr_qp->ibqp.qp_type != IB_QPT_UD &&
            hr_qp->ibqp.qp_type != IB_QPT_GSI)
                hr_qp->rq_inl_buf.wqe_cnt = cnt;
        else
                hr_qp->rq_inl_buf.wqe_cnt = 0;

        cap->max_recv_wr = cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;

        return 0;
}

static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp)
{
        /* GSI/UD QPs only have extended SGEs */
        if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD)
                return qp->sq.max_gs;

        if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
                return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE;

        return 0;
}

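/*
 * Size the extended SGE region of the SQ: a WQE holds at most
 * HNS_ROCE_SGE_IN_WQE SGEs inline (none for GSI/UD), and the remainder
 * live in the extended SGE region.
 */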
static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
                              struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
{
        u32 total_sge_cnt;
        u32 wqe_sge_cnt;

        hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;

        hr_qp->sq.max_gs = max(1U, cap->max_send_sge);

        wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);

        /* If the number of extended SGEs is not zero, they MUST use at
         * least a space of HNS_HW_PAGE_SIZE.
         */
        if (wqe_sge_cnt) {
                total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt);
                hr_qp->sge.sge_cnt = max(total_sge_cnt,
                                (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
        }
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
                                        struct ib_qp_cap *cap,
                                        struct hns_roce_ib_create_qp *ucmd)
{
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);

        /* Sanity check SQ size before proceeding */
        if (ucmd->log_sq_stride > max_sq_stride ||
            ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
                ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
                return -EINVAL;
        }

        if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
                ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
                          cap->max_send_sge);
                return -EINVAL;
        }

        return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
                            struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
                            struct hns_roce_ib_create_qp *ucmd)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u32 cnt = 0;
        int ret;

        if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
            cnt > hr_dev->caps.max_wqes)
                return -EINVAL;

        ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
        if (ret) {
                ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
                          ret);
                return ret;
        }

        set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
        hr_qp->sq.wqe_cnt = cnt;

        return 0;
}

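/*
 * Describe the WQE buffer layout for the MTR layer: SQ WQEs first, then
 * the extended SGE region, then RQ WQEs, recording the size and hop
 * number of each region.
 */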
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp,
                            struct hns_roce_buf_attr *buf_attr)
{
        int buf_size;
        int idx = 0;

        hr_qp->buff_size = 0;

        /* SQ WQE */
        hr_qp->sq.offset = 0;
        buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
                                          hr_qp->sq.wqe_shift);
        if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
                buf_attr->region[idx].size = buf_size;
                buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
                idx++;
                hr_qp->buff_size += buf_size;
        }

        /* extended SGE WQE in SQ */
        hr_qp->sge.offset = hr_qp->buff_size;
        buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
                                          hr_qp->sge.sge_shift);
        if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
                buf_attr->region[idx].size = buf_size;
                buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
                idx++;
                hr_qp->buff_size += buf_size;
        }

        /* RQ WQE */
        hr_qp->rq.offset = hr_qp->buff_size;
        buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
                                          hr_qp->rq.wqe_shift);
        if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
                buf_attr->region[idx].size = buf_size;
                buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
                idx++;
                hr_qp->buff_size += buf_size;
        }

        if (hr_qp->buff_size < 1)
                return -EINVAL;

        buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
        buf_attr->region_count = idx;

        return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                              struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u32 cnt;

        if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg) {
                ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n");
                return -EINVAL;
        }

        cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
        if (cnt > hr_dev->caps.max_wqes) {
                ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
                          cnt);
                return -EINVAL;
        }

        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
        hr_qp->sq.wqe_cnt = cnt;

        set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

        /* sync the parameters of kernel QP to user's configuration */
        cap->max_send_wr = cnt;
        cap->max_send_sge = hr_qp->sq.max_gs;

        return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
                return 0;

        return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}

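/*
 * For kernel QPs with RQ inline enabled, allocate one contiguous sg_list
 * array shared by all RQ WQEs; wqe_list[i] points at its own slice.
 */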
static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
                               struct ib_qp_init_attr *init_attr)
{
        u32 max_recv_sge = init_attr->cap.max_recv_sge;
        u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
        struct hns_roce_rinl_wqe *wqe_list;
        int i;

        /* allocate recv inline buf */
        wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
                           GFP_KERNEL);
        if (!wqe_list)
                goto err;

        /* Allocate a contiguous buffer for all the inline SGEs we need */
        wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
                                      sizeof(struct hns_roce_rinl_sge)),
                                      GFP_KERNEL);
        if (!wqe_list[0].sg_list)
                goto err_wqe_list;

        /* Assign buffers of sg_list to each inline wqe */
        for (i = 1; i < wqe_cnt; i++)
                wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

        hr_qp->rq_inl_buf.wqe_list = wqe_list;

        return 0;

err_wqe_list:
        kfree(wqe_list);

err:
        return -ENOMEM;
}

static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
        if (hr_qp->rq_inl_buf.wqe_list)
                kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
        kfree(hr_qp->rq_inl_buf.wqe_list);
}

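/* Allocate the WQE buffer, and the RQ inline buffer for kernel QPs. */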
static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                        struct ib_qp_init_attr *init_attr,
                        struct ib_udata *udata, unsigned long addr)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_buf_attr buf_attr = {};
        int ret;

        if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
                ret = alloc_rq_inline_buf(hr_qp, init_attr);
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to alloc inline buf, ret = %d.\n",
                                  ret);
                        return ret;
                }
        } else {
                hr_qp->rq_inl_buf.wqe_list = NULL;
        }

        ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
        if (ret) {
                ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
                goto err_inline;
        }
        ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
                                  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
                                  udata, addr);
        if (ret) {
                ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
                goto err_inline;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;

        return 0;

err_inline:
        free_rq_inline_buf(hr_qp);

        return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
        free_rq_inline_buf(hr_qp);
}

static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
                                   struct ib_qp_init_attr *init_attr,
                                   struct ib_udata *udata,
                                   struct hns_roce_ib_create_qp_resp *resp,
                                   struct hns_roce_ib_create_qp *ucmd)
{
        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
                udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
                hns_roce_qp_has_sq(init_attr) &&
                udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
                                   struct ib_qp_init_attr *init_attr,
                                   struct ib_udata *udata,
                                   struct hns_roce_ib_create_qp_resp *resp)
{
        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
                udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
                hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
                                     struct ib_qp_init_attr *init_attr)
{
        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
                hns_roce_qp_has_rq(init_attr));
}

static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
                         struct hns_roce_dev *hr_dev,
                         struct ib_udata *udata,
                         struct hns_roce_ib_create_qp_resp *resp)
{
        struct hns_roce_ucontext *uctx =
                rdma_udata_to_drv_context(udata,
                        struct hns_roce_ucontext, ibucontext);
        struct rdma_user_mmap_entry *rdma_entry;
        u64 address;

        address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;

        hr_qp->dwqe_mmap_entry =
                hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
                                                HNS_ROCE_DWQE_SIZE,
                                                HNS_ROCE_MMAP_TYPE_DWQE);

        if (!hr_qp->dwqe_mmap_entry) {
                ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
                return -ENOMEM;
        }

        rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
        resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);

        return 0;
}

static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata,
                            struct hns_roce_ib_create_qp *ucmd,
                            struct hns_roce_ib_create_qp_resp *resp)
{
        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
                struct hns_roce_ucontext, ibucontext);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;

        if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
                ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to map user SQ doorbell, ret = %d.\n",
                                  ret);
                        goto err_out;
                }
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
        }

        if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
                ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to map user RQ doorbell, ret = %d.\n",
                                  ret);
                        goto err_sdb;
                }
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
        }

        return 0;

err_sdb:
        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
                hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
        return ret;
}

static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
                              struct hns_roce_qp *hr_qp,
                              struct ib_qp_init_attr *init_attr)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;

        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                hr_qp->sq.db_reg = hr_dev->mem_base +
                                   HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
        else
                hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
                                   DB_REG_OFFSET * hr_dev->priv_uar.index;

        hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
                           DB_REG_OFFSET * hr_dev->priv_uar.index;

        if (kernel_qp_has_rdb(hr_dev, init_attr)) {
                ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to alloc kernel RQ doorbell, ret = %d.\n",
                                  ret);
                        return ret;
                }
                *hr_qp->rdb.db_record = 0;
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
        }

        return 0;
}

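/*
 * Set up the doorbells: user QPs may get a direct WQE mmap entry and
 * mapped record doorbells, while kernel QPs use the device registers
 * plus an optional RQ record doorbell.
 */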
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                       struct ib_qp_init_attr *init_attr,
                       struct ib_udata *udata,
                       struct hns_roce_ib_create_qp *ucmd,
                       struct hns_roce_ib_create_qp_resp *resp)
{
        int ret;

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;

        if (udata) {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
                        ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
                        if (ret)
                                return ret;
                }

                ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
                                       resp);
                if (ret)
                        goto err_remove_qp;
        } else {
                ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
                if (ret)
                        return ret;
        }

        return 0;

err_remove_qp:
        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
                qp_user_mmap_entry_remove(hr_qp);

        return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                       struct ib_udata *udata)
{
        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct hns_roce_ucontext, ibucontext);

        if (udata) {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                        hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
                        hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
                        qp_user_mmap_entry_remove(hr_qp);
        } else {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                        hns_roce_free_db(hr_dev, &hr_qp->rdb);
        }
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
                             struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u64 *sq_wrid = NULL;
        u64 *rq_wrid = NULL;
        int ret;

        sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(sq_wrid)) {
                ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
                return -ENOMEM;
        }

        if (hr_qp->rq.wqe_cnt) {
                rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
                if (ZERO_OR_NULL_PTR(rq_wrid)) {
                        ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
                        ret = -ENOMEM;
                        goto err_sq;
                }
        }

        hr_qp->sq.wrid = sq_wrid;
        hr_qp->rq.wrid = rq_wrid;
        return 0;
err_sq:
        kfree(sq_wrid);

        return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
        kfree(hr_qp->rq.wrid);
        kfree(hr_qp->sq.wrid);
}

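/*
 * Validate and translate the creation attributes into driver QP
 * parameters, sizing the RQ and the SQ for either a user or a kernel QP.
 */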
static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                        struct ib_qp_init_attr *init_attr,
                        struct ib_udata *udata,
                        struct hns_roce_ib_create_qp *ucmd)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;

        if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
                init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;

        hr_qp->max_inline_data = init_attr->cap.max_inline_data;

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
        else
                hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

        ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
                          hns_roce_qp_has_rq(init_attr), !!udata);
        if (ret) {
                ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
                          ret);
                return ret;
        }

        if (udata) {
                ret = ib_copy_from_udata(ucmd, udata,
                                         min(udata->inlen, sizeof(*ucmd)));
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to copy QP ucmd, ret = %d\n", ret);
                        return ret;
                }

                ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
                if (ret)
                        ibdev_err(ibdev,
                                  "failed to set user SQ size, ret = %d.\n",
                                  ret);
        } else {
                ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
                if (ret)
                        ibdev_err(ibdev,
                                  "failed to set kernel SQ size, ret = %d.\n",
                                  ret);
        }

        return ret;
}

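/*
 * Common QP creation path: size the queues, then allocate the WQE
 * buffer, QPN, doorbells and context, and finally publish the QP in the
 * xarray. Everything is unwound in reverse order on failure.
 */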
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                     struct ib_pd *ib_pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata,
                                     struct hns_roce_qp *hr_qp)
{
        struct hns_roce_ib_create_qp_resp resp = {};
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_ib_create_qp ucmd;
        int ret;

        mutex_init(&hr_qp->mutex);
        spin_lock_init(&hr_qp->sq.lock);
        spin_lock_init(&hr_qp->rq.lock);

        hr_qp->state = IB_QPS_RESET;
        hr_qp->flush_flag = 0;

        if (init_attr->create_flags)
                return -EOPNOTSUPP;

        ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
        if (ret) {
                ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
                return ret;
        }

        if (!udata) {
                ret = alloc_kernel_wrid(hr_dev, hr_qp);
                if (ret) {
                        ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
                                  ret);
                        return ret;
                }
        }

        ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
                goto err_buf;
        }

        ret = alloc_qpn(hr_dev, hr_qp);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
                goto err_qpn;
        }

        ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
                          ret);
                goto err_db;
        }

        ret = alloc_qpc(hr_dev, hr_qp);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
                          ret);
                goto err_qpc;
        }

        ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
        if (ret) {
                ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
                goto err_store;
        }

        if (udata) {
                resp.cap_flags = hr_qp->en_flags;
                ret = ib_copy_to_udata(udata, &resp,
                                       min(udata->outlen, sizeof(resp)));
                if (ret) {
                        ibdev_err(ibdev, "failed to copy QP resp, ret = %d.\n",
                                  ret);
                        goto err_store;
                }
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
                ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
                if (ret)
                        goto err_flow_ctrl;
        }

        hr_qp->ibqp.qp_num = hr_qp->qpn;
        hr_qp->event = hns_roce_ib_qp_event;
        refcount_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_flow_ctrl:
        hns_roce_qp_remove(hr_dev, hr_qp);
err_store:
        free_qpc(hr_dev, hr_qp);
err_qpc:
        free_qp_db(hr_dev, hr_qp, udata);
err_db:
        free_qpn(hr_dev, hr_qp);
err_qpn:
        free_qp_buf(hr_dev, hr_qp);
err_buf:
        free_kernel_wrid(hr_qp);
        return ret;
}

void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                         struct ib_udata *udata)
{
        if (refcount_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);

        free_qpc(hr_dev, hr_qp);
        free_qpn(hr_dev, hr_qp);
        free_qp_buf(hr_dev, hr_qp);
        free_kernel_wrid(hr_qp);
        free_qp_db(hr_dev, hr_qp, udata);
}

static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
                         bool is_user)
{
        switch (type) {
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:
                if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
                        goto out;
                break;
        case IB_QPT_UD:
                if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
                    is_user)
                        goto out;
                break;
        case IB_QPT_RC:
        case IB_QPT_GSI:
                break;
        default:
                goto out;
        }

        return 0;

out:
        ibdev_err(&hr_dev->ib_dev, "QP type %d is not supported\n", type);

        return -EOPNOTSUPP;
}

int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
                       struct ib_udata *udata)
{
        struct ib_device *ibdev = qp->device;
        struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
        struct hns_roce_qp *hr_qp = to_hr_qp(qp);
        struct ib_pd *pd = qp->pd;
        int ret;

        ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
        if (ret)
                return ret;

        if (init_attr->qp_type == IB_QPT_XRC_TGT)
                hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;

        if (init_attr->qp_type == IB_QPT_GSI) {
                hr_qp->port = init_attr->port_num - 1;
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
        }

        ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
        if (ret)
                ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
                          init_attr->qp_type, ret);

        return ret;
}

int to_hr_qp_type(int qp_type)
{
        switch (qp_type) {
        case IB_QPT_RC:
                return SERV_TYPE_RC;
        case IB_QPT_UD:
        case IB_QPT_GSI:
                return SERV_TYPE_UD;
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:
                return SERV_TYPE_XRC;
        default:
                return -1;
        }
}

static int check_mtu_validate(struct hns_roce_dev *hr_dev,
                              struct hns_roce_qp *hr_qp,
                              struct ib_qp_attr *attr, int attr_mask)
{
        enum ib_mtu active_mtu;
        int p;

        p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
        active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

        if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
            attr->path_mtu > hr_dev->caps.max_mtu) ||
            attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid attr path_mtu (%d) while modifying QP\n",
                          attr->path_mtu);
                return -EINVAL;
        }

        return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                  int attr_mask)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        int p;

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
                ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
                          attr->port_num);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "invalid attr, pkey_index = %u.\n",
                                  attr->pkey_index);
                        return -EINVAL;
                }
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid attr, max_rd_atomic = %u.\n",
                          attr->max_rd_atomic);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid attr, max_dest_rd_atomic = %u.\n",
                          attr->max_dest_rd_atomic);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_PATH_MTU)
                return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

        return 0;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int ret = -EINVAL;

        mutex_lock(&hr_qp->mutex);

        if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
                goto out;

        cur_state = hr_qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (ibqp->uobject &&
            (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
                        hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

                        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                                hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
                } else {
                        ibdev_warn(&hr_dev->ib_dev,
                                   "flush cqe is not supported in userspace!\n");
                        goto out;
                }
        }

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask)) {
                ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }

        ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
        if (ret)
                goto out;

        if (cur_state == new_state && cur_state == IB_QPS_RESET)
                goto out;

        ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
                                    new_state);

out:
        mutex_unlock(&hr_qp->mutex);

        return ret;
}

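/*
 * Lock the send and recv CQs in a consistent order (lowest CQN first) to
 * avoid ABBA deadlocks; the sparse annotations cover the cases where one
 * or both CQs are absent or identical.
 */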
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
                       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (unlikely(send_cq == NULL && recv_cq == NULL)) {
                __acquire(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
                spin_lock_irq(&recv_cq->lock);
                __acquire(&send_cq->lock);
        } else if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
                         __releases(&recv_cq->lock)
{
        if (unlikely(send_cq == NULL && recv_cq == NULL)) {
                __release(&recv_cq->lock);
                __release(&send_cq->lock);
        } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
                __release(&recv_cq->lock);
                spin_unlock(&send_cq->lock);
        } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
                __release(&send_cq->lock);
                spin_unlock(&recv_cq->lock);
        } else if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
        return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
        return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
        return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
{
        return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}

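/*
 * Check whether posting @nreq more WRs would overflow the work queue;
 * re-read the indices under the CQ lock so that a tail update made by a
 * concurrent poller is observed.
 */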
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
                          struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq;
        u32 cur;

        cur = hr_wq->head - hr_wq->tail;
        if (likely(cur + nreq < hr_wq->wqe_cnt))
                return false;

        hr_cq = to_hr_cq(ib_cq);
        spin_lock(&hr_cq->lock);
        cur = hr_wq->head - hr_wq->tail;
        spin_unlock(&hr_cq->lock);

        return cur + nreq >= hr_wq->wqe_cnt;
}

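/*
 * Initialize the QPN banks: reserved QPNs are marked in-use from the
 * bottom of each bank, and every bank gets an IDA covering its share of
 * the QPN space.
 */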
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        unsigned int reserved_from_bot;
        unsigned int i;

        qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
                                                sizeof(u32), GFP_KERNEL);
        if (!qp_table->idx_table.spare_idx)
                return -ENOMEM;

        mutex_init(&qp_table->scc_mutex);
        mutex_init(&qp_table->bank_mutex);
        xa_init(&hr_dev->qp_table_xa);

        reserved_from_bot = hr_dev->caps.reserved_qps;

        for (i = 0; i < reserved_from_bot; i++) {
                hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
                hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
        }

        for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
                ida_init(&hr_dev->qp_table.bank[i].ida);
                hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
                                               HNS_ROCE_QP_BANK_NUM - 1;
                hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
        }

        return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
        int i;

        for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
                ida_destroy(&hr_dev->qp_table.bank[i].ida);
        kfree(hr_dev->qp_table.idx_table.spare_idx);
}