1 /*
2 * Copyright (c) 2016 Hisilicon Limited.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #include "hns_roce_device.h"
35 #include "hns_roce_hem.h"
36 #include "hns_roce_common.h"
37
38 #define HEM_INDEX_BUF BIT(0)
39 #define HEM_INDEX_L0 BIT(1)
40 #define HEM_INDEX_L1 BIT(2)
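/*
 * The HEM_INDEX_* flags are recorded in hns_roce_hem_index.inited to remember
 * which levels (buffer HEM, L0 BT, L1 BT) were actually allocated for an
 * object, so the error and release paths free exactly what was set up.
 */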
41 struct hns_roce_hem_index {
42 u64 buf;
43 u64 l0;
44 u64 l1;
45 u32 inited; /* indicate which index is available */
46 };
47
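/*
 * Return true if the given context table type is configured to use multi-hop
 * addressing, i.e. its hop number in the device caps is non-zero.
 */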
48 bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
49 {
50 int hop_num = 0;
51
52 switch (type) {
53 case HEM_TYPE_QPC:
54 hop_num = hr_dev->caps.qpc_hop_num;
55 break;
56 case HEM_TYPE_MTPT:
57 hop_num = hr_dev->caps.mpt_hop_num;
58 break;
59 case HEM_TYPE_CQC:
60 hop_num = hr_dev->caps.cqc_hop_num;
61 break;
62 case HEM_TYPE_SRQC:
63 hop_num = hr_dev->caps.srqc_hop_num;
64 break;
65 case HEM_TYPE_SCCC:
66 hop_num = hr_dev->caps.sccc_hop_num;
67 break;
68 case HEM_TYPE_QPC_TIMER:
69 hop_num = hr_dev->caps.qpc_timer_hop_num;
70 break;
71 case HEM_TYPE_CQC_TIMER:
72 hop_num = hr_dev->caps.cqc_timer_hop_num;
73 break;
74 case HEM_TYPE_GMV:
75 hop_num = hr_dev->caps.gmv_hop_num;
76 break;
77 default:
78 return false;
79 }
80
81 return hop_num ? true : false;
82 }
83
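/*
 * Check whether all other HEM entries sharing the same BT chunk as hem_idx
 * are empty; if so, the parent BT page is no longer referenced and can be
 * freed by the caller.
 */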
84 static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
85 u32 bt_chunk_num, u64 hem_max_num)
86 {
87 u64 start_idx = round_down(hem_idx, bt_chunk_num);
88 u64 check_max_num = start_idx + bt_chunk_num;
89 u64 i;
90
91 for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
92 if (i != hem_idx && hem[i])
93 return false;
94
95 return true;
96 }
97
98 static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
99 {
100 u64 start_idx = round_down(ba_idx, bt_chunk_num);
101 int i;
102
103 for (i = 0; i < bt_chunk_num; i++)
104 if (i != ba_idx && bt[start_idx + i])
105 return false;
106
107 return true;
108 }
109
110 static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
111 {
112 if (check_whether_bt_num_3(table_type, hop_num))
113 return 3;
114 else if (check_whether_bt_num_2(table_type, hop_num))
115 return 2;
116 else if (check_whether_bt_num_1(table_type, hop_num))
117 return 1;
118 else
119 return 0;
120 }
121
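/*
 * Fill the mhop configuration (buffer/BT chunk sizes, number of L0 BAs and
 * hop number) for the given table type from the device capabilities.
 */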
122 static int get_hem_table_config(struct hns_roce_dev *hr_dev,
123 struct hns_roce_hem_mhop *mhop,
124 u32 type)
125 {
126 struct device *dev = hr_dev->dev;
127
128 switch (type) {
129 case HEM_TYPE_QPC:
130 mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
131 + PAGE_SHIFT);
132 mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
133 + PAGE_SHIFT);
134 mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
135 mhop->hop_num = hr_dev->caps.qpc_hop_num;
136 break;
137 case HEM_TYPE_MTPT:
138 mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
139 + PAGE_SHIFT);
140 mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
141 + PAGE_SHIFT);
142 mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
143 mhop->hop_num = hr_dev->caps.mpt_hop_num;
144 break;
145 case HEM_TYPE_CQC:
146 mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
147 + PAGE_SHIFT);
148 mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
149 + PAGE_SHIFT);
150 mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
151 mhop->hop_num = hr_dev->caps.cqc_hop_num;
152 break;
153 case HEM_TYPE_SCCC:
154 mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
155 + PAGE_SHIFT);
156 mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
157 + PAGE_SHIFT);
158 mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
159 mhop->hop_num = hr_dev->caps.sccc_hop_num;
160 break;
161 case HEM_TYPE_QPC_TIMER:
162 mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
163 + PAGE_SHIFT);
164 mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
165 + PAGE_SHIFT);
166 mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
167 mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
168 break;
169 case HEM_TYPE_CQC_TIMER:
170 mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
171 + PAGE_SHIFT);
172 mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
173 + PAGE_SHIFT);
174 mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
175 mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
176 break;
177 case HEM_TYPE_SRQC:
178 mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
179 + PAGE_SHIFT);
180 mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
181 + PAGE_SHIFT);
182 mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
183 mhop->hop_num = hr_dev->caps.srqc_hop_num;
184 break;
185 case HEM_TYPE_GMV:
186 mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
187 PAGE_SHIFT);
188 mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
189 PAGE_SHIFT);
190 mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
191 mhop->hop_num = hr_dev->caps.gmv_hop_num;
192 break;
193 default:
194 dev_err(dev, "table %u not support multi-hop addressing!\n",
195 type);
196 return -EINVAL;
197 }
198
199 return 0;
200 }
201
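/*
 * Split an object index into per-level BT indexes (l0/l1/l2) for a multi-hop
 * table. As an illustration (numbers only for example): with a 4 KB BT chunk,
 * chunk_ba_num = 512, so for a 2-hop table table_idx = 1000 yields
 * l0_idx = 1000 / 512 = 1 and l1_idx = 1000 & 511 = 488.
 * When obj is NULL, only the per-table config (chunk sizes, hop number) is
 * filled in and no index is computed.
 */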
202 int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
203 struct hns_roce_hem_table *table, unsigned long *obj,
204 struct hns_roce_hem_mhop *mhop)
205 {
206 struct device *dev = hr_dev->dev;
207 u32 chunk_ba_num;
208 u32 chunk_size;
209 u32 table_idx;
210 u32 bt_num;
211
212 if (get_hem_table_config(hr_dev, mhop, table->type))
213 return -EINVAL;
214
215 if (!obj)
216 return 0;
217
218 /*
219 * QPC/MTPT/CQC/SRQC/SCCC tables allocate HEM for buffer pages.
220 * MTT/CQE tables allocate HEM for BT pages.
221 */
222 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
223 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
224 chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
225 mhop->bt_chunk_size;
226 table_idx = *obj / (chunk_size / table->obj_size);
227 switch (bt_num) {
228 case 3:
229 mhop->l2_idx = table_idx & (chunk_ba_num - 1);
230 mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
231 mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
232 break;
233 case 2:
234 mhop->l1_idx = table_idx & (chunk_ba_num - 1);
235 mhop->l0_idx = table_idx / chunk_ba_num;
236 break;
237 case 1:
238 mhop->l0_idx = table_idx;
239 break;
240 default:
241 dev_err(dev, "table %u not support hop_num = %u!\n",
242 table->type, mhop->hop_num);
243 return -EINVAL;
244 }
245 if (mhop->l0_idx >= mhop->ba_l0_num)
246 mhop->l0_idx %= mhop->ba_l0_num;
247
248 return 0;
249 }
250
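/*
 * Allocate a HEM covering 'npages' pages as one or more DMA-coherent chunks
 * tracked on hem->chunk_list. If any chunk allocation fails, everything
 * allocated so far is freed and NULL is returned.
 */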
251 static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
252 int npages,
253 unsigned long hem_alloc_size,
254 gfp_t gfp_mask)
255 {
256 struct hns_roce_hem_chunk *chunk = NULL;
257 struct hns_roce_hem *hem;
258 struct scatterlist *mem;
259 int order;
260 void *buf;
261
262 WARN_ON(gfp_mask & __GFP_HIGHMEM);
263
264 hem = kmalloc(sizeof(*hem),
265 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
266 if (!hem)
267 return NULL;
268
269 INIT_LIST_HEAD(&hem->chunk_list);
270
271 order = get_order(hem_alloc_size);
272
273 while (npages > 0) {
274 if (!chunk) {
275 chunk = kmalloc(sizeof(*chunk),
276 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
277 if (!chunk)
278 goto fail;
279
280 sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
281 chunk->npages = 0;
282 chunk->nsg = 0;
283 memset(chunk->buf, 0, sizeof(chunk->buf));
284 list_add_tail(&chunk->list, &hem->chunk_list);
285 }
286
287 while (1 << order > npages)
288 --order;
289
290 /*
291 * Allocate the memory in one shot. If that fails, do not fall
292 * back to smaller blocks; return failure directly.
293 */
294 mem = &chunk->mem[chunk->npages];
295 buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
296 &sg_dma_address(mem), gfp_mask);
297 if (!buf)
298 goto fail;
299
300 chunk->buf[chunk->npages] = buf;
301 sg_dma_len(mem) = PAGE_SIZE << order;
302
303 ++chunk->npages;
304 ++chunk->nsg;
305 npages -= 1 << order;
306 }
307
308 return hem;
309
310 fail:
311 hns_roce_free_hem(hr_dev, hem);
312 return NULL;
313 }
314
315 void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
316 {
317 struct hns_roce_hem_chunk *chunk, *tmp;
318 int i;
319
320 if (!hem)
321 return;
322
323 list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
324 for (i = 0; i < chunk->npages; ++i)
325 dma_free_coherent(hr_dev->dev,
326 sg_dma_len(&chunk->mem[i]),
327 chunk->buf[i],
328 sg_dma_address(&chunk->mem[i]));
329 kfree(chunk);
330 }
331
332 kfree(hem);
333 }
334
335 static int calc_hem_config(struct hns_roce_dev *hr_dev,
336 struct hns_roce_hem_table *table, unsigned long obj,
337 struct hns_roce_hem_mhop *mhop,
338 struct hns_roce_hem_index *index)
339 {
340 struct ib_device *ibdev = &hr_dev->ib_dev;
341 unsigned long mhop_obj = obj;
342 u32 l0_idx, l1_idx, l2_idx;
343 u32 chunk_ba_num;
344 u32 bt_num;
345 int ret;
346
347 ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
348 if (ret)
349 return ret;
350
351 l0_idx = mhop->l0_idx;
352 l1_idx = mhop->l1_idx;
353 l2_idx = mhop->l2_idx;
354 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
355 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
356 switch (bt_num) {
357 case 3:
358 index->l1 = l0_idx * chunk_ba_num + l1_idx;
359 index->l0 = l0_idx;
360 index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
361 l1_idx * chunk_ba_num + l2_idx;
362 break;
363 case 2:
364 index->l0 = l0_idx;
365 index->buf = l0_idx * chunk_ba_num + l1_idx;
366 break;
367 case 1:
368 index->buf = l0_idx;
369 break;
370 default:
371 ibdev_err(ibdev, "table %u not support mhop.hop_num = %u!\n",
372 table->type, mhop->hop_num);
373 return -EINVAL;
374 }
375
376 if (unlikely(index->buf >= table->num_hem)) {
377 ibdev_err(ibdev, "table %u exceed hem limt idx %llu, max %lu!\n",
378 table->type, index->buf, table->num_hem);
379 return -EINVAL;
380 }
381
382 return 0;
383 }
384
385 static void free_mhop_hem(struct hns_roce_dev *hr_dev,
386 struct hns_roce_hem_table *table,
387 struct hns_roce_hem_mhop *mhop,
388 struct hns_roce_hem_index *index)
389 {
390 u32 bt_size = mhop->bt_chunk_size;
391 struct device *dev = hr_dev->dev;
392
393 if (index->inited & HEM_INDEX_BUF) {
394 hns_roce_free_hem(hr_dev, table->hem[index->buf]);
395 table->hem[index->buf] = NULL;
396 }
397
398 if (index->inited & HEM_INDEX_L1) {
399 dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
400 table->bt_l1_dma_addr[index->l1]);
401 table->bt_l1[index->l1] = NULL;
402 }
403
404 if (index->inited & HEM_INDEX_L0) {
405 dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
406 table->bt_l0_dma_addr[index->l0]);
407 table->bt_l0[index->l0] = NULL;
408 }
409 }
410
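/*
 * Allocate the missing BT levels (L0/L1) and the buffer HEM for one object,
 * and write each newly allocated child's DMA address into its parent BT
 * entry. On failure, free_mhop_hem() undoes whatever was allocated.
 */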
411 static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
412 struct hns_roce_hem_table *table,
413 struct hns_roce_hem_mhop *mhop,
414 struct hns_roce_hem_index *index)
415 {
416 u32 bt_size = mhop->bt_chunk_size;
417 struct device *dev = hr_dev->dev;
418 struct hns_roce_hem_iter iter;
419 gfp_t flag;
420 u64 bt_ba;
421 u32 size;
422 int ret;
423
424 /* alloc L1 BA's chunk */
425 if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
426 check_whether_bt_num_2(table->type, mhop->hop_num)) &&
427 !table->bt_l0[index->l0]) {
428 table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
429 &table->bt_l0_dma_addr[index->l0],
430 GFP_KERNEL);
431 if (!table->bt_l0[index->l0]) {
432 ret = -ENOMEM;
433 goto out;
434 }
435 index->inited |= HEM_INDEX_L0;
436 }
437
438 /* alloc L2 BA's chunk */
439 if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
440 !table->bt_l1[index->l1]) {
441 table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
442 &table->bt_l1_dma_addr[index->l1],
443 GFP_KERNEL);
444 if (!table->bt_l1[index->l1]) {
445 ret = -ENOMEM;
446 goto err_alloc_hem;
447 }
448 index->inited |= HEM_INDEX_L1;
449 *(table->bt_l0[index->l0] + mhop->l1_idx) =
450 table->bt_l1_dma_addr[index->l1];
451 }
452
453 /*
454 * Allocate a buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
455 * Allocate a BT space chunk for MTT/CQE.
456 */
457 size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
458 flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
459 table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
460 size, flag);
461 if (!table->hem[index->buf]) {
462 ret = -ENOMEM;
463 goto err_alloc_hem;
464 }
465
466 index->inited |= HEM_INDEX_BUF;
467 hns_roce_hem_first(table->hem[index->buf], &iter);
468 bt_ba = hns_roce_hem_addr(&iter);
469 if (table->type < HEM_TYPE_MTT) {
470 if (mhop->hop_num == 2)
471 *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
472 else if (mhop->hop_num == 1)
473 *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
474 } else if (mhop->hop_num == 2) {
475 *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
476 }
477
478 return 0;
479 err_alloc_hem:
480 free_mhop_hem(hr_dev, table, mhop, index);
481 out:
482 return ret;
483 }
484
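/*
 * Program the newly allocated BT and buffer addresses into the hardware by
 * issuing hw->set_hem() for each level recorded in index->inited.
 */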
485 static int set_mhop_hem(struct hns_roce_dev *hr_dev,
486 struct hns_roce_hem_table *table, unsigned long obj,
487 struct hns_roce_hem_mhop *mhop,
488 struct hns_roce_hem_index *index)
489 {
490 struct ib_device *ibdev = &hr_dev->ib_dev;
491 u32 step_idx;
492 int ret = 0;
493
494 if (index->inited & HEM_INDEX_L0) {
495 ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
496 if (ret) {
497 ibdev_err(ibdev, "set HEM step 0 failed!\n");
498 goto out;
499 }
500 }
501
502 if (index->inited & HEM_INDEX_L1) {
503 ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
504 if (ret) {
505 ibdev_err(ibdev, "set HEM step 1 failed!\n");
506 goto out;
507 }
508 }
509
510 if (index->inited & HEM_INDEX_BUF) {
511 if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
512 step_idx = 0;
513 else
514 step_idx = mhop->hop_num;
515 ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
516 if (ret)
517 ibdev_err(ibdev, "set HEM step last failed!\n");
518 }
519 out:
520 return ret;
521 }
522
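/*
 * Take a reference on the HEM that backs 'obj' in a multi-hop table,
 * allocating it and, for context tables, programming its address into the
 * hardware on first use.
 */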
523 static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
524 struct hns_roce_hem_table *table,
525 unsigned long obj)
526 {
527 struct ib_device *ibdev = &hr_dev->ib_dev;
528 struct hns_roce_hem_index index = {};
529 struct hns_roce_hem_mhop mhop = {};
530 int ret;
531
532 ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
533 if (ret) {
534 ibdev_err(ibdev, "calc hem config failed!\n");
535 return ret;
536 }
537
538 mutex_lock(&table->mutex);
539 if (table->hem[index.buf]) {
540 refcount_inc(&table->hem[index.buf]->refcount);
541 goto out;
542 }
543
544 ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
545 if (ret) {
546 ibdev_err(ibdev, "alloc mhop hem failed!\n");
547 goto out;
548 }
549
550 /* set HEM base address to hardware */
551 if (table->type < HEM_TYPE_MTT) {
552 ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
553 if (ret) {
554 ibdev_err(ibdev, "set HEM address to HW failed!\n");
555 goto err_alloc;
556 }
557 }
558
559 refcount_set(&table->hem[index.buf]->refcount, 1);
560 goto out;
561
562 err_alloc:
563 free_mhop_hem(hr_dev, table, &mhop, &index);
564 out:
565 mutex_unlock(&table->mutex);
566 return ret;
567 }
568
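/*
 * Typical usage, as an illustrative sketch only (real callers such as the QP
 * and CQ setup paths pass their own table and object index; 'qpn' here stands
 * for any object index):
 *
 *	ret = hns_roce_table_get(hr_dev, &hr_dev->qp_table.qp_table, qpn);
 *	if (ret)
 *		return ret;
 *	...use the context entry...
 *	hns_roce_table_put(hr_dev, &hr_dev->qp_table.qp_table, qpn);
 */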
569 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
570 struct hns_roce_hem_table *table, unsigned long obj)
571 {
572 struct device *dev = hr_dev->dev;
573 unsigned long i;
574 int ret = 0;
575
576 if (hns_roce_check_whether_mhop(hr_dev, table->type))
577 return hns_roce_table_mhop_get(hr_dev, table, obj);
578
579 i = obj / (table->table_chunk_size / table->obj_size);
580
581 mutex_lock(&table->mutex);
582
583 if (table->hem[i]) {
584 refcount_inc(&table->hem[i]->refcount);
585 goto out;
586 }
587
588 table->hem[i] = hns_roce_alloc_hem(hr_dev,
589 table->table_chunk_size >> PAGE_SHIFT,
590 table->table_chunk_size,
591 (table->lowmem ? GFP_KERNEL :
592 GFP_HIGHUSER) | __GFP_NOWARN);
593 if (!table->hem[i]) {
594 ret = -ENOMEM;
595 goto out;
596 }
597
598 /* Set HEM base address (128K/page, pa) to hardware */
599 if (hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT)) {
600 hns_roce_free_hem(hr_dev, table->hem[i]);
601 table->hem[i] = NULL;
602 ret = -ENODEV;
603 dev_err(dev, "set HEM base address to HW failed.\n");
604 goto out;
605 }
606
607 refcount_set(&table->hem[i]->refcount, 1);
608 out:
609 mutex_unlock(&table->mutex);
610 return ret;
611 }
612
613 static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
614 struct hns_roce_hem_table *table, unsigned long obj,
615 struct hns_roce_hem_mhop *mhop,
616 struct hns_roce_hem_index *index)
617 {
618 struct ib_device *ibdev = &hr_dev->ib_dev;
619 u32 hop_num = mhop->hop_num;
620 u32 chunk_ba_num;
621 u32 step_idx;
622
623 index->inited = HEM_INDEX_BUF;
624 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
625 if (check_whether_bt_num_2(table->type, hop_num)) {
626 if (hns_roce_check_hem_null(table->hem, index->buf,
627 chunk_ba_num, table->num_hem))
628 index->inited |= HEM_INDEX_L0;
629 } else if (check_whether_bt_num_3(table->type, hop_num)) {
630 if (hns_roce_check_hem_null(table->hem, index->buf,
631 chunk_ba_num, table->num_hem)) {
632 index->inited |= HEM_INDEX_L1;
633 if (hns_roce_check_bt_null(table->bt_l1, index->l1,
634 chunk_ba_num))
635 index->inited |= HEM_INDEX_L0;
636 }
637 }
638
639 if (table->type < HEM_TYPE_MTT) {
640 if (hop_num == HNS_ROCE_HOP_NUM_0)
641 step_idx = 0;
642 else
643 step_idx = hop_num;
644
645 if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
646 ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num);
647
648 if (index->inited & HEM_INDEX_L1)
649 if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
650 ibdev_warn(ibdev, "failed to clear HEM step 1.\n");
651
652 if (index->inited & HEM_INDEX_L0)
653 if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
654 ibdev_warn(ibdev, "failed to clear HEM step 0.\n");
655 }
656 }
657
658 static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
659 struct hns_roce_hem_table *table,
660 unsigned long obj,
661 int check_refcount)
662 {
663 struct ib_device *ibdev = &hr_dev->ib_dev;
664 struct hns_roce_hem_index index = {};
665 struct hns_roce_hem_mhop mhop = {};
666 int ret;
667
668 ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
669 if (ret) {
670 ibdev_err(ibdev, "calc hem config failed!\n");
671 return;
672 }
673
674 if (!check_refcount)
675 mutex_lock(&table->mutex);
676 else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
677 &table->mutex))
678 return;
679
680 clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
681 free_mhop_hem(hr_dev, table, &mhop, &index);
682
683 mutex_unlock(&table->mutex);
684 }
685
686 void hns_roce_table_put(struct hns_roce_dev *hr_dev,
687 struct hns_roce_hem_table *table, unsigned long obj)
688 {
689 struct device *dev = hr_dev->dev;
690 unsigned long i;
691
692 if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
693 hns_roce_table_mhop_put(hr_dev, table, obj, 1);
694 return;
695 }
696
697 i = obj / (table->table_chunk_size / table->obj_size);
698
699 if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
700 &table->mutex))
701 return;
702
703 if (hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT))
704 dev_warn(dev, "failed to clear HEM base address.\n");
705
706 hns_roce_free_hem(hr_dev, table->hem[i]);
707 table->hem[i] = NULL;
708
709 mutex_unlock(&table->mutex);
710 }
711
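/*
 * Look up the kernel virtual address of object 'obj' in a lowmem table and,
 * if dma_handle is not NULL, also return its DMA address. Returns NULL for
 * highmem tables or when the backing HEM has not been allocated.
 */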
712 void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
713 struct hns_roce_hem_table *table,
714 unsigned long obj, dma_addr_t *dma_handle)
715 {
716 struct hns_roce_hem_chunk *chunk;
717 struct hns_roce_hem_mhop mhop;
718 struct hns_roce_hem *hem;
719 unsigned long mhop_obj = obj;
720 unsigned long obj_per_chunk;
721 unsigned long idx_offset;
722 int offset, dma_offset;
723 void *addr = NULL;
724 u32 hem_idx = 0;
725 int length;
726 int i, j;
727
728 if (!table->lowmem)
729 return NULL;
730
731 mutex_lock(&table->mutex);
732
733 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
734 obj_per_chunk = table->table_chunk_size / table->obj_size;
735 hem = table->hem[obj / obj_per_chunk];
736 idx_offset = obj % obj_per_chunk;
737 dma_offset = offset = idx_offset * table->obj_size;
738 } else {
739 u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
740
741 if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
742 goto out;
743 /* mtt mhop */
744 i = mhop.l0_idx;
745 j = mhop.l1_idx;
746 if (mhop.hop_num == 2)
747 hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
748 else if (mhop.hop_num == 1 ||
749 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
750 hem_idx = i;
751
752 hem = table->hem[hem_idx];
753 dma_offset = offset = obj * seg_size % mhop.bt_chunk_size;
754 if (mhop.hop_num == 2)
755 dma_offset = offset = 0;
756 }
757
758 if (!hem)
759 goto out;
760
761 list_for_each_entry(chunk, &hem->chunk_list, list) {
762 for (i = 0; i < chunk->npages; ++i) {
763 length = sg_dma_len(&chunk->mem[i]);
764 if (dma_handle && dma_offset >= 0) {
765 if (length > (u32)dma_offset)
766 *dma_handle = sg_dma_address(
767 &chunk->mem[i]) + dma_offset;
768 dma_offset -= length;
769 }
770
771 if (length > (u32)offset) {
772 addr = chunk->buf[i] + offset;
773 goto out;
774 }
775 offset -= length;
776 }
777 }
778
779 out:
780 mutex_unlock(&table->mutex);
781 return addr;
782 }
783
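/*
 * Allocate the bookkeeping arrays for a HEM table: the hem[] pointer array
 * and, for multi-hop tables, the L0/L1 BT pointer and DMA address arrays.
 * No HEM memory is allocated here; that happens on demand in
 * hns_roce_table_get().
 */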
784 int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
785 struct hns_roce_hem_table *table, u32 type,
786 unsigned long obj_size, unsigned long nobj,
787 int use_lowmem)
788 {
789 unsigned long obj_per_chunk;
790 unsigned long num_hem;
791
792 if (!hns_roce_check_whether_mhop(hr_dev, type)) {
793 table->table_chunk_size = hr_dev->caps.chunk_sz;
794 obj_per_chunk = table->table_chunk_size / obj_size;
795 num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
796
797 table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
798 if (!table->hem)
799 return -ENOMEM;
800 } else {
801 struct hns_roce_hem_mhop mhop = {};
802 unsigned long buf_chunk_size;
803 unsigned long bt_chunk_size;
804 unsigned long bt_chunk_num;
805 unsigned long num_bt_l0;
806 u32 hop_num;
807
808 if (get_hem_table_config(hr_dev, &mhop, type))
809 return -EINVAL;
810
811 buf_chunk_size = mhop.buf_chunk_size;
812 bt_chunk_size = mhop.bt_chunk_size;
813 num_bt_l0 = mhop.ba_l0_num;
814 hop_num = mhop.hop_num;
815
816 obj_per_chunk = buf_chunk_size / obj_size;
817 num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
818 bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
819
820 if (type >= HEM_TYPE_MTT)
821 num_bt_l0 = bt_chunk_num;
822
823 table->hem = kcalloc(num_hem, sizeof(*table->hem),
824 GFP_KERNEL);
825 if (!table->hem)
826 goto err_kcalloc_hem_buf;
827
828 if (check_whether_bt_num_3(type, hop_num)) {
829 unsigned long num_bt_l1;
830
831 num_bt_l1 = DIV_ROUND_UP(num_hem, bt_chunk_num);
832 table->bt_l1 = kcalloc(num_bt_l1,
833 sizeof(*table->bt_l1),
834 GFP_KERNEL);
835 if (!table->bt_l1)
836 goto err_kcalloc_bt_l1;
837
838 table->bt_l1_dma_addr = kcalloc(num_bt_l1,
839 sizeof(*table->bt_l1_dma_addr),
840 GFP_KERNEL);
841
842 if (!table->bt_l1_dma_addr)
843 goto err_kcalloc_l1_dma;
844 }
845
846 if (check_whether_bt_num_2(type, hop_num) ||
847 check_whether_bt_num_3(type, hop_num)) {
848 table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
849 GFP_KERNEL);
850 if (!table->bt_l0)
851 goto err_kcalloc_bt_l0;
852
853 table->bt_l0_dma_addr = kcalloc(num_bt_l0,
854 sizeof(*table->bt_l0_dma_addr),
855 GFP_KERNEL);
856 if (!table->bt_l0_dma_addr)
857 goto err_kcalloc_l0_dma;
858 }
859 }
860
861 table->type = type;
862 table->num_hem = num_hem;
863 table->obj_size = obj_size;
864 table->lowmem = use_lowmem;
865 mutex_init(&table->mutex);
866
867 return 0;
868
869 err_kcalloc_l0_dma:
870 kfree(table->bt_l0);
871 table->bt_l0 = NULL;
872
873 err_kcalloc_bt_l0:
874 kfree(table->bt_l1_dma_addr);
875 table->bt_l1_dma_addr = NULL;
876
877 err_kcalloc_l1_dma:
878 kfree(table->bt_l1);
879 table->bt_l1 = NULL;
880
881 err_kcalloc_bt_l1:
882 kfree(table->hem);
883 table->hem = NULL;
884
885 err_kcalloc_hem_buf:
886 return -ENOMEM;
887 }
888
889 static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
890 struct hns_roce_hem_table *table)
891 {
892 struct hns_roce_hem_mhop mhop;
893 u32 buf_chunk_size;
894 u64 obj;
895 int i;
896
897 if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
898 return;
899 buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
900 mhop.bt_chunk_size;
901
902 for (i = 0; i < table->num_hem; ++i) {
903 obj = i * buf_chunk_size / table->obj_size;
904 if (table->hem[i])
905 hns_roce_table_mhop_put(hr_dev, table, obj, 0);
906 }
907
908 kfree(table->hem);
909 table->hem = NULL;
910 kfree(table->bt_l1);
911 table->bt_l1 = NULL;
912 kfree(table->bt_l1_dma_addr);
913 table->bt_l1_dma_addr = NULL;
914 kfree(table->bt_l0);
915 table->bt_l0 = NULL;
916 kfree(table->bt_l0_dma_addr);
917 table->bt_l0_dma_addr = NULL;
918 }
919
920 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
921 struct hns_roce_hem_table *table)
922 {
923 struct device *dev = hr_dev->dev;
924 unsigned long i;
925
926 if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
927 hns_roce_cleanup_mhop_hem_table(hr_dev, table);
928 return;
929 }
930
931 for (i = 0; i < table->num_hem; ++i)
932 if (table->hem[i]) {
933 if (hr_dev->hw->clear_hem(hr_dev, table,
934 i * table->table_chunk_size / table->obj_size, 0))
935 dev_err(dev, "Clear HEM base address failed.\n");
936
937 hns_roce_free_hem(hr_dev, table->hem[i]);
938 }
939
940 kfree(table->hem);
941 }
942
943 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
944 {
945 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
946 hns_roce_cleanup_hem_table(hr_dev,
947 &hr_dev->srq_table.table);
948 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
949 if (hr_dev->caps.qpc_timer_entry_sz)
950 hns_roce_cleanup_hem_table(hr_dev,
951 &hr_dev->qpc_timer_table);
952 if (hr_dev->caps.cqc_timer_entry_sz)
953 hns_roce_cleanup_hem_table(hr_dev,
954 &hr_dev->cqc_timer_table);
955 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
956 hns_roce_cleanup_hem_table(hr_dev,
957 &hr_dev->qp_table.sccc_table);
958 if (hr_dev->caps.trrl_entry_sz)
959 hns_roce_cleanup_hem_table(hr_dev,
960 &hr_dev->qp_table.trrl_table);
961
962 if (hr_dev->caps.gmv_entry_sz)
963 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);
964
965 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
966 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
967 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
968 }
969
970 struct hns_roce_hem_item {
971 struct list_head list; /* link all hems in the same bt level */
972 struct list_head sibling; /* link all hems in last hop for mtt */
973 void *addr;
974 dma_addr_t dma_addr;
975 size_t count; /* max ba numbers */
976 int start; /* start buf offset in this hem */
977 int end; /* end buf offset in this hem */
978 };
979
980 /* All HEM items are linked in a tree structure */
981 struct hns_roce_hem_head {
982 struct list_head branch[HNS_ROCE_MAX_BT_REGION];
983 struct list_head root;
984 struct list_head leaf;
985 };
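/*
 * While a root BT is being built, hns_roce_hem_head temporarily collects the
 * new items: 'root' holds the root BT itself, 'branch[i]' holds the items
 * created for region i, and 'leaf' links the bottom-level items. On success
 * they are spliced into hem_list->root_bt, mid_bt[i][0] and btm_bt.
 */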
986
987 static struct hns_roce_hem_item *
988 hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
989 bool exist_bt, int bt_level)
990 {
991 struct hns_roce_hem_item *hem;
992
993 hem = kzalloc(sizeof(*hem), GFP_KERNEL);
994 if (!hem)
995 return NULL;
996
997 if (exist_bt) {
998 hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
999 &hem->dma_addr, GFP_KERNEL);
1000 if (!hem->addr) {
1001 kfree(hem);
1002 return NULL;
1003 }
1004 }
1005
1006 hem->count = count;
1007 hem->start = start;
1008 hem->end = end;
1009 INIT_LIST_HEAD(&hem->list);
1010 INIT_LIST_HEAD(&hem->sibling);
1011
1012 return hem;
1013 }
1014
1015 static void hem_list_free_item(struct hns_roce_dev *hr_dev,
1016 struct hns_roce_hem_item *hem, bool exist_bt)
1017 {
1018 if (exist_bt)
1019 dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
1020 hem->addr, hem->dma_addr);
1021 kfree(hem);
1022 }
1023
1024 static void hem_list_free_all(struct hns_roce_dev *hr_dev,
1025 struct list_head *head, bool exist_bt)
1026 {
1027 struct hns_roce_hem_item *hem, *temp_hem;
1028
1029 list_for_each_entry_safe(hem, temp_hem, head, list) {
1030 list_del(&hem->list);
1031 hem_list_free_item(hr_dev, hem, exist_bt);
1032 }
1033 }
1034
1035 static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
1036 u64 table_addr)
1037 {
1038 *(u64 *)(base_addr) = table_addr;
1039 }
1040
1041 /* assign L0 table address to hem from root bt */
1042 static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
1043 struct hns_roce_hem_item *hem, void *cpu_addr,
1044 u64 phy_addr)
1045 {
1046 hem->addr = cpu_addr;
1047 hem->dma_addr = (dma_addr_t)phy_addr;
1048 }
1049
1050 static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
1051 int offset)
1052 {
1053 return (hem->start <= offset && offset <= hem->end);
1054 }
1055
1056 static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
1057 int page_offset)
1058 {
1059 struct hns_roce_hem_item *hem, *temp_hem;
1060 struct hns_roce_hem_item *found = NULL;
1061
1062 list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
1063 if (hem_list_page_is_in_range(hem, page_offset)) {
1064 found = hem;
1065 break;
1066 }
1067 }
1068
1069 return found;
1070 }
1071
1072 static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
1073 {
1074 /*
1075 * hopnum base address table levels
1076 * 0 L0(buf)
1077 * 1 L0 -> buf
1078 * 2 L0 -> L1 -> buf
1079 * 3 L0 -> L1 -> L2 -> buf
1080 */
1081 return bt_level >= (hopnum ? hopnum - 1 : hopnum);
1082 }
1083
1084 /*
1085 * calc base address entries num
1086 * @hopnum: num of multihop addressing
1087 * @bt_level: base address table level
1088 * @unit: ba entries per bt page
1089 */
1090 static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
1091 {
1092 u32 step;
1093 int max;
1094 int i;
1095
1096 if (hopnum <= bt_level)
1097 return 0;
1098 /*
1099 * hopnum bt_level range
1100 * 1 0 unit
1101 * ------------
1102 * 2 0 unit * unit
1103 * 2 1 unit
1104 * ------------
1105 * 3 0 unit * unit * unit
1106 * 3 1 unit * unit
1107 * 3 2 unit
1108 */
1109 step = 1;
1110 max = hopnum - bt_level;
1111 for (i = 0; i < max; i++)
1112 step = step * unit;
1113
1114 return step;
1115 }
1116
1117 /*
1118 * calc the root ba entries which could cover all regions
1119 * @regions: buf region array
1120 * @region_cnt: array size of @regions
1121 * @unit: ba entries per bt page
1122 */
1123 int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
1124 int region_cnt, int unit)
1125 {
1126 struct hns_roce_buf_region *r;
1127 int total = 0;
1128 int step;
1129 int i;
1130
1131 for (i = 0; i < region_cnt; i++) {
1132 r = (struct hns_roce_buf_region *)&regions[i];
1133 if (r->hopnum > 1) {
1134 step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1135 if (step > 0)
1136 total += (r->count + step - 1) / step;
1137 } else {
1138 total += r->count;
1139 }
1140 }
1141
1142 return total;
1143 }
1144
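/*
 * For a region with hopnum > 1, allocate the intermediate BTs (level 1 down
 * to the bottom level) that cover 'offset', link each new BT into its parent,
 * and put bottom-level BTs on 'btm_bt' for later MTT lookups.
 */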
1145 static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
1146 const struct hns_roce_buf_region *r, int unit,
1147 int offset, struct list_head *mid_bt,
1148 struct list_head *btm_bt)
1149 {
1150 struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
1151 struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
1152 struct hns_roce_hem_item *cur, *pre;
1153 const int hopnum = r->hopnum;
1154 int start_aligned;
1155 int distance;
1156 int ret = 0;
1157 int max_ofs;
1158 int level;
1159 u32 step;
1160 int end;
1161
1162 if (hopnum <= 1)
1163 return 0;
1164
1165 if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
1166 dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
1167 return -EINVAL;
1168 }
1169
1170 if (offset < r->offset) {
1171 dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
1172 offset, r->offset);
1173 return -EINVAL;
1174 }
1175
1176 distance = offset - r->offset;
1177 max_ofs = r->offset + r->count - 1;
1178 for (level = 0; level < hopnum; level++)
1179 INIT_LIST_HEAD(&temp_list[level]);
1180
1181 /* configure the L1 bt down to the last bt and link each to its parent */
1182 for (level = 1; level < hopnum; level++) {
1183 cur = hem_list_search_item(&mid_bt[level], offset);
1184 if (cur) {
1185 hem_ptrs[level] = cur;
1186 continue;
1187 }
1188
1189 step = hem_list_calc_ba_range(hopnum, level, unit);
1190 if (step < 1) {
1191 ret = -EINVAL;
1192 goto err_exit;
1193 }
1194
1195 start_aligned = (distance / step) * step + r->offset;
1196 end = min_t(int, start_aligned + step - 1, max_ofs);
1197 cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
1198 true, level);
1199 if (!cur) {
1200 ret = -ENOMEM;
1201 goto err_exit;
1202 }
1203 hem_ptrs[level] = cur;
1204 list_add(&cur->list, &temp_list[level]);
1205 if (hem_list_is_bottom_bt(hopnum, level))
1206 list_add(&cur->sibling, &temp_list[0]);
1207
1208 /* link bt to parent bt */
1209 if (level > 1) {
1210 pre = hem_ptrs[level - 1];
1211 step = (cur->start - pre->start) / step * BA_BYTE_LEN;
1212 hem_list_link_bt(hr_dev, pre->addr + step,
1213 cur->dma_addr);
1214 }
1215 }
1216
1217 list_splice(&temp_list[0], btm_bt);
1218 for (level = 1; level < hopnum; level++)
1219 list_splice(&temp_list[level], &mid_bt[level]);
1220
1221 return 0;
1222
1223 err_exit:
1224 for (level = 1; level < hopnum; level++)
1225 hem_list_free_all(hr_dev, &temp_list[level], true);
1226
1227 return ret;
1228 }
1229
1230 static struct hns_roce_hem_item *
1231 alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
1232 const struct hns_roce_buf_region *regions, int region_cnt)
1233 {
1234 const struct hns_roce_buf_region *r;
1235 struct hns_roce_hem_item *hem;
1236 int ba_num;
1237 int offset;
1238
1239 ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
1240 if (ba_num < 1)
1241 return ERR_PTR(-ENOMEM);
1242
1243 if (ba_num > unit)
1244 return ERR_PTR(-ENOBUFS);
1245
1246 offset = regions[0].offset;
1247 /* point to the last region */
1248 r = &regions[region_cnt - 1];
1249 hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
1250 ba_num, true, 0);
1251 if (!hem)
1252 return ERR_PTR(-ENOMEM);
1253
1254 *max_ba_num = ba_num;
1255
1256 return hem;
1257 }
1258
1259 static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
1260 u64 phy_base, const struct hns_roce_buf_region *r,
1261 struct list_head *branch_head,
1262 struct list_head *leaf_head)
1263 {
1264 struct hns_roce_hem_item *hem;
1265
1266 hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
1267 r->count, false, 0);
1268 if (!hem)
1269 return -ENOMEM;
1270
1271 hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
1272 list_add(&hem->list, branch_head);
1273 list_add(&hem->sibling, leaf_head);
1274
1275 return r->count;
1276 }
1277
1278 static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
1279 int unit, const struct hns_roce_buf_region *r,
1280 const struct list_head *branch_head)
1281 {
1282 struct hns_roce_hem_item *hem, *temp_hem;
1283 int total = 0;
1284 int offset;
1285 int step;
1286
1287 step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1288 if (step < 1)
1289 return -EINVAL;
1290
1291 /* if a mid bt exists, link L1 to L0 */
1292 list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
1293 offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
1294 hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
1295 total++;
1296 }
1297
1298 return total;
1299 }
1300
1301 static int
1302 setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
1303 int unit, int max_ba_num, struct hns_roce_hem_head *head,
1304 const struct hns_roce_buf_region *regions, int region_cnt)
1305 {
1306 const struct hns_roce_buf_region *r;
1307 struct hns_roce_hem_item *root_hem;
1308 void *cpu_base;
1309 u64 phy_base;
1310 int i, total;
1311 int ret;
1312
1313 root_hem = list_first_entry(&head->root,
1314 struct hns_roce_hem_item, list);
1315 if (!root_hem)
1316 return -ENOMEM;
1317
1318 total = 0;
1319 for (i = 0; i < region_cnt && total < max_ba_num; i++) {
1320 r = &regions[i];
1321 if (!r->count)
1322 continue;
1323
1324 /* all regions' mid[x][0] tables share the root_bt's trunk */
1325 cpu_base = root_hem->addr + total * BA_BYTE_LEN;
1326 phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;
1327
1328 /* if hopnum is 0 or 1, cut a new fake hem from the root bt,
1329 * whose address is shared by all regions.
1330 */
1331 if (hem_list_is_bottom_bt(r->hopnum, 0))
1332 ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
1333 &head->branch[i], &head->leaf);
1334 else
1335 ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
1336 &hem_list->mid_bt[i][1]);
1337
1338 if (ret < 0)
1339 return ret;
1340
1341 total += ret;
1342 }
1343
1344 list_splice(&head->leaf, &hem_list->btm_bt);
1345 list_splice(&head->root, &hem_list->root_bt);
1346 for (i = 0; i < region_cnt; i++)
1347 list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);
1348
1349 return 0;
1350 }
1351
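/*
 * Allocate one root BT that covers all regions and hook every region into it:
 * regions with hopnum 0 or 1 get a fake item pointing straight into the root
 * BT, while the others get their existing L1 BTs linked into the root
 * entries.
 */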
1352 static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
1353 struct hns_roce_hem_list *hem_list, int unit,
1354 const struct hns_roce_buf_region *regions,
1355 int region_cnt)
1356 {
1357 struct hns_roce_hem_item *root_hem;
1358 struct hns_roce_hem_head head;
1359 int max_ba_num;
1360 int ret;
1361 int i;
1362
1363 root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
1364 if (root_hem)
1365 return 0;
1366
1367 max_ba_num = 0;
1368 root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
1369 region_cnt);
1370 if (IS_ERR(root_hem))
1371 return PTR_ERR(root_hem);
1372
1373 /* List head for storing all allocated HEM items */
1374 INIT_LIST_HEAD(&head.root);
1375 INIT_LIST_HEAD(&head.leaf);
1376 for (i = 0; i < region_cnt; i++)
1377 INIT_LIST_HEAD(&head.branch[i]);
1378
1379 hem_list->root_ba = root_hem->dma_addr;
1380 list_add(&root_hem->list, &head.root);
1381 ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
1382 region_cnt);
1383 if (ret) {
1384 for (i = 0; i < region_cnt; i++)
1385 hem_list_free_all(hr_dev, &head.branch[i], false);
1386
1387 hem_list_free_all(hr_dev, &head.root, true);
1388 }
1389
1390 return ret;
1391 }
1392
1393 /* construct the base address table and link them by address hop config */
1394 int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
1395 struct hns_roce_hem_list *hem_list,
1396 const struct hns_roce_buf_region *regions,
1397 int region_cnt, unsigned int bt_pg_shift)
1398 {
1399 const struct hns_roce_buf_region *r;
1400 int ofs, end;
1401 int unit;
1402 int ret;
1403 int i;
1404
1405 if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
1406 dev_err(hr_dev->dev, "invalid region region_cnt %d!\n",
1407 region_cnt);
1408 return -EINVAL;
1409 }
1410
1411 unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
1412 for (i = 0; i < region_cnt; i++) {
1413 r = &regions[i];
1414 if (!r->count)
1415 continue;
1416
1417 end = r->offset + r->count;
1418 for (ofs = r->offset; ofs < end; ofs += unit) {
1419 ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
1420 hem_list->mid_bt[i],
1421 &hem_list->btm_bt);
1422 if (ret) {
1423 dev_err(hr_dev->dev,
1424 "alloc hem trunk fail ret=%d!\n", ret);
1425 goto err_alloc;
1426 }
1427 }
1428 }
1429
1430 ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
1431 region_cnt);
1432 if (ret)
1433 dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret);
1434 else
1435 return 0;
1436
1437 err_alloc:
1438 hns_roce_hem_list_release(hr_dev, hem_list);
1439
1440 return ret;
1441 }
1442
1443 void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
1444 struct hns_roce_hem_list *hem_list)
1445 {
1446 int i, j;
1447
1448 for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1449 for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1450 hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
1451 j != 0);
1452
1453 hem_list_free_all(hr_dev, &hem_list->root_bt, true);
1454 INIT_LIST_HEAD(&hem_list->btm_bt);
1455 hem_list->root_ba = 0;
1456 }
1457
1458 void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
1459 {
1460 int i, j;
1461
1462 INIT_LIST_HEAD(&hem_list->root_bt);
1463 INIT_LIST_HEAD(&hem_list->btm_bt);
1464 for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1465 for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1466 INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
1467 }
1468
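/*
 * Find the bottom-level BT that covers 'offset' and return its CPU address at
 * that offset; optionally also return the DMA address and the number of
 * consecutive entries (mtt_cnt) available from 'offset' to the end of that BT.
 */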
1469 void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
1470 struct hns_roce_hem_list *hem_list,
1471 int offset, int *mtt_cnt, u64 *phy_addr)
1472 {
1473 struct list_head *head = &hem_list->btm_bt;
1474 struct hns_roce_hem_item *hem, *temp_hem;
1475 void *cpu_base = NULL;
1476 u64 phy_base = 0;
1477 int nr = 0;
1478
1479 list_for_each_entry_safe(hem, temp_hem, head, sibling) {
1480 if (hem_list_page_is_in_range(hem, offset)) {
1481 nr = offset - hem->start;
1482 cpu_base = hem->addr + nr * BA_BYTE_LEN;
1483 phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
1484 nr = hem->end + 1 - offset;
1485 break;
1486 }
1487 }
1488
1489 if (mtt_cnt)
1490 *mtt_cnt = nr;
1491
1492 if (phy_addr)
1493 *phy_addr = phy_base;
1494
1495 return cpu_base;
1496 }
1497