// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2018 Intel Corporation.
 */
#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq);
static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
				 const struct mmu_notifier_range *range,
				 unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
static void __clear_tid_node(struct hfi1_filedata *fd,
			     struct tid_rb_node *node);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static const struct mmu_interval_notifier_ops tid_mn_ops = {
	.invalidate = tid_rb_invalidate,
};
static const struct mmu_interval_notifier_ops tid_cover_ops = {
	.invalidate = tid_cover_invalidate,
};
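
/*
 * Descriptive note (derived from the code below): tid_mn_ops is attached to
 * each programmed TID node, and tid_rb_invalidate() tears down the matching
 * RcvArray entry when that node's pages are unmapped. tid_cover_ops is
 * attached to the whole candidate buffer for the duration of
 * hfi1_user_exp_rcv_setup() so that an unmap racing with setup can be
 * detected via mmu_interval_read_begin()/mmu_interval_read_retry().
 */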

/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}
		fd->use_mn = true;
	}

	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
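	/*
	 * Worked example (illustrative numbers only): with
	 * expected_count = 2048 and subctxt_cnt = 3, each subctxt gets a
	 * base share of 2048 / 3 = 682 entries, and the remainder of 2 is
	 * spread over the lowest-numbered subctxts, giving limits of
	 * 683, 683 and 682, which sum back to 2048.
	 */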
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->use_mn) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return ret;
}

void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	mutex_lock(&uctxt->exp_mutex);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
		unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
		unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
	mutex_unlock(&uctxt->exp_mutex);

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}

/*
 * Release pinned receive buffer pages.
 *
 * @mapped: true if the pages have been DMA mapped. false otherwise.
 * @idx: Index of the first page to unpin.
 * @npages: Number of pages to unpin.
 *
 * If the pages have been DMA mapped (indicated by the mapped parameter),
 * their info will be passed via a struct tid_rb_node. If they haven't been
 * mapped, their info will be passed via a struct tid_user_buf.
 */
static void unpin_rcv_pages(struct hfi1_filedata *fd,
			    struct tid_user_buf *tidbuf,
			    struct tid_rb_node *node,
			    unsigned int idx,
			    unsigned int npages,
			    bool mapped)
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;
	struct mm_struct *mm;

	if (mapped) {
		dma_unmap_single(&dd->pcidev->dev, node->dma_addr,
				 node->npages * PAGE_SIZE, DMA_FROM_DEVICE);
		pages = &node->pages[idx];
		mm = mm_from_tid_node(node);
	} else {
		pages = &tidbuf->pages[idx];
		mm = current->mm;
	}
	hfi1_release_user_pages(mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}

/*
 * Pin receive buffer pages.
 */
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
	int pinned;
	unsigned int npages;
	unsigned long vaddr = tidbuf->vaddr;
	struct page **pages = NULL;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	/* Get the number of pages the user buffer spans */
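	/*
	 * For example (illustrative only): with 4 KiB pages, a page-aligned
	 * buffer of 10000 bytes spans three pages.
	 */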
	npages = num_user_pages(vaddr, tidbuf->length);
	if (!npages)
		return -EINVAL;

	if (npages > fd->uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

	pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
	}
	tidbuf->pages = pages;
	tidbuf->npages = npages;
	fd->tid_n_pinned += pinned;
	return pinned;
}

/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
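/*
 * Worked example (illustrative numbers only, group size of 8 as above):
 * a buffer that reduces to 19 pagesets needs 19 / 8 = 2 complete groups,
 * so two groups are popped from tid_group_list, fully programmed, and
 * moved to tid_full_list. The remaining 3 pagesets are then programmed
 * into a group on tid_used_list (pulling one over from tid_group_list
 * first if tid_used_list is empty); that group stays on tid_used_list
 * because it is not full.
 */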
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int ngroups, pageidx = 0, pageset_count,
		     tididx = 0, mapped, mapped_pages = 0;
	u32 *tidlist = NULL;
	struct tid_user_buf *tidbuf;
	unsigned long mmu_seq = 0;

	if (!PAGE_ALIGNED(tinfo->vaddr))
		return -EINVAL;
	if (tinfo->length == 0)
		return -EINVAL;

	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
	if (!tidbuf)
		return -ENOMEM;

	mutex_init(&tidbuf->cover_mutex);
	tidbuf->vaddr = tinfo->vaddr;
	tidbuf->length = tinfo->length;
	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
				GFP_KERNEL);
	if (!tidbuf->psets) {
		ret = -ENOMEM;
		goto fail_release_mem;
	}

	if (fd->use_mn) {
		ret = mmu_interval_notifier_insert(
			&tidbuf->notifier, current->mm,
			tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
			&tid_cover_ops);
		if (ret)
			goto fail_release_mem;
		mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
	}

	pinned = pin_rcv_pages(fd, tidbuf);
	if (pinned <= 0) {
		ret = (pinned < 0) ? pinned : -ENOSPC;
		goto fail_unpin;
	}

	/* Find sets of physically contiguous pages */
	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

	/* Reserve the number of expected tids to be used. */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = tidbuf->n_psets;
	fd->tid_used += pageset_count;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count) {
		ret = -ENOSPC;
		goto fail_unreserve;
	}

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto fail_unreserve;
	}

	tididx = 0;

	/*
	 * From this point on, we are going to be using shared (between master
	 * and subcontexts) context resources. We need to take the lock.
	 */
	mutex_lock(&uctxt->exp_mutex);
	/*
	 * The first step is to program the RcvArray entries which are complete
	 * groups.
	 */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fd, tidbuf, grp,
				       pageidx, dd->rcv_entries.group_size,
				       tidlist, &tididx, &mapped);
		/*
		 * If there was a failure to program the RcvArray
		 * entries for the entire group, reset the grp fields
		 * and add the grp back to the free group list.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put in the partially used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * later on in the list that could fit all of them.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fd, tidbuf, grp,
					       pageidx, use, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;
				/* Check if we are done so we break out early */
				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * If ret is 0, we did not program any entries
				 * into this group, which can only happen if
				 * we've screwed up the accounting somewhere.
				 * Warn and try to continue.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_mutex);
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);

	/* fail if nothing was programmed, set error if none provided */
	if (tididx == 0) {
		if (ret >= 0)
			ret = -ENOSPC;
		goto fail_unreserve;
	}

	/* adjust reserved tid_used to actual count */
	spin_lock(&fd->tid_lock);
	fd->tid_used -= pageset_count - tididx;
	spin_unlock(&fd->tid_lock);

	/* unpin all pages not covered by a TID */
	unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
			false);

	if (fd->use_mn) {
		/* check for an invalidate during setup */
		bool fail = false;

		mutex_lock(&tidbuf->cover_mutex);
		fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
		mutex_unlock(&tidbuf->cover_mutex);

		if (fail) {
			ret = -EBUSY;
			goto fail_unprogram;
		}
	}

	tinfo->tidcnt = tididx;
	tinfo->length = mapped_pages * PAGE_SIZE;

	if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
			 tidlist, sizeof(tidlist[0]) * tididx)) {
		ret = -EFAULT;
		goto fail_unprogram;
	}

	if (fd->use_mn)
		mmu_interval_notifier_remove(&tidbuf->notifier);
	kfree(tidbuf->pages);
	kfree(tidbuf->psets);
	kfree(tidbuf);
	kfree(tidlist);
	return 0;

fail_unprogram:
	/* unprogram, unmap, and unpin all allocated TIDs */
	tinfo->tidlist = (unsigned long)tidlist;
	hfi1_user_exp_rcv_clear(fd, tinfo);
	tinfo->tidlist = 0;
	pinned = 0;		/* nothing left to unpin */
	pageset_count = 0;	/* nothing left reserved */
fail_unreserve:
	spin_lock(&fd->tid_lock);
	fd->tid_used -= pageset_count;
	spin_unlock(&fd->tid_lock);
fail_unpin:
	if (fd->use_mn)
		mmu_interval_notifier_remove(&tidbuf->notifier);
	if (pinned > 0)
		unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
fail_release_mem:
	kfree(tidbuf->pages);
	kfree(tidbuf->psets);
	kfree(tidbuf);
	kfree(tidlist);
	return ret;
}

int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_mutex);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx]);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_mutex);

	kfree(tidinfo);
	return ret;
}

int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(uctxt_offset(uctxt) + fd->subctxt);
	u32 *array;
	int ret = 0;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for a long time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -EFAULT;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user((void __user *)tinfo->tidlist,
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}

static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;
	struct page **pages = tidbuf->pages;
	struct tid_pageset *list = tidbuf->psets;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the pfn's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down
			 * into sizes supported by the HW.
			 * There are two main constraints:
			 * 1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *    If the total set size is bigger than that,
			 *    program only a MAX_EXPECTED_BUFFER chunk.
			 * 2. The buffer size has to be a power of two. If
			 *    it is not, round down to the closest power of
			 *    two and program that size.
			 */
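			/*
			 * Worked example (illustrative, assuming 4 KiB pages
			 * and MAX_EXPECTED_BUFFER large enough not to
			 * matter): a run of 13 contiguous pages (52 KiB) is
			 * split into pagesets of 8 pages (32 KiB), then
			 * 4 pages (16 KiB), then 1 page (4 KiB).
			 */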
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}

/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *	  virtual address, buffer length, page pointers, pagesets (array of
 *	  struct tid_pageset holding information on physically contiguous
 *	  chunks from the user buffer), and other fields.
 * @grp: RcvArray group
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *	     programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *	     entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = tbuf->psets[setidx].count;
		pageidx = tbuf->psets[setidx].idx;

		ret = set_rcvarray_entry(fd, tbuf,
					 rcventry, grp, pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}

static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;
	struct page **pages = tbuf->pages + pageidx;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(struct_size(node, pages, npages), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = dma_map_single(&dd->pcidev->dev, __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->fdata = fd;
	mutex_init(&node->invalidate_mutex);
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, flex_array_size(node, pages, npages));

	if (fd->use_mn) {
		ret = mmu_interval_notifier_insert(
			&node->notifier, current->mm,
			tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
			&tid_mn_ops);
		if (ret)
			goto out_unmap;
	}
	fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;

	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->notifier.interval_tree.start, node->phys,
			       phys);
	return 0;

out_unmap:
	hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
		  node->rcventry, node->notifier.interval_tree.start,
		  node->phys, ret);
	dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE,
			 DMA_FROM_DEVICE);
	kfree(node);
	return -EFAULT;
}

static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	if (tidctrl == 0x3)
		return -EINVAL;

	rcventry = tididx + (tidctrl - 1);
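	/*
	 * For example (illustrative values): a tidinfo with IDX 2 and
	 * CTRL 1 selects relative entry 2 * 2 + (1 - 1) = 4, while CTRL 2
	 * selects entry 5, i.e. the two halves of the same entry pair.
	 * CTRL 3 (both halves at once) is rejected above.
	 */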

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (fd->use_mn)
		mmu_interval_notifier_remove(&node->notifier);
	cacheless_tid_rb_remove(fd, node);

	return 0;
}

static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	mutex_lock(&node->invalidate_mutex);
	if (node->freed)
		goto done;
	node->freed = true;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages,
				 node->notifier.interval_tree.start, node->phys,
				 node->dma_addr);

	/* Make sure device has seen the write before pages are unpinned */
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
done:
	mutex_unlock(&node->invalidate_mutex);
}

static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	__clear_tid_node(fd, node);

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

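	/*
	 * Group bookkeeping, e.g. with a group size of 8 (illustrative):
	 * if the group was full, used just dropped from 8 to 7, so move it
	 * back from tid_full_list to tid_used_list; if this was the last
	 * used entry (used is now 0), return the group to tid_group_list.
	 */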
	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}

/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
						       uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				if (fd->use_mn)
					mmu_interval_notifier_remove(
						&node->notifier);
				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}

static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct tid_rb_node *node =
		container_of(mni, struct tid_rb_node, notifier);
	struct hfi1_filedata *fdata = node->fdata;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;

	if (node->freed)
		return true;

	/* take action only if unmapping */
	if (range->event != MMU_NOTIFY_UNMAP)
		return true;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
				 node->notifier.interval_tree.start,
				 node->rcventry, node->npages, node->dma_addr);

	/* clear the hardware rcvarray entry */
	__clear_tid_node(fdata, node);

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
				(uctxt_offset(uctxt) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return true;
}

static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
				 const struct mmu_notifier_range *range,
				 unsigned long cur_seq)
{
	struct tid_user_buf *tidbuf =
		container_of(mni, struct tid_user_buf, notifier);

	/* take action only if unmapping */
	if (range->event == MMU_NOTIFY_UNMAP) {
		mutex_lock(&tidbuf->cover_mutex);
		mmu_interval_set_seq(mni, cur_seq);
		mutex_unlock(&tidbuf->cover_mutex);
	}

	return true;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}