/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device       *ibdev;
	struct mm_struct       *owning_mm;
	u64 iova;
	size_t			length;
	unsigned long		address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct work_struct	work;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}
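
/*
 * Illustrative sketch (editorial addition, not part of the upstream API):
 * code holding a plain struct ib_umem pointer can test the is_dmabuf flag
 * before downcasting with to_ib_umem_dmabuf(), since the container_of()
 * is only meaningful for a dma-buf backed umem.  The helper name
 * umem_example_attachment() is hypothetical.
 */
static inline struct dma_buf_attachment *
umem_example_attachment(struct ib_umem *umem)
{
	if (!umem->is_dmabuf)
		return NULL;
	return to_ib_umem_dmabuf(umem)->attach;
}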

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

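/*
 * Editorial note (not from the upstream header): ib_umem_dma_offset()
 * computes where the umem start falls inside a HW page of size pgsz.
 * For example, with a first-entry DMA address of 0x20000, an in-page
 * offset of 0x345, and pgsz = 0x10000, the result is
 * (0x20000 + 0x345) & 0xffff = 0x345.
 */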
static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

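/*
 * Editorial note (not from the upstream header): worked example for
 * ib_umem_num_dma_blocks() below.  With iova = 0x12345, length = 0x10000,
 * and pgsz = 0x1000: ALIGN(0x22345, 0x1000) = 0x23000 and
 * ALIGN_DOWN(0x12345, 0x1000) = 0x12000, so the umem spans
 * (0x23000 - 0x12000) / 0x1000 = 17 DMA blocks.
 */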
static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
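
/*
 * Illustrative sketch (editorial addition, not part of the upstream API):
 * a driver filling a page-table array with one DMA address per block.
 * umem_example_fill_ptes() and the ptes buffer are hypothetical;
 * rdma_block_iter_dma_address() is provided by <rdma/ib_verbs.h>.
 */
static inline void umem_example_fill_ptes(struct ib_umem *umem,
					  unsigned long pgsz, u64 *ptes)
{
	struct ib_block_iter biter;
	size_t i = 0;

	/* Runs exactly ib_umem_num_dma_blocks(umem, pgsz) times */
	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		ptes[i++] = rdma_block_iter_dma_address(&biter);
}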

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
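
/*
 * Illustrative sketch (editorial addition, not part of the upstream API):
 * the usual registration flow of pinning, page-size selection, and
 * release.  example_reg() and EXAMPLE_HW_PGSZ_BITMAP are hypothetical,
 * and the sketch assumes <linux/err.h> and <linux/sizes.h> are available.
 */
#define EXAMPLE_HW_PGSZ_BITMAP (SZ_4K | SZ_64K | SZ_2M)

static inline int example_reg(struct ib_device *device, unsigned long start,
			      size_t length, unsigned long virt, int access)
{
	struct ib_umem *umem;
	unsigned long pgsz;

	umem = ib_umem_get(device, start, length, access);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* Pick the largest HW page size that can map this umem at virt */
	pgsz = ib_umem_find_best_pgsz(umem, EXAMPLE_HW_PGSZ_BITMAP, virt);
	if (!pgsz) {
		ib_umem_release(umem);
		return -EINVAL;
	}

	/* ... program ib_umem_num_dma_blocks(umem, pgsz) blocks here ... */
	ib_umem_release(umem);
	return 0;
}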

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
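
/*
 * Editorial note (not from the upstream header): for the example in the
 * comment above, HW with 64-byte-aligned offsets up to 4032 would pass
 * pgoff_bitmask = GENMASK(11, 6), i.e. 0b111111000000.
 */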

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
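
/*
 * Illustrative sketch (editorial addition, not part of the upstream API):
 * importing a dma-buf with the pinned variant, which needs no move_notify
 * handling, and treating the embedded umem like any other ib_umem.
 * example_import_pinned() is a hypothetical helper and the sketch assumes
 * <linux/err.h> is available.
 */
static inline long example_import_pinned(struct ib_device *device,
					 unsigned long offset, size_t size,
					 int fd, int access,
					 unsigned long pgsz)
{
	struct ib_umem_dmabuf *umem_dmabuf;
	size_t nblocks;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(device, offset, size, fd,
						access);
	if (IS_ERR(umem_dmabuf))
		return PTR_ERR(umem_dmabuf);

	/* &umem_dmabuf->umem behaves like a regular pinned ib_umem */
	nblocks = ib_umem_num_dma_blocks(&umem_dmabuf->umem, pgsz);

	ib_umem_dmabuf_release(umem_dmabuf);
	return nblocks;
}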

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem,
				    size_t offset, size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */