// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

/**
 * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
 *                        the buffer descriptor.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Find the buffer in the store and return a pointer to its descriptor.
 * Increase buffer refcount. If not found - return NULL.
 */
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
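	/*
	 * Recover the IDR lookup key from the handle by undoing the
	 * PAGE_SHIFT applied when the handle was built in
	 * hl_mmap_mem_buf_alloc().
	 */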
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_warn(mmg->dev,
			 "Buff get failed, no match to handle %#llx\n", handle);
		return NULL;
	}
	kref_get(&buf->refcount);
	spin_unlock(&mmg->lock);
	return buf;
}

/**
 * hl_mmap_mem_buf_destroy - destroy the unused buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Internal function, used as a final step of buffer release. Shall be invoked
 * only when the buffer is no longer in use (removed from idr). Will call the
 * release callback (if applicable), and free the memory.
 */
static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
{
	if (buf->behavior->release)
		buf->behavior->release(buf);

	kfree(buf);
}

/**
 * hl_mmap_mem_buf_release - release buffer
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used as a kref release callback, when the last user of
 * the buffer is released. Shall not be called from an interrupt context.
 */
static void hl_mmap_mem_buf_release(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

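	/* Remove the handle from the IDR under the manager lock, then free the buffer */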
	spin_lock(&buf->mmg->lock);
	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&buf->mmg->lock);

	hl_mmap_mem_buf_destroy(buf);
}

/**
 * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used for kref put by handle. Assumes mmg lock is taken.
 * Will remove the buffer from idr, without destroying it.
 */
static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
}

/**
 * hl_mmap_mem_buf_put - decrease the reference to the buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context.
 */
int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
{
	return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
}

/**
 * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
 *                              given handle.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context. Return -EINVAL if handle was
 * not found, else return the put outcome (0 or 1).
 */
int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev,
			 "Buff put failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

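	/*
	 * Drop the reference while still holding the lock: if it was the last
	 * one, the handle is removed from the IDR before the lock is released,
	 * so a concurrent hl_mmap_mem_buf_get() cannot find a buffer whose
	 * refcount already reached zero. The actual destruction is done
	 * outside the lock.
	 */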
	if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
		spin_unlock(&mmg->lock);
		hl_mmap_mem_buf_destroy(buf);
		return 1;
	}

	spin_unlock(&mmg->lock);
	return 0;
}

/**
 * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
 *
 * @mmg: parent unified memory manager
 * @behavior: behavior object describing this buffer's polymorphic behavior
 * @gfp: gfp flags to use for the memory allocations
 * @args: additional args passed to behavior->alloc
 *
 * Allocate and register a new memory buffer inside the given memory manager.
 * Return the pointer to the new buffer on success or NULL on failure.
 */
struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
		      struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
		      void *args)
{
	struct hl_mmap_mem_buf *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), gfp);
	if (!buf)
		return NULL;

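	/* The IDR entry is allocated under a spinlock, hence GFP_ATOMIC */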
	spin_lock(&mmg->lock);
	rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
	spin_unlock(&mmg->lock);
	if (rc < 0) {
		dev_err(mmg->dev,
			"%s: Failed to allocate IDR for a new buffer, rc=%d\n",
			behavior->topic, rc);
		goto free_buf;
	}

	buf->mmg = mmg;
	buf->behavior = behavior;
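	/*
	 * The handle combines the IDR id with the behavior's mem_id and is
	 * shifted by PAGE_SHIFT so that user-space can pass it back as the
	 * mmap() offset (see hl_mem_mgr_mmap()).
	 */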
	buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
	kref_init(&buf->refcount);

	rc = buf->behavior->alloc(buf, gfp, args);
	if (rc) {
		dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
			behavior->topic, rc);
		goto remove_idr;
	}

	return buf;

remove_idr:
	spin_lock(&mmg->lock);
	idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&mmg->lock);
free_buf:
	kfree(buf);
	return NULL;
}
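
/*
 * Illustrative usage sketch (not part of the driver): a caller provides an
 * hl_mmap_mem_buf_behavior with alloc/mmap/release callbacks and a topic
 * string, then allocates a buffer through this manager. The my_buf_* names,
 * the args payload and the mem_id value below are only examples.
 *
 *	static struct hl_mmap_mem_buf_behavior my_buf_behavior = {
 *		.topic = "MY_BUF",
 *		.mem_id = HL_MMAP_TYPE_CB,
 *		.alloc = my_buf_alloc,
 *		.mmap = my_buf_mmap,
 *		.release = my_buf_release,
 *	};
 *
 *	buf = hl_mmap_mem_buf_alloc(mmg, &my_buf_behavior, GFP_KERNEL, &my_args);
 *	if (!buf)
 *		return -ENOMEM;
 *
 * buf->handle is then the value user-space passes as the mmap() offset.
 */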

/**
 * hl_mmap_mem_buf_vm_close - handle mmap close
 *
 * @vma: the vma object for which mmap was closed.
 *
 * Put the memory buffer if it is no longer mapped.
 */
static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
{
	struct hl_mmap_mem_buf *buf =
		(struct hl_mmap_mem_buf *)vma->vm_private_data;
	long new_mmap_size;

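	/*
	 * A partial munmap() only shrinks the tracked mapping size; the
	 * buffer reference is dropped only once the entire mapping is gone.
	 */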
	new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		buf->real_mapped_size = new_mmap_size;
		return;
	}

	atomic_set(&buf->mmap, 0);
	hl_mmap_mem_buf_put(buf);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
	.close = hl_mmap_mem_buf_vm_close
};

/**
 * hl_mem_mgr_mmap - map the given buffer to the user
 *
 * @mmg: unified memory manager
 * @vma: the vma object for which mmap was requested.
 * @args: additional args passed to behavior->mmap
 *
 * Map the buffer specified by the vma->vm_pgoff to the given vma.
 */
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
		    void *args)
{
	struct hl_mmap_mem_buf *buf;
	u64 user_mem_size;
	u64 handle;
	int rc;

	/* We use the page offset to hold the buffer handle, so it must be
	 * cleared before doing the mmap itself
	 */
	handle = vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = 0;

	/* Reference was taken here */
	buf = hl_mmap_mem_buf_get(mmg, handle);
	if (!buf) {
		dev_err(mmg->dev,
			"Memory mmap failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_mem_size = vma->vm_end - vma->vm_start;
	if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
		dev_err(mmg->dev,
			"%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
			buf->behavior->topic, user_mem_size, buf->mappable_size);
		rc = -EINVAL;
		goto put_mem;
	}

#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
	if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#else
	if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#endif
		dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
			buf->behavior->topic, vma->vm_start);

		rc = -EINVAL;
		goto put_mem;
	}

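	/* A buffer may be mapped to user-space only once at a time */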
	if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
		dev_err(mmg->dev,
			"%s, Memory mmap failed, already mmaped to user\n",
			buf->behavior->topic);
		rc = -EINVAL;
		goto put_mem;
	}

	vma->vm_ops = &hl_mmap_mem_buf_vm_ops;

	/* Note: We're transferring the memory reference to vma->vm_private_data here. */

	vma->vm_private_data = buf;

	rc = buf->behavior->mmap(buf, vma, args);
	if (rc) {
		atomic_set(&buf->mmap, 0);
		goto put_mem;
	}

	buf->real_mapped_size = buf->mappable_size;
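	/* Restore the handle into vm_pgoff, which was cleared above before mapping */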
	vma->vm_pgoff = handle >> PAGE_SHIFT;

	return 0;

put_mem:
	hl_mmap_mem_buf_put(buf);
	return rc;
}

/**
 * hl_mem_mgr_init - initialize unified memory manager
 *
 * @dev: owner device pointer
 * @mmg: structure to initialize
 *
 * Initialize an instance of unified memory manager
 */
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
{
	mmg->dev = dev;
	spin_lock_init(&mmg->lock);
	idr_init(&mmg->handles);
}

/**
 * hl_mem_mgr_fini - release unified memory manager
 *
 * @mmg: parent unified memory manager
 *
 * Release the unified memory manager. Shall not be called from an interrupt context.
 */
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
{
	struct hl_mmap_mem_buf *buf;
	struct idr *idp;
	const char *topic;
	u32 id;

	idp = &mmg->handles;

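	/*
	 * Drop the initial reference each remaining buffer received in
	 * hl_mmap_mem_buf_alloc(); a buffer still holding extra references
	 * is reported below.
	 */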
	idr_for_each_entry(idp, buf, id) {
		topic = buf->behavior->topic;
		if (hl_mmap_mem_buf_put(buf) != 1)
			dev_err(mmg->dev,
				"%s: Buff handle %u for CTX is still alive\n",
				topic, id);
	}

	idr_destroy(&mmg->handles);
}