// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

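/* Encapsulated signals handles are reference-counted. The kref release
 * callbacks below differ only in whether they also put the handle's HW SOB
 * and/or its context, so the common tear-down logic is factored out here.
 */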
static void encaps_handle_do_release(struct hl_cs_encaps_sig_handle *handle, bool put_hw_sob,
					bool put_ctx)
{
	struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;

	if (put_hw_sob)
		hw_sob_put(handle->hw_sob);

	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, handle->id);
	spin_unlock(&mgr->lock);

	if (put_ctx)
		hl_ctx_put(handle->ctx);

	kfree(handle);
}

void hl_encaps_release_handle_and_put_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
		container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, false, true);
}

static void hl_encaps_release_handle_and_put_sob(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
		container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, false);
}

void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
		container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, true);
}

static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
{
	spin_lock_init(&mgr->lock);
	idr_init(&mgr->handles);
}

static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, struct hl_encaps_signals_mgr *mgr)
{
	struct hl_cs_encaps_sig_handle *handle;
	struct idr *idp;
	u32 id;

	idp = &mgr->handles;

	/* The IDR is expected to be empty at this stage, because any leftover signal should
	 * have been released as part of CS roll-back.
	 */
	if (!idr_is_empty(idp)) {
		dev_warn(hdev->dev,
			"device released while some encaps signals handles are still allocated\n");
		idr_for_each_entry(idp, handle, id)
			kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob);
	}

	idr_destroy(&mgr->handles);
}

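/* Tear down a context once its refcount drops to zero. The user-context path
 * also releases the ASID and the encapsulated-signals manager, while the
 * kernel-context path additionally finalizes the MMU.
 */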
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/* Release all allocated HW block mapped list entries and destroy
	 * the mutex.
	 */
	hl_hw_block_mem_fini(ctx);

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues, so we can safely remove it.
	 * This is because each CS increments the context's refcount and
	 * every completed CS decrements it, and we can't reach this
	 * function unless the refcount is 0.
	 */

	for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
		hl_fence_put(ctx->cs_pending[i]);

	kfree(ctx->cs_pending);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);

		/* The engines are stopped as there is no executing CS, but the
		 * Coresight might still be working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 */
		if (hdev->in_debug)
			hl_device_set_debug_mode(hdev, ctx, false);

		hdev->asic_funcs->ctx_fini(ctx);

		hl_dec_ctx_fini(ctx);

		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
		hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
	} else {
		dev_dbg(hdev->dev, "closing kernel context\n");
		hdev->asic_funcs->ctx_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_mmu_ctx_fini(ctx);
	}
}

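/* kref release callback for a context. Detaches the context from its owning
 * hpriv (if any) under the ctx_lock and drops the reference that
 * hl_ctx_create() took on that hpriv.
 */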
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv) {
		struct hl_fpriv *hpriv = ctx->hpriv;

		mutex_lock(&hpriv->ctx_lock);
		hpriv->ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		hl_hpriv_put(hpriv);
	}

	kfree(ctx);
}

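/* Allocate a new user context, publish it in the process' context manager
 * IDR and initialize it. On success, the context holds a reference on the
 * hpriv that owns it.
 */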
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&ctx_mgr->lock);
	rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&ctx_mgr->lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->is_compute_ctx_active = true;

	return 0;

remove_from_idr:
	mutex_lock(&ctx_mgr->lock);
	idr_remove(&ctx_mgr->handles, ctx->handle);
	mutex_unlock(&ctx_mgr->lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}

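/* Initialize the common context state and then either the kernel context,
 * which is assigned the reserved ASID 0, or a user context, for which a
 * free ASID is allocated.
 */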
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0, i;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;
	ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
				sizeof(struct hl_fence *),
				GFP_KERNEL);
	if (!ctx->cs_pending)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->outcome_store.used_list);
	INIT_LIST_HEAD(&ctx->outcome_store.free_list);
	hash_init(ctx->outcome_store.outcome_map);
	for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
		list_add(&ctx->outcome_store.nodes_pool[i].list_link,
			&ctx->outcome_store.free_list);

	hl_hw_block_mem_init(ctx);

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_vm_ctx_fini;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_cb_va_pool_fini;
		}

		hl_encaps_sig_mgr_init(&ctx->sig_mgr);

		dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);
err_hw_block_mem_fini:
	hl_hw_block_mem_fini(ctx);
	kfree(ctx->cs_pending);

	return rc;
}

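/* Take a reference only if the context is still alive. Returns 0 if the
 * refcount has already dropped to zero, i.e. the context is being released.
 */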
static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
{
	return kref_get_unless_zero(&ctx->refcount);
}

void hl_ctx_get(struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}

struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
{
	struct hl_ctx *ctx = NULL;
	struct hl_fpriv *hpriv;

	mutex_lock(&hdev->fpriv_list_lock);

	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
		mutex_lock(&hpriv->ctx_lock);
		ctx = hpriv->ctx;
		if (ctx && !hl_ctx_get_unless_zero(ctx))
			ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		/* There can only be a single user that has opened the compute device, so exit
		 * immediately once we find its context or if we see that it has been released
		 */
		break;
	}

	mutex_unlock(&hdev->fpriv_list_lock);

	return ctx;
}

/*
 * hl_ctx_get_fence_locked - get CS fence under CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * @return valid fence pointer on success, NULL if fence is gone, otherwise
 * error pointer.
 *
 * NOTE: this function must be called with the cs_lock held
 */
static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
	struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
	struct hl_fence *fence;

	if (seq >= ctx->cs_sequence)
		return ERR_PTR(-EINVAL);

	if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
		return NULL;

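	/* cs_pending acts as a circular buffer, so the fence slot is derived
	 * from the low bits of the sequence number. The mask is correct on
	 * the assumption that max_pending_cs is a power of two.
	 */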
	fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
	hl_fence_get(fence);
	return fence;
}

struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_fence *fence;

	spin_lock(&ctx->cs_lock);

	fence = hl_ctx_get_fence_locked(ctx, seq);

	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_get_fences - get multiple CS fences under the same CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq_arr: array of CS sequences to wait for
 * @fence: fence array to store the CS fences
 * @arr_len: length of seq_arr and fence
 *
 * @return 0 on success, otherwise non-zero error code
 */
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
				struct hl_fence **fence, u32 arr_len)
{
	struct hl_fence **fence_arr_base = fence;
	int i, rc = 0;

	spin_lock(&ctx->cs_lock);

	for (i = 0; i < arr_len; i++, fence++) {
		u64 seq = seq_arr[i];

		*fence = hl_ctx_get_fence_locked(ctx, seq);

		if (IS_ERR(*fence)) {
			dev_err(ctx->hdev->dev,
				"Failed to get fence for CS with seq 0x%llx\n",
				seq);
			rc = PTR_ERR(*fence);
			break;
		}
	}

	spin_unlock(&ctx->cs_lock);

	if (rc)
		hl_fences_put(fence_arr_base, i);

	return rc;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @ctx_mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
	mutex_init(&ctx_mgr->lock);
	idr_init(&ctx_mgr->handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @ctx_mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &ctx_mgr->handles;

	idr_for_each_entry(idp, ctx, id)
		kref_put(&ctx->refcount, hl_ctx_do_release);

	idr_destroy(&ctx_mgr->handles);
	mutex_destroy(&ctx_mgr->lock);
}