// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

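/*
 * hl_encaps_handle_do_release - free an encapsulated signals handle
 *
 * @ref: kref embedded in the handle
 *
 * Removes the handle from the context's signals manager IDR, drops the
 * reference taken on the context and frees the handle memory.
 */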
void hl_encaps_handle_do_release(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
		container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
	struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;

	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, handle->id);
	spin_unlock(&mgr->lock);

	hl_ctx_put(handle->ctx);
	kfree(handle);
}

static void hl_encaps_handle_do_release_sob(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
		container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
	struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;

	/* If we are here, a signals reservation was made but a CS with
	 * encapsulated signals was never submitted, so we must put the
	 * refcount taken on the hw_sob at reservation time.
	 */
	hw_sob_put(handle->hw_sob);

	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, handle->id);
	spin_unlock(&mgr->lock);

	hl_ctx_put(handle->ctx);
	kfree(handle);
}

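/*
 * hl_encaps_sig_mgr_init - initialize the encapsulated signals manager
 *
 * @mgr: pointer to the signals manager structure
 *
 * Initializes the lock and the IDR that holds the reserved signal handles.
 */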
static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
{
	spin_lock_init(&mgr->lock);
	idr_init(&mgr->handles);
}

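/*
 * hl_encaps_sig_mgr_fini - finalize the encapsulated signals manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to the signals manager structure
 *
 * Releases any handles that are still allocated, with a warning, and
 * destroys the IDR.
 */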
static void hl_encaps_sig_mgr_fini(struct hl_device *hdev,
			struct hl_encaps_signals_mgr *mgr)
{
	struct hl_cs_encaps_sig_handle *handle;
	struct idr *idp;
	u32 id;

	idp = &mgr->handles;

	if (!idr_is_empty(idp)) {
		dev_warn(hdev->dev, "device released while some encaps signals handles are still allocated\n");
		idr_for_each_entry(idp, handle, id)
			kref_put(&handle->refcount,
					hl_encaps_handle_do_release_sob);
	}

	idr_destroy(&mgr->handles);
}

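/*
 * hl_ctx_fini - tear down a context
 *
 * @ctx: pointer to the context structure
 *
 * Called once the context refcount drops to zero, i.e. when no CS is still
 * in flight on this context. Releases the HW block mapped list, the pending
 * fences and the per-context resources (debug mode, VA pool, VM, ASID and
 * encapsulated signals manager for a user context; VM and MMU for the
 * kernel context).
 */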
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/* Release all allocated HW block mapped list entries and destroy
	 * the mutex.
	 */
	hl_hw_block_mem_fini(ctx);

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS we increment the ref count, and for
	 * every CS that finished we decrement it, so we won't arrive at
	 * this function unless the ref count is 0.
	 */

	for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
		hl_fence_put(ctx->cs_pending[i]);

	kfree(ctx->cs_pending);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);

		/* The engines are stopped as there is no executing CS, but
		 * Coresight might still be working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 */
		if (hdev->in_debug)
			hl_device_set_debug_mode(hdev, ctx, false);

		hdev->asic_funcs->ctx_fini(ctx);

		hl_dec_ctx_fini(ctx);

		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
		hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
	} else {
		dev_dbg(hdev->dev, "closing kernel context\n");
		hdev->asic_funcs->ctx_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_mmu_ctx_fini(ctx);
	}
}

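/*
 * hl_ctx_do_release - release a context once its refcount reaches zero
 *
 * @ref: kref embedded in the context
 *
 * Tears the context down, detaches it from its owning file private data
 * (if any) and frees it.
 */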
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv) {
		struct hl_fpriv *hpriv = ctx->hpriv;

		mutex_lock(&hpriv->ctx_lock);
		hpriv->ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		hl_hpriv_put(hpriv);
	}

	kfree(ctx);
}

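/*
 * hl_ctx_create - create a user context and attach it to the file private data
 *
 * @hdev: pointer to device structure
 * @hpriv: pointer to the file private data of the opening process
 *
 * Allocates a context, registers it in the process context manager IDR,
 * initializes it and takes a reference on @hpriv.
 *
 * @return 0 on success, negative error code otherwise
 */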
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&ctx_mgr->lock);
	rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&ctx_mgr->lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->is_compute_ctx_active = true;

	return 0;

remove_from_idr:
	mutex_lock(&ctx_mgr->lock);
	idr_remove(&ctx_mgr->handles, ctx->handle);
	mutex_unlock(&ctx_mgr->lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}

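/*
 * hl_ctx_init - initialize a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 * @is_kernel_ctx: true for the kernel (driver) context, false for a user one
 *
 * Allocates the pending CS fences array, the outcome store and the HW block
 * memory list, and initializes the VM, ASID, mapped CB VA pool and
 * ASIC-specific state as needed for the context type.
 *
 * @return 0 on success, negative error code otherwise
 */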
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0, i;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;
	ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
				sizeof(struct hl_fence *),
				GFP_KERNEL);
	if (!ctx->cs_pending)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->outcome_store.used_list);
	INIT_LIST_HEAD(&ctx->outcome_store.free_list);
	hash_init(ctx->outcome_store.outcome_map);
	for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
		list_add(&ctx->outcome_store.nodes_pool[i].list_link,
			&ctx->outcome_store.free_list);

	hl_hw_block_mem_init(ctx);

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_vm_ctx_fini;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_cb_va_pool_fini;
		}

		hl_encaps_sig_mgr_init(&ctx->sig_mgr);

		dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);
err_hw_block_mem_fini:
	hl_hw_block_mem_fini(ctx);
	kfree(ctx->cs_pending);

	return rc;
}

static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
{
	return kref_get_unless_zero(&ctx->refcount);
}

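/*
 * hl_ctx_get - take a reference on a context
 *
 * @ctx: pointer to the context structure
 */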
void hl_ctx_get(struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

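/*
 * hl_ctx_put - drop a reference on a context
 *
 * @ctx: pointer to the context structure
 *
 * Releases the context when the last reference is dropped.
 *
 * @return 1 if the context was released, 0 otherwise
 */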
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}

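/*
 * hl_get_compute_ctx - get the context of the single compute user
 *
 * @hdev: pointer to device structure
 *
 * @return pointer to the context with its refcount incremented, or NULL if
 * no user currently holds a compute context
 */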
struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
{
	struct hl_ctx *ctx = NULL;
	struct hl_fpriv *hpriv;

	mutex_lock(&hdev->fpriv_list_lock);

	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
		mutex_lock(&hpriv->ctx_lock);
		ctx = hpriv->ctx;
		if (ctx && !hl_ctx_get_unless_zero(ctx))
			ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		/* There can only be a single user which has opened the compute device, so exit
		 * immediately once we find its context or if we see that it has been released
		 */
		break;
	}

	mutex_unlock(&hdev->fpriv_list_lock);

	return ctx;
}

/*
 * hl_ctx_get_fence_locked - get CS fence under CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * @return valid fence pointer on success, NULL if fence is gone, otherwise
 * error pointer.
 *
 * NOTE: this function shall be called with cs_lock locked
 */
static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
	struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
	struct hl_fence *fence;

	if (seq >= ctx->cs_sequence)
		return ERR_PTR(-EINVAL);

	if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
		return NULL;

	fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
	hl_fence_get(fence);
	return fence;
}

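/*
 * hl_ctx_get_fence - get a single CS fence
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * Same as hl_ctx_get_fence_locked(), but takes the CS lock internally.
 */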
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_fence *fence;

	spin_lock(&ctx->cs_lock);

	fence = hl_ctx_get_fence_locked(ctx, seq);

	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_get_fences - get multiple CS fences under the same CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq_arr: array of CS sequences to wait for
 * @fence: fence array to store the CS fences
 * @arr_len: length of seq_arr and fence
 *
 * @return 0 on success, otherwise non-zero error code
 */
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
				struct hl_fence **fence, u32 arr_len)
{
	struct hl_fence **fence_arr_base = fence;
	int i, rc = 0;

	spin_lock(&ctx->cs_lock);

	for (i = 0; i < arr_len; i++, fence++) {
		u64 seq = seq_arr[i];

		*fence = hl_ctx_get_fence_locked(ctx, seq);

		if (IS_ERR(*fence)) {
			dev_err(ctx->hdev->dev,
				"Failed to get fence for CS with seq 0x%llx\n",
					seq);
			rc = PTR_ERR(*fence);
			break;
		}
	}

	spin_unlock(&ctx->cs_lock);

	if (rc)
		hl_fences_put(fence_arr_base, i);

	return rc;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @ctx_mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
	mutex_init(&ctx_mgr->lock);
	idr_init(&ctx_mgr->handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @ctx_mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &ctx_mgr->handles;

	idr_for_each_entry(idp, ctx, id)
		kref_put(&ctx->refcount, hl_ctx_do_release);

	idr_destroy(&ctx_mgr->handles);
	mutex_destroy(&ctx_mgr->lock);
}