Lines Matching refs:ipc
23 struct ivpu_ipc_hdr ipc; member
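The member match above sits inside the driver's TX buffer wrapper. A minimal sketch of that layout, assuming the companion JSM payload member and the struct vpu_jsm_msg type (neither appears in this listing, they are inferred from the data_addr/data_size assignments further down):

    /* Sketch only: struct vpu_jsm_msg and the jsm member are assumptions. */
    struct ivpu_ipc_tx_buf {
        struct ivpu_ipc_hdr ipc;    /* line 23: IPC header read by the firmware */
        struct vpu_jsm_msg jsm;     /* payload pointed to by ipc.data_addr */
    };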
65 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_mem_fini() local
67 ivpu_bo_free_internal(ipc->mem_rx); in ivpu_ipc_mem_fini()
68 ivpu_bo_free_internal(ipc->mem_tx); in ivpu_ipc_mem_fini()
75 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_tx_prepare() local
80 tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf)); in ivpu_ipc_tx_prepare()
87 tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr); in ivpu_ipc_tx_prepare()
89 gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf)); in ivpu_ipc_tx_prepare()
95 if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE) in ivpu_ipc_tx_prepare()
104 tx_buf->ipc.data_addr = jsm_vpu_addr; in ivpu_ipc_tx_prepare()
106 tx_buf->ipc.data_size = sizeof(*req); in ivpu_ipc_tx_prepare()
107 tx_buf->ipc.channel = cons->channel; in ivpu_ipc_tx_prepare()
108 tx_buf->ipc.src_node = 0; in ivpu_ipc_tx_prepare()
109 tx_buf->ipc.dst_node = 1; in ivpu_ipc_tx_prepare()
110 tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED; in ivpu_ipc_tx_prepare()
115 req->request_id = atomic_inc_return(&ipc->request_id); in ivpu_ipc_tx_prepare()
123 ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr); in ivpu_ipc_tx_prepare()
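The matches above outline how a TX slot is carved out of the gen pool and its IPC header filled in. A hedged reconstruction of that flow follows; the warning text, the memset, the cons->tx_vpu_addr bookkeeping, the error codes and the struct ivpu_ipc_consumer fields are assumptions not shown in this listing.

    /* Sketch of ivpu_ipc_tx_prepare() reconstructed from the matches above. */
    static int ivpu_ipc_tx_prepare(struct ivpu_device *vdev,
                                   struct ivpu_ipc_consumer *cons,
                                   struct vpu_jsm_msg *req)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_tx_buf *tx_buf;
        u32 tx_buf_vpu_addr, jsm_vpu_addr;

        /* Reserve one TX slot in the VPU-visible TX buffer (line 80). */
        tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
        if (!tx_buf_vpu_addr)
            return -ENOMEM;

        /* Translate the VPU address back to a CPU pointer (line 87). */
        tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
        if (!tx_buf) {
            gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf)); /* line 89 */
            return -EIO;
        }

        jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);

        /* Firmware marks slots free when done; warn if it has not (line 95). */
        if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
            ivpu_warn(vdev, "IPC message vpu:0x%x not released by firmware\n",
                      tx_buf_vpu_addr);

        memset(tx_buf, 0, sizeof(*tx_buf));
        tx_buf->ipc.data_addr = jsm_vpu_addr;            /* line 104 */
        tx_buf->ipc.data_size = sizeof(*req);            /* line 106 */
        tx_buf->ipc.channel = cons->channel;             /* line 107 */
        tx_buf->ipc.src_node = 0;                        /* line 108: host */
        tx_buf->ipc.dst_node = 1;                        /* line 109: VPU  */
        tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;     /* line 110 */

        /* Tag the request with a monotonically increasing id (line 115). */
        req->request_id = atomic_inc_return(&ipc->request_id);

        /* Remember which slot this consumer is using (assumed field). */
        cons->tx_vpu_addr = tx_buf_vpu_addr;

        ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr); /* line 123 */
        return 0;
    }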
130 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_tx_release() local
133 gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf)); in ivpu_ipc_tx_release()
144 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_consumer_add() local
154 spin_lock_irq(&ipc->cons_list_lock); in ivpu_ipc_consumer_add()
155 list_add_tail(&cons->link, &ipc->cons_list); in ivpu_ipc_consumer_add()
156 spin_unlock_irq(&ipc->cons_list_lock); in ivpu_ipc_consumer_add()
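A sketch of the consumer registration implied by lines 154-156; the per-consumer fields (rx_msg_lock, rx_msg_list, rx_msg_wq) are assumptions carried over from the receive-side matches:

    /* Sketch of ivpu_ipc_consumer_add(); assumes the driver's headers. */
    void ivpu_ipc_consumer_add(struct ivpu_device *vdev,
                               struct ivpu_ipc_consumer *cons, u32 channel)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;

        INIT_LIST_HEAD(&cons->link);
        cons->channel = channel;
        spin_lock_init(&cons->rx_msg_lock);      /* assumed per-consumer lock */
        INIT_LIST_HEAD(&cons->rx_msg_list);      /* assumed per-consumer queue */
        init_waitqueue_head(&cons->rx_msg_wq);   /* assumed wait queue */

        /* Publish the consumer; the IRQ handler walks this list (lines 154-156). */
        spin_lock_irq(&ipc->cons_list_lock);
        list_add_tail(&cons->link, &ipc->cons_list);
        spin_unlock_irq(&ipc->cons_list_lock);
    }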
161 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_consumer_del() local
164 spin_lock_irq(&ipc->cons_list_lock); in ivpu_ipc_consumer_del()
166 spin_unlock_irq(&ipc->cons_list_lock); in ivpu_ipc_consumer_del()
172 atomic_dec(&ipc->rx_msg_count); in ivpu_ipc_consumer_del()
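The deletion path unpublishes the consumer and drains anything still queued, which is why line 172 decrements rx_msg_count. A sketch, assuming the struct ivpu_ipc_rx_msg bookkeeping and an ivpu_ipc_rx_mark_free() helper that neither appear in this listing:

    /* Sketch of ivpu_ipc_consumer_del(). */
    void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_rx_msg *rx_msg, *r;

        /* Unpublish the consumer (lines 164-166). */
        spin_lock_irq(&ipc->cons_list_lock);
        list_del(&cons->link);
        spin_unlock_irq(&ipc->cons_list_lock);

        /* Drop responses that were dispatched but never received; each was
         * counted in rx_msg_count when queued (line 172 balances that). */
        spin_lock_irq(&cons->rx_msg_lock);
        list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) {
            list_del(&rx_msg->link);
            ivpu_ipc_rx_mark_free(rx_msg->ipc_hdr, rx_msg->jsm_msg); /* assumed helper */
            atomic_dec(&ipc->rx_msg_count);
            kfree(rx_msg);
        }
        spin_unlock_irq(&cons->rx_msg_lock);
    }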
183 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_send() local
186 mutex_lock(&ipc->lock); in ivpu_ipc_send()
188 if (!ipc->on) { in ivpu_ipc_send()
200 mutex_unlock(&ipc->lock); in ivpu_ipc_send()
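The send path serializes on ipc->lock and refuses to transmit while the interface is switched off. A sketch; ivpu_ipc_tx(), the cons->tx_vpu_addr field and the -EAGAIN error code are assumptions:

    /* Sketch of ivpu_ipc_send(). */
    static int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
                             struct vpu_jsm_msg *req)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;
        int ret;

        mutex_lock(&ipc->lock);                 /* line 186 */

        if (!ipc->on) {                         /* line 188: disabled, e.g. during reset */
            ret = -EAGAIN;                      /* assumed error code */
            goto unlock;
        }

        ret = ivpu_ipc_tx_prepare(vdev, cons, req);
        if (ret)
            goto unlock;

        ivpu_ipc_tx(vdev, cons->tx_vpu_addr);   /* assumed: write TX FIFO / doorbell */

    unlock:
        mutex_unlock(&ipc->lock);               /* line 200 */
        return ret;
    }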
208 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_receive() local
247 atomic_dec(&ipc->rx_msg_count); in ivpu_ipc_receive()
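Only two matches of the receive path are visible here, so the following is a very rough sketch of what line 247 implies: the consumer pops a queued message and balances the dispatch-side increment of rx_msg_count. The helper name ivpu_ipc_rx_pop() is hypothetical, as are the rx_msg list and lock:

    /* Hypothetical helper sketching the pop half of ivpu_ipc_receive(). */
    static struct ivpu_ipc_rx_msg *
    ivpu_ipc_rx_pop(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_rx_msg *rx_msg;

        spin_lock_irq(&cons->rx_msg_lock);
        rx_msg = list_first_entry_or_null(&cons->rx_msg_list,
                                          struct ivpu_ipc_rx_msg, link);
        if (rx_msg)
            list_del(&rx_msg->link);
        spin_unlock_irq(&cons->rx_msg_lock);

        if (rx_msg)
            atomic_dec(&ipc->rx_msg_count);  /* line 247 */
        return rx_msg;
    }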
334 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_dispatch() local
338 lockdep_assert_held(&ipc->cons_list_lock); in ivpu_ipc_dispatch()
346 atomic_inc(&ipc->rx_msg_count); in ivpu_ipc_dispatch()
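Dispatch runs with cons_list_lock held (line 338) and accounts every queued response in rx_msg_count (line 346), which is the counter the receive path later decrements. A sketch; the rx_msg allocation, per-consumer queue and wake_up are assumptions consistent with the sketches above:

    /* Sketch of ivpu_ipc_dispatch(), called from the IRQ handler. */
    static void ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
                                  struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_rx_msg *rx_msg;
        unsigned long flags;

        lockdep_assert_held(&ipc->cons_list_lock);   /* line 338 */

        rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
        if (!rx_msg) {
            ivpu_ipc_rx_mark_free(ipc_hdr, jsm_msg); /* assumed helper */
            return;
        }

        atomic_inc(&ipc->rx_msg_count);              /* line 346: bounded by IPC_MAX_RX_MSG */

        rx_msg->ipc_hdr = ipc_hdr;
        rx_msg->jsm_msg = jsm_msg;

        spin_lock_irqsave(&cons->rx_msg_lock, flags);
        list_add_tail(&rx_msg->link, &cons->rx_msg_list);
        spin_unlock_irqrestore(&cons->rx_msg_lock, flags);

        wake_up(&cons->rx_msg_wq);
    }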
360 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_irq_handler() local
379 ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr); in ivpu_ipc_irq_handler()
388 jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr); in ivpu_ipc_irq_handler()
397 if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) { in ivpu_ipc_irq_handler()
404 spin_lock_irqsave(&ipc->cons_list_lock, flags); in ivpu_ipc_irq_handler()
405 list_for_each_entry(cons, &ipc->cons_list, link) { in ivpu_ipc_irq_handler()
412 spin_unlock_irqrestore(&ipc->cons_list_lock, flags); in ivpu_ipc_irq_handler()
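The IRQ-handler matches show the per-message path: translate the VPU addresses, bound the backlog, then hand the message to the first matching consumer under cons_list_lock. Below it is folded into a hypothetical helper, ivpu_ipc_handle_rx(); the FIFO read loop, ivpu_ipc_match_consumer() and ivpu_ipc_rx_mark_free() are assumptions not present in this listing:

    /* Sketch of the per-message body of ivpu_ipc_irq_handler(). */
    static void ivpu_ipc_handle_rx(struct ivpu_device *vdev, u32 vpu_addr)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_consumer *cons;
        struct ivpu_ipc_hdr *ipc_hdr;
        struct vpu_jsm_msg *jsm_msg;
        unsigned long flags;
        bool dispatched = false;

        /* Translate the VPU addresses reported by the RX FIFO (lines 379, 388). */
        ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
        if (!ipc_hdr)
            return;
        jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);

        /* Bound the number of undelivered responses (line 397). */
        if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
            ivpu_ipc_rx_mark_free(ipc_hdr, jsm_msg);   /* assumed helper */
            return;
        }

        /* Deliver to the first consumer registered for this channel (lines 404-412);
         * ivpu_ipc_dispatch() runs with cons_list_lock held. */
        spin_lock_irqsave(&ipc->cons_list_lock, flags);
        list_for_each_entry(cons, &ipc->cons_list, link) {
            if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) { /* assumed */
                ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg);
                dispatched = true;
                break;
            }
        }
        spin_unlock_irqrestore(&ipc->cons_list_lock, flags);

        if (!dispatched)
            ivpu_ipc_rx_mark_free(ipc_hdr, jsm_msg);
    }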
425 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_init() local
428 ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC); in ivpu_ipc_init()
429 if (!ipc->mem_tx) in ivpu_ipc_init()
432 ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC); in ivpu_ipc_init()
433 if (!ipc->mem_rx) in ivpu_ipc_init()
436 ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT), in ivpu_ipc_init()
438 if (IS_ERR(ipc->mm_tx)) { in ivpu_ipc_init()
439 ret = PTR_ERR(ipc->mm_tx); in ivpu_ipc_init()
440 ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx); in ivpu_ipc_init()
444 ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ipc->mem_tx->base.size, -1); in ivpu_ipc_init()
450 INIT_LIST_HEAD(&ipc->cons_list); in ivpu_ipc_init()
451 spin_lock_init(&ipc->cons_list_lock); in ivpu_ipc_init()
452 drmm_mutex_init(&vdev->drm, &ipc->lock); in ivpu_ipc_init()
458 ivpu_bo_free_internal(ipc->mem_rx); in ivpu_ipc_init()
460 ivpu_bo_free_internal(ipc->mem_tx); in ivpu_ipc_init()
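Initialization allocates the two 16 KiB write-combined buffers shared with the firmware, layers a gen pool over the TX buffer, and sets up the consumer list and locks. A sketch following the matches above; the gen pool name string, the error-label names, the -ENOMEM codes and the trailing ivpu_ipc_reset() call are assumptions:

    /* Sketch of ivpu_ipc_init(). */
    int ivpu_ipc_init(struct ivpu_device *vdev)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;
        int ret;

        ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC); /* line 428 */
        if (!ipc->mem_tx)
            return -ENOMEM;

        ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC); /* line 432 */
        if (!ipc->mem_rx) {
            ret = -ENOMEM;
            goto err_free_tx;
        }

        /* Sub-allocator that hands out aligned TX slots from mem_tx (lines 436-444). */
        ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
                                          -1, "TX_IPC_JSM"); /* assumed pool name */
        if (IS_ERR(ipc->mm_tx)) {
            ret = PTR_ERR(ipc->mm_tx);
            ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
            goto err_free_rx;
        }

        ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ipc->mem_tx->base.size, -1);
        if (ret)
            goto err_free_rx;

        INIT_LIST_HEAD(&ipc->cons_list);          /* line 450 */
        spin_lock_init(&ipc->cons_list_lock);     /* line 451 */
        drmm_mutex_init(&vdev->drm, &ipc->lock);  /* line 452 */

        ivpu_ipc_reset(vdev);                     /* assumed: start from clean buffers */
        return 0;

    err_free_rx:
        ivpu_bo_free_internal(ipc->mem_rx);       /* line 458 */
    err_free_tx:
        ivpu_bo_free_internal(ipc->mem_tx);       /* line 460 */
        return ret;
    }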
471 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_enable() local
473 mutex_lock(&ipc->lock); in ivpu_ipc_enable()
474 ipc->on = true; in ivpu_ipc_enable()
475 mutex_unlock(&ipc->lock); in ivpu_ipc_enable()
480 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_disable() local
484 mutex_lock(&ipc->lock); in ivpu_ipc_disable()
485 ipc->on = false; in ivpu_ipc_disable()
486 mutex_unlock(&ipc->lock); in ivpu_ipc_disable()
488 spin_lock_irqsave(&ipc->cons_list_lock, flags); in ivpu_ipc_disable()
489 list_for_each_entry_safe(cons, c, &ipc->cons_list, link) in ivpu_ipc_disable()
491 spin_unlock_irqrestore(&ipc->cons_list_lock, flags); in ivpu_ipc_disable()
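Disable first flips the on flag under the mutex so no new sends start, then walks the consumer list so anyone blocked on a response can return. A sketch; the wake_up() on the assumed rx_msg_wq wait queue is the body elided from line 489-490 in this listing:

    /* Sketch of ivpu_ipc_disable(). */
    void ivpu_ipc_disable(struct ivpu_device *vdev)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_consumer *cons, *c;
        unsigned long flags;

        mutex_lock(&ipc->lock);                  /* lines 484-486 */
        ipc->on = false;
        mutex_unlock(&ipc->lock);

        spin_lock_irqsave(&ipc->cons_list_lock, flags);   /* lines 488-491 */
        list_for_each_entry_safe(cons, c, &ipc->cons_list, link)
            wake_up(&cons->rx_msg_wq);           /* assumed: unblock pending receivers */
        spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
    }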
496 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_reset() local
498 mutex_lock(&ipc->lock); in ivpu_ipc_reset()
500 memset(ipc->mem_tx->kvaddr, 0, ipc->mem_tx->base.size); in ivpu_ipc_reset()
501 memset(ipc->mem_rx->kvaddr, 0, ipc->mem_rx->base.size); in ivpu_ipc_reset()
504 mutex_unlock(&ipc->lock); in ivpu_ipc_reset()
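Reset wipes both shared buffers under ipc->lock so no transmit can race with the clear. A sketch; the write barrier flushing the write-combined mappings is an assumption:

    /* Sketch of ivpu_ipc_reset(). */
    void ivpu_ipc_reset(struct ivpu_device *vdev)
    {
        struct ivpu_ipc_info *ipc = vdev->ipc;

        mutex_lock(&ipc->lock);                                   /* line 498 */

        memset(ipc->mem_tx->kvaddr, 0, ipc->mem_tx->base.size);   /* line 500 */
        memset(ipc->mem_rx->kvaddr, 0, ipc->mem_rx->base.size);   /* line 501 */
        wmb(); /* assumed: flush WC mappings before the firmware reads them */

        mutex_unlock(&ipc->lock);                                 /* line 504 */
    }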