
Searched refs:uctx (results 1 – 25 of 39, sorted by relevance)

/linux-6.1.9/drivers/gpu/drm/vmwgfx/
vmwgfx_context.c
118 struct vmw_user_context *uctx) in vmw_context_cotables_unref() argument
126 spin_lock(&uctx->cotable_lock); in vmw_context_cotables_unref()
127 res = uctx->cotables[i]; in vmw_context_cotables_unref()
128 uctx->cotables[i] = NULL; in vmw_context_cotables_unref()
129 spin_unlock(&uctx->cotable_lock); in vmw_context_cotables_unref()
138 struct vmw_user_context *uctx = in vmw_hw_context_destroy() local
150 vmw_cmdbuf_res_man_destroy(uctx->man); in vmw_hw_context_destroy()
152 vmw_binding_state_kill(uctx->cbs); in vmw_hw_context_destroy()
159 vmw_context_cotables_unref(dev_priv, uctx); in vmw_hw_context_destroy()
182 struct vmw_user_context *uctx = in vmw_gb_context_init() local
[all …]
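
The vmwgfx lines above show a small teardown idiom: each cotable pointer is taken and cleared while uctx->cotable_lock is held, and the actual release happens after the lock is dropped. Below is a minimal userspace sketch of that pattern with invented names, a pthread mutex standing in for the spinlock, and free() standing in for the reference drop; it is not the vmwgfx code itself.

/* Generic "detach under the lock, release outside it" sketch. */
#include <pthread.h>
#include <stdlib.h>

#define NSLOTS 4

struct ctx {
	pthread_mutex_t lock;
	void *slots[NSLOTS];
};

static void ctx_slots_release(struct ctx *c)
{
	for (int i = 0; i < NSLOTS; i++) {
		pthread_mutex_lock(&c->lock);
		void *p = c->slots[i];       /* take the pointer ...     */
		c->slots[i] = NULL;          /* ... and clear the slot   */
		pthread_mutex_unlock(&c->lock);
		free(p);                     /* release outside the lock */
	}
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < NSLOTS; i++)
		c.slots[i] = malloc(16);
	ctx_slots_release(&c);
	return 0;
}
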
/linux-6.1.9/drivers/infiniband/hw/cxgb4/
resource.c
108 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) in c4iw_get_cqid() argument
114 mutex_lock(&uctx->lock); in c4iw_get_cqid()
115 if (!list_empty(&uctx->cqids)) { in c4iw_get_cqid()
116 entry = list_entry(uctx->cqids.next, struct c4iw_qid_list, in c4iw_get_cqid()
133 list_add_tail(&entry->entry, &uctx->cqids); in c4iw_get_cqid()
144 list_add_tail(&entry->entry, &uctx->qpids); in c4iw_get_cqid()
150 list_add_tail(&entry->entry, &uctx->qpids); in c4iw_get_cqid()
154 mutex_unlock(&uctx->lock); in c4iw_get_cqid()
164 struct c4iw_dev_ucontext *uctx) in c4iw_put_cqid() argument
173 mutex_lock(&uctx->lock); in c4iw_put_cqid()
[all …]
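
c4iw_get_cqid() above illustrates the per-ucontext id recycling the cxgb4 driver uses: under uctx->lock, a free cqid/qpid is popped from the context's list if one is available, and released ids are pushed back for reuse. Below is a hedged userspace sketch of that free-list idea; the struct names, the fallback "fresh id" counter, and the error handling are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct qid_entry {
	unsigned int qid;
	struct qid_entry *next;
};

struct dev_ucontext {
	pthread_mutex_t lock;
	struct qid_entry *free_qids;   /* recycled ids for this context */
	unsigned int next_fresh;       /* stand-in for the HW id pool   */
};

static unsigned int get_qid(struct dev_ucontext *u)
{
	unsigned int qid;

	pthread_mutex_lock(&u->lock);
	if (u->free_qids) {                    /* reuse a recycled id */
		struct qid_entry *e = u->free_qids;
		u->free_qids = e->next;
		qid = e->qid;
		free(e);
	} else {
		qid = u->next_fresh++;         /* otherwise take a fresh one */
	}
	pthread_mutex_unlock(&u->lock);
	return qid;
}

static void put_qid(struct dev_ucontext *u, unsigned int qid)
{
	struct qid_entry *e = malloc(sizeof(*e));

	if (!e)
		return;                        /* drop the id rather than crash */
	e->qid = qid;
	pthread_mutex_lock(&u->lock);
	e->next = u->free_qids;                /* push back onto the free list */
	u->free_qids = e;
	pthread_mutex_unlock(&u->lock);
}

int main(void)
{
	struct dev_ucontext u = { .lock = PTHREAD_MUTEX_INITIALIZER, .next_fresh = 1 };
	unsigned int a = get_qid(&u), b = get_qid(&u);

	put_qid(&u, a);
	printf("%u %u %u\n", a, b, get_qid(&u));  /* 1 2 1: the first id is recycled */
	return 0;
}
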
cq.c
38 struct c4iw_dev_ucontext *uctx, struct sk_buff *skb, in destroy_cq() argument
67 c4iw_put_cqid(rdev, cq->cqid, uctx); in destroy_cq()
71 struct c4iw_dev_ucontext *uctx, in create_cq() argument
77 int user = (uctx != &rdev->uctx); in create_cq()
83 ucontext = container_of(uctx, struct c4iw_ucontext, uctx); in create_cq()
85 cq->cqid = c4iw_get_cqid(rdev, uctx); in create_cq()
179 c4iw_put_cqid(rdev, cq->cqid, uctx); in create_cq()
991 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx, in c4iw_destroy_cq()
1079 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_cq()
1150 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_cq()
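
Two idioms recur in the cq.c lines above: a caller is classified as kernel or user by comparing the passed uctx against the device's own embedded context (uctx != &rdev->uctx), and the user wrapper is then recovered with container_of(). The standalone sketch below reproduces both with invented struct names.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dev_ucontext { int dummy; };

struct rdev {
	struct dev_ucontext uctx;      /* the device's own (kernel) context */
};

struct user_ucontext {
	int user_id;
	struct dev_ucontext uctx;      /* embedded, handed around by pointer */
};

static void handle(struct rdev *rdev, struct dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);   /* not the device's own => user call */

	if (user) {
		struct user_ucontext *uc =
			container_of(uctx, struct user_ucontext, uctx);
		printf("user context %d\n", uc->user_id);
	} else {
		printf("kernel context\n");
	}
}

int main(void)
{
	struct rdev rdev = { .uctx = { 0 } };
	struct user_ucontext uc = { .user_id = 7 };

	handle(&rdev, &rdev.uctx);   /* prints "kernel context" */
	handle(&rdev, &uc.uctx);     /* prints "user context 7"  */
	return 0;
}
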
iw_cxgb4.h
175 struct c4iw_dev_ucontext uctx; member
523 struct c4iw_dev_ucontext uctx; member
936 struct c4iw_dev_ucontext *uctx);
955 struct c4iw_dev_ucontext *uctx);
957 struct c4iw_dev_ucontext *uctx);
1013 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
1015 struct c4iw_dev_ucontext *uctx);
1016 u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
1018 struct c4iw_dev_ucontext *uctx);
qp.c
151 struct c4iw_dev_ucontext *uctx, int has_rq) in destroy_qp() argument
159 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in destroy_qp()
167 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp()
201 struct c4iw_dev_ucontext *uctx, in create_qp() argument
205 int user = (uctx != &rdev->uctx); in create_qp()
213 wq->sq.qid = c4iw_get_qpid(rdev, uctx); in create_qp()
218 wq->rq.qid = c4iw_get_qpid(rdev, uctx); in create_qp()
408 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in create_qp()
410 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in create_qp()
2103 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); in c4iw_destroy_qp()
[all …]
device.c
748 struct c4iw_dev_ucontext *uctx) in c4iw_release_dev_ucontext() argument
753 mutex_lock(&uctx->lock); in c4iw_release_dev_ucontext()
754 list_for_each_safe(pos, nxt, &uctx->qpids) { in c4iw_release_dev_ucontext()
767 list_for_each_safe(pos, nxt, &uctx->cqids) { in c4iw_release_dev_ucontext()
772 mutex_unlock(&uctx->lock); in c4iw_release_dev_ucontext()
776 struct c4iw_dev_ucontext *uctx) in c4iw_init_dev_ucontext() argument
778 INIT_LIST_HEAD(&uctx->qpids); in c4iw_init_dev_ucontext()
779 INIT_LIST_HEAD(&uctx->cqids); in c4iw_init_dev_ucontext()
780 mutex_init(&uctx->lock); in c4iw_init_dev_ucontext()
789 c4iw_init_dev_ucontext(rdev, &rdev->uctx); in c4iw_rdev_open()
[all …]
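
device.c above initializes the per-device ucontext (list heads plus a mutex) and, on release, walks the qpid/cqid lists with a safe iterator so each entry can be freed during traversal. A rough userspace equivalent, using a plain singly linked list instead of struct list_head:

#include <pthread.h>
#include <stdlib.h>

struct qid_entry {
	unsigned int qid;
	struct qid_entry *next;
};

struct dev_ucontext {
	pthread_mutex_t lock;
	struct qid_entry *qpids;
	struct qid_entry *cqids;
};

static void init_dev_ucontext(struct dev_ucontext *u)
{
	u->qpids = NULL;                     /* INIT_LIST_HEAD() stand-in */
	u->cqids = NULL;
	pthread_mutex_init(&u->lock, NULL);  /* mutex_init() stand-in     */
}

static void release_list(struct qid_entry **head)
{
	struct qid_entry *pos = *head, *nxt;

	while (pos) {
		nxt = pos->next;   /* remember the successor before freeing,   */
		free(pos);         /* the reason list_for_each_safe() exists   */
		pos = nxt;
	}
	*head = NULL;
}

static void release_dev_ucontext(struct dev_ucontext *u)
{
	pthread_mutex_lock(&u->lock);
	release_list(&u->qpids);
	release_list(&u->cqids);
	pthread_mutex_unlock(&u->lock);
}

int main(void)
{
	struct dev_ucontext u;

	init_dev_ucontext(&u);
	release_dev_ucontext(&u);
	return 0;
}
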
/linux-6.1.9/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
193 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, in ocrdma_add_mmap() argument
205 mutex_lock(&uctx->mm_list_lock); in ocrdma_add_mmap()
206 list_add_tail(&mm->entry, &uctx->mm_head); in ocrdma_add_mmap()
207 mutex_unlock(&uctx->mm_list_lock); in ocrdma_add_mmap()
211 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, in ocrdma_del_mmap() argument
216 mutex_lock(&uctx->mm_list_lock); in ocrdma_del_mmap()
217 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { in ocrdma_del_mmap()
225 mutex_unlock(&uctx->mm_list_lock); in ocrdma_del_mmap()
228 static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, in ocrdma_search_mmap() argument
234 mutex_lock(&uctx->mm_list_lock); in ocrdma_search_mmap()
[all …]
ocrdma_verbs.h
64 int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
65 void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
ocrdma_ah.c
196 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { in ocrdma_create_ah()
197 ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr); in ocrdma_create_ah()
/linux-6.1.9/security/selinux/
xfrm.c
75 struct xfrm_user_sec_ctx *uctx, in selinux_xfrm_alloc_user() argument
83 if (ctxp == NULL || uctx == NULL || in selinux_xfrm_alloc_user()
84 uctx->ctx_doi != XFRM_SC_DOI_LSM || in selinux_xfrm_alloc_user()
85 uctx->ctx_alg != XFRM_SC_ALG_SELINUX) in selinux_xfrm_alloc_user()
88 str_len = uctx->ctx_len; in selinux_xfrm_alloc_user()
99 memcpy(ctx->ctx_str, &uctx[1], str_len); in selinux_xfrm_alloc_user()
286 struct xfrm_user_sec_ctx *uctx, in selinux_xfrm_policy_alloc() argument
289 return selinux_xfrm_alloc_user(ctxp, uctx, gfp); in selinux_xfrm_policy_alloc()
335 struct xfrm_user_sec_ctx *uctx) in selinux_xfrm_state_alloc() argument
337 return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL); in selinux_xfrm_state_alloc()
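
In selinux_xfrm_alloc_user() above, the security context string is carried immediately after the fixed xfrm_user_sec_ctx header, which is why it is copied from &uctx[1] for ctx_len bytes. The sketch below mimics that header-plus-trailing-payload layout in userspace; the local struct only mirrors the uapi field order for illustration, and the helper name is invented.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct user_sec_ctx {           /* modeled on struct xfrm_user_sec_ctx */
	uint16_t len;           /* header + payload length             */
	uint16_t exttype;
	uint8_t  ctx_alg;
	uint8_t  ctx_doi;
	uint16_t ctx_len;       /* payload (context string) length     */
	/* ctx_len bytes of context string follow the header */
};

static char *dup_ctx_str(const struct user_sec_ctx *uctx)
{
	char *str = malloc(uctx->ctx_len + 1);

	if (!str)
		return NULL;
	memcpy(str, uctx + 1, uctx->ctx_len);  /* payload starts right after the header */
	str[uctx->ctx_len] = '\0';
	return str;
}

int main(void)
{
	const char ctx[] = "system_u:object_r:example_t:s0";
	struct user_sec_ctx *uctx = malloc(sizeof(*uctx) + sizeof(ctx));

	if (!uctx)
		return 1;
	uctx->ctx_len = sizeof(ctx) - 1;
	uctx->len = sizeof(*uctx) + uctx->ctx_len;
	memcpy(uctx + 1, ctx, uctx->ctx_len);

	char *s = dup_ctx_str(uctx);
	printf("%s\n", s);
	free(s);
	free(uctx);
	return 0;
}
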
/linux-6.1.9/tools/testing/selftests/powerpc/pmu/sampling_tests/
mmcr2_fcs_fch_test.c
21 ucontext_t *uctx = data; in sig_usr2_handler() local
23 is_hv = !!(uctx->uc_mcontext.gp_regs[PT_MSR] & MSR_HV); in sig_usr2_handler()
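
The powerpc selftest above casts the third argument of its SIGUSR2 handler to ucontext_t * and inspects the saved MSR. A portable sketch of the same plumbing follows; it only shows how the ucontext pointer reaches an SA_SIGINFO handler and leaves out the powerpc-specific gp_regs[PT_MSR] access.

#include <signal.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

static void sig_usr2_handler(int sig, siginfo_t *info, void *data)
{
	ucontext_t *uctx = data;   /* same cast the selftest performs */

	(void)sig;
	(void)info;
	(void)uctx;                /* arch-specific state lives in uctx->uc_mcontext */
	write(STDOUT_FILENO, "got SIGUSR2\n", 12);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sig_usr2_handler;
	sa.sa_flags = SA_SIGINFO;          /* request the three-argument handler */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR2, &sa, NULL);

	raise(SIGUSR2);
	return 0;
}
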
/linux-6.1.9/security/selinux/include/
xfrm.h
16 struct xfrm_user_sec_ctx *uctx,
23 struct xfrm_user_sec_ctx *uctx);
/linux-6.1.9/drivers/infiniband/hw/hns/
hns_roce_cq.c
230 struct hns_roce_ucontext *uctx; in alloc_cq_db() local
236 uctx = rdma_udata_to_drv_context(udata, in alloc_cq_db()
238 err = hns_roce_db_map_user(uctx, addr, &hr_cq->db); in alloc_cq_db()
263 struct hns_roce_ucontext *uctx; in free_cq_db() local
270 uctx = rdma_udata_to_drv_context(udata, in free_cq_db()
273 hns_roce_db_unmap_user(uctx, &hr_cq->db); in free_cq_db()
hns_roce_main.c
340 static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) in hns_roce_alloc_uar_entry() argument
342 struct hns_roce_ucontext *context = to_hr_ucontext(uctx); in hns_roce_alloc_uar_entry()
347 uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB); in hns_roce_alloc_uar_entry()
354 static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, in hns_roce_alloc_ucontext() argument
357 struct hns_roce_ucontext *context = to_hr_ucontext(uctx); in hns_roce_alloc_ucontext()
358 struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device); in hns_roce_alloc_ucontext()
386 ret = hns_roce_alloc_uar_entry(uctx); in hns_roce_alloc_ucontext()
425 static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma) in hns_roce_mmap() argument
433 rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff); in hns_roce_mmap()
449 ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE, in hns_roce_mmap()
hns_roce_qp.c
862 struct hns_roce_ucontext *uctx = in qp_mmap_entry() local
871 hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, in qp_mmap_entry()
893 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata, in alloc_user_qp_db() local
899 ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb); in alloc_user_qp_db()
910 ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb); in alloc_user_qp_db()
924 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); in alloc_user_qp_db()
1001 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( in free_qp_db() local
1006 hns_roce_db_unmap_user(uctx, &hr_qp->rdb); in free_qp_db()
1008 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); in free_qp_db()
1061 struct hns_roce_ucontext *uctx; in set_qp_param() local
[all …]
/linux-6.1.9/drivers/infiniband/hw/qedr/
verbs.h
45 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
46 void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
/linux-6.1.9/drivers/infiniband/hw/erdma/
erdma_verbs.c
236 erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address, in erdma_user_mmap_entry_insert() argument
251 ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry, in erdma_user_mmap_entry_insert()
614 static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx, in init_user_qp() argument
640 ret = erdma_map_user_dbrecords(uctx, db_info_va, in init_user_qp()
660 static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx) in free_user_qp() argument
664 erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page); in free_user_qp()
672 struct erdma_ucontext *uctx = rdma_udata_to_drv_context( in erdma_create_qp() local
707 if (uctx) { in erdma_create_qp()
713 ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len, in erdma_create_qp()
745 if (uctx) in erdma_create_qp()
[all …]
/linux-6.1.9/drivers/infiniband/sw/siw/
siw_verbs.c
47 struct siw_ucontext *uctx = to_siw_ctx(ctx); in siw_mmap() local
60 rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma); in siw_mmap()
62 siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n", in siw_mmap()
118 struct siw_ucontext *uctx = to_siw_ctx(base_ctx); in siw_dealloc_ucontext() local
120 atomic_dec(&uctx->sdev->num_ctx); in siw_dealloc_ucontext()
259 siw_mmap_entry_insert(struct siw_ucontext *uctx, in siw_mmap_entry_insert() argument
272 rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext, in siw_mmap_entry_insert()
302 struct siw_ucontext *uctx = in siw_create_qp() local
445 siw_mmap_entry_insert(uctx, qp->sendq, in siw_create_qp()
456 siw_mmap_entry_insert(uctx, qp->recvq, in siw_create_qp()
[all …]
/linux-6.1.9/net/key/
af_key.c
466 struct xfrm_user_sec_ctx *uctx = NULL; in pfkey_sadb2xfrm_user_sec_ctx() local
469 uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp); in pfkey_sadb2xfrm_user_sec_ctx()
471 if (!uctx) in pfkey_sadb2xfrm_user_sec_ctx()
474 uctx->len = pfkey_sec_ctx_len(sec_ctx); in pfkey_sadb2xfrm_user_sec_ctx()
475 uctx->exttype = sec_ctx->sadb_x_sec_exttype; in pfkey_sadb2xfrm_user_sec_ctx()
476 uctx->ctx_doi = sec_ctx->sadb_x_ctx_doi; in pfkey_sadb2xfrm_user_sec_ctx()
477 uctx->ctx_alg = sec_ctx->sadb_x_ctx_alg; in pfkey_sadb2xfrm_user_sec_ctx()
478 uctx->ctx_len = sec_ctx->sadb_x_ctx_len; in pfkey_sadb2xfrm_user_sec_ctx()
479 memcpy(uctx + 1, sec_ctx + 1, in pfkey_sadb2xfrm_user_sec_ctx()
480 uctx->ctx_len); in pfkey_sadb2xfrm_user_sec_ctx()
[all …]
/linux-6.1.9/drivers/infiniband/hw/usnic/
usnic_ib_verbs.c
633 int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) in usnic_ib_alloc_ucontext() argument
635 struct ib_device *ibdev = uctx->device; in usnic_ib_alloc_ucontext()
636 struct usnic_ib_ucontext *context = to_ucontext(uctx); in usnic_ib_alloc_ucontext()
663 struct usnic_ib_ucontext *uctx = to_ucontext(context); in usnic_ib_mmap() local
682 list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) { in usnic_ib_mmap()
usnic_ib_verbs.h
65 int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
/linux-6.1.9/drivers/infiniband/hw/bnxt_re/
ib_verbs.c
694 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( in bnxt_re_create_ah() local
699 spin_lock_irqsave(&uctx->sh_lock, flag); in bnxt_re_create_ah()
700 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT); in bnxt_re_create_ah()
703 spin_unlock_irqrestore(&uctx->sh_lock, flag); in bnxt_re_create_ah()
2841 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( in bnxt_re_create_cq() local
2856 cq->qplib_cq.dpi = &uctx->dpi; in bnxt_re_create_cq()
3846 struct bnxt_re_ucontext *uctx = in bnxt_re_alloc_ucontext() local
3862 uctx->rdev = rdev; in bnxt_re_alloc_ucontext()
3864 uctx->shpg = (void *)__get_free_page(GFP_KERNEL); in bnxt_re_alloc_ucontext()
3865 if (!uctx->shpg) { in bnxt_re_alloc_ucontext()
[all …]
/linux-6.1.9/net/xfrm/
xfrm_user.c
117 struct xfrm_user_sec_ctx *uctx; in verify_sec_ctx_len() local
122 uctx = nla_data(rt); in verify_sec_ctx_len()
123 if (uctx->len > nla_len(rt) || in verify_sec_ctx_len()
124 uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) { in verify_sec_ctx_len()
913 struct xfrm_user_sec_ctx *uctx; in copy_sec_ctx() local
915 int ctx_size = sizeof(*uctx) + s->ctx_len; in copy_sec_ctx()
921 uctx = nla_data(attr); in copy_sec_ctx()
922 uctx->exttype = XFRMA_SEC_CTX; in copy_sec_ctx()
923 uctx->len = ctx_size; in copy_sec_ctx()
924 uctx->ctx_doi = s->ctx_doi; in copy_sec_ctx()
[all …]
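
verify_sec_ctx_len() above is a length sanity check on a userspace-supplied variable-length structure: the self-declared uctx->len must not exceed the attribute that carried it and must equal the header size plus ctx_len. A small standalone version of that check, mirroring the uapi field layout with otherwise invented names:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct user_sec_ctx {           /* modeled on struct xfrm_user_sec_ctx */
	uint16_t len;
	uint16_t exttype;
	uint8_t  ctx_alg;
	uint8_t  ctx_doi;
	uint16_t ctx_len;
};

static bool sec_ctx_len_ok(const struct user_sec_ctx *uctx, size_t attr_len)
{
	if (attr_len < sizeof(*uctx))
		return false;                        /* header itself is truncated  */
	if (uctx->len > attr_len)
		return false;                        /* claims more than was sent   */
	if (uctx->len != sizeof(*uctx) + uctx->ctx_len)
		return false;                        /* header and payload disagree */
	return true;
}

int main(void)
{
	struct {
		struct user_sec_ctx hdr;
		char payload[4];
	} msg;

	msg.hdr.ctx_len = sizeof(msg.payload);
	msg.hdr.len = sizeof(msg.hdr) + msg.hdr.ctx_len;
	return sec_ctx_len_ok(&msg.hdr, sizeof(msg)) ? 0 : 1;
}
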
xfrm_state.c
1485 struct xfrm_user_sec_ctx *uctx; in clone_security() local
1486 int size = sizeof(*uctx) + security->ctx_len; in clone_security()
1489 uctx = kmalloc(size, GFP_KERNEL); in clone_security()
1490 if (!uctx) in clone_security()
1493 uctx->exttype = XFRMA_SEC_CTX; in clone_security()
1494 uctx->len = size; in clone_security()
1495 uctx->ctx_doi = security->ctx_doi; in clone_security()
1496 uctx->ctx_alg = security->ctx_alg; in clone_security()
1497 uctx->ctx_len = security->ctx_len; in clone_security()
1498 memcpy(uctx + 1, security->ctx_str, security->ctx_len); in clone_security()
[all …]
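
clone_security() above duplicates a security context by allocating header plus payload in one chunk, copying the header fields, and appending the context string right after the new header. A userspace sketch of that duplication follows; the source struct is only a stand-in for the kernel's xfrm_sec_ctx and the header struct mirrors the uapi layout.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct user_sec_ctx {           /* modeled on struct xfrm_user_sec_ctx */
	uint16_t len;
	uint16_t exttype;
	uint8_t  ctx_alg;
	uint8_t  ctx_doi;
	uint16_t ctx_len;
};

/* Stand-in for the kernel's xfrm_sec_ctx, which keeps the string inline. */
struct sec_ctx {
	uint8_t  ctx_doi;
	uint8_t  ctx_alg;
	uint16_t ctx_len;
	char     ctx_str[64];
};

static struct user_sec_ctx *clone_security(const struct sec_ctx *security)
{
	size_t size = sizeof(struct user_sec_ctx) + security->ctx_len;
	struct user_sec_ctx *uctx = malloc(size);

	if (!uctx)
		return NULL;
	uctx->exttype = 0;                 /* XFRMA_SEC_CTX in the kernel */
	uctx->len     = (uint16_t)size;
	uctx->ctx_doi = security->ctx_doi;
	uctx->ctx_alg = security->ctx_alg;
	uctx->ctx_len = security->ctx_len;
	memcpy(uctx + 1, security->ctx_str, security->ctx_len);  /* payload follows header */
	return uctx;
}

int main(void)
{
	struct sec_ctx s = { .ctx_doi = 1, .ctx_alg = 1,
			     .ctx_len = 11, .ctx_str = "user_u:r:t0" };

	free(clone_security(&s));
	return 0;
}
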
/linux-6.1.9/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_verbs.c
313 int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) in pvrdma_alloc_ucontext() argument
315 struct ib_device *ibdev = uctx->device; in pvrdma_alloc_ucontext()
317 struct pvrdma_ucontext *context = to_vucontext(uctx); in pvrdma_alloc_ucontext()
