Searched refs:rdma (Results 1 – 25 of 100) sorted by relevance

/linux-5.19.10/net/9p/
trans_rdma.c
153 struct p9_trans_rdma *rdma = clnt->trans; in p9_rdma_show_options() local
155 if (rdma->port != P9_PORT) in p9_rdma_show_options()
156 seq_printf(m, ",port=%u", rdma->port); in p9_rdma_show_options()
157 if (rdma->sq_depth != P9_RDMA_SQ_DEPTH) in p9_rdma_show_options()
158 seq_printf(m, ",sq=%u", rdma->sq_depth); in p9_rdma_show_options()
159 if (rdma->rq_depth != P9_RDMA_RQ_DEPTH) in p9_rdma_show_options()
160 seq_printf(m, ",rq=%u", rdma->rq_depth); in p9_rdma_show_options()
161 if (rdma->timeout != P9_RDMA_TIMEOUT) in p9_rdma_show_options()
162 seq_printf(m, ",timeout=%lu", rdma->timeout); in p9_rdma_show_options()
163 if (rdma->privport) in p9_rdma_show_options()
[all …]
/linux-5.19.10/drivers/gpu/drm/mediatek/
mtk_disp_rdma.c
50 #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) argument
91 struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); in rdma_update_bits() local
92 unsigned int tmp = readl(rdma->regs + reg); in rdma_update_bits()
95 writel(tmp, rdma->regs + reg); in rdma_update_bits()
102 struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); in mtk_rdma_register_vblank_cb() local
104 rdma->vblank_cb = vblank_cb; in mtk_rdma_register_vblank_cb()
105 rdma->vblank_cb_data = vblank_cb_data; in mtk_rdma_register_vblank_cb()
110 struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); in mtk_rdma_unregister_vblank_cb() local
112 rdma->vblank_cb = NULL; in mtk_rdma_unregister_vblank_cb()
113 rdma->vblank_cb_data = NULL; in mtk_rdma_unregister_vblank_cb()
[all …]
/linux-5.19.10/net/sunrpc/xprtrdma/
svc_rdma_sendto.c
116 static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma, in svc_rdma_send_cid_init() argument
119 cid->ci_queue_id = rdma->sc_sq_cq->res.id; in svc_rdma_send_cid_init()
120 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); in svc_rdma_send_cid_init()
124 svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma) in svc_rdma_send_ctxt_alloc() argument
133 size += rdma->sc_max_send_sges * sizeof(struct ib_sge); in svc_rdma_send_ctxt_alloc()
137 buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL); in svc_rdma_send_ctxt_alloc()
140 addr = ib_dma_map_single(rdma->sc_pd->device, buffer, in svc_rdma_send_ctxt_alloc()
141 rdma->sc_max_req_size, DMA_TO_DEVICE); in svc_rdma_send_ctxt_alloc()
142 if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) in svc_rdma_send_ctxt_alloc()
145 svc_rdma_send_cid_init(rdma, &ctxt->sc_cid); in svc_rdma_send_ctxt_alloc()
[all …]
svc_rdma_recvfrom.c
118 static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma, in svc_rdma_recv_cid_init() argument
121 cid->ci_queue_id = rdma->sc_rq_cq->res.id; in svc_rdma_recv_cid_init()
122 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); in svc_rdma_recv_cid_init()
126 svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma) in svc_rdma_recv_ctxt_alloc() argument
135 buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL); in svc_rdma_recv_ctxt_alloc()
138 addr = ib_dma_map_single(rdma->sc_pd->device, buffer, in svc_rdma_recv_ctxt_alloc()
139 rdma->sc_max_req_size, DMA_FROM_DEVICE); in svc_rdma_recv_ctxt_alloc()
140 if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) in svc_rdma_recv_ctxt_alloc()
143 svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid); in svc_rdma_recv_ctxt_alloc()
155 ctxt->rc_recv_sge.length = rdma->sc_max_req_size; in svc_rdma_recv_ctxt_alloc()
[all …]
svc_rdma_rw.c
54 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges) in svc_rdma_get_rw_ctxt() argument
59 spin_lock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
60 node = llist_del_first(&rdma->sc_rw_ctxts); in svc_rdma_get_rw_ctxt()
61 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
83 trace_svcrdma_no_rwctx_err(rdma, sges); in svc_rdma_get_rw_ctxt()
87 static void __svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, in __svc_rdma_put_rw_ctxt() argument
95 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_put_rw_ctxt() argument
98 __svc_rdma_put_rw_ctxt(rdma, ctxt, &rdma->sc_rw_ctxts); in svc_rdma_put_rw_ctxt()
106 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) in svc_rdma_destroy_rw_ctxts() argument
111 while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) { in svc_rdma_destroy_rw_ctxts()
[all …]
svc_rdma_transport.c
274 struct svcxprt_rdma *rdma = cma_id->context; in svc_rdma_cma_handler() local
275 struct svc_xprt *xprt = &rdma->sc_xprt; in svc_rdma_cma_handler()
279 clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags); in svc_rdma_cma_handler()
538 struct svcxprt_rdma *rdma = in svc_rdma_detach() local
541 rdma_disconnect(rdma->sc_cm_id); in svc_rdma_detach()
546 struct svcxprt_rdma *rdma = in __svc_rdma_free() local
550 if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) in __svc_rdma_free()
551 ib_drain_qp(rdma->sc_qp); in __svc_rdma_free()
553 svc_rdma_flush_recv_queues(rdma); in __svc_rdma_free()
555 svc_rdma_destroy_rw_ctxts(rdma); in __svc_rdma_free()
[all …]
svc_rdma_backchannel.c
75 static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, in svc_rdma_bc_sendto() argument
82 rctxt = svc_rdma_recv_ctxt_get(rdma); in svc_rdma_bc_sendto()
86 ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqst->rq_snd_buf); in svc_rdma_bc_sendto()
87 svc_rdma_recv_ctxt_put(rdma, rctxt); in svc_rdma_bc_sendto()
96 ret = svc_rdma_send(rdma, sctxt); in svc_rdma_bc_sendto()
101 svc_rdma_send_ctxt_put(rdma, sctxt); in svc_rdma_bc_sendto()
145 rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) in rpcrdma_bc_send_request() argument
153 ctxt = svc_rdma_send_ctxt_get(rdma); in rpcrdma_bc_send_request()
169 rc = svc_rdma_bc_sendto(rdma, rqst, ctxt); in rpcrdma_bc_send_request()
175 svc_rdma_send_ctxt_put(rdma, ctxt); in rpcrdma_bc_send_request()
[all …]
/linux-5.19.10/drivers/gpu/drm/meson/
meson_rdma.c
26 if (!priv->rdma.addr) { in meson_rdma_init()
28 priv->rdma.addr = in meson_rdma_init()
30 &priv->rdma.addr_dma, in meson_rdma_init()
32 if (!priv->rdma.addr) in meson_rdma_init()
36 priv->rdma.offset = 0; in meson_rdma_init()
50 if (!priv->rdma.addr && !priv->rdma.addr_dma) in meson_rdma_free()
56 priv->rdma.addr, priv->rdma.addr_dma); in meson_rdma_free()
58 priv->rdma.addr = NULL; in meson_rdma_free()
59 priv->rdma.addr_dma = (dma_addr_t)0; in meson_rdma_free()
88 priv->rdma.offset = 0; in meson_rdma_reset()
[all …]
/linux-5.19.10/include/linux/sunrpc/
svc_rdma.h
172 extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
173 extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
175 svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma);
176 extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
178 extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
183 extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
184 extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
187 extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
190 extern int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
195 extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma);
[all …]
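
The svc_rdma.h declarations above come in *_get()/*_put() pairs. As orientation only, here is a minimal sketch (not code from the tree) of how a send context is borrowed and returned, modeled on the rpcrdma_bc_send_request() flow shown in svc_rdma_backchannel.c above; example_send() is a hypothetical caller, and error handling is abbreviated:

    #include <linux/sunrpc/svc_rdma.h>

    static int example_send(struct svcxprt_rdma *rdma)
    {
    	struct svc_rdma_send_ctxt *sctxt;
    	int rc;

    	sctxt = svc_rdma_send_ctxt_get(rdma);	/* borrow a send context */
    	if (!sctxt)
    		return -ENOMEM;

    	rc = svc_rdma_send(rdma, sctxt);	/* post the Send WR */
    	if (rc)
    		/* on failure the caller still owns the context */
    		svc_rdma_send_ctxt_put(rdma, sctxt);
    	/* on success the Send completion path releases it (assumption) */
    	return rc;
    }
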
/linux-5.19.10/Documentation/devicetree/bindings/display/mediatek/
mediatek,rdma.yaml
4 $id: http://devicetree.org/schemas/display/mediatek/mediatek,rdma.yaml#
27 - const: mediatek,mt2701-disp-rdma
29 - const: mediatek,mt8173-disp-rdma
31 - const: mediatek,mt8183-disp-rdma
33 - const: mediatek,mt8195-disp-rdma
36 - mediatek,mt7623-disp-rdma
37 - mediatek,mt2712-disp-rdma
38 - const: mediatek,mt2701-disp-rdma
41 - mediatek,mt8186-disp-rdma
42 - mediatek,mt8192-disp-rdma
[all …]
/linux-5.19.10/Documentation/admin-guide/cgroup-v1/
rdma.rst
28 Currently user space applications can easily take away all the rdma verb
31 rdma resources. This can lead to service unavailability.
34 of processes can be limited. Through this controller different rdma
43 by rdma cgroup, which can be extended later if required.
52 of the child processes which shares the address space, rdma resources are
56 rdma resources. Linking resources around css also ensures that cgroups can be
60 Whenever RDMA resource charging occurs, owner rdma cgroup is returned to
61 the caller. Same rdma cgroup should be passed while uncharging the resource.
81 IB stack honors limits enforced by the rdma controller. When application
86 Following resources can be accounted by rdma controller.
[all …]
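
The rdma.rst excerpt above describes the charge/uncharge contract: the charging call hands back the owning rdma cgroup, and that same cgroup must be supplied when the resource is released. A minimal sketch of that pairing, using the rdmacg_try_charge()/rdmacg_uncharge() API from include/linux/cgroup_rdma.h (example_create_object() and the immediate uncharge are illustrative, not kernel code):

    #include <linux/cgroup_rdma.h>

    static int example_create_object(struct rdmacg_device *dev)
    {
    	struct rdma_cgroup *cg;
    	int ret;

    	/* charge one HCA object against the caller's cgroup limit;
    	 * on success, cg points at the owning rdma cgroup */
    	ret = rdmacg_try_charge(&cg, dev, RDMACG_RESOURCE_HCA_OBJECT);
    	if (ret)
    		return ret;	/* over the limit configured via rdma.max */

    	/* ... create the verb object here; if that fails, undo the
    	 * charge by passing back the same owner cgroup ... */
    	rdmacg_uncharge(cg, dev, RDMACG_RESOURCE_HCA_OBJECT);
    	return 0;
    }
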
/linux-5.19.10/Documentation/ABI/stable/
sysfs-driver-ib_srp
4 Contact: linux-rdma@vger.kernel.org
77 Contact: linux-rdma@vger.kernel.org
83 Contact: linux-rdma@vger.kernel.org
89 Contact: linux-rdma@vger.kernel.org
97 Contact: linux-rdma@vger.kernel.org
104 Contact: linux-rdma@vger.kernel.org
111 Contact: linux-rdma@vger.kernel.org
117 Contact: linux-rdma@vger.kernel.org
124 Contact: linux-rdma@vger.kernel.org
131 Contact: linux-rdma@vger.kernel.org
[all …]
sysfs-class-infiniband
9 Contact: linux-rdma@vger.kernel.org
24 Contact: linux-rdma@vger.kernel.org
34 Contact: linux-rdma@vger.kernel.org
49 Contact: linux-rdma@vger.kernel.org
80 Contact: linux-rdma@vger.kernel.org
108 Contact: linux-rdma@vger.kernel.org
203 Contact: linux-rdma@vger.kernel.org
222 Contact: linux-rdma@vger.kernel.org
229 Contact: linux-rdma@vger.kernel.org
241 Contact: linux-rdma@vger.kernel.org
[all …]
sysfs-transport-srp
4 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
11 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
20 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
36 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
50 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
/linux-5.19.10/include/trace/events/
rpcrdma.h
1487 const struct svcxprt_rdma *rdma,
1491 TP_ARGS(rdma, status),
1495 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1500 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1511 const struct svcxprt_rdma *rdma, \
1514 TP_ARGS(rdma, status))
1786 const struct svcxprt_rdma *rdma,
1791 TP_ARGS(rdma, dma_addr, length),
1796 __string(device, rdma->sc_cm_id->device->name)
1797 __string(addr, rdma->sc_xprt.xpt_remotebuf)
[all …]
/linux-5.19.10/drivers/net/ethernet/seeq/
sgiseeq.c
69 volatile struct hpc_dma_desc rdma; member
213 sp->rx_desc[i].rdma.pbuf = dma_addr; in seeq_init_ring()
215 sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT; in seeq_init_ring()
218 sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR; in seeq_init_ring()
263 i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, in sgiseeq_dump_rings()
264 r[i].rdma.pnext); in sgiseeq_dump_rings()
267 i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, in sgiseeq_dump_rings()
268 r[i].rdma.pnext); in sgiseeq_dump_rings()
355 while (!(rd->rdma.cntinfo & HPCDMA_OWN)) { in sgiseeq_rx()
356 len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3; in sgiseeq_rx()
[all …]
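
The sgiseeq_rx() loop above spins while HPCDMA_OWN is clear, i.e. while the CPU rather than the HPC DMA engine owns the next receive descriptor. A generic sketch of that ownership-bit ring convention follows; struct ex_desc, EX_OWN and ex_consume() are made-up names for illustration, not the driver's own:

    #define EX_OWN 0x80000000u	/* set while the DMA engine owns the slot */

    struct ex_desc {
    	unsigned int cntinfo;	/* ownership bit plus byte count */
    	void *buf;		/* packet buffer filled by the engine */
    };

    static void ex_consume(void *buf) { /* hand the filled buffer up */ }

    static void ex_rx_poll(struct ex_desc *ring, unsigned int n,
    		       unsigned int *head)
    {
    	/* stop at the first descriptor the hardware still owns */
    	while (!(ring[*head].cntinfo & EX_OWN)) {
    		ex_consume(ring[*head].buf);
    		ring[*head].cntinfo |= EX_OWN;	/* give the slot back */
    		*head = (*head + 1) % n;
    	}
    }
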
/linux-5.19.10/Documentation/devicetree/bindings/media/
mediatek-mdp.txt
12 "mediatek,mt8173-mdp-rdma" - read DMA
24 "mediatek,mt8173-mdp-rdma"
32 mdp_rdma0: rdma@14001000 {
33 compatible = "mediatek,mt8173-mdp-rdma";
43 mdp_rdma1: rdma@14002000 {
44 compatible = "mediatek,mt8173-mdp-rdma";
/linux-5.19.10/drivers/infiniband/ulp/rtrs/
rtrs-clt-stats.c
38 s->rdma.failover_cnt++; in rtrs_clt_inc_failover_cnt()
94 r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma; in rtrs_clt_stats_rdma_to_str()
124 memset(&s->rdma, 0, sizeof(s->rdma)); in rtrs_clt_reset_rdma_stats()
175 s->rdma.dir[d].cnt++; in rtrs_clt_update_rdma_stats()
176 s->rdma.dir[d].size_total += size; in rtrs_clt_update_rdma_stats()
/linux-5.19.10/drivers/nvme/target/
Makefile
7 obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
17 nvmet-rdma-y += rdma.o
/linux-5.19.10/drivers/net/ethernet/chelsio/cxgb3/
cxgb3_offload.c
281 struct rdma_info *rdma = data; in cxgb_rdma_ctl() local
284 rdma->udbell_physbase = pci_resource_start(pdev, 2); in cxgb_rdma_ctl()
285 rdma->udbell_len = pci_resource_len(pdev, 2); in cxgb_rdma_ctl()
286 rdma->tpt_base = in cxgb_rdma_ctl()
288 rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); in cxgb_rdma_ctl()
289 rdma->pbl_base = in cxgb_rdma_ctl()
291 rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); in cxgb_rdma_ctl()
292 rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); in cxgb_rdma_ctl()
293 rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); in cxgb_rdma_ctl()
294 rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL; in cxgb_rdma_ctl()
[all …]
/linux-5.19.10/drivers/nvme/host/
Makefile
8 obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
24 nvme-rdma-y += rdma.o
/linux-5.19.10/drivers/infiniband/
Kconfig
25 need libibumad from rdma-core
26 <https://github.com/linux-rdma/rdma-core>.
38 rdma-core <https://github.com/linux-rdma/rdma-core>.
/linux-5.19.10/drivers/macintosh/
rack-meter.c
146 struct rackmeter_dma *rdma = rm->dma_buf_v; in rackmeter_do_pause() local
155 memset(rdma->buf1, 0, sizeof(rdma->buf1)); in rackmeter_do_pause()
156 memset(rdma->buf2, 0, sizeof(rdma->buf2)); in rackmeter_do_pause()
371 struct resource ri2s, rdma; in rackmeter_probe() local
431 of_address_to_resource(i2s, 1, &rdma)) { in rackmeter_probe()
441 pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start); in rackmeter_probe()
475 rm->dma_regs = ioremap(rdma.start, 0x100); in rackmeter_probe()
/linux-5.19.10/Documentation/devicetree/bindings/power/
amlogic,meson-ee-pwrc.yaml
80 - const: rdma
99 - const: rdma
142 - const: rdma
180 "rdma", "venci", "vencp", "vdac",
/linux-5.19.10/drivers/infiniband/hw/mlx5/
odp.c
77 } rdma; member
1296 u32 rkey = pfault->rdma.r_key; in mlx5_ib_mr_rdma_pfault_handler()
1305 pfault->rdma.rdma_va += pfault->bytes_committed; in mlx5_ib_mr_rdma_pfault_handler()
1306 pfault->rdma.rdma_op_len -= min(pfault->bytes_committed, in mlx5_ib_mr_rdma_pfault_handler()
1307 pfault->rdma.rdma_op_len); in mlx5_ib_mr_rdma_pfault_handler()
1310 address = pfault->rdma.rdma_va; in mlx5_ib_mr_rdma_pfault_handler()
1311 length = pfault->rdma.rdma_op_len; in mlx5_ib_mr_rdma_pfault_handler()
1318 length = pfault->rdma.packet_size; in mlx5_ib_mr_rdma_pfault_handler()
1413 be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; in mlx5_ib_eq_pf_process()
1415 be32_to_cpu(pf_eqe->rdma.pftype_token) & in mlx5_ib_eq_pf_process()
[all …]
