/linux-2.6.39/drivers/infiniband/hw/mthca/ |
D | mthca_srq.c |
   72  static void *get_wqe(struct mthca_srq *srq, int n)  in get_wqe() argument
   74      if (srq->is_direct)  in get_wqe()
   75          return srq->queue.direct.buf + (n << srq->wqe_shift);  in get_wqe()
   77      return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +  in get_wqe()
   78          ((n << srq->wqe_shift) & (PAGE_SIZE - 1));  in get_wqe()
   97      struct mthca_srq *srq,  in mthca_tavor_init_srq_context() argument
  102      context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));  in mthca_tavor_init_srq_context()
  104      context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);  in mthca_tavor_init_srq_context()
  115      struct mthca_srq *srq,  in mthca_arbel_init_srq_context() argument
  126      max = srq->max;  in mthca_arbel_init_srq_context()
  [all …]
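The get_wqe() hits above show the two SRQ buffer layouts: a direct buffer locates WQE n at byte offset n << wqe_shift, while an indirect buffer splits that same offset into a page-list index and an in-page offset. A compilable user-space sketch of the arithmetic (4 KiB pages and a 64-byte WQE stride are assumptions, and these are not the kernel structures):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12                     /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Direct layout: one contiguous buffer, WQE n at (n << wqe_shift).
 * Indirect layout: a page list, so the same byte offset is split into
 * a page index and an offset within that page. */
static void locate(unsigned n, unsigned wqe_shift)
{
    size_t off = (size_t)n << wqe_shift;

    printf("wqe %3u: direct offset 0x%zx, page %zu + 0x%zx\n",
           n, off, off >> PAGE_SHIFT, off & (PAGE_SIZE - 1));
}

int main(void)
{
    locate(10, 6);    /* 64-byte stride: offset 0x280, page 0 + 0x280 */
    locate(100, 6);   /* offset 0x1900, page 1 + 0x900 */
    return 0;
}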
|
D | mthca_provider.c |
  474      struct mthca_srq *srq;  in mthca_create_srq() local
  477      srq = kmalloc(sizeof *srq, GFP_KERNEL);  in mthca_create_srq()
  478      if (!srq)  in mthca_create_srq()
  496          srq->mr.ibmr.lkey = ucmd.lkey;  in mthca_create_srq()
  497          srq->db_index = ucmd.db_index;  in mthca_create_srq()
  501              &init_attr->attr, srq);  in mthca_create_srq()
  510      if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {  in mthca_create_srq()
  511          mthca_free_srq(to_mdev(pd->device), srq);  in mthca_create_srq()
  516      return &srq->ibsrq;  in mthca_create_srq()
  519      kfree(srq);  in mthca_create_srq()
  [all …]
|
D | mthca_dev.h |
  249      struct mthca_array srq;  member
  507                     struct mthca_srq *srq);
  513                     struct ib_srq_attr *attr, struct mthca_srq *srq);
  514  void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
  517  int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
  521  void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
  522  int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
  524  int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
|
D | mthca_cq.c |
  279                     struct mthca_srq *srq)  in mthca_cq_clean() argument
  311          if (srq && is_recv_cqe(cqe))  in mthca_cq_clean()
  312              mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));  in mthca_cq_clean()
  544      } else if ((*cur_qp)->ibqp.srq) {  in mthca_poll_one()
  545          struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);  in mthca_poll_one() local
  548          wqe_index = wqe >> srq->wqe_shift;  in mthca_poll_one()
  549          entry->wr_id = srq->wrid[wqe_index];  in mthca_poll_one()
  550          mthca_free_srq_wqe(srq, wqe);  in mthca_poll_one()
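mthca_poll_one() above does the inverse of the get_wqe() mapping: the CQE reports the consumed receive WQE by its byte offset inside the SRQ buffer, so a right shift by wqe_shift recovers the slot index used for the wrid[] lookup and for returning the slot to the SRQ. A minimal model (the offset and shift values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const unsigned wqe_shift = 6;     /* 64-byte WQEs (assumed) */
    uint32_t wqe = 0x1900;            /* byte offset as read from the CQE */
    unsigned wqe_index = wqe >> wqe_shift;

    printf("completed SRQ slot %u\n", wqe_index);   /* prints 100 */
    return 0;
}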
|
/linux-2.6.39/drivers/infiniband/hw/mlx4/ |
D | srq.c |
   41  static void *get_wqe(struct mlx4_ib_srq *srq, int n)  in get_wqe() argument
   43      return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);  in get_wqe()
   46  static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)  in mlx4_ib_srq_event() argument
   49      struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;  in mlx4_ib_srq_event()
   53          event.element.srq = ibsrq;  in mlx4_ib_srq_event()
   63              "on SRQ %06x\n", type, srq->srqn);  in mlx4_ib_srq_event()
   76      struct mlx4_ib_srq *srq;  in mlx4_ib_create_srq() local
   89      srq = kmalloc(sizeof *srq, GFP_KERNEL);  in mlx4_ib_create_srq()
   90      if (!srq)  in mlx4_ib_create_srq()
   93      mutex_init(&srq->mutex);  in mlx4_ib_create_srq()
  [all …]
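to_mibsrq() in the mlx4_ib_srq_event() hit is the kernel's container_of() pattern: the core-layer mlx4_srq is embedded inside the driver's mlx4_ib_srq, so the wrapper is recovered by subtracting the member offset. A user-space rendition (struct fields reduced to the bare minimum; not the real definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct mlx4_srq    { int srqn; };                 /* core-layer handle */
struct mlx4_ib_srq { int placeholder; struct mlx4_srq msrq; };

static struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
    return container_of(msrq, struct mlx4_ib_srq, msrq);
}

int main(void)
{
    struct mlx4_ib_srq s = { .msrq = { .srqn = 42 } };

    printf("recovered wrapper %p (s at %p)\n",
           (void *)to_mibsrq(&s.msrq), (void *)&s);
    return 0;
}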
|
D | mlx4_ib.h |
  287  void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
  288  void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
  299  int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
  300  int mlx4_ib_destroy_srq(struct ib_srq *srq);
  301  void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
|
D | cq.c |
  554      struct mlx4_ib_srq *srq;  in mlx4_ib_poll_one() local
  627      } else if ((*cur_qp)->ibqp.srq) {  in mlx4_ib_poll_one()
  628          srq = to_msrq((*cur_qp)->ibqp.srq);  in mlx4_ib_poll_one()
  630          wc->wr_id = srq->wrid[wqe_ctr];  in mlx4_ib_poll_one()
  631          mlx4_ib_free_srq_wqe(srq, wqe_ctr);  in mlx4_ib_poll_one()
  768  void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)  in __mlx4_ib_cq_clean() argument
  793          if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))  in __mlx4_ib_cq_clean()
  794              mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));  in __mlx4_ib_cq_clean()
  816  void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)  in mlx4_ib_cq_clean() argument
  819      __mlx4_ib_cq_clean(cq, qpn, srq);  in mlx4_ib_cq_clean()
|
D | Makefile |
    3  mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o
|
/linux-2.6.39/drivers/infiniband/hw/ipath/ |
D | ipath_srq.c |
   51      struct ipath_srq *srq = to_isrq(ibsrq);  in ipath_post_srq_receive() local
   61          if ((unsigned) wr->num_sge > srq->rq.max_sge) {  in ipath_post_srq_receive()
   67          spin_lock_irqsave(&srq->rq.lock, flags);  in ipath_post_srq_receive()
   68          wq = srq->rq.wq;  in ipath_post_srq_receive()
   70          if (next >= srq->rq.size)  in ipath_post_srq_receive()
   73              spin_unlock_irqrestore(&srq->rq.lock, flags);  in ipath_post_srq_receive()
   79          wqe = get_rwqe_ptr(&srq->rq, wq->head);  in ipath_post_srq_receive()
   87          spin_unlock_irqrestore(&srq->rq.lock, flags);  in ipath_post_srq_receive()
  106      struct ipath_srq *srq;  in ipath_create_srq() local
  121      srq = kmalloc(sizeof(*srq), GFP_KERNEL);  in ipath_create_srq()
  [all …]
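ipath_post_srq_receive() above implements a classic one-slot-empty ring: next = head + 1 wraps at the ring size, and next == tail means the queue is full. A standalone sketch of that check (field names follow the listing; everything else is illustrative):

#include <stdio.h>

struct rwq {
    unsigned head;   /* producer: next slot to fill */
    unsigned tail;   /* consumer: next slot to drain */
    unsigned size;   /* ring size; one slot always stays empty */
};

/* 0 on success, -1 when the ring is full (head must not catch tail). */
static int post_recv(struct rwq *wq)
{
    unsigned next = wq->head + 1;

    if (next >= wq->size)
        next = 0;                 /* wrap */
    if (next == wq->tail)
        return -1;                /* full */
    /* ... the work request would be copied into slot wq->head here ... */
    wq->head = next;
    return 0;
}

int main(void)
{
    struct rwq wq = { .head = 0, .tail = 0, .size = 4 };
    int posted = 0;

    while (post_recv(&wq) == 0)
        posted++;
    printf("posted %d of %u slots\n", posted, wq.size);  /* 3 of 4 */
    return 0;
}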
|
D | ipath_ud.c |
   57      struct ipath_srq *srq;  in ipath_ud_loopback() local
  107      if (qp->ibqp.srq) {  in ipath_ud_loopback()
  108          srq = to_isrq(qp->ibqp.srq);  in ipath_ud_loopback()
  109          handler = srq->ibsrq.event_handler;  in ipath_ud_loopback()
  110          rq = &srq->rq;  in ipath_ud_loopback()
  112          srq = NULL;  in ipath_ud_loopback()
  164          if (n < srq->limit) {  in ipath_ud_loopback()
  167              srq->limit = 0;  in ipath_ud_loopback()
  170              ev.element.srq = qp->ibqp.srq;  in ipath_ud_loopback()
  172              handler(&ev, srq->ibsrq.srq_context);  in ipath_ud_loopback()
|
D | ipath_ruc.c |
  171      struct ipath_srq *srq;  in ipath_get_rwqe() local
  177      if (qp->ibqp.srq) {  in ipath_get_rwqe()
  178          srq = to_isrq(qp->ibqp.srq);  in ipath_get_rwqe()
  179          handler = srq->ibsrq.event_handler;  in ipath_get_rwqe()
  180          rq = &srq->rq;  in ipath_get_rwqe()
  182          srq = NULL;  in ipath_get_rwqe()
  231          if (n < srq->limit) {  in ipath_get_rwqe()
  234              srq->limit = 0;  in ipath_get_rwqe()
  237              ev.element.srq = qp->ibqp.srq;  in ipath_get_rwqe()
  239              handler(&ev, srq->ibsrq.srq_context);  in ipath_get_rwqe()
|
D | ipath_qp.c |
  636      attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;  in ipath_query_qp()
  660      init_attr->srq = qp->ibqp.srq;  in ipath_query_qp()
  681      if (qp->ibqp.srq) {  in ipath_compute_aeth()
  765      if (!init_attr->srq) {  in ipath_create_qp()
  796      if (init_attr->srq) {  in ipath_create_qp()
  797          struct ipath_srq *srq = to_isrq(init_attr->srq);  in ipath_create_qp() local
  799          if (srq->rq.max_sge > 1)  in ipath_create_qp()
  801              (srq->rq.max_sge - 1);  in ipath_create_qp()
  820      if (init_attr->srq) {  in ipath_create_qp()
|
/linux-2.6.39/drivers/infiniband/hw/qib/ |
D | qib_srq.c |
   51      struct qib_srq *srq = to_isrq(ibsrq);  in qib_post_srq_receive() local
   61          if ((unsigned) wr->num_sge > srq->rq.max_sge) {  in qib_post_srq_receive()
   67          spin_lock_irqsave(&srq->rq.lock, flags);  in qib_post_srq_receive()
   68          wq = srq->rq.wq;  in qib_post_srq_receive()
   70          if (next >= srq->rq.size)  in qib_post_srq_receive()
   73              spin_unlock_irqrestore(&srq->rq.lock, flags);  in qib_post_srq_receive()
   79          wqe = get_rwqe_ptr(&srq->rq, wq->head);  in qib_post_srq_receive()
   87          spin_unlock_irqrestore(&srq->rq.lock, flags);  in qib_post_srq_receive()
  106      struct qib_srq *srq;  in qib_create_srq() local
  118      srq = kmalloc(sizeof(*srq), GFP_KERNEL);  in qib_create_srq()
  [all …]
|
D | qib_ruc.c |
   90      pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);  in qib_init_sge()
  143      struct qib_srq *srq;  in qib_get_rwqe() local
  149      if (qp->ibqp.srq) {  in qib_get_rwqe()
  150          srq = to_isrq(qp->ibqp.srq);  in qib_get_rwqe()
  151          handler = srq->ibsrq.event_handler;  in qib_get_rwqe()
  152          rq = &srq->rq;  in qib_get_rwqe()
  154          srq = NULL;  in qib_get_rwqe()
  207          if (n < srq->limit) {  in qib_get_rwqe()
  210              srq->limit = 0;  in qib_get_rwqe()
  213              ev.element.srq = qp->ibqp.srq;  in qib_get_rwqe()
  [all …]
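The tail of qib_get_rwqe() (and its ipath twin above) shows that the SRQ limit event is one-shot: when the count of remaining posted entries (n) drops below srq->limit, the limit is zeroed before the handler runs, so the consumer must rearm it via modify_srq. A small model of that logic (names are illustrative, not the kernel's):

#include <stdio.h>

struct srq_model {
    unsigned limit;                      /* 0 means disarmed */
    void (*event_handler)(const char *why);
};

static void check_limit(struct srq_model *srq, unsigned entries_left)
{
    if (srq->limit && entries_left < srq->limit) {
        srq->limit = 0;                  /* disarm before notifying */
        srq->event_handler("IB_EVENT_SRQ_LIMIT_REACHED");
    }
}

static void report(const char *why) { printf("event: %s\n", why); }

int main(void)
{
    struct srq_model srq = { .limit = 4, .event_handler = report };

    check_limit(&srq, 3);                /* fires once */
    check_limit(&srq, 2);                /* silent until rearmed */
    return 0;
}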
|
D | qib_qp.c |
  841      attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;  in qib_query_qp()
  865      init_attr->srq = qp->ibqp.srq;  in qib_query_qp()
  886      if (qp->ibqp.srq) {  in qib_compute_aeth()
  966      if (!init_attr->srq) {  in qib_create_qp()
 1002      if (init_attr->srq) {  in qib_create_qp()
 1003          struct qib_srq *srq = to_isrq(init_attr->srq);  in qib_create_qp() local
 1005          if (srq->rq.max_sge > 1)  in qib_create_qp()
 1007              (srq->rq.max_sge - 1);  in qib_create_qp()
 1016      if (init_attr->srq)  in qib_create_qp()
|
/linux-2.6.39/drivers/net/mlx4/ |
D | srq.c |
   63      struct mlx4_srq *srq;  in mlx4_srq_event() local
   67      srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));  in mlx4_srq_event()
   68      if (srq)  in mlx4_srq_event()
   69          atomic_inc(&srq->refcount);  in mlx4_srq_event()
   73      if (!srq) {  in mlx4_srq_event()
   78      srq->event(srq, event_type);  in mlx4_srq_event()
   80      if (atomic_dec_and_test(&srq->refcount))  in mlx4_srq_event()
   81          complete(&srq->free);  in mlx4_srq_event()
  113              u64 db_rec, struct mlx4_srq *srq)  in mlx4_srq_alloc() argument
  121      srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);  in mlx4_srq_alloc()
  [all …]
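mlx4_srq_event() pairs the radix-tree lookup with a reference count so that teardown can wait, via complete(&srq->free), until no event handler still holds the SRQ. A user-space model of that quiesce protocol, with C11 atomics and a condition variable standing in for the kernel completion (the struct and helpers are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct srq {
    atomic_int      refcount;
    pthread_mutex_t lock;
    pthread_cond_t  free;     /* plays the role of complete(&srq->free) */
    int             freed;
};

static void srq_get(struct srq *s)
{
    atomic_fetch_add(&s->refcount, 1);   /* event path found it in the tree */
}

static void srq_put(struct srq *s)
{
    if (atomic_fetch_sub(&s->refcount, 1) == 1) {   /* dec-and-test */
        pthread_mutex_lock(&s->lock);
        s->freed = 1;
        pthread_cond_signal(&s->free);
        pthread_mutex_unlock(&s->lock);
    }
}

int main(void)
{
    struct srq s = { .refcount = 1,
                     .lock = PTHREAD_MUTEX_INITIALIZER,
                     .free = PTHREAD_COND_INITIALIZER };

    srq_get(&s);        /* event handler takes a reference */
    srq_put(&s);        /* event dispatched, reference dropped */
    srq_put(&s);        /* owner drops the initial reference */

    pthread_mutex_lock(&s.lock);
    while (!s.freed)    /* wait_for_completion(&srq->free) analog */
        pthread_cond_wait(&s.free, &s.lock);
    pthread_mutex_unlock(&s.lock);
    puts("srq quiesced; safe to free");
    return 0;
}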
|
D | Makefile |
    4      mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
|
/linux-2.6.39/drivers/infiniband/core/ |
D | verbs.c |
  240      struct ib_srq *srq;  in ib_create_srq() local
  245      srq = pd->device->create_srq(pd, srq_init_attr, NULL);  in ib_create_srq()
  247      if (!IS_ERR(srq)) {  in ib_create_srq()
  248          srq->device = pd->device;  in ib_create_srq()
  249          srq->pd = pd;  in ib_create_srq()
  250          srq->uobject = NULL;  in ib_create_srq()
  251          srq->event_handler = srq_init_attr->event_handler;  in ib_create_srq()
  252          srq->srq_context = srq_init_attr->srq_context;  in ib_create_srq()
  254          atomic_set(&srq->usecnt, 0);  in ib_create_srq()
  257      return srq;  in ib_create_srq()
  [all …]
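ib_create_srq() above is the kernel-side constructor; user space reaches the same verb through libibverbs. A minimal sketch against that library (first device assumed, most error handling trimmed; link with -libverbs):

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
    int num;
    struct ibv_device **list = ibv_get_device_list(&num);

    if (!list || num == 0) {
        fprintf(stderr, "no RDMA devices\n");
        return 1;
    }

    struct ibv_context *ctx = ibv_open_device(list[0]);
    struct ibv_pd *pd = ibv_alloc_pd(ctx);

    struct ibv_srq_init_attr init = {
        .attr = { .max_wr = 128, .max_sge = 1, .srq_limit = 0 },
    };
    struct ibv_srq *srq = ibv_create_srq(pd, &init);

    if (!srq)
        perror("ibv_create_srq");        /* e.g. device lacks SRQ support */
    else
        ibv_destroy_srq(srq);

    ibv_dealloc_pd(pd);
    ibv_close_device(ctx);
    ibv_free_device_list(list);
    return 0;
}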
|
D | uverbs_cmd.c |
  253  static void put_srq_read(struct ib_srq *srq)  in put_srq_read() argument
  255      put_uobj_read(srq->uobject);  in put_srq_read()
 1057      struct ib_srq *srq;  in ib_uverbs_create_qp() local
 1079      srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;  in ib_uverbs_create_qp()
 1085      if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {  in ib_uverbs_create_qp()
 1094      attr.srq = srq;  in ib_uverbs_create_qp()
 1119      qp->srq = attr.srq;  in ib_uverbs_create_qp()
 1127      if (attr.srq)  in ib_uverbs_create_qp()
 1128          atomic_inc(&attr.srq->usecnt);  in ib_uverbs_create_qp()
 1154      if (srq)  in ib_uverbs_create_qp()
  [all …]
|
/linux-2.6.39/drivers/net/ |
D | eql.c |
  263  static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
  264  static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
  410      slaving_request_t srq;  in eql_enslave() local
  412      if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))  in eql_enslave()
  415      slave_dev = dev_get_by_name(&init_net, srq.slave_name);  in eql_enslave()
  432          s->priority = srq.priority;  in eql_enslave()
  433          s->priority_bps = srq.priority;  in eql_enslave()
  434          s->priority_Bps = srq.priority / 8;  in eql_enslave()
  457      slaving_request_t srq;  in eql_emancipate() local
  460      if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))  in eql_emancipate()
  [all …]
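In eql (an unrelated "srq" hit: the load-equalizer network driver), slaving_request_t travels from user space through the master device's private ioctl, with ifr_data carrying the pointer that eql_enslave() copies in; note the driver derives priority_Bps as priority / 8, i.e. it treats the requested priority as bits per second. A hedged user-space sketch of the caller side (the master name "eql" and slave "ppp0" are assumptions, and CAP_NET_ADMIN is required):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_eql.h>

/* Enslave `slave` under the eql master: the request struct is passed via
 * ifr_data, matching the copy_from_user() in eql_enslave() above. */
static int enslave(int sock, const char *master, const char *slave, long prio)
{
    slaving_request_t srq;
    struct ifreq ifr;

    memset(&srq, 0, sizeof(srq));
    strncpy(srq.slave_name, slave, sizeof(srq.slave_name) - 1);
    srq.priority = prio;                 /* bits/s; driver computes bytes/s */

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, master, sizeof(ifr.ifr_name) - 1);
    ifr.ifr_data = (char *)&srq;

    return ioctl(sock, EQL_ENSLAVE, &ifr);
}

int main(void)
{
    int sock = socket(AF_INET, SOCK_DGRAM, 0);

    if (enslave(sock, "eql", "ppp0", 57600) < 0)
        perror("EQL_ENSLAVE");
    return 0;
}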
|
/linux-2.6.39/include/rdma/ |
D | ib_user_cm.h |
  149      __u8 srq;  member
  166      __u8 srq;  member
  246      __u8 srq;  member
  262      __u8 srq;  member
|
D | ib_cm.h |
  130      unsigned int srq:1;  member
  144      unsigned int srq:1;  member
  366      u8 srq;  member
  389      u8 srq;  member
|
D | ib_verbs.h |
  361      struct ib_srq *srq;  member
  580      struct ib_srq *srq;  member
  892      struct ib_srq *srq;  member
 1054      int (*modify_srq)(struct ib_srq *srq,
 1058      int (*query_srq)(struct ib_srq *srq,
 1060      int (*destroy_srq)(struct ib_srq *srq);
 1061      int (*post_srq_recv)(struct ib_srq *srq,
 1365  int ib_modify_srq(struct ib_srq *srq,
 1375  int ib_query_srq(struct ib_srq *srq,
 1382  int ib_destroy_srq(struct ib_srq *srq);
  [all …]
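The modify_srq/query_srq/destroy_srq/post_srq_recv pointers above form the per-driver ops table: core verbs dispatches indirectly through them, and an optional verb a driver leaves NULL surfaces to the caller as -ENOSYS. A toy model of that dispatch (types and the fake driver are illustrative):

#include <errno.h>
#include <stdio.h>

struct srq;                               /* opaque for this sketch */

struct device_ops {
    int (*query_srq)(struct srq *srq);
    int (*destroy_srq)(struct srq *srq);
};

/* Core-layer wrapper: dispatch if the driver provides the op. */
static int query_srq(const struct device_ops *ops, struct srq *srq)
{
    return ops->query_srq ? ops->query_srq(srq) : -ENOSYS;
}

static int fake_query(struct srq *srq) { (void)srq; return 0; }

int main(void)
{
    struct device_ops with    = { .query_srq = fake_query };
    struct device_ops without = { 0 };

    printf("%d %d\n", query_srq(&with, NULL), query_srq(&without, NULL));
    return 0;
}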
|
/linux-2.6.39/drivers/infiniband/hw/ehca/ |
D | ehca_iverbs.h |
  161  int ehca_post_srq_recv(struct ib_srq *srq,
  169  int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
  172  int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
  174  int ehca_destroy_srq(struct ib_srq *srq);
|
/linux-2.6.39/drivers/infiniband/ulp/ipoib/ |
D | ipoib_cm.c |
  101      ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);  in ipoib_cm_post_receive_srq()
  254          .srq = priv->cm.srq,  in ipoib_cm_create_rx_qp()
  431      rep.srq = ipoib_cm_has_srq(dev);  in ipoib_cm_send_rep()
 1019          .srq = priv->cm.srq,  in ipoib_cm_create_tx_qp()
 1063      req.srq = ipoib_cm_has_srq(dev);  in ipoib_cm_send_req()
 1512      priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);  in ipoib_cm_create_srq()
 1513      if (IS_ERR(priv->cm.srq)) {  in ipoib_cm_create_srq()
 1514          if (PTR_ERR(priv->cm.srq) != -ENOSYS)  in ipoib_cm_create_srq()
 1516              priv->ca->name, PTR_ERR(priv->cm.srq));  in ipoib_cm_create_srq()
 1517          priv->cm.srq = NULL;  in ipoib_cm_create_srq()
  [all …]
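ipoib_cm_create_srq() above shows the standard graceful fallback: ib_create_srq() returns an ERR_PTR-encoded pointer, and -ENOSYS specifically means the HCA lacks SRQ support, so ipoib silently drops to per-QP receive queues. A user-space model of the ERR_PTR/IS_ERR encoding that makes this work (the kernel folds errno values into the top 4095 pointer values; create_srq() here is a stand-in, not the real call):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long  PTR_ERR(const void *p)  { return (long)p; }
static inline int   IS_ERR(const void *p)   { return IS_ERR_VALUE((unsigned long)p); }

/* Stand-in for ib_create_srq(): a valid pointer or an encoded errno. */
static void *create_srq(int supported)
{
    static int dummy;
    return supported ? (void *)&dummy : ERR_PTR(-ENOSYS);
}

int main(void)
{
    void *srq = create_srq(0);

    if (IS_ERR(srq)) {
        if (PTR_ERR(srq) != -ENOSYS)
            fprintf(stderr, "SRQ creation failed: %ld\n", PTR_ERR(srq));
        srq = NULL;               /* fall back to per-QP receive queues */
    }
    printf("using %s path\n", srq ? "SRQ" : "non-SRQ");
    return 0;
}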
|