Home
last modified time | relevance | path

Searched refs:nreq (Results 1 – 12 of 12) sorted by relevance

/linux-2.6.39/net/sunrpc/xprtrdma/
transport.c:484 struct rpcrdma_req *req, *nreq; in xprt_rdma_allocate() local
524 nreq = kmalloc(sizeof *req + size, GFP_ATOMIC); in xprt_rdma_allocate()
526 nreq = kmalloc(sizeof *req + size, GFP_NOFS); in xprt_rdma_allocate()
527 if (nreq == NULL) in xprt_rdma_allocate()
531 nreq->rl_base, size + sizeof(struct rpcrdma_req) in xprt_rdma_allocate()
533 &nreq->rl_handle, &nreq->rl_iov)) { in xprt_rdma_allocate()
534 kfree(nreq); in xprt_rdma_allocate()
538 nreq->rl_size = size; in xprt_rdma_allocate()
539 nreq->rl_niovs = 0; in xprt_rdma_allocate()
540 nreq->rl_nchunks = 0; in xprt_rdma_allocate()
[all …]
/linux-2.6.39/drivers/infiniband/hw/mthca/
mthca_qp.c:1554 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, in mthca_wq_overflow() argument
1561 if (likely(cur + nreq < wq->max)) in mthca_wq_overflow()
1569 return cur + nreq >= wq->max; in mthca_wq_overflow()
1620 int nreq; in mthca_tavor_post_send() local
1641 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_tavor_post_send()
1642 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mthca_tavor_post_send()
1646 qp->sq.max, nreq); in mthca_tavor_post_send()
1775 cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | in mthca_tavor_post_send()
1779 if (!nreq) { in mthca_tavor_post_send()
1792 if (likely(nreq)) { in mthca_tavor_post_send()
[all …]
mthca_srq.c:504 int nreq; in mthca_tavor_post_srq_recv() local
513 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_srq_recv()
554 ++nreq; in mthca_tavor_post_srq_recv()
555 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { in mthca_tavor_post_srq_recv()
556 nreq = 0; in mthca_tavor_post_srq_recv()
572 if (likely(nreq)) { in mthca_tavor_post_srq_recv()
579 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, in mthca_tavor_post_srq_recv()
603 int nreq; in mthca_arbel_post_srq_recv() local
609 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_srq_recv()
644 if (likely(nreq)) { in mthca_arbel_post_srq_recv()
[all …]
/linux-2.6.39/arch/ia64/include/asm/
perfmon.h:253 extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *re…
254 extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *r…
255 extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_reg…
256 extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_reg…
/linux-2.6.39/drivers/infiniband/hw/mlx4/
srq.c:308 int nreq; in mlx4_ib_post_srq_recv() local
313 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_srq_recv()
345 if (likely(nreq)) { in mlx4_ib_post_srq_recv()
346 srq->wqe_ctr += nreq; in mlx4_ib_post_srq_recv()
qp.c:1454 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) in mlx4_wq_overflow() argument
1460 if (likely(cur + nreq < wq->max_post)) in mlx4_wq_overflow()
1468 return cur + nreq >= wq->max_post; in mlx4_wq_overflow()
1641 int nreq; in mlx4_ib_post_send() local
1658 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_send()
1662 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mlx4_ib_post_send()
1675 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in mlx4_ib_post_send()
1858 if (likely(nreq)) { in mlx4_ib_post_send()
1859 qp->sq.head += nreq; in mlx4_ib_post_send()
1894 int nreq; in mlx4_ib_post_recv() local
[all …]
/linux-2.6.39/drivers/net/mlx4/
main.c:1008 int nreq = min_t(int, dev->caps.num_ports * in mlx4_enable_msi_x() local
1015 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, in mlx4_enable_msi_x()
1016 nreq); in mlx4_enable_msi_x()
1017 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); in mlx4_enable_msi_x()
1021 for (i = 0; i < nreq; ++i) in mlx4_enable_msi_x()
1025 err = pci_enable_msix(dev->pdev, entries, nreq); in mlx4_enable_msi_x()
1031 "trying again\n", nreq, err); in mlx4_enable_msi_x()
1032 nreq = err; in mlx4_enable_msi_x()
1039 if (nreq < in mlx4_enable_msi_x()
1043 dev->caps.num_comp_vectors = nreq - 1; in mlx4_enable_msi_x()
[all …]
/linux-2.6.39/fs/nilfs2/
btree.c:1628 union nilfs_bmap_ptr_req *nreq, in nilfs_btree_prepare_convert_and_insert() argument
1651 if (nreq != NULL) { in nilfs_btree_prepare_convert_and_insert()
1652 nreq->bpr_ptr = dreq->bpr_ptr + 1; in nilfs_btree_prepare_convert_and_insert()
1653 ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat); in nilfs_btree_prepare_convert_and_insert()
1657 ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh); in nilfs_btree_prepare_convert_and_insert()
1670 nilfs_bmap_abort_alloc_ptr(btree, nreq, dat); in nilfs_btree_prepare_convert_and_insert()
1684 union nilfs_bmap_ptr_req *nreq, in nilfs_btree_commit_convert_and_insert() argument
1702 if (nreq != NULL) { in nilfs_btree_commit_convert_and_insert()
1704 nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); in nilfs_btree_commit_convert_and_insert()
1720 tmpptr = nreq->bpr_ptr; in nilfs_btree_commit_convert_and_insert()
[all …]
/linux-2.6.39/arch/ia64/kernel/
perfmon.c:3427 pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) in pfm_mod_write_pmcs() argument
3443 return pfm_write_pmcs(ctx, req, nreq, regs); in pfm_mod_write_pmcs()
3448 pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) in pfm_mod_read_pmds() argument
3464 return pfm_read_pmds(ctx, req, nreq, regs); in pfm_mod_read_pmds()
3928 pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) in pfm_mod_write_ibrs() argument
3944 return pfm_write_ibrs(ctx, req, nreq, regs); in pfm_mod_write_ibrs()
3949 pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) in pfm_mod_write_dbrs() argument
3965 return pfm_write_dbrs(ctx, req, nreq, regs); in pfm_mod_write_dbrs()
/linux-2.6.39/net/ceph/
osd_client.c:564 struct ceph_osd_request *req, *nreq; in __kick_osd_requests() local
580 list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, in __kick_osd_requests()
1272 struct ceph_osd_request *req, *nreq; in kick_requests() local
1295 list_for_each_entry_safe(req, nreq, &osdc->req_linger, in kick_requests()
/linux-2.6.39/fs/ceph/
mds_client.c:1859 struct ceph_mds_request *req, *nreq; in __wake_requests() local
1861 list_for_each_entry_safe(req, nreq, head, r_wait) { in __wake_requests()
2338 struct ceph_mds_request *req, *nreq; in replay_unsafe_requests() local
2344 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { in replay_unsafe_requests()
/linux-2.6.39/drivers/net/vxge/
vxge-config.c:2357 u32 nreq = 0, i; in __vxge_hw_blockpool_blocks_add() local
2361 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; in __vxge_hw_blockpool_blocks_add()
2362 blockpool->req_out += nreq; in __vxge_hw_blockpool_blocks_add()
2365 for (i = 0; i < nreq; i++) in __vxge_hw_blockpool_blocks_add()