/linux-5.19.10/drivers/block/null_blk/ |
D | main.c |
    674  static void put_tag(struct nullb_queue *nq, unsigned int tag)  in put_tag() argument
    676  clear_bit_unlock(tag, nq->tag_map);  in put_tag()
    678  if (waitqueue_active(&nq->wait))  in put_tag()
    679  wake_up(&nq->wait);  in put_tag()
    682  static unsigned int get_tag(struct nullb_queue *nq)  in get_tag() argument
    687  tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);  in get_tag()
    688  if (tag >= nq->queue_depth)  in get_tag()
    690  } while (test_and_set_bit_lock(tag, nq->tag_map));  in get_tag()
    697  put_tag(cmd->nq, cmd->tag);  in free_cmd()
    702  static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)  in __alloc_cmd() argument
    [all …]
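The get_tag()/put_tag() fragments above are a lock-bit tag allocator: find_first_zero_bit() proposes a free tag, test_and_set_bit_lock() claims it atomically (retrying if another CPU wins the race), and clear_bit_unlock() releases it and wakes any waiter sleeping on nq->wait. A minimal user-space sketch of the same pattern, assuming a single 64-bit bitmap and C11 atomics in place of the kernel bitops; QUEUE_DEPTH and all helper names here are illustrative, not taken from null_blk:

/*
 * Sketch of the tag-bitmap allocation pattern, not null_blk code.
 * A free tag is a zero bit; allocation retries until an atomic
 * test-and-set wins.
 */
#include <stdatomic.h>
#include <stdio.h>

#define QUEUE_DEPTH 64

static atomic_ulong tag_map;            /* bit n set => tag n in use */

static int get_tag(void)
{
    for (;;) {
        unsigned long map = atomic_load(&tag_map);

        if (~map == 0)
            return -1;                  /* all tags busy */
        int tag = __builtin_ctzl(~map); /* first zero bit */
        if (tag >= QUEUE_DEPTH)         /* mirrors the depth check */
            return -1;
        /* test_and_set_bit_lock() analogue: claim with acquire */
        if (!(atomic_fetch_or_explicit(&tag_map, 1UL << tag,
                                       memory_order_acquire) & (1UL << tag)))
            return tag;
        /* lost the race to another thread; retry */
    }
}

static void put_tag(int tag)
{
    /* clear_bit_unlock() analogue: release the tag; the kernel code
     * additionally wakes sleepers on nq->wait here */
    atomic_fetch_and_explicit(&tag_map, ~(1UL << tag),
                              memory_order_release);
}

int main(void)
{
    int a = get_tag(), b = get_tag();
    printf("allocated %d and %d\n", a, b);  /* 0 and 1 */
    put_tag(a);
    printf("reused %d\n", get_tag());       /* 0 again */
    return 0;
}

The acquire/release pairing mirrors what the _lock/_unlock bitop suffixes provide in the kernel: whoever next claims a released tag observes all writes the previous owner made before freeing it.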
|
D | zoned.c |
    372  struct nullb_device *dev = cmd->nq->dev;  in null_zone_write()
    606  struct nullb_device *dev = cmd->nq->dev;  in null_zone_mgmt()
    675  dev = cmd->nq->dev;  in null_process_zoned_cmd()
|
D | null_blk.h |
    26  struct nullb_queue *nq;  member
|
/linux-5.19.10/drivers/infiniband/hw/bnxt_re/ |
D | qplib_fp.c |
    159  struct bnxt_qplib_nq *nq = nq_work->nq;  in bnxt_qpn_cqn_sched_task() local
    161  if (cq && nq) {  in bnxt_qpn_cqn_sched_task()
    163  if (atomic_read(&cq->arm_state) && nq->cqn_handler) {  in bnxt_qpn_cqn_sched_task()
    164  dev_dbg(&nq->pdev->dev,  in bnxt_qpn_cqn_sched_task()
    166  __func__, cq, nq);  in bnxt_qpn_cqn_sched_task()
    167  nq->cqn_handler(nq, cq);  in bnxt_qpn_cqn_sched_task()
    235  static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)  in clean_nq() argument
    237  struct bnxt_qplib_hwq *hwq = &nq->hwq;  in clean_nq()
    239  int budget = nq->budget;  in clean_nq()
    295  clean_nq(cq->nq, cq);  in __wait_for_all_nqes()
    [all …]
|
D | qplib_fp.h |
    404  struct bnxt_qplib_nq *nq;  member
    466  typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
    468  typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
    491  struct bnxt_qplib_nq *nq;  member
    495  void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
    496  void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
    497  int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
    499  int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
    540  void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
    541  int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
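Together with the qplib_fp.c hits above, these declarations show the notification queue's consumer interface: a client registers cqn_handler_t/srqn_handler_t callbacks when enabling an NQ, and the event path dispatches through nq->cqn_handler(nq, cq) only when a handler is present. A stripped-down sketch of that register-then-dispatch shape; every name in it (notif_queue, nq_dispatch, ...) is an illustrative stand-in, not the bnxt_re API:

/*
 * Sketch of the callback registration/dispatch shape used by the NQ
 * code above. All names are invented for illustration.
 */
#include <stdio.h>

struct notif_queue;
struct completion_queue { int id; };

typedef int (*cqn_handler_t)(struct notif_queue *nq,
                             struct completion_queue *cq);

struct notif_queue {
    cqn_handler_t cqn_handler;  /* set at enable time, the way
                                   bnxt_qplib_enable_nq() takes one */
};

static int my_cqn_handler(struct notif_queue *nq,
                          struct completion_queue *cq)
{
    printf("completion event on CQ %d\n", cq->id);
    return 0;
}

/* What the IRQ/worker path does: dispatch only if a handler exists. */
static void nq_dispatch(struct notif_queue *nq, struct completion_queue *cq)
{
    if (nq->cqn_handler)
        nq->cqn_handler(nq, cq);
}

int main(void)
{
    struct notif_queue nq = { .cqn_handler = my_cqn_handler };
    struct completion_queue cq = { .id = 7 };

    nq_dispatch(&nq, &cq);
    return 0;
}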
|
D | main.c |
    301  struct bnxt_qplib_nq *nq;  in bnxt_re_stop_irq() local
    305  nq = &rdev->nq[indx - 1];  in bnxt_re_stop_irq()
    306  bnxt_qplib_nq_stop_irq(nq, false);  in bnxt_re_stop_irq()
    317  struct bnxt_qplib_nq *nq;  in bnxt_re_start_irq() local
    339  nq = &rdev->nq[indx - 1];  in bnxt_re_start_irq()
    340  rc = bnxt_qplib_nq_start_irq(nq, indx - 1,  in bnxt_re_start_irq()
    890  static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,  in bnxt_re_srqn_handler() argument
    912  static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,  in bnxt_re_cqn_handler() argument
    941  bnxt_qplib_disable_nq(&rdev->nq[i - 1]);  in bnxt_re_cleanup_res()
    957  rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],  in bnxt_re_init_res()
    [all …]
|
D | ib_verbs.c |
    1560  struct bnxt_qplib_nq *nq = NULL;  in bnxt_re_destroy_srq() local
    1563  nq = qplib_srq->cq->nq;  in bnxt_re_destroy_srq()
    1567  if (nq)  in bnxt_re_destroy_srq()
    1568  nq->budget--;  in bnxt_re_destroy_srq()
    1609  struct bnxt_qplib_nq *nq = NULL;  in bnxt_re_create_srq() local
    1649  srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;  in bnxt_re_create_srq()
    1650  nq = &rdev->nq[0];  in bnxt_re_create_srq()
    1676  if (nq)  in bnxt_re_create_srq()
    1677  nq->budget++;  in bnxt_re_create_srq()
    2794  struct bnxt_qplib_nq *nq;  in bnxt_re_destroy_cq() local
    [all …]
|
D | bnxt_re.h |
    155  struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];  member
|
/linux-5.19.10/lib/crypto/ |
D | curve25519-hacl64.c |
    546  ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,  in ladder_smallloop_cmult_small_loop_step() argument
    551  point_swap_conditional(nq, nqpq, bit0);  in ladder_smallloop_cmult_small_loop_step()
    552  addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);  in ladder_smallloop_cmult_small_loop_step()
    558  ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2,  in ladder_smallloop_cmult_small_loop_double_step() argument
    562  ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);  in ladder_smallloop_cmult_small_loop_double_step()
    564  ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);  in ladder_smallloop_cmult_small_loop_double_step()
    568  ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,  in ladder_smallloop_cmult_small_loop() argument
    572  ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2,  in ladder_smallloop_cmult_small_loop()
    578  static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq,  in ladder_bigloop_cmult_big_loop() argument
    585  ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q,  in ladder_bigloop_cmult_big_loop()
    [all …]
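These helpers form the X25519 Montgomery ladder: for each scalar bit, point_swap_conditional() swaps the running pair (nq, nqpq) according to the bit, then fmonty() performs a combined differential add-and-double, preserving the invariant that the two points differ by the base point q. A toy sketch of the same ladder structure, assuming plain integer addition stands in for the curve group law (so scalar multiplication degenerates to ordinary multiplication, and the constant-time swap of the real code is replaced by a branch):

/*
 * Toy Montgomery-ladder sketch: integer addition stands in for the
 * curve group law, so the invariant maintained by the swap + fmonty
 * step becomes r0 = k*base, r1 = (k+1)*base for the bits seen so far.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ladder(uint64_t scalar, uint64_t base)
{
    uint64_t r0 = 0;        /* identity ("point at infinity") */
    uint64_t r1 = base;     /* base point */

    for (int i = 63; i >= 0; i--) {
        uint64_t bit = (scalar >> i) & 1;

        /* conditional swap, as in point_swap_conditional() */
        if (bit) { uint64_t t = r0; r0 = r1; r1 = t; }

        /* combined add-and-double, as in fmonty():
         * r1 = r0 + r1, r0 = r0 doubled */
        r1 = r0 + r1;
        r0 = r0 + r0;

        /* swap back; the real code folds this into the next swap */
        if (bit) { uint64_t t = r0; r0 = r1; r1 = t; }
    }
    return r0;              /* scalar * base */
}

int main(void)
{
    printf("%llu\n", (unsigned long long)ladder(25519, 3)); /* 76557 */
    return 0;
}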
|
/linux-5.19.10/fs/xfs/ |
D | xfs_trans_dquot.c |
    78  struct xfs_dqtrx *oq, *nq;  in xfs_trans_dup_dqinfo() local
    97  nq = &nqa[i];  in xfs_trans_dup_dqinfo()
    102  nq->qt_dquot = oq->qt_dquot;  in xfs_trans_dup_dqinfo()
    103  nq->qt_bcount_delta = nq->qt_icount_delta = 0;  in xfs_trans_dup_dqinfo()
    104  nq->qt_rtbcount_delta = 0;  in xfs_trans_dup_dqinfo()
    109  nq->qt_blk_res = oq->qt_blk_res - blk_res_used;  in xfs_trans_dup_dqinfo()
    112  nq->qt_rtblk_res = oq->qt_rtblk_res -  in xfs_trans_dup_dqinfo()
    116  nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;  in xfs_trans_dup_dqinfo()
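xfs_trans_dup_dqinfo() rolls quota state into a duplicated transaction: the new dqtrx starts with zeroed usage deltas, and each reservation is carried over minus whatever the old transaction already consumed, so nothing is reserved twice. A minimal sketch of that carry-over arithmetic, using a simplified struct whose fields only loosely mirror xfs_dqtrx (the real code derives blk_res_used from the deltas rather than storing it):

/*
 * Sketch of the reservation carry-over in xfs_trans_dup_dqinfo();
 * simplified struct, illustrative field set.
 */
#include <stdio.h>

struct dqtrx {
    long blk_res;        /* blocks reserved */
    long bcount_delta;   /* blocks actually used so far */
    long ino_res;        /* inodes reserved */
    long ino_res_used;   /* inodes actually used so far */
};

/* The new transaction inherits only the unused reservation; its own
 * usage deltas start from zero. */
static void dup_dqtrx(const struct dqtrx *oq, struct dqtrx *nq)
{
    nq->bcount_delta = 0;
    nq->blk_res = oq->blk_res - oq->bcount_delta;
    nq->ino_res_used = 0;
    nq->ino_res = oq->ino_res - oq->ino_res_used;
}

int main(void)
{
    struct dqtrx oq = { .blk_res = 100, .bcount_delta = 30,
                        .ino_res = 4, .ino_res_used = 1 };
    struct dqtrx nq;

    dup_dqtrx(&oq, &nq);
    printf("new blk_res=%ld ino_res=%ld\n", nq.blk_res, nq.ino_res);
    /* new blk_res=70 ino_res=3 */
    return 0;
}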
|
/linux-5.19.10/arch/x86/crypto/ |
D | curve25519-x86_64.c |
    977  u64 *nq = p01_tmp1;  in point_add_and_double() local
    981  u64 *x2 = nq;  in point_add_and_double()
    982  u64 *z2 = nq + (u32)4U;  in point_add_and_double()
    1024  fmul2(nq, dc1, ab1, tmp2);  in point_add_and_double()
    1028  static void point_double(u64 *nq, u64 *tmp1, u64 *tmp2)  in point_double() argument
    1030  u64 *x2 = nq;  in point_double()
    1031  u64 *z2 = nq + (u32)4U;  in point_double()
    1048  fmul2(nq, dc, ab, tmp2);  in point_double()
|
/linux-5.19.10/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_uld.c |
    111  unsigned int nq = rxq_info->nrxq + rxq_info->nciq;  in alloc_uld_rxqs() local
    125  for (i = 0; i < nq; i++, q++) {  in alloc_uld_rxqs()
    403  int nq = txq_info->ntxq;  in free_sge_txq_uld() local
    406  for (i = 0; i < nq; i++) {  in free_sge_txq_uld()
    426  int nq = txq_info->ntxq;  in alloc_sge_txq_uld() local
    429  j = nq / adap->params.nports;  in alloc_sge_txq_uld()
    430  for (i = 0; i < nq; i++) {  in alloc_sge_txq_uld()
|
/linux-5.19.10/drivers/net/ethernet/marvell/ |
D | mvneta.c |
    1864  struct netdev_queue *nq, bool napi)  in mvneta_txq_bufs_free() argument
    1902  netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);  in mvneta_txq_bufs_free()
    1909  struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);  in mvneta_txq_done() local
    1916  mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);  in mvneta_txq_done()
    1920  if (netif_tx_queue_stopped(nq)) {  in mvneta_txq_done()
    1922  netif_tx_wake_queue(nq);  in mvneta_txq_done()
    2171  struct netdev_queue *nq;  in mvneta_xdp_xmit_back() local
    2182  nq = netdev_get_tx_queue(pp->dev, txq->id);  in mvneta_xdp_xmit_back()
    2184  __netif_tx_lock(nq, cpu);  in mvneta_xdp_xmit_back()
    2199  __netif_tx_unlock(nq);  in mvneta_xdp_xmit_back()
    [all …]
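The mvneta hits show two cooperating patterns: byte-queue-limits accounting (netdev_tx_completed_queue() as buffers are reclaimed) and the stop/wake flow, where the xmit path stops a queue that runs out of descriptors and the completion path wakes it once enough have been freed. A single-threaded sketch of the stop/wake thresholds, with plain counters standing in for the DMA ring and netdev_queue; the names and the wake threshold are invented for illustration:

/*
 * Sketch of the tx stop/wake pattern seen in mvneta_txq_done().
 * Counters stand in for the hardware ring; no locking shown.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE   256
#define WAKE_THRESH 32          /* free descs needed before waking */

static int  descs_in_flight;
static bool queue_stopped;

static bool xmit_one(void)
{
    if (descs_in_flight == RING_SIZE) {
        queue_stopped = true;   /* netif_tx_stop_queue() analogue */
        return false;
    }
    descs_in_flight++;
    return true;
}

/* Completion path: reclaim finished descriptors, then wake the queue
 * if it was stopped and enough room opened up. */
static void tx_done(int completed)
{
    descs_in_flight -= completed;
    if (queue_stopped && RING_SIZE - descs_in_flight >= WAKE_THRESH) {
        queue_stopped = false;  /* netif_tx_wake_queue() analogue */
        printf("queue woken, %d free\n", RING_SIZE - descs_in_flight);
    }
}

int main(void)
{
    for (int i = 0; i < RING_SIZE + 1; i++)
        xmit_one();             /* the last attempt stops the queue */
    printf("stopped=%d\n", queue_stopped);
    tx_done(64);
    return 0;
}

The same stop/wake shape recurs in the mv643xx_eth, fec, and stmmac entries below; the per-queue __netif_tx_lock()/__netif_tx_unlock() pairs in the XDP paths serialize software transmitters on one hardware queue.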
|
D | mv643xx_eth.c |
    495  struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);  in txq_maybe_wake() local
    497  if (netif_tx_queue_stopped(nq)) {  in txq_maybe_wake()
    498  __netif_tx_lock(nq, smp_processor_id());  in txq_maybe_wake()
    500  netif_tx_wake_queue(nq);  in txq_maybe_wake()
    501  __netif_tx_unlock(nq);  in txq_maybe_wake()
    997  struct netdev_queue *nq;  in mv643xx_eth_xmit() local
    1001  nq = netdev_get_tx_queue(dev, queue);  in mv643xx_eth_xmit()
    1020  netif_tx_stop_queue(nq);  in mv643xx_eth_xmit()
    1034  struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);  in txq_kick() local
    1038  __netif_tx_lock(nq, smp_processor_id());  in txq_kick()
    [all …]
|
/linux-5.19.10/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | t4vf_hw.c |
    1272  int nq = min(n, 32);  in t4vf_config_rss_range() local
    1279  cmd.niqid = cpu_to_be16(nq);  in t4vf_config_rss_range()
    1285  start += nq;  in t4vf_config_rss_range()
    1286  n -= nq;  in t4vf_config_rss_range()
    1293  while (nq > 0) {  in t4vf_config_rss_range()
    1302  int nqbuf = min(3, nq);  in t4vf_config_rss_range()
    1304  nq -= nqbuf;  in t4vf_config_rss_range()
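t4vf_config_rss_range() feeds n queue IDs to the firmware in bounded chunks: each command takes nq = min(n, 32) IDs, and within a command the IDs are packed three to a word via nqbuf = min(3, nq). A sketch of just that chunking skeleton, with the firmware command elided and printf() marking the boundaries (function and variable names are illustrative):

/*
 * Chunking skeleton of t4vf_config_rss_range(): an outer loop of up
 * to 32 IDs per command, an inner loop packing 3 IDs per word.
 */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void config_range(int start, int n)
{
    while (n > 0) {
        int nq = MIN(n, 32);          /* IDs in this command */
        int id = start;

        printf("command covering IDs %d..%d\n", id, id + nq - 1);
        start += nq;
        n -= nq;

        while (nq > 0) {
            int nqbuf = MIN(3, nq);   /* IDs packed per word */

            nq -= nqbuf;
            printf("  word packs %d IDs starting at %d\n", nqbuf, id);
            id += nqbuf;
        }
    }
}

int main(void)
{
    config_range(0, 70);    /* 3 commands: 32 + 32 + 6 IDs */
    return 0;
}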
|
/linux-5.19.10/arch/s390/mm/ |
D | pgtable.c |
    787  unsigned char key, bool nq)  in set_guest_storage_key() argument
    842  page_set_storage_key(paddr, skey, !nq);  in set_guest_storage_key()
    866  bool nq, bool mr, bool mc)  in cond_set_guest_storage_key() argument
    885  rc = set_guest_storage_key(current->mm, addr, key, nq);  in cond_set_guest_storage_key()
|
/linux-5.19.10/drivers/net/ |
D | tap.c |
    190  struct tap_queue *nq;  in tap_disable_queue() local
    201  nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);  in tap_disable_queue()
    202  nq->queue_index = index;  in tap_disable_queue()
    204  rcu_assign_pointer(tap->taps[index], nq);  in tap_disable_queue()
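tap_disable_queue() removes a queue from the taps[] array without leaving a hole: the last live element is moved into the vacated slot and its queue_index is updated to match. A generic sketch of that swap-with-last removal; the kernel does the pointer moves RCU-safely under RTNL (rtnl_dereference()/rcu_assign_pointer()), both of which are elided here:

/*
 * Swap-with-last removal, as in tap_disable_queue(): move the final
 * array element into the vacated slot so the array stays dense.
 */
#include <stdio.h>

struct queue {
    int queue_index;    /* must track the slot it lives in */
    int id;
};

static void disable_queue(struct queue **taps, int *numvtaps, int index)
{
    struct queue *nq = taps[*numvtaps - 1];  /* last live queue */

    nq->queue_index = index;                 /* tell it its new slot */
    taps[index] = nq;                        /* fill the hole */
    taps[--(*numvtaps)] = NULL;
}

int main(void)
{
    struct queue q0 = { 0, 10 }, q1 = { 1, 11 }, q2 = { 2, 12 };
    struct queue *taps[3] = { &q0, &q1, &q2 };
    int numvtaps = 3;

    disable_queue(taps, &numvtaps, 0);       /* remove slot 0 */
    printf("slot 0 now holds id %d (index %d)\n",
           taps[0]->id, taps[0]->queue_index);   /* id 12, index 0 */
    return 0;
}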
|
/linux-5.19.10/drivers/net/ethernet/intel/igc/ |
D | igc_main.c |
    2211  struct netdev_queue *nq;  in igc_xdp_xmit_back() local
    2219  nq = txring_txq(ring);  in igc_xdp_xmit_back()
    2221  __netif_tx_lock(nq, cpu);  in igc_xdp_xmit_back()
    2223  __netif_tx_unlock(nq);  in igc_xdp_xmit_back()
    2290  struct netdev_queue *nq;  in igc_finalize_xdp() local
    2295  nq = txring_txq(ring);  in igc_finalize_xdp()
    2297  __netif_tx_lock(nq, cpu);  in igc_finalize_xdp()
    2299  __netif_tx_unlock(nq);  in igc_finalize_xdp()
    2611  struct netdev_queue *nq = txring_txq(ring);  in igc_xdp_xmit_zc() local
    2621  __netif_tx_lock(nq, cpu);  in igc_xdp_xmit_zc()
    [all …]
|
/linux-5.19.10/kernel/rcu/ |
D | tasks.h |
    1197  int nq = READ_ONCE(t->trc_reader_special.b.need_qs);  in rcu_read_unlock_trace_special() local
    1203  if (nq)  in rcu_read_unlock_trace_special()
    1206  if (nq && atomic_dec_and_test(&trc_n_readers_need_end))  in rcu_read_unlock_trace_special()
|
/linux-5.19.10/drivers/net/ethernet/aquantia/atlantic/ |
D | aq_nic.c |
    832  struct netdev_queue *nq;  in aq_nic_xmit_xdpf() local
    843  nq = netdev_get_tx_queue(ndev, tx_ring->idx);  in aq_nic_xmit_xdpf()
    844  __netif_tx_lock(nq, cpu);  in aq_nic_xmit_xdpf()
    857  __netif_tx_unlock(nq);  in aq_nic_xmit_xdpf()
|
/linux-5.19.10/net/sched/ |
D | sch_api.c |
    318  struct netdev_queue *nq;  in qdisc_lookup_rcu() local
    327  nq = dev_ingress_queue_rcu(dev);  in qdisc_lookup_rcu()
    328  if (nq)  in qdisc_lookup_rcu()
    329  q = qdisc_match_from_root(nq->qdisc_sleeping, handle);  in qdisc_lookup_rcu()
|
/linux-5.19.10/drivers/net/ethernet/freescale/ |
D | fec_main.c |
    833  struct netdev_queue *nq;  in fec_enet_start_xmit() local
    838  nq = netdev_get_tx_queue(ndev, queue);  in fec_enet_start_xmit()
    849  netif_tx_stop_queue(nq);  in fec_enet_start_xmit()
    1299  struct netdev_queue *nq;  in fec_enet_tx_queue() local
    1307  nq = netdev_get_tx_queue(ndev, queue_id);  in fec_enet_tx_queue()
    1387  if (netif_tx_queue_stopped(nq)) {  in fec_enet_tx_queue()
    1390  netif_tx_wake_queue(nq);  in fec_enet_tx_queue()
|
/linux-5.19.10/arch/s390/kvm/ |
D | priv.c |
    1030  bool mr = false, mc = false, nq;  in handle_pfmf() local
    1060  nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;  in handle_pfmf()
    1112  key, NULL, nq, mr, mc);  in handle_pfmf()
|
/linux-5.19.10/drivers/net/ethernet/stmicro/stmmac/ |
D | stmmac_main.c |
    2333  struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);  in stmmac_xdp_xmit_zc() local
    2342  txq_trans_cond_update(nq);  in stmmac_xdp_xmit_zc()
    4645  struct netdev_queue *nq;  in stmmac_xdp_xmit_back() local
    4653  nq = netdev_get_tx_queue(priv->dev, queue);  in stmmac_xdp_xmit_back()
    4655  __netif_tx_lock(nq, cpu);  in stmmac_xdp_xmit_back()
    4657  txq_trans_cond_update(nq);  in stmmac_xdp_xmit_back()
    4663  __netif_tx_unlock(nq);  in stmmac_xdp_xmit_back()
    6262  struct netdev_queue *nq;  in stmmac_xdp_xmit() local
    6273  nq = netdev_get_tx_queue(priv->dev, queue);  in stmmac_xdp_xmit()
    6275  __netif_tx_lock(nq, cpu);  in stmmac_xdp_xmit()
    [all …]
|
/linux-5.19.10/drivers/net/ethernet/freescale/dpaa2/ |
D | dpaa2-eth.c |
    1362  struct netdev_queue *nq;  in __dpaa2_eth_tx() local
    1441  nq = netdev_get_tx_queue(net_dev, queue_mapping);  in __dpaa2_eth_tx()
    1442  netdev_tx_sent_queue(nq, fd_len);  in __dpaa2_eth_tx()
    1464  netdev_tx_completed_queue(nq, 1, fd_len);  in __dpaa2_eth_tx()
    1831  struct netdev_queue *nq;  in dpaa2_eth_poll() local
    1898  nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);  in dpaa2_eth_poll()
    1899  netdev_tx_completed_queue(nq, txc_fq->dq_frames,  in dpaa2_eth_poll()
|