Home
last modified time | relevance | path

Searched refs: qno (Results 1 – 23 of 23) sorted by relevance

/linux-5.19.10/drivers/net/ethernet/marvell/octeon_ep/
Doctep_cn9k_pf.c37 static void cn93_dump_regs(struct octep_device *oct, int qno) in cn93_dump_regs() argument
41 dev_info(dev, "IQ-%d register dump\n", qno); in cn93_dump_regs()
43 qno, CN93_SDP_R_IN_INSTR_DBELL(qno), in cn93_dump_regs()
44 octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno))); in cn93_dump_regs()
46 qno, CN93_SDP_R_IN_CONTROL(qno), in cn93_dump_regs()
47 octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno))); in cn93_dump_regs()
49 qno, CN93_SDP_R_IN_ENABLE(qno), in cn93_dump_regs()
50 octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno))); in cn93_dump_regs()
52 qno, CN93_SDP_R_IN_INSTR_BADDR(qno), in cn93_dump_regs()
53 octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno))); in cn93_dump_regs()
[all …]
/linux-5.19.10/drivers/net/wwan/t7xx/
Dt7xx_cldma.c63 void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, in t7xx_cldma_hw_start_queue() argument
71 val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); in t7xx_cldma_hw_start_queue()
105 bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno) in t7xx_cldma_tx_addr_is_set() argument
107 u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE; in t7xx_cldma_tx_addr_is_set()
112 void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address, in t7xx_cldma_hw_set_start_addr() argument
115 u32 offset = qno * ADDR_SIZE; in t7xx_cldma_hw_set_start_addr()
123 void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, in t7xx_cldma_hw_resume_queue() argument
129 iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD); in t7xx_cldma_hw_resume_queue()
131 iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD); in t7xx_cldma_hw_resume_queue()
134 unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, in t7xx_cldma_hw_queue_status() argument
[all …]
Dt7xx_cldma.h153 void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
155 void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno,
157 void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
159 void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx);
160 unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
163 void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
166 void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
172 unsigned int qno, u64 address, enum mtk_txrx tx_rx);
179 bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno);
Dt7xx_dpmaif.c143 static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno) in t7xx_mask_dlq_intr() argument
148 q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; in t7xx_mask_dlq_intr()
164 void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno) in t7xx_dpmaif_dlq_unmask_rx_done() argument
168 mask = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; in t7xx_dpmaif_dlq_unmask_rx_done()
182 unsigned int qno) in t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr() argument
184 if (qno == DPF_RX_QNO0) in t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr()
193 unsigned int qno) in t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr() argument
195 if (qno == DPF_RX_QNO0) in t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr()
265 struct dpmaif_hw_intr_st_para *para, int qno) in t7xx_dpmaif_hw_check_rx_intr() argument
267 if (qno == DPF_RX_QNO_DFT) { in t7xx_dpmaif_hw_check_rx_intr()
[all …]
Dt7xx_netdev.c345 static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) in t7xx_ccmni_queue_tx_irq_notify() argument
351 net_queue = netdev_get_tx_queue(ccmni->dev, qno); in t7xx_ccmni_queue_tx_irq_notify()
357 static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) in t7xx_ccmni_queue_tx_full_notify() argument
363 netdev_err(ccmni->dev, "TX queue %d is full\n", qno); in t7xx_ccmni_queue_tx_full_notify()
364 net_queue = netdev_get_tx_queue(ccmni->dev, qno); in t7xx_ccmni_queue_tx_full_notify()
370 enum dpmaif_txq_state state, int qno) in t7xx_ccmni_queue_state_notify() argument
383 t7xx_ccmni_queue_tx_irq_notify(ctlb, qno); in t7xx_ccmni_queue_state_notify()
385 t7xx_ccmni_queue_tx_full_notify(ctlb, qno); in t7xx_ccmni_queue_state_notify()
Dt7xx_dpmaif.h155 struct dpmaif_hw_intr_st_para *para, int qno);
164 unsigned int qno);
165 void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno);
166 bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno);
Dt7xx_hif_cldma.c876 static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno, in t7xx_cldma_hw_start_send() argument
882 if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) { in t7xx_cldma_hw_start_send()
884 t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX); in t7xx_cldma_hw_start_send()
885 md_ctrl->txq_started &= ~BIT(qno); in t7xx_cldma_hw_start_send()
888 if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) { in t7xx_cldma_hw_start_send()
889 if (md_ctrl->txq_started & BIT(qno)) in t7xx_cldma_hw_start_send()
890 t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX); in t7xx_cldma_hw_start_send()
892 t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX); in t7xx_cldma_hw_start_send()
894 md_ctrl->txq_started |= BIT(qno); in t7xx_cldma_hw_start_send()
922 int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb) in t7xx_cldma_send_skb() argument
[all …]
Dt7xx_hif_dpmaif.c415 int qno; in t7xx_dpmaif_unmask_dlq_intr() local
417 for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++) in t7xx_dpmaif_unmask_dlq_intr()
418 t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno); in t7xx_dpmaif_unmask_dlq_intr()
Dt7xx_hif_dpmaif_rx.c941 int qno; in t7xx_dpmaif_irq_rx_done() local
943 qno = ffs(que_mask) - 1; in t7xx_dpmaif_irq_rx_done()
944 if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) { in t7xx_dpmaif_irq_rx_done()
945 dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno); in t7xx_dpmaif_irq_rx_done()
949 rxq = &dpmaif_ctrl->rxq[qno]; in t7xx_dpmaif_irq_rx_done()
Dt7xx_hif_cldma.h123 int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
/linux-5.19.10/drivers/crypto/cavium/cpt/
Dcptvf_reqmanager.c35 int qno) in pending_queue_inc_front() argument
37 struct pending_queue *queue = &pqinfo->queue[qno]; in pending_queue_inc_front()
224 u32 qno) in send_cpt_command() argument
233 if (unlikely(qno >= cptvf->nr_queues)) { in send_cpt_command()
235 qno, cptvf->nr_queues); in send_cpt_command()
240 queue = &qinfo->queue[qno]; in send_cpt_command()
326 int qno) in process_pending_queue() argument
329 struct pending_queue *pqueue = &pqinfo->queue[qno]; in process_pending_queue()
346 pending_queue_inc_front(pqinfo, qno); in process_pending_queue()
362 pending_queue_inc_front(pqinfo, qno); in process_pending_queue()
[all …]
Dcptvf_main.c17 u32 qno; member
29 vq_post_process(cwqe->cptvf, cwqe->qno); in vq_work_handler()
50 cwqe_info->vq_wqe[i].qno = i; in init_worker_threads()
554 int qno) in get_cptvf_vq_wqe() argument
558 if (unlikely(qno >= cptvf->nr_queues)) in get_cptvf_vq_wqe()
562 return &nwqe_info->vq_wqe[qno]; in get_cptvf_vq_wqe()
Dcptvf.h127 void vq_post_process(struct cpt_vf *cptvf, u32 qno);
Drequest_manager.h144 void vq_post_process(struct cpt_vf *cptvf, u32 qno);
/linux-5.19.10/drivers/crypto/cavium/nitrox/
Dnitrox_reqmgr.c388 int qno, ret = 0; in nitrox_process_se_request() local
425 qno = smp_processor_id() % ndev->nr_queues; in nitrox_process_se_request()
427 sr->cmdq = &ndev->pkt_inq[qno]; in nitrox_process_se_request()
464 sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno; in nitrox_process_se_request()
Dnitrox_hal.h21 void enable_aqm_ring(struct nitrox_device *ndev, int qno);
Dnitrox_lib.c113 cmdq->qno = i; in nitrox_alloc_aqm_queues()
167 cmdq->qno = i; in nitrox_alloc_pktin_queues()
Dnitrox_dev.h60 u8 qno; member
/linux-5.19.10/drivers/net/ethernet/cavium/liquidio/
Docteon_network.h574 int i, qno; in wake_txqs() local
577 qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no; in wake_txqs()
580 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, in wake_txqs()
/linux-5.19.10/drivers/crypto/marvell/octeontx/
Dotx_cptvf_main.c537 int qno) in get_cptvf_vq_wqe() argument
541 if (unlikely(qno >= cptvf->num_queues)) in get_cptvf_vq_wqe()
545 return &nwqe_info->vq_wqe[qno]; in get_cptvf_vq_wqe()
/linux-5.19.10/drivers/net/wireless/rsi/
Drsi_mgmt.h708 static inline void rsi_set_len_qno(__le16 *addr, u16 len, u8 qno) in rsi_set_len_qno() argument
710 *addr = cpu_to_le16(len | ((qno & 7) << 12)); in rsi_set_len_qno()
/linux-5.19.10/drivers/scsi/lpfc/
Dlpfc_nvmet.c2191 uint32_t *payload, qno; in lpfc_nvmet_process_rcv_fcp_req() local
2265 qno = nvmebuf->idx; in lpfc_nvmet_process_rcv_fcp_req()
2267 phba, phba->sli4_hba.nvmet_mrq_hdr[qno], in lpfc_nvmet_process_rcv_fcp_req()
2268 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); in lpfc_nvmet_process_rcv_fcp_req()
2383 uint32_t size, oxid, sid, qno; in lpfc_nvmet_unsol_fcp_buffer() local
2449 qno = nvmebuf->idx; in lpfc_nvmet_unsol_fcp_buffer()
2451 phba, phba->sli4_hba.nvmet_mrq_hdr[qno], in lpfc_nvmet_unsol_fcp_buffer()
2452 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); in lpfc_nvmet_unsol_fcp_buffer()
Dlpfc_sli.c2574 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) in lpfc_sli_hbqbuf_add_hbqs() argument
2579 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, in lpfc_sli_hbqbuf_add_hbqs()
2580 lpfc_hbq_defs[qno]->add_count); in lpfc_sli_hbqbuf_add_hbqs()
2593 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) in lpfc_sli_hbqbuf_init_hbqs() argument
2596 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, in lpfc_sli_hbqbuf_init_hbqs()
2597 lpfc_hbq_defs[qno]->entry_count); in lpfc_sli_hbqbuf_init_hbqs()
2599 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, in lpfc_sli_hbqbuf_init_hbqs()
2600 lpfc_hbq_defs[qno]->init_count); in lpfc_sli_hbqbuf_init_hbqs()