/linux-6.1.9/drivers/s390/crypto/ |
D | ap_queue.c |
     19  static void __ap_flush_queue(struct ap_queue *aq);
     30  static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)  in ap_queue_enable_irq() argument
     37  status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));  in ap_queue_enable_irq()
     47  AP_QID_CARD(aq->qid),  in ap_queue_enable_irq()
     48  AP_QID_QUEUE(aq->qid));  in ap_queue_enable_irq()
    122  static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)  in ap_sm_nop() argument
    134  static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)  in ap_sm_recv() argument
    151  status = ap_dqap(aq->qid, &aq->reply->psmid,  in ap_sm_recv()
    152  aq->reply->msg, aq->reply->bufsize,  in ap_sm_recv()
    159  aq->queue_count = max_t(int, 0, aq->queue_count - 1);  in ap_sm_recv()
    [all …]
|
D | ap_bus.c |
    438  struct ap_queue *aq = from_timer(aq, t, timeout);  in ap_request_timeout() local
    440  spin_lock_bh(&aq->lock);  in ap_request_timeout()
    441  ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));  in ap_request_timeout()
    442  spin_unlock_bh(&aq->lock);  in ap_request_timeout()
    478  struct ap_queue *aq;  in ap_tasklet_fn() local
    489  hash_for_each(ap_queues, bkt, aq, hnode) {  in ap_tasklet_fn()
    490  spin_lock_bh(&aq->lock);  in ap_tasklet_fn()
    491  wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));  in ap_tasklet_fn()
    492  spin_unlock_bh(&aq->lock);  in ap_tasklet_fn()
    502  struct ap_queue *aq;  in ap_pending_requests() local
    [all …]
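The ap_request_timeout() hits above show the common kernel timer idiom: the callback recovers its containing object with from_timer() and takes the queue lock in bottom-half context before driving the state machine. A minimal sketch of that pattern, with hypothetical names (my_queue, my_request_timeout) rather than the AP bus types:

    #include <linux/timer.h>
    #include <linux/spinlock.h>

    /* Hypothetical miniature of the pattern above: a per-queue timer whose
     * callback recovers the containing structure with from_timer() and takes
     * the queue lock in BH context before handling the timeout. */
    struct my_queue {
            spinlock_t lock;
            struct timer_list timeout;      /* armed with mod_timer() elsewhere */
    };

    static void my_request_timeout(struct timer_list *t)
    {
            /* from_timer() is container_of() keyed on the timer_list member */
            struct my_queue *q = from_timer(q, t, timeout);

            spin_lock_bh(&q->lock);
            /* fail or retry the pending request while holding the lock */
            spin_unlock_bh(&q->lock);
    }

    static void my_queue_init(struct my_queue *q)
    {
            spin_lock_init(&q->lock);
            timer_setup(&q->timeout, my_request_timeout, 0);
    }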
|
D | ap_card.c |
     81  struct ap_queue *aq;  in request_count_store() local
     85  hash_for_each(ap_queues, bkt, aq, hnode)  in request_count_store()
     86  if (ac == aq->card)  in request_count_store()
     87  aq->total_request_count = 0;  in request_count_store()
    100  struct ap_queue *aq;  in requestq_count_show() local
    106  hash_for_each(ap_queues, bkt, aq, hnode)  in requestq_count_show()
    107  if (ac == aq->card)  in requestq_count_show()
    108  reqq_cnt += aq->requestq_count;  in requestq_count_show()
    119  struct ap_queue *aq;  in pendingq_count_show() local
    125  hash_for_each(ap_queues, bkt, aq, hnode)  in pendingq_count_show()
    [all …]
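The ap_card.c attribute handlers above all walk the global ap_queues hashtable and reset or sum per-queue counters for the queues belonging to one card. A small sketch of the same hash_for_each() aggregation, using illustrative names instead of the AP structures:

    #include <linux/hashtable.h>
    #include <linux/types.h>

    /* Hypothetical sketch: walk a global hashtable of queue objects and sum a
     * counter for the queues that belong to one card. Names (my_queues,
     * my_queue, card) are illustrative, not the driver's. */
    static DEFINE_HASHTABLE(my_queues, 8);

    struct my_queue {
            struct hlist_node hnode;        /* hashtable linkage */
            void *card;                     /* owning card */
            u64 requestq_count;
    };

    static u64 my_requestq_count_for_card(void *card)
    {
            struct my_queue *q;
            u64 sum = 0;
            int bkt;

            /* hash_for_each() visits every object in every bucket */
            hash_for_each(my_queues, bkt, q, hnode)
                    if (q->card == card)
                            sum += q->requestq_count;
            return sum;
    }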
|
D | zcrypt_cex2c.c |
    181  static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)  in zcrypt_cex2c_rng_supported() argument
    210  msg->cprbx.domain = AP_QID_QUEUE(aq->qid);  in zcrypt_cex2c_rng_supported()
    212  rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.msg, ap_msg.len);  in zcrypt_cex2c_rng_supported()
    219  rc = ap_recv(aq->qid, &psmid, ap_msg.msg, 4096);  in zcrypt_cex2c_rng_supported()
    335  struct ap_queue *aq = to_ap_queue(&ap_dev->device);  in zcrypt_cex2c_queue_probe() local
    342  zq->queue = aq;  in zcrypt_cex2c_queue_probe()
    345  ap_rapq(aq->qid);  in zcrypt_cex2c_queue_probe()
    346  rc = zcrypt_cex2c_rng_supported(aq);  in zcrypt_cex2c_queue_probe()
    357  ap_queue_init_state(aq);  in zcrypt_cex2c_queue_probe()
    358  ap_queue_init_reply(aq, &zq->reply);  in zcrypt_cex2c_queue_probe()
    [all …]
|
D | ap_bus.h |
    291  enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
    292  enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
    294  int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
    295  void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
    296  void ap_flush_queue(struct ap_queue *aq);
    306  void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
    308  void ap_queue_prepare_remove(struct ap_queue *aq);
    309  void ap_queue_remove(struct ap_queue *aq);
    310  void ap_queue_init_state(struct ap_queue *aq);
|
D | zcrypt_queue.c |
     44  struct ap_queue *aq = to_ap_queue(dev);  in online_show() local
     45  int online = aq->config && zq->online ? 1 : 0;  in online_show()
     55  struct ap_queue *aq = to_ap_queue(dev);  in online_store() local
     62  if (online && (!aq->config || !aq->card->config))  in online_store()
     72  ap_send_online_uevent(&aq->ap_dev, online);  in online_store()
|
D | zcrypt_cex2a.c |
    152  struct ap_queue *aq = to_ap_queue(&ap_dev->device);  in zcrypt_cex2a_queue_probe() local
    171  zq->queue = aq;  in zcrypt_cex2a_queue_probe()
    174  ap_queue_init_state(aq);  in zcrypt_cex2a_queue_probe()
    175  ap_queue_init_reply(aq, &zq->reply);  in zcrypt_cex2a_queue_probe()
    176  aq->request_timeout = CEX2A_CLEANUP_TIME;  in zcrypt_cex2a_queue_probe()
|
D | zcrypt_cex4.c |
    658  struct ap_queue *aq = to_ap_queue(&ap_dev->device);  in zcrypt_cex4_queue_probe() local
    662  if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {  in zcrypt_cex4_queue_probe()
    663  zq = zcrypt_queue_alloc(aq->card->maxmsgsize);  in zcrypt_cex4_queue_probe()
    668  } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {  in zcrypt_cex4_queue_probe()
    669  zq = zcrypt_queue_alloc(aq->card->maxmsgsize);  in zcrypt_cex4_queue_probe()
    674  } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {  in zcrypt_cex4_queue_probe()
    675  zq = zcrypt_queue_alloc(aq->card->maxmsgsize);  in zcrypt_cex4_queue_probe()
    684  zq->queue = aq;  in zcrypt_cex4_queue_probe()
    687  ap_queue_init_state(aq);  in zcrypt_cex4_queue_probe()
    688  ap_queue_init_reply(aq, &zq->reply);  in zcrypt_cex4_queue_probe()
    [all …]
|
/linux-6.1.9/drivers/net/ethernet/intel/i40e/ |
D | i40e_adminq.c |
     22  hw->aq.asq.tail = I40E_VF_ATQT1;  in i40e_adminq_init_regs()
     23  hw->aq.asq.head = I40E_VF_ATQH1;  in i40e_adminq_init_regs()
     24  hw->aq.asq.len = I40E_VF_ATQLEN1;  in i40e_adminq_init_regs()
     25  hw->aq.asq.bal = I40E_VF_ATQBAL1;  in i40e_adminq_init_regs()
     26  hw->aq.asq.bah = I40E_VF_ATQBAH1;  in i40e_adminq_init_regs()
     27  hw->aq.arq.tail = I40E_VF_ARQT1;  in i40e_adminq_init_regs()
     28  hw->aq.arq.head = I40E_VF_ARQH1;  in i40e_adminq_init_regs()
     29  hw->aq.arq.len = I40E_VF_ARQLEN1;  in i40e_adminq_init_regs()
     30  hw->aq.arq.bal = I40E_VF_ARQBAL1;  in i40e_adminq_init_regs()
     31  hw->aq.arq.bah = I40E_VF_ARQBAH1;  in i40e_adminq_init_regs()
    [all …]
|
D | i40e_nvm.c |
     76  access, time_left, ret_code, hw->aq.asq_last_status);  in i40e_acquire_nvm()
     98  time_left, ret_code, hw->aq.asq_last_status);  in i40e_acquire_nvm()
    126  (total_delay < hw->aq.asq_cmd_timeout)) {  in i40e_release_nvm()
    874  mutex_lock(&hw->aq.arq_mutex);  in i40e_nvmupd_command()
    912  mutex_unlock(&hw->aq.arq_mutex);  in i40e_nvmupd_command()
    940  hw->aq.asq_last_status);  in i40e_nvmupd_state_init()
    951  hw->aq.asq_last_status);  in i40e_nvmupd_state_init()
    965  hw->aq.asq_last_status);  in i40e_nvmupd_state_init()
    982  hw->aq.asq_last_status);  in i40e_nvmupd_state_init()
    999  hw->aq.asq_last_status);  in i40e_nvmupd_state_init()
    [all …]
|
D | i40e_main.c |
    1827  i40e_aq_str(hw, hw->aq.asq_last_status));  in i40e_set_mac()
    1859  i40e_aq_str(hw, hw->aq.asq_last_status));  in i40e_config_rss_aq()
    1871  i40e_aq_str(hw, hw->aq.asq_last_status));  in i40e_config_rss_aq()
    2451  i40e_aq_str(hw, hw->aq.asq_last_status),  in i40e_aqc_broadcast_filter()
    2493  i40e_aq_str(hw, hw->aq.asq_last_status));  in i40e_set_promiscuous()
    2505  i40e_aq_str(hw, hw->aq.asq_last_status));  in i40e_set_promiscuous()
    2515  i40e_aq_str(hw, hw->aq.asq_last_status));  in i40e_set_promiscuous()
    2634  filter_list_len = hw->aq.asq_buf_size /  in i40e_sync_vsi_filters()
    2695  filter_list_len = hw->aq.asq_buf_size /  in i40e_sync_vsi_filters()
    2815  hw->aq.asq_last_status);  in i40e_sync_vsi_filters()
    [all …]
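Most of these i40e_main.c hits are the driver's error-reporting idiom: after a failed admin-queue command it logs the firmware's last ASQ status, decoded with i40e_aq_str(). A hedged fragment of that idiom, assuming the i40e driver's internal headers; the wrapper function and message text are illustrative, only the i40e_aq_str()/asq_last_status usage comes from the hits above:

    /* Illustrative helper: report an admin-queue failure with both the
     * driver return code and the firmware's decoded ASQ status. */
    static void report_aq_error(struct i40e_pf *pf, int ret)
    {
            struct i40e_hw *hw = &pf->hw;

            dev_info(&pf->pdev->dev,
                     "admin queue command failed, err %d aq_err %s\n",
                     ret, i40e_aq_str(hw, hw->aq.asq_last_status));
    }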
|
D | i40e_client.c |
    357  cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver;  in i40e_client_add_instance()
    358  cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver;  in i40e_client_add_instance()
    359  cdev->lan_info.fw_build = pf->hw.aq.fw_build;  in i40e_client_add_instance()
    550  err, hw->aq.asq_last_status);  in i40e_client_virtchnl_send()
    692  pf->hw.aq.asq_last_status));  in i40e_client_update_vsi_ctxt()
    720  pf->hw.aq.asq_last_status));  in i40e_client_update_vsi_ctxt()
|
D | i40e_debugfs.c |
     506  ring = &(hw->aq.asq);  in i40e_dbg_dump_aq_desc()
     519  ring = &(hw->aq.arq);  in i40e_dbg_dump_aq_desc()
    1062  pf->hw.aq.asq_last_status);  in i40e_dbg_command_write()
    1175  ret, pf->hw.aq.asq_last_status);  in i40e_dbg_command_write()
    1315  desc->opcode, pf->hw.aq.asq_last_status);  in i40e_dbg_command_write()
    1376  desc->opcode, pf->hw.aq.asq_last_status);  in i40e_dbg_command_write()
    1408  pf->hw.aq.asq_last_status);  in i40e_dbg_command_write()
    1419  __func__, pf->hw.aq.asq_last_status);  in i40e_dbg_command_write()
    1437  __func__, pf->hw.aq.asq_last_status);  in i40e_dbg_command_write()
    1445  pf->hw.aq.asq_last_status);  in i40e_dbg_command_write()
    [all …]
|
/linux-6.1.9/drivers/net/ethernet/intel/iavf/ |
D | iavf_adminq.c |
     19  hw->aq.asq.tail = IAVF_VF_ATQT1;  in iavf_adminq_init_regs()
     20  hw->aq.asq.head = IAVF_VF_ATQH1;  in iavf_adminq_init_regs()
     21  hw->aq.asq.len = IAVF_VF_ATQLEN1;  in iavf_adminq_init_regs()
     22  hw->aq.asq.bal = IAVF_VF_ATQBAL1;  in iavf_adminq_init_regs()
     23  hw->aq.asq.bah = IAVF_VF_ATQBAH1;  in iavf_adminq_init_regs()
     24  hw->aq.arq.tail = IAVF_VF_ARQT1;  in iavf_adminq_init_regs()
     25  hw->aq.arq.head = IAVF_VF_ARQH1;  in iavf_adminq_init_regs()
     26  hw->aq.arq.len = IAVF_VF_ARQLEN1;  in iavf_adminq_init_regs()
     27  hw->aq.arq.bal = IAVF_VF_ARQBAL1;  in iavf_adminq_init_regs()
     28  hw->aq.arq.bah = IAVF_VF_ARQBAH1;  in iavf_adminq_init_regs()
    [all …]
|
/linux-6.1.9/drivers/infiniband/hw/efa/ |
D | efa_com.c |
    126  struct efa_com_admin_queue *aq = &edev->aq;  in efa_com_admin_init_sq() local
    127  struct efa_com_admin_sq *sq = &aq->sq;  in efa_com_admin_init_sq()
    128  u16 size = aq->depth * sizeof(*sq->entries);  in efa_com_admin_init_sq()
    134  dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);  in efa_com_admin_init_sq()
    152  EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);  in efa_com_admin_init_sq()
    163  struct efa_com_admin_queue *aq = &edev->aq;  in efa_com_admin_init_cq() local
    164  struct efa_com_admin_cq *cq = &aq->cq;  in efa_com_admin_init_cq()
    165  u16 size = aq->depth * sizeof(*cq->entries);  in efa_com_admin_init_cq()
    171  dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);  in efa_com_admin_init_cq()
    186  EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);  in efa_com_admin_init_cq()
    [all …]
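efa_com_admin_init_sq()/_cq() above size an admin ring as depth * entry size and allocate it with dma_alloc_coherent() so both the CPU and the device can access it, then program the bus address and depth into device registers. A minimal sketch of that allocation step, with illustrative types rather than EFA's:

    #include <linux/dma-mapping.h>
    #include <linux/types.h>

    /* Illustrative admin ring: `depth` fixed-size entries in DMA-coherent
     * memory, with the bus address reported back for the device registers. */
    struct my_admin_ring {
            void *entries;          /* CPU virtual address of the ring */
            dma_addr_t dma_addr;    /* bus address programmed into the device */
            u16 depth;
            u16 entry_size;
    };

    static int my_admin_ring_alloc(struct device *dmadev,
                                   struct my_admin_ring *ring,
                                   u16 depth, u16 entry_size)
    {
            size_t size = (size_t)depth * entry_size;

            ring->entries = dma_alloc_coherent(dmadev, size, &ring->dma_addr,
                                               GFP_KERNEL);
            if (!ring->entries)
                    return -ENOMEM;

            ring->depth = depth;
            ring->entry_size = entry_size;
            /* the low/high halves of ring->dma_addr and the depth would then
             * be written into the device's base-address/capability registers,
             * as the EFA code does with EFA_SET() above. */
            return 0;
    }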
|
D | efa_com_cmd.c |
     15  struct efa_com_admin_queue *aq = &edev->aq;  in efa_com_create_qp() local
     35  err = efa_com_cmd_exec(aq,  in efa_com_create_qp()
     60  struct efa_com_admin_queue *aq = &edev->aq;  in efa_com_modify_qp() local
     75  err = efa_com_cmd_exec(aq,  in efa_com_modify_qp()
     95  struct efa_com_admin_queue *aq = &edev->aq;  in efa_com_query_qp() local
    103  err = efa_com_cmd_exec(aq,  in efa_com_query_qp()
    129  struct efa_com_admin_queue *aq = &edev->aq;  in efa_com_destroy_qp() local
    135  err = efa_com_cmd_exec(aq,  in efa_com_destroy_qp()
    156  struct efa_com_admin_queue *aq = &edev->aq;  in efa_com_create_cq() local
    179  err = efa_com_cmd_exec(aq,  in efa_com_create_cq()
    [all …]
|
D | efa_com.h |
    107  struct efa_com_admin_queue aq;  member
    171  int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
|
/linux-6.1.9/drivers/net/ethernet/marvell/octeontx2/nic/ |
D | cn10k.c |
     77  struct nix_cn10k_aq_enq_req *aq;  in cn10k_sq_aq_init() local
     81  aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);  in cn10k_sq_aq_init()
     82  if (!aq)  in cn10k_sq_aq_init()
     85  aq->sq.cq = pfvf->hw.rx_queues + qidx;  in cn10k_sq_aq_init()
     86  aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */  in cn10k_sq_aq_init()
     87  aq->sq.cq_ena = 1;  in cn10k_sq_aq_init()
     88  aq->sq.ena = 1;  in cn10k_sq_aq_init()
     89  aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);  in cn10k_sq_aq_init()
     90  aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);  in cn10k_sq_aq_init()
     91  aq->sq.default_chan = pfvf->hw.tx_chan_base;  in cn10k_sq_aq_init()
    [all …]
|
D | otx2_common.c |
    309  struct nix_aq_enq_req *aq;  in otx2_set_rss_table() local
    316  aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);  in otx2_set_rss_table()
    317  if (!aq) {  in otx2_set_rss_table()
    326  aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);  in otx2_set_rss_table()
    327  if (!aq) {  in otx2_set_rss_table()
    333  aq->rss.rq = rss_ctx->ind_tbl[idx];  in otx2_set_rss_table()
    336  aq->qidx = index + idx;  in otx2_set_rss_table()
    337  aq->ctype = NIX_AQ_CTYPE_RSS;  in otx2_set_rss_table()
    338  aq->op = NIX_AQ_INSTOP_INIT;  in otx2_set_rss_table()
    798  struct nix_aq_enq_req *aq;  in otx2_rq_init() local
    [all …]
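otx2_set_rss_table() above programs the RSS indirection table by queuing one NIX AQ mailbox request per table slot: allocate the request from the mailbox, fill the RSS context payload, and set the instruction header (qidx, ctype, op). A hedged sketch of that loop; the function signature, loop bound, and error unwinding are simplified assumptions for illustration, only the allocator and the field names come from the hits above:

    /* Illustrative fragment: one INIT request per indirection-table slot.
     * The parameter types are simplified; the real code also unlocks and
     * syncs the mailbox on allocation failure. */
    static int my_set_rss_table(struct mbox *mbox, const u8 *ind_tbl,
                                int index, int rss_size)
    {
            struct nix_aq_enq_req *aq;
            int idx;

            for (idx = 0; idx < rss_size; idx++) {
                    aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
                    if (!aq)
                            return -ENOMEM;

                    /* RSS context payload: which RQ this slot steers to */
                    aq->rss.rq = ind_tbl[idx];

                    /* instruction header: which entry, what kind, what to do */
                    aq->qidx  = index + idx;
                    aq->ctype = NIX_AQ_CTYPE_RSS;
                    aq->op    = NIX_AQ_INSTOP_INIT;
            }
            /* ...the queued mailbox messages are then sent to the admin
             * function by the driver's mailbox sync/send step (elided). */
            return 0;
    }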
|
D | otx2_dcbnl.c |
    337  struct nix_aq_enq_req *aq;  in otx2_update_bpid_in_rqctx() local
    354  aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);  in otx2_update_bpid_in_rqctx()
    355  if (!aq) {  in otx2_update_bpid_in_rqctx()
    360  aq->cq.bpid = pfvf->bpid[vlan_prio];  in otx2_update_bpid_in_rqctx()
    361  aq->cq_mask.bpid = GENMASK(8, 0);  in otx2_update_bpid_in_rqctx()
    364  aq->qidx = qidx;  in otx2_update_bpid_in_rqctx()
    365  aq->ctype = NIX_AQ_CTYPE_CQ;  in otx2_update_bpid_in_rqctx()
    366  aq->op = NIX_AQ_INSTOP_WRITE;  in otx2_update_bpid_in_rqctx()
|
/linux-6.1.9/drivers/spi/ |
D | atmel-quadspi.c |
    226  static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)  in atmel_qspi_read() argument
    228  u32 value = readl_relaxed(aq->regs + offset);  in atmel_qspi_read()
    233  dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,  in atmel_qspi_read()
    240  static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)  in atmel_qspi_write() argument
    245  dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,  in atmel_qspi_write()
    249  writel_relaxed(value, aq->regs + offset);  in atmel_qspi_write()
    295  static int atmel_qspi_set_cfg(struct atmel_qspi *aq,  in atmel_qspi_set_cfg() argument
    378  if (aq->mr != QSPI_MR_SMM) {  in atmel_qspi_set_cfg()
    379  atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);  in atmel_qspi_set_cfg()
    380  aq->mr = QSPI_MR_SMM;  in atmel_qspi_set_cfg()
    [all …]
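The atmel-quadspi.c hits show two small idioms: thin MMIO accessors built on readl_relaxed()/writel_relaxed(), and a cached ("shadow") copy of the mode register (aq->mr) so the register is only rewritten when the wanted value actually changes. A sketch of both, with illustrative struct and register names rather than the driver's:

    #include <linux/io.h>
    #include <linux/types.h>

    struct my_qspi {
            void __iomem *regs;     /* ioremapped controller registers */
            u32 mr;                 /* last value written to the mode register */
    };

    #define MY_QSPI_MR      0x0004  /* hypothetical mode-register offset */

    static u32 my_qspi_read(struct my_qspi *q, u32 offset)
    {
            return readl_relaxed(q->regs + offset);
    }

    static void my_qspi_write(struct my_qspi *q, u32 value, u32 offset)
    {
            writel_relaxed(value, q->regs + offset);
    }

    static void my_qspi_set_mode(struct my_qspi *q, u32 mode)
    {
            if (q->mr != mode) {    /* skip the MMIO write if nothing changed */
                    my_qspi_write(q, mode, MY_QSPI_MR);
                    q->mr = mode;
            }
    }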
|
/linux-6.1.9/drivers/net/ethernet/marvell/octeontx2/af/ |
D | rvu_npa.c |
     18  struct admin_queue *aq = block->aq;  in npa_aq_enqueue_wait() local
     23  result = (struct npa_aq_res_s *)aq->res->base;  in npa_aq_enqueue_wait()
     29  memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),  in npa_aq_enqueue_wait()
     30  (void *)inst, aq->inst->entry_sz);  in npa_aq_enqueue_wait()
     60  struct admin_queue *aq;  in rvu_npa_aq_enq_inst() local
     74  aq = block->aq;  in rvu_npa_aq_enq_inst()
     75  if (!aq) {  in rvu_npa_aq_enq_inst()
     92  inst.res_addr = (u64)aq->res->iova;  in rvu_npa_aq_enq_inst()
     97  spin_lock(&aq->lock);  in rvu_npa_aq_enq_inst()
    100  memset(aq->res->base, 0, aq->res->entry_sz);  in rvu_npa_aq_enq_inst()
    [all …]
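npa_aq_enqueue_wait() above copies an admin-queue instruction into slot `head` of a shared ring, at base + head * entry_sz, while rvu_npa_aq_enq_inst() holds the queue lock and points the instruction at a result area. An illustrative sketch of that enqueue step; the struct below is hypothetical, and the real code also polls the result area and rings a hardware doorbell:

    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct my_aq_ring {
            spinlock_t lock;
            void *base;             /* CPU mapping of the instruction ring */
            u32 entry_sz;           /* size of one instruction slot */
            u32 qsize;              /* number of slots in the ring */
            u32 head;               /* next slot to fill */
    };

    static void my_aq_enqueue(struct my_aq_ring *q, const void *inst)
    {
            spin_lock(&q->lock);
            /* copy the fixed-size instruction into the current head slot */
            memcpy(q->base + (q->head * q->entry_sz), inst, q->entry_sz);
            q->head = (q->head + 1) % q->qsize;
            /* a doorbell write would follow so hardware picks up the slot */
            spin_unlock(&q->lock);
    }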
|
/linux-6.1.9/lib/ |
D | kasprintf.c |
     19  va_list aq;  in kvasprintf() local
     21  va_copy(aq, ap);  in kvasprintf()
     22  first = vsnprintf(NULL, 0, fmt, aq);  in kvasprintf()
     23  va_end(aq);  in kvasprintf()
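kvasprintf() is the textbook va_copy() pattern: a va_list can only be traversed once, so the code copies it, runs a sizing pass with vsnprintf(NULL, 0, ...), allocates the buffer, then formats for real with the original list. A simplified sketch of the technique (an illustration, not the lib/ source verbatim):

    #include <linux/slab.h>
    #include <linux/stdarg.h>
    #include <linux/string.h>

    static char *my_kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
    {
            unsigned int first, second;
            va_list aq;
            char *p;

            va_copy(aq, ap);
            first = vsnprintf(NULL, 0, fmt, aq);    /* sizing pass, writes nothing */
            va_end(aq);

            p = kmalloc(first + 1, gfp);
            if (!p)
                    return NULL;

            second = vsnprintf(p, first + 1, fmt, ap);
            WARN_ON(first != second);               /* format must be stable */
            return p;
    }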
|
/linux-6.1.9/arch/riscv/net/ |
D | bpf_jit.h |
    271  static inline u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,  in rv_amo_insn() argument
    274  u8 funct7 = (funct5 << 2) | (aq << 1) | rl;  in rv_amo_insn()
    534  static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)  in rv_amoadd_w() argument
    536  return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);  in rv_amoadd_w()
    539  static inline u32 rv_amoand_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)  in rv_amoand_w() argument
    541  return rv_amo_insn(0xc, aq, rl, rs2, rs1, 2, rd, 0x2f);  in rv_amoand_w()
    544  static inline u32 rv_amoor_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)  in rv_amoor_w() argument
    546  return rv_amo_insn(0x4, aq, rl, rs2, rs1, 2, rd, 0x2f);  in rv_amoxor_w()
    549  static inline u32 rv_amoxor_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)  in rv_amoxor_w() argument
    551  return rv_amo_insn(0x4, aq, rl, rs2, rs1, 2, rd, 0x2f);  in rv_amoxor_w()
    [all …]
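In bpf_jit.h, `aq` is the RISC-V acquire ordering bit rather than a queue pointer: AMO instructions are R-type with opcode 0x2f, funct3 selecting the width (2 for .w), and funct7 packing funct5 (the operation) together with the aq and rl bits, exactly as rv_amo_insn() shows. A self-contained sketch of that encoding; my_rv_r_insn() assumes the standard R-type field layout, whereas the real header presumably composes this through its own R-type helper:

    #include <linux/types.h>

    /* Standard RISC-V R-type layout:
     * funct7[31:25] rs2[24:20] rs1[19:15] funct3[14:12] rd[11:7] opcode[6:0] */
    static inline u32 my_rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd,
                                   u8 opcode)
    {
            return ((u32)funct7 << 25) | ((u32)rs2 << 20) | ((u32)rs1 << 15) |
                   ((u32)funct3 << 12) | ((u32)rd << 7) | opcode;
    }

    /* funct7 carries the AMO operation (funct5) plus the aq/rl ordering bits */
    static inline u32 my_rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
                                     u8 funct3, u8 rd, u8 opcode)
    {
            u8 funct7 = (funct5 << 2) | (aq << 1) | rl;

            return my_rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
    }

    /* Example: amoadd.w.aq rd, rs2, (rs1) -> funct5 = 0, funct3 = 2, aq = 1 */
    static inline u32 my_rv_amoadd_w_aq(u8 rd, u8 rs2, u8 rs1)
    {
            return my_rv_amo_insn(0, 1, 0, rs2, rs1, 2, rd, 0x2f);
    }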
|
/linux-6.1.9/arch/riscv/include/asm/ |
D | asm.h |
     26  #define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq)
|