/linux-6.1.9/block/ |
D | blk-mq-cpumap.c |
    18   static int queue_index(struct blk_mq_queue_map *qmap,    in queue_index() argument
    21   return qmap->queue_offset + (q % nr_queues);    in queue_index()
    35   void blk_mq_map_queues(struct blk_mq_queue_map *qmap)    in blk_mq_map_queues() argument
    37   unsigned int *map = qmap->mq_map;    in blk_mq_map_queues()
    38   unsigned int nr_queues = qmap->nr_queues;    in blk_mq_map_queues()
    51   map[cpu] = queue_index(qmap, nr_queues, q++);    in blk_mq_map_queues()
    64   map[cpu] = queue_index(qmap, nr_queues, q++);    in blk_mq_map_queues()
    68   map[cpu] = queue_index(qmap, nr_queues, q++);    in blk_mq_map_queues()
    84   int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)    in blk_mq_hw_queue_to_node() argument
    89   if (index == qmap->mq_map[i])    in blk_mq_hw_queue_to_node()
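The fragments above are the generic fallback mapping: blk_mq_map_queues() fills qmap->mq_map[] with one hardware-queue index per possible CPU, and queue_index() computes queue_offset + (q % nr_queues). A minimal user-space model of that round-robin arithmetic (a simplification; the real function also keeps sibling threads on the same queue):

    #include <stdio.h>

    int main(void)
    {
            unsigned int nr_cpus = 8, nr_queues = 3, queue_offset = 0;
            unsigned int mq_map[8];
            unsigned int cpu, q;

            /* Same arithmetic as queue_index(): spread CPUs round-robin
             * across nr_queues hardware queues, starting at queue_offset. */
            for (cpu = 0, q = 0; cpu < nr_cpus; cpu++, q++)
                    mq_map[cpu] = queue_offset + (q % nr_queues);

            for (cpu = 0; cpu < nr_cpus; cpu++)
                    printf("cpu %u -> hw queue %u\n", cpu, mq_map[cpu]);
            return 0;
    }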
|
D | blk-mq-pci.c |
    26   void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,    in blk_mq_pci_map_queues() argument
    32   for (queue = 0; queue < qmap->nr_queues; queue++) {    in blk_mq_pci_map_queues()
    38   qmap->mq_map[cpu] = qmap->queue_offset + queue;    in blk_mq_pci_map_queues()
    44   WARN_ON_ONCE(qmap->nr_queues > 1);    in blk_mq_pci_map_queues()
    45   blk_mq_clear_mq_map(qmap);    in blk_mq_pci_map_queues()
|
D | blk-mq-virtio.c |
    24   void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,    in blk_mq_virtio_map_queues() argument
    33   for (queue = 0; queue < qmap->nr_queues; queue++) {    in blk_mq_virtio_map_queues()
    39   qmap->mq_map[cpu] = qmap->queue_offset + queue;    in blk_mq_virtio_map_queues()
    45   blk_mq_map_queues(qmap);    in blk_mq_virtio_map_queues()
|
D | blk-mq.h |
    74   extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
    287  static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)    in blk_mq_clear_mq_map() argument
    292  qmap->mq_map[cpu] = 0;    in blk_mq_clear_mq_map()
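Lines 287 and 292 bracket the blk_mq_clear_mq_map() inline; filling in the loop between them is an assumption (consult block/blk-mq.h in the tree for the authoritative body), but it presumably reads as below. This is the fallback used by the blk_mq_clear_mq_map(qmap) call listed under blk-mq-pci.c above: every CPU is pointed back at hardware queue 0.

    /* Reconstructed sketch, kernel context (linux/blk-mq.h, linux/cpumask.h). */
    static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
    {
            unsigned int cpu;

            /* Point every possible CPU at hardware queue 0. */
            for_each_possible_cpu(cpu)
                    qmap->mq_map[cpu] = 0;
    }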
|
/linux-6.1.9/Documentation/ABI/testing/ |
D | sysfs-class-net-qmi |
    32   Write a number ranging from 1 to 254 to add a qmap mux
    50   created qmap mux based network device.
    52   What: /sys/class/net/<qmimux iface>/qmap/mux_id
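For reference, a small user-space sketch that reads the mux_id attribute documented at line 52 of this ABI file. The interface name qmimux0 is a hypothetical example; the actual name depends on how the mux device was created.

    #include <stdio.h>

    int main(void)
    {
            char buf[16];
            /* Path taken from the ABI entry above; qmimux0 is illustrative. */
            FILE *f = fopen("/sys/class/net/qmimux0/qmap/mux_id", "r");

            if (!f) {
                    perror("mux_id");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("mux_id: %s", buf);
            fclose(f);
            return 0;
    }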
|
/linux-6.1.9/include/linux/ |
D | blk-mq-virtio.h | 8 void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
|
D | blk-mq-pci.h | 8 void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
|
D | blk-mq.h | 897 void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
|
/linux-6.1.9/drivers/net/ipa/data/ |
D | ipa_data-v4.2.c |
    107  .qmap = true,
    132  .qmap = true,
|
D | ipa_data-v4.11.c |
    111  .qmap = true,
    135  .qmap = true,
|
D | ipa_data-v3.5.1.c |
    117  .qmap = true,
    142  .qmap = true,
|
D | ipa_data-v4.5.c |
    120  .qmap = true,
    144  .qmap = true,
|
D | ipa_data-v4.9.c |
    112  .qmap = true,
    136  .qmap = true,
|
D | ipa_data-v3.1.c |
    126  .qmap = true,
    150  .qmap = true,
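All six per-SoC data files above set the same boolean flag in the modem endpoints' configuration initializers. Purely as an illustration of that designated-initializer style (the struct and the neighbouring field below are hypothetical stand-ins, not the driver's real types):

    #include <stdbool.h>

    struct example_endpoint_config {
            bool aggregation;       /* hypothetical neighbouring field */
            bool qmap;              /* endpoint carries QMAP (MAP) framed data */
    };

    static const struct example_endpoint_config example_modem_config = {
            .aggregation    = false,
            .qmap           = true,
    };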
|
/linux-6.1.9/drivers/net/ipa/ |
D | ipa_endpoint.h | 113 bool qmap; member
|
D | ipa_endpoint.c |
    645  if (endpoint->config.qmap) {    in ipa_endpoint_init_hdr()
    690  if (endpoint->config.qmap) {    in ipa_endpoint_init_hdr_ext()
    718  if (endpoint->config.qmap && !endpoint->toward_ipa) {    in ipa_endpoint_init_hdr_ext()
    748  if (endpoint->config.qmap)    in ipa_endpoint_init_hdr_metadata_mask()
|
D | ipa_modem.c | 132 if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP)) in ipa_start_xmit()
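The check above is the transmit-path consequence of config.qmap: a QMAP-enabled endpoint only accepts MAP-encapsulated frames. A self-contained restatement of that guard (the helper name and the bool parameter are illustrative, not the driver's code):

    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    /* True if this skb may be handed to the endpoint; qmap_enabled stands
     * in for endpoint->config.qmap from the fragments above. */
    static bool example_qmap_accepts_skb(const struct sk_buff *skb,
                                         bool qmap_enabled)
    {
            /* QMAP endpoints carry only ETH_P_MAP (MAP-framed) packets. */
            return !qmap_enabled || skb->protocol == htons(ETH_P_MAP);
    }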
|
/linux-6.1.9/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_dcb.c |
    187  unsigned long qmap = 0;    in bnxt_queue_remap() local
    196  __set_bit(j, &qmap);    in bnxt_queue_remap()
    208  j = find_next_zero_bit(&qmap, max, j);    in bnxt_queue_remap()
    210  __set_bit(j, &qmap);    in bnxt_queue_remap()
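Here qmap is not a blk-mq structure but a plain bitmap of queue ids used while remapping: ids already in use are marked with __set_bit(), and free ids are handed out with find_next_zero_bit(). A minimal kernel-context sketch of that idiom (EXAMPLE_MAX_QIDS and the helper name are made up for illustration):

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    #define EXAMPLE_MAX_QIDS        BITS_PER_LONG   /* illustrative limit */

    /* Claim the lowest free queue id in *qmap, or return -1 if none left. */
    static int example_claim_queue_id(unsigned long *qmap)
    {
            unsigned long j = find_next_zero_bit(qmap, EXAMPLE_MAX_QIDS, 0);

            if (j >= EXAMPLE_MAX_QIDS)
                    return -1;
            __set_bit(j, qmap);
            return j;
    }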
|
/linux-6.1.9/arch/powerpc/kvm/ |
D | book3s_xive.h | 132 u8 qmap; member
|
D | book3s_xive.c |
    982   if (xive->qmap & (1 << prio))    in xive_check_provisioning()
    1001  xive->qmap |= (1 << prio);    in xive_check_provisioning()
    1997  if (xive->qmap & (1 << i)) {    in kvmppc_xive_connect_vcpu()
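In XIVE the u8 qmap is a bitmask with one bit per interrupt priority, recording which priorities already have event queues provisioned. The same idiom written as standalone helpers (hypothetical names; the kernel manipulates xive->qmap inline as shown above):

    #include <stdbool.h>
    #include <stdint.h>

    static bool prio_is_provisioned(uint8_t qmap, unsigned int prio)
    {
            return qmap & (1u << prio);     /* xive->qmap & (1 << prio) */
    }

    static uint8_t mark_prio_provisioned(uint8_t qmap, unsigned int prio)
    {
            return qmap | (1u << prio);     /* xive->qmap |= (1 << prio) */
    }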
|
/linux-6.1.9/drivers/net/ethernet/intel/ice/ |
D | ice_lib.c |
    917   u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;    in ice_vsi_setup_q_map() local
    968   qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &    in ice_vsi_setup_q_map()
    974   ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);    in ice_vsi_setup_q_map()
    1111  u16 qcount, qmap;    in ice_chnl_vsi_setup_q_map() local
    1118  qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &    in ice_chnl_vsi_setup_q_map()
    1123  ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);    in ice_chnl_vsi_setup_q_map()
    3540  u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;    in ice_vsi_setup_q_map_mqprio() local
    3550  qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &    in ice_vsi_setup_q_map_mqprio()
    3603  ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);    in ice_vsi_setup_q_map_mqprio()
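In the ice driver qmap is the 16-bit per-TC mapping word written into the VSI context: the first queue index goes into the offset field and the queue count is encoded as a power of two in the queue-number field. A hedged sketch of how that word is assembled, assuming the ICE_AQ_VSI_TC_Q_OFFSET_M and ICE_AQ_VSI_TC_Q_NUM_S/_M macros from ice_adminq_cmd.h (only the _OFFSET_S name appears in the fragments above):

    #include <linux/types.h>

    /* offset: first queue index for the TC; pow: order of the queue count
     * (queue count == 1 << pow).  The caller stores the result with
     * cpu_to_le16(), as in the fragments above. */
    static u16 example_build_tc_qmap(u16 offset, u16 pow)
    {
            return ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
                    ICE_AQ_VSI_TC_Q_OFFSET_M) |
                   ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
                    ICE_AQ_VSI_TC_Q_NUM_M);
    }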
|
D | ice_virtchnl.c |
    1471  unsigned long qmap;    in ice_cfg_interrupt() local
    1476  qmap = map->rxq_map;    in ice_cfg_interrupt()
    1477  for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {    in ice_cfg_interrupt()
    1490  qmap = map->txq_map;    in ice_cfg_interrupt()
    1491  for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {    in ice_cfg_interrupt()
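Here qmap is again a plain bitmap: the VF's rxq_map and txq_map words are copied into an unsigned long so that for_each_set_bit() can visit each queue id attached to the vector. A small kernel-context sketch of that walk (names and the pr_info body are illustrative):

    #include <linux/bitmap.h>
    #include <linux/printk.h>

    static void example_walk_queue_map(unsigned long qmap, unsigned int max_queues)
    {
            unsigned int q;

            /* Visit every queue id whose bit is set in qmap. */
            for_each_set_bit(q, &qmap, max_queues)
                    pr_info("configure queue %u for this vector\n", q);
    }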
|
/linux-6.1.9/drivers/scsi/pm8001/ |
D | pm8001_init.c |
    88   struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];    in pm8001_map_queues() local
    91   blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);    in pm8001_map_queues()
    93   return blk_mq_map_queues(qmap);    in pm8001_map_queues()
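This is the usual .map_queues pattern for a multi-queue SCSI LLD in this tree, where all three mapping helpers return void (see the declarations listed above): use the PCI IRQ-affinity helper when the vectors carry an affinity spread, otherwise fall back to the generic spread. An illustrative sketch, not a verbatim copy of any driver; the vector offset of 1 mirrors pm8001, which reserves vector 0. The virtio_scsi and hisi_sas entries below follow the same shape, with blk_mq_virtio_map_queues() and an open-coded loop respectively.

    #include <linux/blk-mq.h>
    #include <linux/blk-mq-pci.h>
    #include <linux/pci.h>
    #include <scsi/scsi_host.h>

    /* Hypothetical helper: have_irq_affinity stands in for a driver-
     * specific check such as pm8001's number_of_intr > 1. */
    static void example_map_queues(struct Scsi_Host *shost, struct pci_dev *pdev,
                                   bool have_irq_affinity)
    {
            struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

            if (have_irq_affinity)
                    blk_mq_pci_map_queues(qmap, pdev, 1);   /* vector 0 reserved */
            else
                    blk_mq_map_queues(qmap);                /* generic spread */
    }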
|
/linux-6.1.9/drivers/scsi/ |
D | virtio_scsi.c |
    717  struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];    in virtscsi_map_queues() local
    719  blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);    in virtscsi_map_queues()
|
/linux-6.1.9/drivers/scsi/hisi_sas/ |
D | hisi_sas_v2_hw.c |
    3540  struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];    in map_queues_v2_hw() local
    3544  for (queue = 0; queue < qmap->nr_queues; queue++) {    in map_queues_v2_hw()
    3550  qmap->mq_map[cpu] = qmap->queue_offset + queue;    in map_queues_v2_hw()
|