/linux-5.19.10/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_dcbnl.c |
    38   u8 num_tc = 0;   in fm10k_dcbnl_ieee_setets() local
    47   if (ets->prio_tc[i] > num_tc)   in fm10k_dcbnl_ieee_setets()
    48   num_tc = ets->prio_tc[i];   in fm10k_dcbnl_ieee_setets()
    52   if (num_tc)   in fm10k_dcbnl_ieee_setets()
    53   num_tc++;   in fm10k_dcbnl_ieee_setets()
    55   if (num_tc > IEEE_8021QAZ_MAX_TCS)   in fm10k_dcbnl_ieee_setets()
    59   if (num_tc != netdev_get_num_tc(dev)) {   in fm10k_dcbnl_ieee_setets()
    60   int err = fm10k_setup_tc(dev, num_tc);   in fm10k_dcbnl_ieee_setets()
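
The fm10k hits above show a common DCB pattern: the driver derives the TC count from the ETS priority-to-TC table as the highest referenced TC plus one, rejects anything beyond IEEE_8021QAZ_MAX_TCS, and only reconfigures the netdev when the count actually changes. A minimal user-space sketch of that arithmetic, where struct ets_cfg and ets_num_tc() are simplified stand-ins for the kernel's struct ieee_ets and the driver code, not the fm10k implementation itself:

#include <stdint.h>
#include <stdio.h>

#define IEEE_8021QAZ_MAX_TCS 8  /* same value the kernel header defines */

/* Simplified stand-in for the prio_tc[] table of struct ieee_ets. */
struct ets_cfg {
    uint8_t prio_tc[IEEE_8021QAZ_MAX_TCS];
};

/* Highest TC referenced by any priority, plus one; 0 means "no TCs". */
static int ets_num_tc(const struct ets_cfg *ets)
{
    uint8_t num_tc = 0;
    int i;

    for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
        if (ets->prio_tc[i] > num_tc)
            num_tc = ets->prio_tc[i];

    if (num_tc)
        num_tc++;   /* convert the highest index into a count */

    if (num_tc > IEEE_8021QAZ_MAX_TCS)
        return -1;  /* reject an out-of-range map */

    return num_tc;
}

int main(void)
{
    /* priorities 0-3 -> TC 0, priorities 4-7 -> TC 1: two classes */
    struct ets_cfg ets = { .prio_tc = { 0, 0, 0, 0, 1, 1, 1, 1 } };

    printf("num_tc = %d\n", ets_num_tc(&ets));
    return 0;
}

Compiled and run, the example prints num_tc = 2 for a map that uses TC 0 and TC 1.
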
|
/linux-5.19.10/drivers/net/ethernet/hisilicon/hns3/hns3pf/ |
D | hclge_dcb.c |
    76    static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,   in hclge_dcb_common_validate() argument
    81    if (num_tc > hdev->tc_max) {   in hclge_dcb_common_validate()
    84    num_tc, hdev->tc_max);   in hclge_dcb_common_validate()
    89    if (prio_tc[i] >= num_tc) {   in hclge_dcb_common_validate()
    92    i, prio_tc[i], num_tc);   in hclge_dcb_common_validate()
    97    if (num_tc > hdev->vport[0].alloc_tqps) {   in hclge_dcb_common_validate()
    100   num_tc, hdev->vport[0].alloc_tqps);   in hclge_dcb_common_validate()
    184   if (*tc != hdev->tm_info.num_tc)   in hclge_ets_validate()
    239   u8 num_tc = 0;   in hclge_ieee_setets() local
    246   ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);   in hclge_ieee_setets()
    [all …]
|
D | hclge_tm.c |
    234   if (tc >= hdev->tm_info.num_tc)   in hclge_fill_pri_array()
    570   for (i = 0; i < kinfo->tc_info.num_tc; i++) {   in hclge_tm_qs_shaper_cfg()
    603   return vport->alloc_tqps / tc_info->num_tc;   in hclge_vport_get_max_rss_size()
    606   if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)   in hclge_vport_get_max_rss_size()
    624   return kinfo->rss_size * tc_info->num_tc;   in hclge_vport_get_tqp_num()
    627   if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)   in hclge_vport_get_tqp_num()
    646   kinfo->tc_info.num_tc = 1;   in hclge_tm_update_kinfo_rss_size()
    652   kinfo->tc_info.num_tc =   in hclge_tm_update_kinfo_rss_size()
    653   min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);   in hclge_tm_update_kinfo_rss_size()
    693   if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {   in hclge_tm_vport_tc_info_update()
    [all …]
|
/linux-5.19.10/drivers/net/ethernet/sfc/siena/ |
D | tx.c |
    363   unsigned tc, num_tc;   in efx_siena_setup_tc() local
    372   num_tc = mqprio->num_tc;   in efx_siena_setup_tc()
    374   if (num_tc > EFX_MAX_TX_TC)   in efx_siena_setup_tc()
    379   if (num_tc == net_dev->num_tc)   in efx_siena_setup_tc()
    382   for (tc = 0; tc < num_tc; tc++) {   in efx_siena_setup_tc()
    387   net_dev->num_tc = num_tc;   in efx_siena_setup_tc()
    390   max_t(int, num_tc, 1) *   in efx_siena_setup_tc()
|
/linux-5.19.10/drivers/net/ethernet/sfc/falcon/ |
D | tx.c |
    433   unsigned tc, num_tc;   in ef4_setup_tc() local
    439   num_tc = mqprio->num_tc;   in ef4_setup_tc()
    441   if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)   in ef4_setup_tc()
    446   if (num_tc == net_dev->num_tc)   in ef4_setup_tc()
    449   for (tc = 0; tc < num_tc; tc++) {   in ef4_setup_tc()
    454   if (num_tc > net_dev->num_tc) {   in ef4_setup_tc()
    473   net_dev->num_tc = num_tc;   in ef4_setup_tc()
    477   max_t(int, num_tc, 1) *   in ef4_setup_tc()
    488   net_dev->num_tc = num_tc;   in ef4_setup_tc()
|
/linux-5.19.10/net/sched/ |
D | sch_mqprio.c |
    67    if (qopt->num_tc > TC_MAX_QUEUE)   in mqprio_parse_opt()
    72    if (qopt->prio_tc_map[i] >= qopt->num_tc)   in mqprio_parse_opt()
    91    for (i = 0; i < qopt->num_tc; i++) {   in mqprio_parse_opt()
    103   for (j = i + 1; j < qopt->num_tc; j++) {   in mqprio_parse_opt()
    195   if (i >= qopt->num_tc)   in mqprio_init()
    211   if (i >= qopt->num_tc)   in mqprio_init()
    258   for (i = 0; i < mqprio.qopt.num_tc; i++)   in mqprio_init()
    261   for (i = 0; i < mqprio.qopt.num_tc; i++)   in mqprio_init()
    275   netdev_set_num_tc(dev, qopt->num_tc);   in mqprio_init()
    276   for (i = 0; i < qopt->num_tc; i++)   in mqprio_init()
    [all …]
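
The mqprio_parse_opt() hits capture the checks the qdisc runs on a user-supplied configuration before it touches the device: num_tc must not exceed TC_MAX_QUEUE, every priority must map to an existing TC, and, when mqprio itself does the queue partitioning, each TC's (offset, count) range is compared against the device's queue count and against the other TCs. A self-contained sketch of checks in that spirit, using simplified stand-ins for struct tc_mqprio_qopt and the kernel constants rather than the exact mqprio logic:

#include <stdint.h>
#include <stdbool.h>

#define TC_MAX_QUEUE 16  /* stand-in, same value as the kernel's TC_MAX_QUEUE */
#define TC_BITMASK   15  /* stand-in, same value as the kernel's TC_BITMASK */

/* Simplified stand-in for struct tc_mqprio_qopt. */
struct mqprio_opt {
    uint8_t  num_tc;
    uint8_t  prio_tc_map[TC_BITMASK + 1];
    uint16_t count[TC_MAX_QUEUE];
    uint16_t offset[TC_MAX_QUEUE];
};

/*
 * TC count within bounds, every priority mapped to a valid TC, and each
 * TC's queue range inside the device's TX queues without overlapping
 * another TC's range.
 */
static bool mqprio_opt_ok(const struct mqprio_opt *q, unsigned int num_txq)
{
    int i, j;

    if (q->num_tc > TC_MAX_QUEUE)
        return false;

    for (i = 0; i <= TC_BITMASK; i++)
        if (q->prio_tc_map[i] >= q->num_tc)
            return false;

    for (i = 0; i < q->num_tc; i++) {
        unsigned int last = q->offset[i] + q->count[i];

        if (!q->count[i] || last > num_txq)
            return false;

        for (j = i + 1; j < q->num_tc; j++) {
            unsigned int other_last = q->offset[j] + q->count[j];

            if (q->offset[i] < other_last && q->offset[j] < last)
                return false;   /* queue ranges overlap */
        }
    }
    return true;
}

int main(void)
{
    /* two TCs: queues 0-3 and 4-7, priorities 0-3 -> TC 0, 4-7 -> TC 1 */
    struct mqprio_opt q = {
        .num_tc = 2,
        .prio_tc_map = { 0, 0, 0, 0, 1, 1, 1, 1 },
        .count  = { 4, 4 },
        .offset = { 0, 4 },
    };

    return mqprio_opt_ok(&q, 8) ? 0 : 1;
}
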
|
D | sch_taprio.c |
    939    if (!qopt && !dev->num_tc) {   in taprio_parse_mqprio_opt()
    947    if (dev->num_tc)   in taprio_parse_mqprio_opt()
    951    if (qopt->num_tc > TC_MAX_QUEUE) {   in taprio_parse_mqprio_opt()
    957    if (qopt->num_tc > dev->num_tx_queues) {   in taprio_parse_mqprio_opt()
    964    if (qopt->prio_tc_map[i] >= qopt->num_tc) {   in taprio_parse_mqprio_opt()
    970    for (i = 0; i < qopt->num_tc; i++) {   in taprio_parse_mqprio_opt()
    987    for (j = i + 1; j < qopt->num_tc; j++) {   in taprio_parse_mqprio_opt()
    1212   for (i = 0; i < dev->num_tc; i++) {   in tc_map_to_queue_mask()
    1413   if (!mqprio || mqprio->num_tc != dev->num_tc)   in taprio_mqprio_cmp()
    1416   for (i = 0; i < mqprio->num_tc; i++)   in taprio_mqprio_cmp()
    [all …]
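
taprio reuses the same mqprio validation shape and additionally has to translate a set of traffic classes into a set of TX queues; the tc_map_to_queue_mask() hit at line 1212 belongs to that translation. A standalone sketch of the idea, with struct tc_layout standing in for the per-TC offset/count that the kernel keeps in dev->tc_to_txq[]:

#include <stdint.h>

#define MAX_TCS 16

/* Simplified per-device TC layout: TC t owns count[t] queues at offset[t]. */
struct tc_layout {
    int      num_tc;
    uint16_t offset[MAX_TCS];
    uint16_t count[MAX_TCS];
};

/*
 * Expand a bitmap of traffic classes into a bitmap of TX queues, in the
 * spirit of taprio's tc_map_to_queue_mask(): for every TC set in tc_mask,
 * set the bits of all queues that TC owns.
 */
static uint32_t tc_map_to_queue_mask(const struct tc_layout *l, uint32_t tc_mask)
{
    uint32_t queue_mask = 0;
    int tc, q;

    for (tc = 0; tc < l->num_tc; tc++) {
        if (!(tc_mask & (1u << tc)))
            continue;

        for (q = l->offset[tc]; q < l->offset[tc] + l->count[tc]; q++)
            queue_mask |= 1u << q;
    }

    return queue_mask;
}

int main(void)
{
    struct tc_layout l = { .num_tc = 2, .offset = { 0, 4 }, .count = { 4, 4 } };

    /* TC 1 only: queues 4-7 -> mask 0xf0 */
    return tc_map_to_queue_mask(&l, 0x2) == 0xf0 ? 0 : 1;
}
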
|
/linux-5.19.10/drivers/net/ethernet/sfc/ |
D | tx.c |
    614   unsigned tc, num_tc;   in efx_setup_tc() local
    623   num_tc = mqprio->num_tc;   in efx_setup_tc()
    625   if (num_tc > EFX_MAX_TX_TC)   in efx_setup_tc()
    630   if (num_tc == net_dev->num_tc)   in efx_setup_tc()
    633   for (tc = 0; tc < num_tc; tc++) {   in efx_setup_tc()
    638   net_dev->num_tc = num_tc;   in efx_setup_tc()
    641   max_t(int, num_tc, 1) *   in efx_setup_tc()
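
The three sfc-family entries (siena and falcon above, plus this one) share the same shape: copy mqprio->num_tc, reject values above the hardware limit, bail out early if the count is unchanged, program a queue range per TC, and finally resize the real TX queue count to max(num_tc, 1) times a per-device factor (the factor itself is truncated in the listing). Below is a standalone sketch of one such layout, an even split where every class gets queues_per_tc consecutive queues; the helper name, the factor, and the even split are illustrative assumptions, not the sfc code:

#include <stdint.h>
#include <stdio.h>

#define MAX_TCS 8

/* Per-TC queue ranges of the kind a driver hands to netdev_set_tc_queue(). */
struct tc_queues {
    uint16_t offset[MAX_TCS];
    uint16_t count[MAX_TCS];
    unsigned int real_num_txq;
};

/*
 * Illustrative even split: TC t owns the block
 * [t * queues_per_tc, (t + 1) * queues_per_tc), and the device exposes
 * max(num_tc, 1) * queues_per_tc real TX queues.
 */
static void layout_tc_queues(struct tc_queues *q, unsigned int num_tc,
                             unsigned int queues_per_tc)
{
    unsigned int tc;

    for (tc = 0; tc < num_tc; tc++) {
        q->offset[tc] = tc * queues_per_tc;
        q->count[tc] = queues_per_tc;
    }
    q->real_num_txq = (num_tc ? num_tc : 1) * queues_per_tc;
}

int main(void)
{
    struct tc_queues q = { 0 };

    layout_tc_queues(&q, 2, 4);  /* two TCs, four queues each */
    printf("real TX queues: %u, TC 1 starts at queue %u\n",
           q.real_num_txq, (unsigned int)q.offset[1]);
    return 0;
}
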
|
/linux-5.19.10/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | ptp.c |
    146   for (i = 0; i < c->num_tc; i++) {   in mlx5e_ptp_napi_poll()
    171   for (i = 0; i < c->num_tc; i++) {   in mlx5e_ptp_napi_poll()
    326   u8 num_tc = mlx5e_get_dcb_num_tc(params);   in mlx5e_ptp_open_txqsqs() local
    331   ix_base = num_tc * params->num_channels;   in mlx5e_ptp_open_txqsqs()
    333   for (tc = 0; tc < num_tc; tc++) {   in mlx5e_ptp_open_txqsqs()
    355   for (tc = 0; tc < c->num_tc; tc++)   in mlx5e_ptp_close_txqsqs()
    366   u8 num_tc;   in mlx5e_ptp_open_tx_cqs() local
    370   num_tc = mlx5e_get_dcb_num_tc(params);   in mlx5e_ptp_open_tx_cqs()
    379   for (tc = 0; tc < num_tc; tc++) {   in mlx5e_ptp_open_tx_cqs()
    387   for (tc = 0; tc < num_tc; tc++) {   in mlx5e_ptp_open_tx_cqs()
    [all …]
|
D | qos.c |
    1035   u8 num_tc;   member
    1048   int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,   in mlx5e_mqprio_rl_init() argument
    1058   if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))   in mlx5e_mqprio_rl_init()
    1062   rl->num_tc = num_tc;   in mlx5e_mqprio_rl_init()
    1063   rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);   in mlx5e_mqprio_rl_init()
    1073   for (tc = 0; tc < num_tc; tc++) {   in mlx5e_mqprio_rl_init()
    1100   for (tc = 0; tc < rl->num_tc; tc++)   in mlx5e_mqprio_rl_cleanup()
    1108   if (tc >= rl->num_tc)   in mlx5e_mqprio_rl_get_node_hw_id()
|
D | qos.h | 48 int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
|
D | ptp.h | 36 u8 num_tc; member
|
/linux-5.19.10/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_tc_mqprio.c |
    19    if (!mqprio->qopt.num_tc)   in cxgb4_mqprio_validate()
    31    } else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {   in cxgb4_mqprio_validate()
    47    for (i = 0; i < mqprio->qopt.num_tc; i++) {   in cxgb4_mqprio_validate()
    53    for (j = i + 1; j < mqprio->qopt.num_tc; j++) {   in cxgb4_mqprio_validate()
    339   for (i = 0; i < mqprio->qopt.num_tc; i++) {   in cxgb4_mqprio_alloc_tc()
    377   for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)   in cxgb4_mqprio_free_tc()
    444   for (i = 0; i < mqprio->qopt.num_tc; i++) {   in cxgb4_mqprio_enable_offload()
    480   ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);   in cxgb4_mqprio_enable_offload()
    485   for (i = 0; i < mqprio->qopt.num_tc; i++) {   in cxgb4_mqprio_enable_offload()
    510   i = mqprio->qopt.num_tc;   in cxgb4_mqprio_enable_offload()
    [all …]
|
/linux-5.19.10/drivers/net/ethernet/intel/ice/ |
D | ice_dcb_lib.c |
    13    u8 i, num_tc, ena_tc = 1;   in ice_dcb_get_ena_tc() local
    15    num_tc = ice_dcb_get_num_tc(dcbcfg);   in ice_dcb_get_ena_tc()
    17    for (i = 0; i < num_tc; i++)   in ice_dcb_get_ena_tc()
    108   u8 num_tc = 0;   in ice_dcb_get_num_tc() local
    116   num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);   in ice_dcb_get_num_tc()
    120   if (num_tc & BIT(i)) {   in ice_dcb_get_num_tc()
    152   u8 num_tc, ena_tc_map, pfc_ena_map;   in ice_get_first_droptc() local
    155   num_tc = ice_dcb_get_num_tc(cfg);   in ice_get_first_droptc()
    164   for (i = 0; i < num_tc; i++) {   in ice_get_first_droptc()
    314   u8 num_tc, total_bw = 0;   in ice_dcb_bwchk() local
    [all …]
|
/linux-5.19.10/drivers/net/ethernet/aquantia/atlantic/ |
D | aq_main.c |
    382   const unsigned int num_tc)   in aq_validate_mqprio_opt() argument
    389   if (num_tc > tcs_max) {   in aq_validate_mqprio_opt()
    394   if (num_tc != 0 && !is_power_of_2(num_tc)) {   in aq_validate_mqprio_opt()
    423   err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);   in aq_ndo_setup_tc()
    427   for (i = 0; i < mqprio->qopt.num_tc; i++) {   in aq_ndo_setup_tc()
    443   return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,   in aq_ndo_setup_tc()
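
Individual drivers layer their own constraints on top of the generic checks; the atlantic hits above cap num_tc at a device maximum and additionally require any non-zero count to be a power of two. A sketch of that check, with tcs_max standing in for the device-specific limit:

#include <stdbool.h>

/* Same test the kernel's is_power_of_2() helper performs. */
static bool is_power_of_2(unsigned long n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

/*
 * Validation in the spirit of aq_validate_mqprio_opt(): the requested TC
 * count must not exceed the device limit, and a non-zero count must be a
 * power of two.
 */
static int validate_num_tc(unsigned int num_tc, unsigned int tcs_max)
{
    if (num_tc > tcs_max)
        return -1;
    if (num_tc != 0 && !is_power_of_2(num_tc))
        return -1;
    return 0;
}

int main(void)
{
    /* 4 TCs on a device that supports up to 8: accepted; 6 is rejected */
    return validate_num_tc(4, 8) == 0 && validate_num_tc(6, 8) != 0 ? 0 : 1;
}
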
|
/linux-5.19.10/drivers/infiniband/hw/irdma/ |
D | main.c |
    68    l2params->num_tc = qos_info->num_tc;   in irdma_fill_qos_info()
    71    for (i = 0; i < l2params->num_tc; i++) {   in irdma_fill_qos_info()
    117   iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode;   in irdma_iidc_event_handler()
    292   iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;   in irdma_probe()
|
/linux-5.19.10/drivers/net/ethernet/ti/ |
D | cpsw_priv.c |
    781    static int cpsw_tc_to_fifo(int tc, int num_tc)   in cpsw_tc_to_fifo() argument
    783    if (tc == num_tc - 1)   in cpsw_tc_to_fifo()
    947    fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);   in cpsw_set_cbs()
    996    int fifo, num_tc, count, offset;   in cpsw_set_mqprio() local
    1001   num_tc = mqprio->qopt.num_tc;   in cpsw_set_mqprio()
    1002   if (num_tc > CPSW_TC_NUM)   in cpsw_set_mqprio()
    1012   if (num_tc) {   in cpsw_set_mqprio()
    1015   fifo = cpsw_tc_to_fifo(tc, num_tc);   in cpsw_set_mqprio()
    1019   netdev_set_num_tc(ndev, num_tc);   in cpsw_set_mqprio()
    1020   for (i = 0; i < num_tc; i++) {   in cpsw_set_mqprio()
|
/linux-5.19.10/drivers/net/ethernet/intel/i40e/ |
D | i40e_dcb.h |
    252   u8 num_tc, u8 num_ports);
    255   void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
|
D | i40e_virtchnl_pf.c |
    319    for (i = 0; i < vf->num_tc; i++) {   in i40e_get_real_pf_qid()
    865    u32 reg, num_tc = 1; /* VF has at least one traffic class */   in i40e_map_pf_queues_to_vsi() local
    870    num_tc = vf->num_tc;   in i40e_map_pf_queues_to_vsi()
    872    for (i = 0; i < num_tc; i++) {   in i40e_map_pf_queues_to_vsi()
    913    u32 qps, num_tc = 1; /* VF has at least one traffic class */   in i40e_map_pf_to_vf_queues() local
    918    num_tc = vf->num_tc;   in i40e_map_pf_to_vf_queues()
    920    for (i = 0; i < num_tc; i++) {   in i40e_map_pf_to_vf_queues()
    1024   for (j = 0; j < vf->num_tc; j++) {   in i40e_free_vf_res()
    1099   for (idx = 1; idx < vf->num_tc; idx++) {   in i40e_alloc_vf_res()
    2032   for (i = 1; i < vf->num_tc; i++) {   in i40e_del_qch()
    [all …]
|
/linux-5.19.10/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_txrx.c |
    138   for (i = 0; i < c->num_tc; i++)   in mlx5e_napi_poll()
    206   for (i = 0; i < c->num_tc; i++) {   in mlx5e_napi_poll()
|
/linux-5.19.10/drivers/net/ethernet/qlogic/qede/ |
D | qede_main.c |
    618    static int qede_setup_tc(struct net_device *ndev, u8 num_tc)   in qede_setup_tc() argument
    623    if (num_tc > edev->dev_info.num_tc)   in qede_setup_tc()
    627    netdev_set_num_tc(ndev, num_tc);   in qede_setup_tc()
    689    return qede_setup_tc(dev, mqprio->num_tc);   in qede_setup_tc_offload()
    781    info->num_queues * info->num_tc,   in qede_alloc_etherdev()
    1009   fp->txq = kcalloc(edev->dev_info.num_tc,   in qede_alloc_fp_array()
    1875   edev->dev_info.num_tc);   in qede_set_real_num_queues()
    2417   u8 num_tc;   in qede_load() local
    2463   num_tc = netdev_get_num_tc(edev->ndev);   in qede_load()
    2464   num_tc = num_tc ? num_tc : edev->dev_info.num_tc;   in qede_load()
    [all …]
|
/linux-5.19.10/include/linux/net/intel/ |
D | iidc.h | 64 u8 num_tc; member
|
/linux-5.19.10/drivers/net/ethernet/intel/iavf/ |
D | iavf_main.c |
    1426   adapter->num_tc)   in iavf_alloc_queues()
    1762   adapter->num_tc)   in iavf_init_interrupt_scheme()
    1764   adapter->num_tc);   in iavf_init_interrupt_scheme()
    2946   adapter->num_tc) {   in iavf_reset_task()
    3341   if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||   in iavf_validate_ch_config()
    3342   mqprio_qopt->qopt.num_tc < 1)   in iavf_validate_ch_config()
    3345   for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {   in iavf_validate_ch_config()
    3424   u8 num_tc = 0, total_qps = 0;   in __iavf_setup_tc() local
    3430   num_tc = mqprio_qopt->qopt.num_tc;   in __iavf_setup_tc()
    3438   adapter->num_tc = 0;   in __iavf_setup_tc()
    [all …]
|
/linux-5.19.10/net/core/ |
D | dev.c |
    2326   dev->num_tc = 0;   in netif_setup_tc()
    2345   if (dev->num_tc) {   in netdev_txq_to_tc()
    2404   int num_tc = dev_maps->num_tc;   in remove_xps_queue_cpu() local
    2408   for (tci = cpu * num_tc; num_tc--; tci++) {   in remove_xps_queue_cpu()
    2526   int i, tci = index * dev_maps->num_tc;   in xps_copy_dev_maps()
    2530   for (i = 0; i < dev_maps->num_tc; i++, tci++) {   in xps_copy_dev_maps()
    2548   int maps_sz, num_tc = 1, tc = 0;   in __netif_set_xps_queue() local
    2552   if (dev->num_tc) {   in __netif_set_xps_queue()
    2554   num_tc = dev->num_tc;   in __netif_set_xps_queue()
    2555   if (num_tc < 0)   in __netif_set_xps_queue()
    [all …]
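
The core side lives in net/core/dev.c: netif_setup_tc() and netdev_txq_to_tc() maintain and query the per-device TC table, and the XPS code sizes and indexes its maps by num_tc (note the cpu * num_tc and index * dev_maps->num_tc strides above). A standalone sketch of the reverse lookup that netdev_txq_to_tc() performs, with struct tc_table standing in for the tc_to_txq[] offset/count table on struct net_device:

#include <stdint.h>
#include <stdio.h>

#define MAX_TCS 16

/* Per-TC queue ranges of the kind netdev_set_tc_queue() records. */
struct tc_table {
    int      num_tc;
    uint16_t offset[MAX_TCS];
    uint16_t count[MAX_TCS];
};

/*
 * In the spirit of netdev_txq_to_tc(): walk the per-TC (offset, count)
 * ranges and return the TC that owns TX queue txq, or -1 if no class
 * claims it. With no TCs configured, everything counts as "TC 0".
 */
static int txq_to_tc(const struct tc_table *t, unsigned int txq)
{
    int tc;

    if (!t->num_tc)
        return 0;

    for (tc = 0; tc < t->num_tc; tc++)
        if (txq >= t->offset[tc] && txq < t->offset[tc] + t->count[tc])
            return tc;

    return -1;
}

int main(void)
{
    /* 8 queues split evenly over two TCs: 0-3 -> TC 0, 4-7 -> TC 1 */
    struct tc_table t = {
        .num_tc = 2,
        .offset = { 0, 4 },
        .count  = { 4, 4 },
    };

    printf("txq 5 belongs to TC %d\n", txq_to_tc(&t, 5));
    return 0;
}
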
|
/linux-5.19.10/drivers/net/ethernet/freescale/dpaa/ |
D | dpaa_eth.c |
    354    u8 num_tc;   in dpaa_setup_tc() local
    361    num_tc = mqprio->num_tc;   in dpaa_setup_tc()
    363    if (num_tc == priv->num_tc)   in dpaa_setup_tc()
    366    if (!num_tc) {   in dpaa_setup_tc()
    371    if (num_tc > DPAA_TC_NUM) {   in dpaa_setup_tc()
    377    netdev_set_num_tc(net_dev, num_tc);   in dpaa_setup_tc()
    379    for (i = 0; i < num_tc; i++)   in dpaa_setup_tc()
    384    priv->num_tc = num_tc ? : 1;   in dpaa_setup_tc()
    385    netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);   in dpaa_setup_tc()
    3467   priv->num_tc = 1;   in dpaa_eth_probe()
    [all …]
|