Searched refs:MLX5_CAP_GEN (Results 1 – 25 of 99) sorted by relevance

/linux-6.1.9/drivers/infiniband/hw/mlx5/
umr.h
30 if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) in mlx5r_umr_can_load_pas()
37 if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) && in mlx5r_umr_can_load_pas()
55 MLX5_CAP_GEN(dev->mdev, atomic) && in mlx5r_umr_can_reconfig()
56 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) in mlx5r_umr_can_reconfig()
60 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) && in mlx5r_umr_can_reconfig()
61 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in mlx5r_umr_can_reconfig()
65 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) && in mlx5r_umr_can_reconfig()
66 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in mlx5r_umr_can_reconfig()
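Taken together, the umr.h hits show one recurring idiom: a base capability bit is only honoured when its UMR-specific companion bit is also set (e.g. relaxed_ordering_write vs. relaxed_ordering_write_umr). A minimal kernel-style sketch of that pairing, assuming the usual mlx5 headers; the helper name is invented for illustration:

#include <linux/mlx5/driver.h>	/* struct mlx5_core_dev */
#include <linux/mlx5/device.h>	/* MLX5_CAP_GEN() */

/* Hypothetical helper: relaxed-ordering writes can only be reconfigured
 * through UMR when both the base cap and its UMR companion are present. */
static bool sketch_can_umr_relaxed_ordering_write(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, relaxed_ordering_write) &&
	       MLX5_CAP_GEN(mdev, relaxed_ordering_write_umr);
}
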
main.c
107 int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type); in mlx5_ib_port_link_layer()
523 props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg); in mlx5_query_port_roce()
628 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB) in mlx5_use_mad_ifc()
629 return !MLX5_CAP_GEN(dev->mdev, ib_virt); in mlx5_use_mad_ifc()
725 *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, in mlx5_query_max_pkeys()
809 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); in mlx5_ib_query_device()
844 if (MLX5_CAP_GEN(mdev, pkv)) in mlx5_ib_query_device()
846 if (MLX5_CAP_GEN(mdev, qkv)) in mlx5_ib_query_device()
848 if (MLX5_CAP_GEN(mdev, apm)) in mlx5_ib_query_device()
850 if (MLX5_CAP_GEN(mdev, xrc)) in mlx5_ib_query_device()
[all …]
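The main.c hits are mostly about translating capability fields into ib_device attributes: log-encoded fields become limits via a shift, and single-bit fields become feature flags. A hedged sketch of the limit half of that translation (field names are taken from the excerpts above; the function itself is illustrative):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

/* Illustrative only: expand log2-encoded general caps into byte limits. */
static void sketch_query_limits(struct mlx5_core_dev *mdev,
				u32 *max_msg_sz, u64 *min_page_size)
{
	*max_msg_sz    = 1U   << MLX5_CAP_GEN(mdev, log_max_msg);
	*min_page_size = 1ULL << MLX5_CAP_GEN(mdev, log_pg_sz);
}
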
counters.c
300 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { in do_get_hw_stats()
492 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { in mlx5_ib_fill_counters()
499 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { in mlx5_ib_fill_counters()
506 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { in mlx5_ib_fill_counters()
513 if (MLX5_CAP_GEN(dev->mdev, roce_accl)) { in mlx5_ib_fill_counters()
520 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { in mlx5_ib_fill_counters()
567 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) in __mlx5_ib_alloc_counters()
570 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) in __mlx5_ib_alloc_counters()
573 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) in __mlx5_ib_alloc_counters()
576 if (MLX5_CAP_GEN(dev->mdev, roce_accl)) in __mlx5_ib_alloc_counters()
[all …]
qp.c
359 if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) in set_rq_size()
394 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { in set_rq_size()
397 MLX5_CAP_GEN(dev->mdev, in set_rq_size()
524 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { in calc_sq_size()
526 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); in calc_sq_size()
536 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in calc_sq_size()
540 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); in calc_sq_size()
563 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { in set_user_buf_size()
565 desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); in set_user_buf_size()
577 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in set_user_buf_size()
[all …]
umr.c
55 if (MLX5_CAP_GEN(dev->mdev, atomic)) in get_umr_update_access_mask()
58 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in get_umr_update_access_mask()
61 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in get_umr_update_access_mask()
79 MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) in umr_check_mkey_mask()
83 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) in umr_check_mkey_mask()
87 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in umr_check_mkey_mask()
91 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in umr_check_mkey_mask()
684 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); in umr_can_use_indirect_mkey()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/lib/
sf.h
11 return MLX5_CAP_GEN(dev, sf_base_id); in mlx5_sf_start_function_id()
18 return MLX5_CAP_GEN(dev, sf); in mlx5_sf_supported()
25 if (MLX5_CAP_GEN(dev, max_num_sf)) in mlx5_sf_max_functions()
26 return MLX5_CAP_GEN(dev, max_num_sf); in mlx5_sf_max_functions()
28 return 1 << MLX5_CAP_GEN(dev, log_max_sf); in mlx5_sf_max_functions()
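sf.h shows the "absolute value with log2 fallback" idiom: a newer absolute cap (max_num_sf) is used when non-zero, otherwise the older log-encoded cap (log_max_sf) is expanded. The same idiom reappears below in sf/dev/dev.c and in the EQ sizing code. A short sketch of it, with a made-up function name:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

/* Prefer the absolute cap when the firmware reports it; otherwise fall
 * back to expanding the log2-encoded cap. */
static u16 sketch_max_sub_functions(struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, max_num_sf))
		return MLX5_CAP_GEN(dev, max_num_sf);
	return 1 << MLX5_CAP_GEN(dev, log_max_sf);
}
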
clock.h
38 u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format); in mlx5_is_real_time_rq()
47 u8 sq_ts_format_cap = MLX5_CAP_GEN(mdev, sq_ts_format); in mlx5_is_real_time_sq()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/
fw.c
150 if (MLX5_CAP_GEN(dev, port_selection_cap)) { in mlx5_query_hca_caps()
156 if (MLX5_CAP_GEN(dev, hca_cap_2)) { in mlx5_query_hca_caps()
162 if (MLX5_CAP_GEN(dev, eth_net_offloads)) { in mlx5_query_hca_caps()
168 if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) { in mlx5_query_hca_caps()
174 if (MLX5_CAP_GEN(dev, pg)) { in mlx5_query_hca_caps()
180 if (MLX5_CAP_GEN(dev, atomic)) { in mlx5_query_hca_caps()
186 if (MLX5_CAP_GEN(dev, roce)) { in mlx5_query_hca_caps()
192 if (MLX5_CAP_GEN(dev, nic_flow_table) || in mlx5_query_hca_caps()
193 MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) { in mlx5_query_hca_caps()
199 if (MLX5_CAP_GEN(dev, vport_group_manager) && in mlx5_query_hca_caps()
[all …]
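In fw.c every optional capability group is queried only when the matching bit in the general caps says the group exists, so the firmware is never asked for a group it does not implement. A hedged sketch of one such gate; mlx5_core_get_caps() is the driver's real query helper, but the wrapper around it here is invented:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

/* Query the Ethernet-offload capability group only when the general
 * caps advertise it. */
static int sketch_query_eth_offload_caps(struct mlx5_core_dev *dev)
{
	if (!MLX5_CAP_GEN(dev, eth_net_offloads))
		return 0;
	return mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
}
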
vport.c
271 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) : in mlx5_query_nic_vport_mac_list()
272 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list); in mlx5_query_nic_vport_mac_list()
329 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) : in mlx5_modify_nic_vport_mac_list()
330 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list); in mlx5_modify_nic_vport_mac_list()
380 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list); in mlx5_modify_nic_vport_vlans()
470 if (!MLX5_CAP_GEN(mdev, vport_group_manager)) in mlx5_modify_nic_vport_node_guid()
530 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); in mlx5_query_hca_vport_gid()
531 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size)); in mlx5_query_hca_vport_gid()
564 if (MLX5_CAP_GEN(dev, num_ports) == 2) in mlx5_query_hca_vport_gid()
597 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); in mlx5_query_hca_vport_pkey()
[all …]
dev.c
62 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) in mlx5_eth_supported()
65 if (!MLX5_CAP_GEN(dev, eth_net_offloads)) { in mlx5_eth_supported()
70 if (!MLX5_CAP_GEN(dev, nic_flow_table)) { in mlx5_eth_supported()
103 if (!MLX5_CAP_GEN(dev, cq_moderation)) in mlx5_eth_supported()
187 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) in is_mp_supported()
604 if (!MLX5_CAP_GEN(mdev, vport_group_manager) || in next_phys_dev_lag()
605 !MLX5_CAP_GEN(mdev, lag_master) || in next_phys_dev_lag()
606 (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS || in next_phys_dev_lag()
607 MLX5_CAP_GEN(mdev, num_lag_ports) <= 1)) in next_phys_dev_lag()
en_dcbnl.c
60 #define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
92 if (!MLX5_CAP_GEN(priv->mdev, dcbx)) in mlx5e_dcbnl_switch_to_host_mode()
117 if (!MLX5_CAP_GEN(priv->mdev, ets)) in mlx5e_dcbnl_ieee_getets()
326 if (!MLX5_CAP_GEN(priv->mdev, ets)) in mlx5e_dcbnl_ieee_setets()
422 if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) { in mlx5e_dcbnl_setdcbx()
454 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) || in mlx5e_dcbnl_ieee_setapp()
507 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) || in mlx5e_dcbnl_ieee_delapp()
629 if (!MLX5_CAP_GEN(mdev, ets)) in mlx5e_dcbnl_setall()
738 if (!MLX5_CAP_GEN(priv->mdev, ets)) { in mlx5e_dcbnl_getpgtccfgtx()
1025 if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos)) in mlx5e_dcbnl_build_netdev()
[all …]
pci_irq.c
55 min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size); in mlx5_get_default_msix_vec_count()
56 max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size); in mlx5_get_default_msix_vec_count()
85 if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev)) in mlx5_set_msix_vec_count()
88 min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size); in mlx5_set_msix_vec_count()
89 max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size); in mlx5_set_msix_vec_count()
629 int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? in mlx5_irq_table_create()
630 MLX5_CAP_GEN(dev, max_num_eqs) : in mlx5_irq_table_create()
631 1 << MLX5_CAP_GEN(dev, log_max_eq); in mlx5_irq_table_create()
639 pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1; in mlx5_irq_table_create()
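pci_irq.c sizes the interrupt budget from two capability-derived figures: the EQ count (again the absolute-or-log2 fallback) and a per-port, per-online-CPU vector count plus one spare. How the driver combines them beyond that is not shown in the excerpt, so the final min_t() below is only a plausible illustration:

#include <linux/cpumask.h>	/* num_online_cpus() */
#include <linux/minmax.h>	/* min_t() */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

static int sketch_pf_vector_budget(struct mlx5_core_dev *dev)
{
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	/* One vector per port per online CPU, plus one spare. */
	int pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;

	return min_t(int, pf_vec, num_eqs);
}
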
mlx5_core.h
255 #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
256 MLX5_CAP_GEN((mdev), pps_modify) && \
285 return MLX5_CAP_GEN(dev, vport_group_manager) && in mlx5_lag_is_lacp_owner()
286 (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && in mlx5_lag_is_lacp_owner()
287 MLX5_CAP_GEN(dev, lag_master); in mlx5_lag_is_lacp_owner()
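mlx5_core.h folds several related capability bits into one predicate, either as a macro (MLX5_PPS_CAP, whose remaining conditions are truncated above) or as an inline helper (mlx5_lag_is_lacp_owner). A sketch of the same style of composite predicate; the macro name is invented and only the conditions visible in the excerpt are reproduced:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

/* Invented composite predicate in the MLX5_PPS_CAP() style: the feature
 * is usable only if every listed capability bit is present. */
#define SKETCH_LACP_OWNER_CAP(mdev)			\
	(MLX5_CAP_GEN((mdev), vport_group_manager) &&	\
	 (MLX5_CAP_GEN((mdev), num_lag_ports) > 1) &&	\
	 MLX5_CAP_GEN((mdev), lag_master))
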
uar.c
64 if (MLX5_CAP_GEN(mdev, uar_4k)) in uars_per_sys_page()
65 return MLX5_CAP_GEN(mdev, num_of_uars_per_page); in uars_per_sys_page()
74 if (MLX5_CAP_GEN(mdev, uar_4k)) in uar2pfn()
202 (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET; in map_offset()
282 bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size); in addr_to_dbi_in_syspage()
eq.c
309 if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx)) in create_map_eq()
545 if (MLX5_CAP_GEN(dev, general_notification_event)) in gather_async_events_mask()
548 if (MLX5_CAP_GEN(dev, port_module_event)) in gather_async_events_mask()
556 if (MLX5_CAP_GEN(dev, fpga)) in gather_async_events_mask()
562 if (MLX5_CAP_GEN(dev, temp_warn_event)) in gather_async_events_mask()
568 if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters)) in gather_async_events_mask()
583 if (MLX5_CAP_GEN(dev, event_cap)) in gather_async_events_mask()
1077 int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? in mlx5_eq_table_create()
1078 MLX5_CAP_GEN(dev, max_num_eqs) : in mlx5_eq_table_create()
1079 1 << MLX5_CAP_GEN(dev, log_max_eq); in mlx5_eq_table_create()
en_ethtool.c
522 if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) in mlx5e_ethtool_get_coalesce()
612 if (!MLX5_CAP_GEN(mdev, cq_moderation)) in mlx5e_ethtool_set_coalesce()
630 !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) { in mlx5e_ethtool_set_coalesce()
1447 if (!MLX5_CAP_GEN(mdev, vport_group_manager)) in mlx5e_ethtool_set_pauseparam()
1479 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) || in mlx5e_ethtool_get_ts_info()
1508 if (MLX5_CAP_GEN(mdev, wol_g)) in mlx5e_get_wol_supported()
1511 if (MLX5_CAP_GEN(mdev, wol_s)) in mlx5e_get_wol_supported()
1514 if (MLX5_CAP_GEN(mdev, wol_a)) in mlx5e_get_wol_supported()
1517 if (MLX5_CAP_GEN(mdev, wol_b)) in mlx5e_get_wol_supported()
1520 if (MLX5_CAP_GEN(mdev, wol_m)) in mlx5e_get_wol_supported()
[all …]
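The en_ethtool.c hits around mlx5e_get_wol_supported() show capability bits being accumulated into an ethtool Wake-on-LAN bitmask. The mapping below follows the standard WoL letter convention (g = magic, s = secure-on, a = ARP, b = broadcast, m = multicast) and is a sketch rather than a copy of the driver's table:

#include <linux/ethtool.h>	/* WAKE_* flags */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

/* Build a supported Wake-on-LAN mask from the wol_* capability bits. */
static u32 sketch_wol_supported(struct mlx5_core_dev *mdev)
{
	u32 wol = 0;

	if (MLX5_CAP_GEN(mdev, wol_g))
		wol |= WAKE_MAGIC;
	if (MLX5_CAP_GEN(mdev, wol_s))
		wol |= WAKE_MAGICSECURE;
	if (MLX5_CAP_GEN(mdev, wol_a))
		wol |= WAKE_ARP;
	if (MLX5_CAP_GEN(mdev, wol_b))
		wol |= WAKE_BCAST;
	if (MLX5_CAP_GEN(mdev, wol_m))
		wol |= WAKE_MCAST;

	return wol;
}
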
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/en/
monitor_stats.c
27 if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters)) in mlx5e_monitor_counter_supported()
30 MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) < in mlx5e_monitor_counter_supported()
33 if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) < in mlx5e_monitor_counter_supported()
103 int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters); in mlx5e_set_monitor_counter()
104 int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters); in mlx5e_set_monitor_counter()
106 MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters); in mlx5e_set_monitor_counter()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/esw/
debugfs.c
141 if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) in mlx5_esw_vport_debugfs_create()
158 if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) { in mlx5_esw_vport_debugfs_create()
165 if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) { in mlx5_esw_vport_debugfs_create()
172 if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun)) in mlx5_esw_vport_debugfs_create()
175 if (MLX5_CAP_GEN(esw->dev, invalid_command_count)) in mlx5_esw_vport_debugfs_create()
179 if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count)) in mlx5_esw_vport_debugfs_create()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/
dev.c
30 return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev); in mlx5_sf_dev_supported()
170 base_id = MLX5_CAP_GEN(table->dev, sf_base_id); in mlx5_sf_dev_state_change_handler()
212 function_id = MLX5_CAP_GEN(dev, sf_base_id); in mlx5_sf_dev_vhca_arm_all()
237 function_id = MLX5_CAP_GEN(dev, sf_base_id); in mlx5_sf_dev_add_active_work()
274 if (MLX5_CAP_GEN(table->dev, eswitch_manager)) in mlx5_sf_dev_queue_active_work()
313 if (MLX5_CAP_GEN(dev, max_num_sf)) in mlx5_sf_dev_table_create()
314 max_sfs = MLX5_CAP_GEN(dev, max_num_sf); in mlx5_sf_dev_table_create()
316 max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf); in mlx5_sf_dev_table_create()
317 table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12); in mlx5_sf_dev_table_create()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/lag/
lag.h
92 if (!MLX5_CAP_GEN(dev, vport_group_manager) || in mlx5_is_lag_supported()
93 !MLX5_CAP_GEN(dev, lag_master) || in mlx5_is_lag_supported()
94 MLX5_CAP_GEN(dev, num_lag_ports) < 2 || in mlx5_is_lag_supported()
95 MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) in mlx5_is_lag_supported()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls.h
22 if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx)) in mlx5e_is_ktls_device()
25 if (!MLX5_CAP_GEN(mdev, log_max_dek)) in mlx5e_is_ktls_device()
61 return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx); in mlx5e_is_ktls_tx()
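ktls.h gates kTLS offload on three things at once: at least one TLS direction capability, a non-zero DEK pool, and not running in a kdump kernel. A minimal sketch of that compound check under the same assumptions (the wrapper name is invented; is_kdump_kernel() comes from linux/crash_dump.h):

#include <linux/crash_dump.h>	/* is_kdump_kernel() */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

/* kTLS offload needs at least one TLS direction, room for DEKs, and a
 * normal (non-kdump) kernel. */
static bool sketch_ktls_usable(struct mlx5_core_dev *mdev)
{
	if (is_kdump_kernel())
		return false;
	if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx))
		return false;
	return MLX5_CAP_GEN(mdev, log_max_dek) != 0;
}
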
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_cmd.c
115 caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required); in mlx5dr_cmd_query_device()
116 caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager); in mlx5dr_cmd_query_device()
117 caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id); in mlx5dr_cmd_query_device()
118 caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); in mlx5dr_cmd_query_device()
119 caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version); in mlx5dr_cmd_query_device()
121 if (MLX5_CAP_GEN(mdev, roce)) { in mlx5dr_cmd_query_device()
133 caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new); in mlx5dr_cmd_query_device()
143 caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0); in mlx5dr_cmd_query_device()
144 caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1); in mlx5dr_cmd_query_device()
149 MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0); in mlx5dr_cmd_query_device()
[all …]
mlx5dr.h
148 return MLX5_CAP_GEN(dev, roce) && in mlx5dr_is_supported()
151 (MLX5_CAP_GEN(dev, steering_format_version) <= in mlx5dr_is_supported()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/
egress_ofld.c
63 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) { in esw_acl_egress_ofld_rules_create()
111 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) { in esw_acl_egress_ofld_groups_create()
178 !MLX5_CAP_GEN(esw->dev, prio_tag_required)) in esw_acl_egress_ofld_setup()
188 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) in esw_acl_egress_ofld_setup()
244 fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); in mlx5_esw_acl_egress_vport_bond()
/linux-6.1.9/include/linux/mlx5/
vport.h
40 (MLX5_CAP_GEN(mdev, vport_group_manager) && \
41 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
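The vport.h hit is a macro that combines the vport_group_manager bit with an Ethernet port-type check (its remaining conditions are cut off in the excerpt). A sketch of a predicate in that style, assuming only what is visible plus the well-known mlx5_core_is_pf() helper; the macro name here is invented:

#include <linux/mlx5/driver.h>	/* mlx5_core_is_pf() */
#include <linux/mlx5/device.h>

/* Invented predicate in the style of the vport.h macro: the device must
 * manage vports, expose an Ethernet port type, and be a PF. */
#define SKETCH_VPORT_MANAGER(mdev)					\
	(MLX5_CAP_GEN(mdev, vport_group_manager) &&			\
	 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&	\
	 mlx5_core_is_pf(mdev))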
