
Searched refs: num_paths (Results 1 – 22 of 22), sorted by relevance

/linux-5.19.10/drivers/interconnect/
bulk.c
15 int __must_check of_icc_bulk_get(struct device *dev, int num_paths, in of_icc_bulk_get() argument
20 for (i = 0; i < num_paths; i++) { in of_icc_bulk_get()
46 void icc_bulk_put(int num_paths, struct icc_bulk_data *paths) in icc_bulk_put() argument
48 while (--num_paths >= 0) { in icc_bulk_put()
49 icc_put(paths[num_paths].path); in icc_bulk_put()
50 paths[num_paths].path = NULL; in icc_bulk_put()
62 int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths) in icc_bulk_set_bw() argument
67 for (i = 0; i < num_paths; i++) { in icc_bulk_set_bw()
86 int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths) in icc_bulk_enable() argument
90 for (i = 0; i < num_paths; i++) { in icc_bulk_enable()
[all …]
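
The bulk helpers above wrap per-path get/set/enable calls behind a single num_paths-sized array. A minimal consumer sketch, assuming a hypothetical device with two interconnect paths; the names "mem"/"cfg" and the bandwidth values are illustrative, not from any real binding:

    #include <linux/device.h>
    #include <linux/interconnect.h>
    #include <linux/kernel.h>

    static int example_vote_buses(struct device *dev)
    {
            /* Illustrative path names and kBps bandwidth requests. */
            struct icc_bulk_data paths[] = {
                    { .name = "mem", .avg_bw = 100000 },
                    { .name = "cfg", .avg_bw = 10000 },
            };
            int num_paths = ARRAY_SIZE(paths);
            int ret;

            ret = of_icc_bulk_get(dev, num_paths, paths);
            if (ret)
                    return ret;

            /* One call votes the stored avg_bw/peak_bw on every path. */
            ret = icc_bulk_set_bw(num_paths, paths);
            if (!ret)
                    ret = icc_bulk_enable(num_paths, paths);
            if (ret)
                    icc_bulk_put(num_paths, paths);  /* unwind on failure */
            return ret;
    }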
/linux-5.19.10/include/linux/
interconnect.h
54 int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
56 void icc_bulk_put(int num_paths, struct icc_bulk_data *paths);
57 int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths);
58 int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths);
59 void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);
114 static inline int of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths) in of_icc_bulk_get() argument
119 static inline void icc_bulk_put(int num_paths, struct icc_bulk_data *paths) in icc_bulk_put() argument
123 static inline int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths) in icc_bulk_set_bw() argument
128 static inline int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths) in icc_bulk_enable() argument
133 static inline void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths) in icc_bulk_disable() argument
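
When CONFIG_INTERCONNECT is disabled, the inline versions above reduce to no-ops so consumers build unchanged. A condensed sketch of that guard idiom (illustrative, not the verbatim header):

    #if IS_ENABLED(CONFIG_INTERCONNECT)
    int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths);
    #else
    static inline int icc_bulk_enable(int num_paths,
                                      const struct icc_bulk_data *paths)
    {
            return 0;  /* framework absent: report success, do nothing */
    }
    #endif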
netdevice.h
921 int num_paths; member
/linux-5.19.10/samples/landlock/
sandboxer.c
58 int i, num_paths = 0; in parse_path() local
61 num_paths++; in parse_path()
64 num_paths++; in parse_path()
67 *path_list = malloc(num_paths * sizeof(**path_list)); in parse_path()
68 for (i = 0; i < num_paths; i++) in parse_path()
71 return num_paths; in parse_path()
86 int num_paths, i, ret = 1; in populate_ruleset() local
101 num_paths = parse_path(env_path_name, &path_list); in populate_ruleset()
102 if (num_paths == 1 && path_list[0][0] == '\0') { in populate_ruleset()
111 for (i = 0; i < num_paths; i++) { in populate_ruleset()
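
parse_path() above derives num_paths from a ':'-separated environment variable: one field minimum, plus one per separator. A standalone userspace sketch of that counting pass (function and variable names are illustrative):

    #include <stdio.h>

    static int count_paths(const char *env_paths)
    {
            int num_paths = 0;

            if (env_paths && env_paths[0] != '\0') {
                    num_paths++;                  /* first field */
                    for (const char *p = env_paths; *p; p++)
                            if (*p == ':')
                                    num_paths++;  /* one more per ':' */
            }
            return num_paths;
    }

    int main(void)
    {
            printf("%d\n", count_paths("/usr:/bin:/etc"));  /* prints 3 */
            return 0;
    }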
/linux-5.19.10/drivers/gpu/drm/msm/
msm_mdss.c
43 u32 num_paths; member
56 msm_mdss->num_paths = 1; in msm_mdss_parse_data_bus_icc_path()
60 msm_mdss->num_paths++; in msm_mdss_parse_data_bus_icc_path()
71 for (i = 0; i < msm_mdss->num_paths; i++) in msm_mdss_put_icc_path()
79 for (i = 0; i < msm_mdss->num_paths; i++) in msm_mdss_icc_request_bw()
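
msm_mdss_parse_data_bus_icc_path() above treats the first data-bus path as mandatory and a second as optional, bumping num_paths only when the optional one resolves. A condensed sketch of that pattern (struct and path names are illustrative):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/interconnect.h>

    struct example_mdss {
            struct icc_path *path[2];
            u32 num_paths;
    };

    static int example_parse_icc(struct device *dev, struct example_mdss *m)
    {
            m->path[0] = of_icc_get(dev, "mdp0-mem");  /* mandatory */
            if (IS_ERR(m->path[0]))
                    return PTR_ERR(m->path[0]);
            m->num_paths = 1;

            m->path[1] = of_icc_get(dev, "mdp1-mem");  /* optional */
            if (!IS_ERR_OR_NULL(m->path[1]))
                    m->num_paths++;

            return 0;
    }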
/linux-5.19.10/drivers/gpu/drm/msm/disp/dpu1/
dpu_core_perf.c
233 dpu_cstate->new_perf.bw_ctl, kms->num_paths); in _dpu_core_perf_crtc_update_bus()
237 if (!kms->num_paths) in _dpu_core_perf_crtc_update_bus()
241 do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/ in _dpu_core_perf_crtc_update_bus()
243 for (i = 0; i < kms->num_paths; i++) in _dpu_core_perf_crtc_update_bus()
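
_dpu_core_perf_crtc_update_bus() above splits the aggregate bandwidth evenly across kms->num_paths and divides by 1000 to convert bytes/s into the kB/s units the ICC framework takes (the Bps_to_icc comment). A reduced sketch of that vote, with names simplified:

    #include <linux/interconnect.h>
    #include <linux/math64.h>

    static void example_update_bus(struct icc_path **paths, u32 num_paths,
                                   u64 total_bw_bps)
    {
            u64 avg_bw = total_bw_bps;
            u32 i;

            if (!num_paths)
                    return;  /* no interconnect paths to vote on */

            do_div(avg_bw, num_paths * 1000);  /* Bps -> kBps per path */

            for (i = 0; i < num_paths; i++)
                    icc_set_bw(paths[i], avg_bw, 0);
    }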
dpu_kms.h
114 u32 num_paths; member
dpu_kms.c
397 dpu_kms->num_paths = 1; in dpu_kms_parse_data_bus_icc_path()
401 dpu_kms->num_paths++; in dpu_kms_parse_data_bus_icc_path()
1292 for (i = 0; i < dpu_kms->num_paths; i++) in dpu_runtime_suspend()
/linux-5.19.10/include/uapi/rdma/
rdma_user_cm.h
164 __u32 num_paths; member
185 __u32 num_paths; member
/linux-5.19.10/drivers/net/ethernet/brocade/bna/
bna_tx_rx.c
629 bna_rit_init(rxf, q_config->num_paths); in bna_rxf_init()
1265 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1626 cfg_req->num_queue_sets = rx->num_paths; in bna_bfi_rx_enet_start()
1627 for (i = 0; i < rx->num_paths; i++) { in bna_bfi_rx_enet_start()
1749 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || in bna_rx_res_check()
1750 (rx_mod->rxq_free_count < rx_cfg->num_paths)) in bna_rx_res_check()
1753 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || in bna_rx_res_check()
1754 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths))) in bna_rx_res_check()
2099 i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) { in bna_bfi_rx_enet_start_rsp()
2170 mem_info->num = q_cfg->num_paths; in bna_rx_res_req()
[all …]
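
bna_rx_res_check() above (lines 1749–1754) gates Rx setup on free resources: one Rx-path object per path, and either one or two Rx queues per path depending on the queue model. A simplified sketch of that check (types and field names are illustrative):

    #include <linux/types.h>

    struct ex_rx_mod { int rxp_free_count; int rxq_free_count; };
    struct ex_rx_cfg { int num_paths; bool single_rxq_per_path; };

    static bool ex_rx_res_check(const struct ex_rx_mod *mod,
                                const struct ex_rx_cfg *cfg)
    {
            int rxq_needed = cfg->single_rxq_per_path ?
                             cfg->num_paths : 2 * cfg->num_paths;

            return mod->rxp_free_count >= cfg->num_paths &&
                   mod->rxq_free_count >= rxq_needed;
    }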
bna_types.h
657 int num_paths; member
782 int num_paths; member
bnad.c
2030 rx_config->num_paths = bnad->num_rxp_per_rx; in bnad_init_rx_config()
2173 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); in bnad_destroy_rx()
2221 rx_config->num_paths, in bnad_setup_rx()
2228 rx_config->num_paths, in bnad_setup_rx()
2264 rx_config->num_paths); in bnad_setup_rx()
/linux-5.19.10/Documentation/admin-guide/device-mapper/
switch.rst
71 <num_paths> <region_size> <num_optional_args> [<optional_args>...] [<dev_path> <offset>]+
72 <num_paths>
105 The path number in the range 0 ... (<num_paths> - 1).
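
For context, a hedged dmsetup invocation matching the constructor syntax above: a 2-path switch with 64-sector regions and no optional arguments, followed by one <dev_path> <offset> pair per path. The sector count and device paths are made up for illustration:

    dmsetup create sw0 --table "0 209715200 switch 2 64 0 /dev/sda 0 /dev/sdb 0"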
/linux-5.19.10/drivers/infiniband/core/
ucma.c
757 resp->num_paths = route->num_paths; in ucma_copy_ib_route()
758 switch (route->num_paths) { in ucma_copy_ib_route()
784 resp->num_paths = route->num_paths; in ucma_copy_iboe_route()
785 switch (route->num_paths) { in ucma_copy_iboe_route()
924 resp->num_paths = ctx->cm_id->route.num_paths; in ucma_query_path()
926 i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data); in ucma_query_path()
cma.c
2121 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; in cma_ib_new_conn_id()
2122 rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec), in cma_ib_new_conn_id()
2128 if (rt->num_paths == 2) in cma_ib_new_conn_id()
2706 route->num_paths = 1; in cma_query_handler()
2961 id->route.num_paths = 1; in rdma_set_ib_path()
3094 route->num_paths = 1; in cma_resolve_iboe_route()
3154 route->num_paths = 0; in cma_resolve_iboe_route()
4142 if (route->num_paths == 2) in cma_connect_ib()
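
The cma.c sites above keep a simple invariant: an IB route records one path normally and two when the connection request carried an alternate path, with path_rec sized to match. A minimal sketch of that allocation (the helper name is illustrative):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>
    #include <rdma/rdma_cm.h>

    static int example_set_route(struct rdma_route *route, bool has_alternate)
    {
            route->num_paths = has_alternate ? 2 : 1;
            route->path_rec = kmalloc_array(route->num_paths,
                                            sizeof(*route->path_rec),
                                            GFP_KERNEL);
            return route->path_rec ? 0 : -ENOMEM;
    }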
/linux-5.19.10/drivers/opp/
of.c
459 int ret, i, count, num_paths; in dev_pm_opp_of_find_icc_paths() local
486 num_paths = count / 2; in dev_pm_opp_of_find_icc_paths()
487 paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL); in dev_pm_opp_of_find_icc_paths()
491 for (i = 0; i < num_paths; i++) { in dev_pm_opp_of_find_icc_paths()
505 opp_table->path_count = num_paths; in dev_pm_opp_of_find_icc_paths()
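
dev_pm_opp_of_find_icc_paths() above derives num_paths from the DT "interconnects" property, where every path contributes a source and a destination endpoint, hence the count / 2. A condensed sketch of that derivation (the function name is illustrative):

    #include <linux/errno.h>
    #include <linux/of.h>

    static int example_count_icc_paths(struct device_node *np, int *num_paths)
    {
            int count;

            *num_paths = 0;
            count = of_count_phandle_with_args(np, "interconnects",
                                               "#interconnect-cells");
            if (count < 0)
                    return 0;        /* property absent: zero paths is fine */
            if (count % 2)
                    return -EINVAL;  /* endpoints must come in src/dst pairs */

            *num_paths = count / 2;  /* one path = source + destination */
            return 0;
    }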
/linux-5.19.10/drivers/message/fusion/
mptscsih.c
2130 int num_paths; in mptscsih_is_phys_disk() local
2149 num_paths = mpt_raid_phys_disk_get_num_paths(ioc, in mptscsih_is_phys_disk()
2151 if (num_paths < 2) in mptscsih_is_phys_disk()
2154 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); in mptscsih_is_phys_disk()
2163 for (j = 0; j < num_paths; j++) { in mptscsih_is_phys_disk()
2208 int num_paths; in mptscsih_raid_id_to_num() local
2227 num_paths = mpt_raid_phys_disk_get_num_paths(ioc, in mptscsih_raid_id_to_num()
2229 if (num_paths < 2) in mptscsih_raid_id_to_num()
2232 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); in mptscsih_raid_id_to_num()
2241 for (j = 0; j < num_paths; j++) { in mptscsih_raid_id_to_num()
mptsas.c
4175 int num_paths; in mptsas_find_phyinfo_by_phys_disk_num() local
4183 num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num); in mptsas_find_phyinfo_by_phys_disk_num()
4184 if (!num_paths) in mptsas_find_phyinfo_by_phys_disk_num()
4187 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); in mptsas_find_phyinfo_by_phys_disk_num()
4191 for (i = 0; i < num_paths; i++) { in mptsas_find_phyinfo_by_phys_disk_num()
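
The three fusion call sites above share one pattern: query how many paths a RAID physical disk has, bail out unless multipath applies, allocate a buffer sized to num_paths, then walk each path. A generic sketch with hypothetical stand-ins for the firmware queries (ex_get_num_paths() and ex_read_path() are not real mpt APIs):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct ex_path { u8 owner_id; };

    /* Hypothetical stand-ins for the controller queries. */
    static int ex_get_num_paths(u8 disk) { return 2; }
    static void ex_read_path(u8 disk, int j, struct ex_path *p)
    {
            p->owner_id = j;
    }

    static int example_walk_disk_paths(u8 phys_disk_num)
    {
            struct ex_path *paths;
            int num_paths, j;

            num_paths = ex_get_num_paths(phys_disk_num);
            if (num_paths < 2)
                    return -ENODEV;  /* multipath needs at least two */

            paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
            if (!paths)
                    return -ENOMEM;

            for (j = 0; j < num_paths; j++)
                    ex_read_path(phys_disk_num, j, &paths[j]);

            kfree(paths);
            return 0;
    }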
/linux-5.19.10/include/rdma/
rdma_cm.h
55 int num_paths; member
/linux-5.19.10/net/netfilter/
nft_flow_offload.c
103 for (i = 0; i < stack->num_paths; i++) { in nft_dev_path_info()
117 i = stack->num_paths; in nft_dev_path_info()
/linux-5.19.10/drivers/platform/x86/
thinkpad_acpi.c
720 char **paths, const int num_paths) in drv_acpi_handle_init() argument
728 for (i = 0; i < num_paths; i++) { in drv_acpi_handle_init()
/linux-5.19.10/net/core/
dev.c
673 int k = stack->num_paths++; in dev_fwd_path()
692 stack->num_paths = 0; in dev_fill_forward_path()
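
dev_fwd_path() above implements a small fixed-depth stack: dev_fill_forward_path() resets num_paths to zero, each hop claims the next slot, and the final value is the number of hops recorded, which consumers such as nft_dev_path_info() then iterate. A simplified sketch, with types reduced from struct net_device_path_stack:

    #include <linux/bug.h>

    #define EX_PATH_STACK_MAX 5

    struct ex_path { void *dev; };

    struct ex_path_stack {
            int num_paths;
            struct ex_path path[EX_PATH_STACK_MAX];
    };

    static struct ex_path *ex_fwd_path(struct ex_path_stack *stack)
    {
            int k = stack->num_paths++;  /* claim the next slot */

            if (WARN_ON_ONCE(k >= EX_PATH_STACK_MAX))
                    return NULL;         /* chain deeper than the stack */
            return &stack->path[k];
    }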