Searched refs:num_pg (Results 1 – 7 of 7) sorted by relevance
301  static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)   in tcm_1d_limit() argument
303  if (__tcm_sizeof(a) < num_pg)   in tcm_1d_limit()
305  if (!num_pg)   in tcm_1d_limit()
308  a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;   in tcm_1d_limit()
309  a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);   in tcm_1d_limit()
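The tcm_1d_limit() hits above show how num_pg clamps a 1D tiler area to a page count. A minimal sketch of the whole helper follows; only the lines listed above come from the search results, the return values are assumptions:

static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)
{
	/* the area must already be large enough to hold num_pg pages */
	if (__tcm_sizeof(a) < num_pg)
		return -ENOMEM;		/* assumed error code */
	if (!num_pg)
		return -EINVAL;		/* assumed error code */

	/* move p1 so the 1D area ends exactly num_pg pages after p0,
	 * wrapping across rows of the container */
	a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
	a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
	return 0;
}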
791   for (i = 0; i < hdev->tm_info.num_pg; i++) {   in hclge_tm_pg_info_init()
871   for (i = 0; i < hdev->tm_info.num_pg; i++) {   in hclge_tm_pg_to_pri_map()
895   for (i = 0; i < hdev->tm_info.num_pg; i++) {   in hclge_tm_pg_shaper_cfg()
938   for (i = 0; i < hdev->tm_info.num_pg; i++) {   in hclge_tm_pg_dwrr_cfg()
1367  for (i = 0; i < hdev->tm_info.num_pg; i++) {   in hclge_tm_lvl2_schd_mode_cfg()
1692  hdev->tm_info.num_pg != 1)   in hclge_tm_schd_init()
356 u8 num_pg; /* It must be 1 if vNET-Base schd */ member
458  for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {   in hclge_dbg_dump_dcb_pg()
735  for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {   in __hclge_dbg_dump_tm_pg()
1601 hdev->tm_info.num_pg = 1; in hclge_configure()
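Every hclge hit walks hdev->tm_info.num_pg, the number of priority groups (PGs) the traffic manager configures; hclge_configure() pins it to 1, and hclge_tm_schd_init() rejects anything else for vNET-based scheduling. A hedged sketch of the shared loop pattern (the helper names other than num_pg are hypothetical, not taken from the driver):

/* Sketch: iterate over every priority group and program it.  The driver
 * repeats this loop shape for PG info init, PG-to-priority mapping,
 * shaper setup, DWRR weights and the level-2 scheduling mode. */
static int example_cfg_all_pg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = example_cfg_one_pg(hdev, i);	/* hypothetical helper */
		if (ret)
			return ret;
	}

	return 0;
}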
1249  unsigned long max_pg, num_pg, new_pg, old_pg;   in mm_account_pinned_pages() local
1255  num_pg = (size >> PAGE_SHIFT) + 2;  /* worst case */   in mm_account_pinned_pages()
1261  new_pg = old_pg + num_pg;   in mm_account_pinned_pages()
1269  mmp->num_pg = num_pg;   in mm_account_pinned_pages()
1271  mmp->num_pg += num_pg;   in mm_account_pinned_pages()
1281  atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);   in mm_unaccount_pinned_pages()
558 unsigned int num_pg; member
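The last two groups are the msg_zerocopy pinned-page accounting: num_pg at line 1255 is a worst-case page count for a buffer of size bytes (both ends may be partial pages), it is charged against the user's locked_vm with a cmpxchg loop, remembered in the mmpin member at line 558, and given back in mm_unaccount_pinned_pages(). A simplified sketch of that pattern, assuming kernel context (PAGE_SHIFT, atomic_long_cmpxchg); the limit handling and error code are assumptions, not the exact kernel function:

/* Sketch of the pinned-page accounting pattern; not the kernel source. */
static int sketch_account_pinned_pages(struct mmpin *mmp, size_t size,
				       atomic_long_t *locked_vm,
				       unsigned long max_pg)
{
	unsigned long num_pg, new_pg, old_pg;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case: partial first and last page */

	/* charge the pages atomically, refusing to cross the limit */
	do {
		old_pg = atomic_long_read(locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;	/* assumed error code */
	} while (atomic_long_cmpxchg(locked_vm, old_pg, new_pg) != old_pg);

	mmp->num_pg += num_pg;			/* remember how much to give back */
	return 0;
}

/* Matches the hit at line 1281: undo the charge when the buffer is released. */
static void sketch_unaccount_pinned_pages(struct mmpin *mmp,
					  atomic_long_t *locked_vm)
{
	atomic_long_sub(mmp->num_pg, locked_vm);
	mmp->num_pg = 0;
}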