/linux-6.1.9/drivers/nvme/host/ |
D | zns.c
    182  int ret, zone_idx = 0;  in nvme_ns_report_zones() local
    201  while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {  in nvme_ns_report_zones()
    216  for (i = 0; i < nz && zone_idx < nr_zones; i++) {  in nvme_ns_report_zones()
    218  zone_idx, cb, data);  in nvme_ns_report_zones()
    221  zone_idx++;  in nvme_ns_report_zones()
    227  if (zone_idx > 0)  in nvme_ns_report_zones()
    228  ret = zone_idx;  in nvme_ns_report_zones()
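
The nvme_ns_report_zones() hits above outline the usual report-zones control flow: zone_idx counts the zones handed to the caller's callback, the loop stops at nr_zones or at the end of the device, and the final zone_idx doubles as the return value. The stand-alone sketch below models only that counting pattern; the zone_info type and the report_zones()/print_zone() names are simplified stand-ins, and the single flat loop glosses over the chunked device reads the kernel function performs.

#include <stdio.h>

/* Simplified stand-ins for the kernel's zone-reporting types. */
struct zone_info {
    unsigned long long start;   /* first sector of the zone */
    unsigned long long len;     /* zone length in sectors   */
};

typedef int (*report_zones_cb)(const struct zone_info *zone,
                               unsigned int zone_idx, void *data);

/*
 * Model of the report-zones loop: walk the device in zone-sized steps,
 * invoke the callback once per zone, stop at nr_zones or at end of
 * device, and return the number of zones reported (zone_idx).
 */
static int report_zones(unsigned long long capacity,
                        unsigned long long zone_len,
                        unsigned long long sector,
                        unsigned int nr_zones,
                        report_zones_cb cb, void *data)
{
    unsigned int zone_idx = 0;
    int ret;

    while (zone_idx < nr_zones && sector < capacity) {
        struct zone_info zone = {
            .start = sector - sector % zone_len,  /* containing zone */
            .len   = zone_len,
        };

        ret = cb(&zone, zone_idx, data);
        if (ret)
            return ret;       /* callback aborts the report */

        zone_idx++;
        sector = zone.start + zone.len;
    }

    return zone_idx;          /* number of zones reported */
}

static int print_zone(const struct zone_info *zone, unsigned int zone_idx,
                      void *data)
{
    (void)data;
    printf("zone %u: start=%llu len=%llu\n", zone_idx, zone->start, zone->len);
    return 0;
}

int main(void)
{
    /* 8 zones of 0x1000 sectors; report at most 4 starting at sector 0. */
    int n = report_zones(8 * 0x1000ULL, 0x1000ULL, 0, 4, print_zone, NULL);

    printf("reported %d zones\n", n);
    return 0;
}

Returning the running zone_idx (line 228) rather than zero is how the caller learns how many zones were actually reported.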
|
/linux-6.1.9/include/trace/events/ |
D | oom.h
    46  __field( int, zone_idx)
    57  __entry->zone_idx = zoneref->zone_idx;
    67  __entry->node, __print_symbolic(__entry->zone_idx, ZONE_TYPE),
|
D | compaction.h
    198  __entry->idx = zone_idx(zone);
    245  __entry->idx = zone_idx(zone);
|
/linux-6.1.9/drivers/scsi/ |
D | sd_zbc.c
    260  int zone_idx = 0;  in sd_zbc_report_zones() local
    275  while (zone_idx < nr_zones && lba < sdkp->capacity) {  in sd_zbc_report_zones()
    285  for (i = 0; i < nr && zone_idx < nr_zones; i++) {  in sd_zbc_report_zones()
    289  if ((zone_idx == 0 &&  in sd_zbc_report_zones()
    292  (zone_idx > 0 && start_lba != lba) ||  in sd_zbc_report_zones()
    296  zone_idx, lba, start_lba, zone_length);  in sd_zbc_report_zones()
    310  ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,  in sd_zbc_report_zones()
    315  zone_idx++;  in sd_zbc_report_zones()
    319  ret = zone_idx;  in sd_zbc_report_zones()
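
Lines 289-296 hint at a per-zone sanity check on the device's report: the reported start_lba is compared against the LBA the loop expects next, with a special case for the first zone (zone_idx == 0). The snippet is truncated, so the sketch below is only a plausible reading of those conditions; the zone_report_sane name and the expected_lba parameter are made up for illustration, not the exact test sd_zbc.c performs.

#include <stdbool.h>
#include <stdio.h>

/*
 * Plausible model of the check: the first reported zone must contain the
 * LBA that was asked for, and every later zone must start exactly where
 * the previous one ended.
 */
static bool zone_report_sane(unsigned int zone_idx,
                             unsigned long long expected_lba,
                             unsigned long long start_lba,
                             unsigned long long zone_length)
{
    if (zone_length == 0)
        return false;
    if (zone_idx == 0)
        return expected_lba >= start_lba &&
               expected_lba < start_lba + zone_length;
    return start_lba == expected_lba;   /* zones must be contiguous */
}

int main(void)
{
    /* First zone covers the requested LBA 100, next zone is contiguous. */
    printf("%d\n", zone_report_sane(0, 100, 0, 4096));     /* 1 */
    printf("%d\n", zone_report_sane(1, 4096, 4096, 4096)); /* 1 */
    printf("%d\n", zone_report_sane(1, 4096, 8192, 4096)); /* 0: gap */
    return 0;
}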
|
/linux-6.1.9/drivers/md/ |
D | dm-zone.c
    41   nr_zones - args.zone_idx);  in dm_blk_do_report_zones()
    44   } while (args.zone_idx < nr_zones &&  in dm_blk_do_report_zones()
    47   return args.zone_idx;  in dm_blk_do_report_zones()
    103  return args->orig_cb(zone, args->zone_idx++, args->orig_data);  in dm_report_zones_cb()
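
dm_report_zones_cb() (line 103) shows how device-mapper renumbers zones: the original callback and its data are carried in an args structure together with a running zone_idx (presumably the zone_idx member listed under include/linux/device-mapper.h below), and each forwarded zone receives the aggregate index via args->zone_idx++. The stand-alone sketch below models only that renumbering wrapper; the struct layout, the renumber_cb name and the zone_info type are simplified assumptions rather than the actual dm_report_zones_args definition.

#include <stdio.h>

struct zone_info {
    unsigned long long start;
    unsigned long long len;
};

typedef int (*report_zones_cb)(const struct zone_info *zone,
                               unsigned int zone_idx, void *data);

/*
 * Modeled on the args structure suggested by the hits: the caller's
 * callback and data are stashed together with a running zone_idx, so
 * zones reported by several underlying devices end up with one
 * continuous numbering.
 */
struct report_zones_args {
    report_zones_cb orig_cb;
    void *orig_data;
    unsigned int zone_idx;
};

/*
 * Counterpart of the wrapper callback: ignore the per-device index and
 * forward the zone with the aggregate index, post-incrementing it.
 */
static int renumber_cb(const struct zone_info *zone, unsigned int dev_idx,
                       void *data)
{
    struct report_zones_args *args = data;

    (void)dev_idx;
    return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}

static int print_zone(const struct zone_info *zone, unsigned int zone_idx,
                      void *data)
{
    (void)data;
    printf("zone %u starts at %llu\n", zone_idx, zone->start);
    return 0;
}

int main(void)
{
    struct report_zones_args args = {
        .orig_cb = print_zone, .orig_data = NULL, .zone_idx = 0,
    };
    struct zone_info z1 = { .start = 0, .len = 4096 };
    struct zone_info z2 = { .start = 4096, .len = 4096 };

    /* Two "devices" each report their own zone 0; numbering stays global. */
    renumber_cb(&z1, 0, &args);
    renumber_cb(&z2, 0, &args);
    printf("zones reported so far: %u\n", args.zone_idx);
    return 0;
}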
|
/linux-6.1.9/mm/ |
D | page_alloc.c
    1978  int zid = zone_idx(zone);  in deferred_init_pages()
    3860  wakeup_kswapd(zone, 0, 0, zone_idx(zone));  in rmqueue()
    4141  if (zone_idx(zone) != ZONE_NORMAL)  in alloc_flags_nofragment()
    5496  __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);  in __alloc_pages_bulk()
    6076  int zone_idx;  in node_has_managed_zones() local
    6077  for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)  in node_has_managed_zones()
    6078  if (zone_managed_pages(pgdat->node_zones + zone_idx))  in node_has_managed_zones()
    6100  if (zone_idx(zone) > max_zone_idx)  in __show_free_areas()
    6201  if (zone_idx(zone) > max_zone_idx)  in __show_free_areas()
    6264  if (zone_idx(zone) > max_zone_idx)  in __show_free_areas()
    [all …]
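
The node_has_managed_zones() hits (lines 6076-6078) are nearly the whole function: walk the node's zone array up to max_zone_idx and report whether any zone actually has managed pages. The sketch below reproduces that loop around minimal stand-in types; struct zone, struct pglist_data and zone_managed_pages() are reduced to the bare members the loop needs and are not the real kernel definitions.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_ZONES 4    /* simplified; the real count is Kconfig-dependent */

/* Minimal stand-ins for struct zone / struct pglist_data. */
struct zone {
    unsigned long managed_pages;
};

struct pglist_data {
    struct zone node_zones[MAX_NR_ZONES];
};

static unsigned long zone_managed_pages(const struct zone *zone)
{
    return zone->managed_pages;
}

/*
 * Model of node_has_managed_zones(): scan the node's zones up to
 * max_zone_idx and return true as soon as one manages pages.
 */
static bool node_has_managed_zones(const struct pglist_data *pgdat,
                                   int max_zone_idx)
{
    int zone_idx;

    for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
        if (zone_managed_pages(pgdat->node_zones + zone_idx))
            return true;
    return false;
}

int main(void)
{
    struct pglist_data node = { .node_zones = { { 0 }, { 128 }, { 0 }, { 0 } } };

    printf("%d\n", node_has_managed_zones(&node, 3));   /* 1: zone 1 has pages */
    printf("%d\n", node_has_managed_zones(&node, 0));   /* 0: zone 0 is empty  */
    return 0;
}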
|
D | memory_hotplug.c
    629   if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))  in node_states_check_changes_online()
    720   memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,  in move_pfn_range_to_zone()
    734   if (zone_idx(zone) == ZONE_MOVABLE) {  in auto_movable_stats_account_zone()
    1009  const bool movable = zone_idx(zone) == ZONE_MOVABLE;  in adjust_present_page_count()
    1754  if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)  in node_states_check_changes_offline()
    2187  if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)  in try_offline_memory_block()
|
D | page_isolation.c | 72 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
|
D | migrate.c | 1638 zidx = zone_idx(folio_zone(folio)); in alloc_migration_target()
|
D | vmscan.c
    602   int zone_idx)  in lruvec_lru_size() argument
    607   for (zid = 0; zid <= zone_idx; zid++) {  in lruvec_lru_size()
    6601  if (zone_idx(zone) > ZONE_NORMAL)  in throttle_direct_reclaim()
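
In lruvec_lru_size() the zone_idx parameter is an upper bound: the loop at line 607 accumulates the per-zone LRU sizes for every zone index up to and including it. The sketch below models that accumulation with a plain 2D array; the lruvec_model type, the array bounds and the omitted memcg/managed-zone handling are simplifications, not the kernel's data structures.

#include <stdio.h>

#define MAX_NR_ZONES 4    /* simplified */
#define NR_LRU_LISTS 5    /* simplified */

/* Minimal stand-in for the per-zone LRU size bookkeeping. */
struct lruvec_model {
    unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
};

/*
 * Model of lruvec_lru_size(): zone_idx bounds the scan, so the result is
 * the LRU size accumulated over zones 0..zone_idx.
 */
static unsigned long lruvec_lru_size(const struct lruvec_model *lruvec,
                                     int lru, int zone_idx)
{
    unsigned long size = 0;
    int zid;

    for (zid = 0; zid <= zone_idx; zid++)
        size += lruvec->lru_zone_size[zid][lru];
    return size;
}

int main(void)
{
    struct lruvec_model v = { { { 0 } } };

    v.lru_zone_size[0][0] = 10;
    v.lru_zone_size[1][0] = 20;
    v.lru_zone_size[2][0] = 30;

    /* Pages on LRU 0 eligible for zones up to index 1: 10 + 20. */
    printf("%lu\n", lruvec_lru_size(&v, 0, 1));
    return 0;
}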
|
D | khugepaged.c | 2507 if (zone_idx(zone) > gfp_zone(GFP_USER)) in set_recommended_min_free_kbytes()
|
/linux-6.1.9/include/linux/ |
D | mmzone.h
    1053  int zone_idx; /* zone_idx(zoneref->zone) */  member
    1290  #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)  macro
    1295  return zone_idx(zone) == ZONE_DEVICE;  in zone_is_zone_device()
    1361  return is_highmem_idx(zone_idx(zone));  in is_highmem()
    1448  return zoneref->zone_idx;  in zonelist_zone_idx()
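
Line 1290 is the definition itself: zone_idx(zone) is nothing more than pointer subtraction, the offset of the zone within its node's node_zones[] array, which is why the result can be compared directly against zone types such as ZONE_DEVICE or ZONE_MOVABLE, and why struct zoneref caches it next to the zone pointer (line 1053). The sketch below demonstrates the same arithmetic with stripped-down types; the three-entry zone enum and the struct layouts are simplifications, not the Kconfig-dependent kernel definitions, and the (int) cast is added only so the result prints with %d (the macro at line 1290 has none).

#include <stdio.h>

/* Simplified zone type enumeration; the real one depends on Kconfig. */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };

struct pglist_data;

struct zone {
    struct pglist_data *zone_pgdat;   /* back-pointer to the owning node */
};

struct pglist_data {
    struct zone node_zones[MAX_NR_ZONES];
};

/*
 * Same shape as the mmzone.h macro: the index of a zone is its offset
 * inside its node's node_zones[] array, obtained by pointer subtraction.
 */
#define zone_idx(zone) ((int)((zone) - (zone)->zone_pgdat->node_zones))

/* Modeled on struct zoneref: the index is cached next to the pointer. */
struct zoneref {
    struct zone *zone;
    int zone_idx;         /* zone_idx(zoneref->zone) */
};

int main(void)
{
    struct pglist_data node;
    int i;

    for (i = 0; i < MAX_NR_ZONES; i++)
        node.node_zones[i].zone_pgdat = &node;

    struct zone *movable = &node.node_zones[ZONE_MOVABLE];
    struct zoneref ref = { .zone = movable, .zone_idx = zone_idx(movable) };

    printf("zone_idx = %d, cached = %d, is ZONE_MOVABLE: %d\n",
           zone_idx(movable), ref.zone_idx,
           zone_idx(movable) == ZONE_MOVABLE);
    return 0;
}

Because the index is pure arithmetic on the zone pointer, helpers such as zone_is_zone_device() and is_highmem() (lines 1295 and 1361) can test a zone's type without storing it anywhere, while the zonelist fast path reads the cached zoneref->zone_idx instead (line 1448).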
|
D | memcontrol.h
    892   enum lru_list lru, int zone_idx)  in mem_cgroup_get_zone_lru_size() argument
    897   return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);  in mem_cgroup_get_zone_lru_size()
    1382  enum lru_list lru, int zone_idx)  in mem_cgroup_get_zone_lru_size() argument
|
D | device-mapper.h | 495 unsigned int zone_idx; member
|
/linux-6.1.9/fs/f2fs/ |
D | segment.c
    4910  static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,  in is_conv_zone() argument
    4915  return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);  in is_conv_zone()
    4935  unsigned int dev_idx, zone_idx;  in f2fs_usable_zone_segs_in_sec() local
    4938  zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);  in f2fs_usable_zone_segs_in_sec()
    4941  if (is_conv_zone(sbi, zone_idx, dev_idx))  in f2fs_usable_zone_segs_in_sec()
    4964  unsigned int zone_idx, dev_idx, secno;  in f2fs_usable_zone_blks_in_seg() local
    4969  zone_idx = get_zone_idx(sbi, secno, dev_idx);  in f2fs_usable_zone_blks_in_seg()
    4975  if (is_conv_zone(sbi, zone_idx, dev_idx))  in f2fs_usable_zone_blks_in_seg()
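
is_conv_zone() (line 4915) classifies a zone with a single bitmap lookup: blkz_seq appears to mark the zones that must be written sequentially, so a clear bit means a conventional zone. The sketch below models that test with a plain unsigned-long bitmap and a user-space test_bit(); the helper, the reduced argument list and the interpretation of blkz_seq are assumptions drawn from the snippet, not the f2fs definitions.

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_WORD (8 * sizeof(unsigned long))

/* Minimal test_bit() over an unsigned long bitmap, modeled on the kernel's. */
static bool test_bit(unsigned int nr, const unsigned long *addr)
{
    return (addr[nr / BITS_PER_WORD] >> (nr % BITS_PER_WORD)) & 1UL;
}

/*
 * Model of is_conv_zone(): a zone whose bit is clear in the
 * sequential-zone bitmap is treated as conventional (randomly writable).
 */
static bool is_conv_zone(unsigned int zone_idx, const unsigned long *blkz_seq)
{
    return !test_bit(zone_idx, blkz_seq);
}

int main(void)
{
    unsigned long blkz_seq[1] = { 0 };

    blkz_seq[0] |= 1UL << 3;    /* zone 3 is sequential-write-required */

    printf("zone 2 conventional: %d\n", is_conv_zone(2, blkz_seq)); /* 1 */
    printf("zone 3 conventional: %d\n", is_conv_zone(3, blkz_seq)); /* 0 */
    return 0;
}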
|