Lines matching references to "b" (the struct vmballoon * balloon instance) in the VMware balloon driver, drivers/misc/vmw_balloon.c. Each entry gives the source line number, the matching source line, the enclosing function, and whether "b" is a function argument or a local variable there.
415 static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op, in vmballoon_stats_op_inc() argument
419 atomic64_inc(&b->stats->ops[op][type]); in vmballoon_stats_op_inc()
422 static inline void vmballoon_stats_gen_inc(struct vmballoon *b, in vmballoon_stats_gen_inc() argument
426 atomic64_inc(&b->stats->general_stat[stat]); in vmballoon_stats_gen_inc()
429 static inline void vmballoon_stats_gen_add(struct vmballoon *b, in vmballoon_stats_gen_add() argument
434 atomic64_add(val, &b->stats->general_stat[stat]); in vmballoon_stats_gen_add()
437 static inline void vmballoon_stats_page_inc(struct vmballoon *b, in vmballoon_stats_page_inc() argument
442 atomic64_inc(&b->stats->page_stat[stat][size]); in vmballoon_stats_page_inc()
445 static inline void vmballoon_stats_page_add(struct vmballoon *b, in vmballoon_stats_page_add() argument
451 atomic64_add(val, &b->stats->page_stat[stat][size]); in vmballoon_stats_page_add()
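The five helpers above (lines 415-451) all funnel into a per-balloon block of atomic64 counters, indexed by monitor operation, general statistic, or per-page-size page statistic; the block itself is allocated lazily (see the kzalloc at line 1627). Below is a minimal sketch of the layout those accesses imply; the bound macros (the VMW_BALLOON_*_NUM names) are illustrative assumptions, not values taken from this listing.

    /* Sketch of the counter block behind b->stats. */
    struct vmballoon_stats {
            /* general events such as timer ticks and doorbells */
            atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

            /* page events (alloc, alloc failure, free, ...) per page size */
            atomic64_t page_stat[VMW_BALLOON_PAGE_STAT_NUM]
                                [VMW_BALLOON_NUM_PAGE_SIZES];

            /* monitor commands: issued and failed, per command */
            atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
    };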
455 __vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1, in __vmballoon_cmd() argument
460 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT); in __vmballoon_cmd()
483 WRITE_ONCE(b->target, local_result); in __vmballoon_cmd()
487 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT); in __vmballoon_cmd()
495 b->reset_required = true; in __vmballoon_cmd()
501 vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1, in vmballoon_cmd() argument
506 return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy); in vmballoon_cmd()
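__vmballoon_cmd() (lines 455-495) is the rich form: it bumps the per-command counters, updates b->target from the hypervisor's reply, records failures, and can flag b->reset_required, while writing the raw result through its last argument. vmballoon_cmd() is the thin wrapper most callers use; line 506 shows it simply discarding that result. A sketch of the wrapper (the unsigned long return type is an assumption, the body follows line 506):

    static unsigned long
    vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
                  unsigned long arg2)
    {
            unsigned long dummy;

            /* Callers that only need the status ignore the result value. */
            return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
    }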
513 static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps) in vmballoon_send_start() argument
517 status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0, in vmballoon_send_start()
522 b->capabilities = capabilities; in vmballoon_send_start()
525 b->capabilities = VMW_BALLOON_BASIC_CMDS; in vmballoon_send_start()
536 b->max_page_size = VMW_BALLOON_4K_PAGE; in vmballoon_send_start()
537 if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) && in vmballoon_send_start()
538 (b->capabilities & VMW_BALLOON_BATCHED_CMDS)) in vmballoon_send_start()
539 b->max_page_size = VMW_BALLOON_2M_PAGE; in vmballoon_send_start()
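Lines 513-539 outline the capability handshake: a START command advertises the guest's requested capabilities, the reply (or a fallback to the basic command set) becomes b->capabilities, and 2 MB ballooning is enabled only when both the batched and the batched-2M command capabilities are present. A hedged reconstruction of that flow; the success/status constants and the error value are assumptions:

    static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
    {
            unsigned long status, capabilities;

            status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
                                     &capabilities);

            if (status == VMW_BALLOON_SUCCESS_WITH_CAPABILITIES)
                    b->capabilities = capabilities;
            else if (status == VMW_BALLOON_SUCCESS)
                    b->capabilities = VMW_BALLOON_BASIC_CMDS;
            else
                    return -EIO;

            /* 2 MB pages need both batching and the 2 MB batched commands. */
            b->max_page_size = VMW_BALLOON_4K_PAGE;
            if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
                (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
                    b->max_page_size = VMW_BALLOON_2M_PAGE;

            return 0;
    }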
557 static int vmballoon_send_guest_id(struct vmballoon *b) in vmballoon_send_guest_id() argument
561 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID, in vmballoon_send_guest_id()
630 static int vmballoon_send_get_target(struct vmballoon *b) in vmballoon_send_get_target() argument
638 if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) && in vmballoon_send_get_target()
642 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0); in vmballoon_send_get_target()
659 static int vmballoon_alloc_page_list(struct vmballoon *b, in vmballoon_alloc_page_list() argument
683 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC, in vmballoon_alloc_page_list()
694 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL, in vmballoon_alloc_page_list()
712 static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page, in vmballoon_handle_one_result() argument
725 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC, in vmballoon_handle_one_result()
744 static unsigned long vmballoon_status_page(struct vmballoon *b, int idx, in vmballoon_status_page() argument
749 *p = pfn_to_page(b->batch_page[idx].pfn); in vmballoon_status_page()
750 return b->batch_page[idx].status; in vmballoon_status_page()
754 *p = b->page; in vmballoon_status_page()
780 static unsigned long vmballoon_lock_op(struct vmballoon *b, in vmballoon_lock_op() argument
787 lockdep_assert_held(&b->comm_lock); in vmballoon_lock_op()
799 pfn = PHYS_PFN(virt_to_phys(b->batch_page)); in vmballoon_lock_op()
803 pfn = page_to_pfn(b->page); in vmballoon_lock_op()
810 return vmballoon_cmd(b, cmd, pfn, num_pages); in vmballoon_lock_op()
822 static void vmballoon_add_page(struct vmballoon *b, unsigned int idx, in vmballoon_add_page() argument
825 lockdep_assert_held(&b->comm_lock); in vmballoon_add_page()
828 b->batch_page[idx] = (struct vmballoon_batch_entry) in vmballoon_add_page()
831 b->page = p; in vmballoon_add_page()
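Lines 780-831 show how a lock or unlock request is addressed to the hypervisor in the two operating modes: with batching, the command is given the PFN of the shared batch page and each batch entry carries one page's PFN; without batching, the single page tracked in b->page is used directly. A condensed sketch of both helpers; the enum type names, the command constants, and the plain pointer test standing in for the driver's real batching check are assumptions, and the 2 MB command variants are omitted:

    /* Queue one page for the next lock/unlock command (sketch). */
    static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
                                   struct page *p)
    {
            lockdep_assert_held(&b->comm_lock);

            if (b->batch_page)      /* assumed batching test */
                    b->batch_page[idx] = (struct vmballoon_batch_entry)
                                                { .pfn = page_to_pfn(p) };
            else
                    b->page = p;
    }

    /* Issue the lock/unlock for the pages queued above (sketch). */
    static unsigned long vmballoon_lock_op(struct vmballoon *b,
                                           unsigned int num_pages,
                                           enum vmballoon_page_size_type page_size,
                                           enum vmballoon_op op)
    {
            unsigned long cmd, pfn;

            lockdep_assert_held(&b->comm_lock);

            /* page_size would select the 2 MB command variants; this sketch
             * only shows the 4 KB commands. */
            if (b->batch_page) {
                    cmd = op == VMW_BALLOON_INFLATE ?
                                VMW_BALLOON_CMD_BATCHED_LOCK :
                                VMW_BALLOON_CMD_BATCHED_UNLOCK;
                    pfn = PHYS_PFN(virt_to_phys(b->batch_page));
            } else {
                    cmd = op == VMW_BALLOON_INFLATE ?
                                VMW_BALLOON_CMD_LOCK : VMW_BALLOON_CMD_UNLOCK;
                    pfn = page_to_pfn(b->page);
            }

            return vmballoon_cmd(b, cmd, pfn, num_pages);
    }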
853 static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl) in vmballoon_lock() argument
864 spin_lock(&b->comm_lock); in vmballoon_lock()
868 vmballoon_add_page(b, i++, page); in vmballoon_lock()
870 batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size, in vmballoon_lock()
881 status = vmballoon_status_page(b, i, &page); in vmballoon_lock()
891 if (!vmballoon_handle_one_result(b, page, ctl->page_size, in vmballoon_lock()
904 spin_unlock(&b->comm_lock); in vmballoon_lock()
938 static void vmballoon_release_refused_pages(struct vmballoon *b, in vmballoon_release_refused_pages() argument
941 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE, in vmballoon_release_refused_pages()
956 static int64_t vmballoon_change(struct vmballoon *b) in vmballoon_change() argument
960 size = atomic64_read(&b->size); in vmballoon_change()
961 target = READ_ONCE(b->target); in vmballoon_change()
968 if (b->reset_required) in vmballoon_change()
977 if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout))) in vmballoon_change()
994 static void vmballoon_enqueue_page_list(struct vmballoon *b, in vmballoon_enqueue_page_list() argument
1003 balloon_page_list_enqueue(&b->b_dev_info, pages); in vmballoon_enqueue_page_list()
1009 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); in vmballoon_enqueue_page_list()
1015 list_splice_init(pages, &b->huge_pages); in vmballoon_enqueue_page_list()
1018 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); in vmballoon_enqueue_page_list()
1037 static void vmballoon_dequeue_page_list(struct vmballoon *b, in vmballoon_dequeue_page_list() argument
1049 *n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages, in vmballoon_dequeue_page_list()
1055 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); in vmballoon_dequeue_page_list()
1056 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { in vmballoon_dequeue_page_list()
1066 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); in vmballoon_dequeue_page_list()
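Lines 994-1066 show two bookkeeping paths for ballooned pages: 4 KB pages go through the generic balloon-compaction helpers on b->b_dev_info, which keeps them migratable, while 2 MB pages live on the driver-private b->huge_pages list under the same pages_lock. A reduced sketch of the enqueue side (the page-size enum name is an assumption; offline marking and vmstat accounting are omitted):

    static void vmballoon_enqueue_page_list(struct vmballoon *b,
                                            struct list_head *pages,
                                            unsigned int *n_pages,
                                            enum vmballoon_page_size_type page_size)
    {
            unsigned long flags;

            if (page_size == VMW_BALLOON_4K_PAGE) {
                    /* 4 KB pages: hand them to the balloon-compaction core. */
                    balloon_page_list_enqueue(&b->b_dev_info, pages);
            } else {
                    /* 2 MB pages: keep them on a private list, reusing the
                     * compaction core's pages_lock for protection. */
                    spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
                    list_splice_init(pages, &b->huge_pages);
                    spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
            }

            /* Every page on the list has been consumed. */
            *n_pages = 0;
    }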
1101 static void vmballoon_inflate(struct vmballoon *b) in vmballoon_inflate() argument
1108 .page_size = b->max_page_size, in vmballoon_inflate()
1112 while ((to_inflate_frames = vmballoon_change(b)) > 0) { in vmballoon_inflate()
1121 to_inflate_pages = min_t(unsigned long, b->batch_max_pages, in vmballoon_inflate()
1126 alloc_error = vmballoon_alloc_page_list(b, &ctl, in vmballoon_inflate()
1130 lock_error = vmballoon_lock(b, &ctl); in vmballoon_inflate()
1140 atomic64_add(ctl.n_pages * page_in_frames, &b->size); in vmballoon_inflate()
1142 vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages, in vmballoon_inflate()
1171 vmballoon_release_refused_pages(b, &ctl); in vmballoon_inflate()
1188 static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames, in vmballoon_deflate() argument
1220 -vmballoon_change(b); in vmballoon_deflate()
1230 to_deflate_pages = min_t(unsigned long, b->batch_max_pages, in vmballoon_deflate()
1235 vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages, in vmballoon_deflate()
1252 vmballoon_lock(b, &ctl); in vmballoon_deflate()
1264 atomic64_sub(n_unlocked_frames, &b->size); in vmballoon_deflate()
1267 vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE, in vmballoon_deflate()
1275 vmballoon_enqueue_page_list(b, &ctl.refused_pages, in vmballoon_deflate()
1281 if (ctl.page_size == b->max_page_size) in vmballoon_deflate()
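Lines 1101-1281 give the shape of the inflate and deflate loops: each pass asks vmballoon_change() how many 4 KB frames are still needed, caps the batch at b->batch_max_pages, allocates (or dequeues) that many pages, issues the lock (or unlock) operation, adjusts the atomic b->size, and parks or frees the pages. A stripped-down sketch of the inflate side only; error handling, the 2 MB to 4 KB fallback, and rate limiting are left out, the .op initializer is an assumed enum value, and vmballoon_page_in_frames() is assumed to convert one page of the current size into its number of 4 KB frames:

    static void vmballoon_inflate(struct vmballoon *b)
    {
            int64_t to_inflate_frames;
            struct vmballoon_ctl ctl = {
                    .pages = LIST_HEAD_INIT(ctl.pages),
                    .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
                    .page_size = b->max_page_size,
                    .op = VMW_BALLOON_INFLATE,      /* assumed enum value */
            };

            while ((to_inflate_frames = vmballoon_change(b)) > 0) {
                    unsigned int to_inflate_pages;

                    /* Never exceed one batch page worth of entries. */
                    to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
                                    DIV_ROUND_UP(to_inflate_frames,
                                    vmballoon_page_in_frames(ctl.page_size)));

                    if (vmballoon_alloc_page_list(b, &ctl, to_inflate_pages))
                            break;
                    if (vmballoon_lock(b, &ctl))
                            break;

                    /* Account the frames now held by the hypervisor ... */
                    atomic64_add(ctl.n_pages *
                                 vmballoon_page_in_frames(ctl.page_size),
                                 &b->size);

                    /* ... and park the pages until deflation needs them. */
                    vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
                                                ctl.page_size);
            }

            vmballoon_release_refused_pages(b, &ctl);
    }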
1300 static void vmballoon_deinit_batching(struct vmballoon *b) in vmballoon_deinit_batching() argument
1302 free_page((unsigned long)b->batch_page); in vmballoon_deinit_batching()
1303 b->batch_page = NULL; in vmballoon_deinit_batching()
1305 b->batch_max_pages = 1; in vmballoon_deinit_batching()
1318 static int vmballoon_init_batching(struct vmballoon *b) in vmballoon_init_batching() argument
1326 b->batch_page = page_address(page); in vmballoon_init_batching()
1327 b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry); in vmballoon_init_batching()
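Line 1327 sizes a batch by how many struct vmballoon_batch_entry records fit in one page, while line 1305 shows the non-batched fallback of one page per command. Assuming 4 KiB pages and an 8-byte entry, which is what the .pfn and .status accesses at lines 749-750 and 828 suggest, one batch page holds 4096 / 8 = 512 entries, so a single batched command can lock or unlock up to 512 pages. An illustrative layout for such an entry (the exact bit split is an assumption):

    /* One 64-bit entry per page in the shared batch page (sketch). */
    struct vmballoon_batch_entry {
            u64 status : 5;         /* hypervisor status for this page */
            u64 reserved : 7;       /* padding up to the PFN field */
            u64 pfn : 52;           /* page frame number of the page */
    } __packed;

    /* PAGE_SIZE / sizeof(struct vmballoon_batch_entry) == 4096 / 8 == 512 */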
1339 struct vmballoon *b = client_data; in vmballoon_doorbell() local
1341 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL); in vmballoon_doorbell()
1343 mod_delayed_work(system_freezable_wq, &b->dwork, 0); in vmballoon_doorbell()
1349 static void vmballoon_vmci_cleanup(struct vmballoon *b) in vmballoon_vmci_cleanup() argument
1351 vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET, in vmballoon_vmci_cleanup()
1354 if (!vmci_handle_is_invalid(b->vmci_doorbell)) { in vmballoon_vmci_cleanup()
1355 vmci_doorbell_destroy(b->vmci_doorbell); in vmballoon_vmci_cleanup()
1356 b->vmci_doorbell = VMCI_INVALID_HANDLE; in vmballoon_vmci_cleanup()
1370 static int vmballoon_vmci_init(struct vmballoon *b) in vmballoon_vmci_init() argument
1374 if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0) in vmballoon_vmci_init()
1377 error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB, in vmballoon_vmci_init()
1379 vmballoon_doorbell, b); in vmballoon_vmci_init()
1384 error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET, in vmballoon_vmci_init()
1385 b->vmci_doorbell.context, in vmballoon_vmci_init()
1386 b->vmci_doorbell.resource, NULL); in vmballoon_vmci_init()
1393 vmballoon_vmci_cleanup(b); in vmballoon_vmci_init()
1406 static void vmballoon_pop(struct vmballoon *b) in vmballoon_pop() argument
1410 while ((size = atomic64_read(&b->size))) in vmballoon_pop()
1411 vmballoon_deflate(b, size, false); in vmballoon_pop()
1419 static void vmballoon_reset(struct vmballoon *b) in vmballoon_reset() argument
1423 down_write(&b->conf_sem); in vmballoon_reset()
1425 vmballoon_vmci_cleanup(b); in vmballoon_reset()
1428 vmballoon_pop(b); in vmballoon_reset()
1430 if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES)) in vmballoon_reset()
1433 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) { in vmballoon_reset()
1434 if (vmballoon_init_batching(b)) { in vmballoon_reset()
1441 vmballoon_send_start(b, 0); in vmballoon_reset()
1444 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) { in vmballoon_reset()
1445 vmballoon_deinit_batching(b); in vmballoon_reset()
1448 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET); in vmballoon_reset()
1449 b->reset_required = false; in vmballoon_reset()
1451 error = vmballoon_vmci_init(b); in vmballoon_reset()
1455 if (vmballoon_send_guest_id(b)) in vmballoon_reset()
1459 up_write(&b->conf_sem); in vmballoon_reset()
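Lines 1419-1459 outline the reset sequence performed under the configuration semaphore: tear down the VMCI doorbell, deflate the whole balloon, renegotiate capabilities with a fresh START, enable or disable batching to match, then re-arm the doorbell and resend the guest ID. A condensed, hedged outline of that sequence (the label name and the exact error handling are assumptions):

    static void vmballoon_reset(struct vmballoon *b)
    {
            down_write(&b->conf_sem);

            vmballoon_vmci_cleanup(b);      /* drop the old doorbell */
            vmballoon_pop(b);               /* return every page first */

            if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
                    goto unlock;

            if (b->capabilities & VMW_BALLOON_BATCHED_CMDS) {
                    if (vmballoon_init_batching(b)) {
                            /* Batch page allocation failed: restart with no
                             * capabilities so the host retries later. */
                            vmballoon_send_start(b, 0);
                            goto unlock;
                    }
            } else if (b->capabilities & VMW_BALLOON_BASIC_CMDS) {
                    vmballoon_deinit_batching(b);
            }

            vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
            b->reset_required = false;

            /* Doorbell or guest-ID failures are reported, not fatal. */
            vmballoon_vmci_init(b);
            vmballoon_send_guest_id(b);

    unlock:
            up_write(&b->conf_sem);
    }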
1473 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); in vmballoon_work() local
1476 if (b->reset_required) in vmballoon_work()
1477 vmballoon_reset(b); in vmballoon_work()
1479 down_read(&b->conf_sem); in vmballoon_work()
1486 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER); in vmballoon_work()
1488 if (!vmballoon_send_get_target(b)) in vmballoon_work()
1489 change = vmballoon_change(b); in vmballoon_work()
1493 atomic64_read(&b->size), READ_ONCE(b->target)); in vmballoon_work()
1496 vmballoon_inflate(b); in vmballoon_work()
1498 vmballoon_deflate(b, 0, true); in vmballoon_work()
1501 up_read(&b->conf_sem); in vmballoon_work()
1522 struct vmballoon *b = &balloon; in vmballoon_shrinker_scan() local
1525 pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size)); in vmballoon_shrinker_scan()
1527 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK); in vmballoon_shrinker_scan()
1533 if (!down_read_trylock(&b->conf_sem)) in vmballoon_shrinker_scan()
1536 deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true); in vmballoon_shrinker_scan()
1538 vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE, in vmballoon_shrinker_scan()
1546 WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY); in vmballoon_shrinker_scan()
1548 up_read(&b->conf_sem); in vmballoon_shrinker_scan()
1564 struct vmballoon *b = &balloon; in vmballoon_shrinker_count() local
1566 return atomic64_read(&b->size); in vmballoon_shrinker_count()
1569 static void vmballoon_unregister_shrinker(struct vmballoon *b) in vmballoon_unregister_shrinker() argument
1571 if (b->shrinker_registered) in vmballoon_unregister_shrinker()
1572 unregister_shrinker(&b->shrinker); in vmballoon_unregister_shrinker()
1573 b->shrinker_registered = false; in vmballoon_unregister_shrinker()
1576 static int vmballoon_register_shrinker(struct vmballoon *b) in vmballoon_register_shrinker() argument
1584 b->shrinker.scan_objects = vmballoon_shrinker_scan; in vmballoon_register_shrinker()
1585 b->shrinker.count_objects = vmballoon_shrinker_count; in vmballoon_register_shrinker()
1586 b->shrinker.seeks = DEFAULT_SEEKS; in vmballoon_register_shrinker()
1588 r = register_shrinker(&b->shrinker, "vmw-balloon"); in vmballoon_register_shrinker()
1591 b->shrinker_registered = true; in vmballoon_register_shrinker()
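Lines 1522-1591 hook the balloon into memory reclaim through the shrinker API: count_objects reports the current balloon size, scan_objects deflates up to sc->nr_to_scan frames (taking conf_sem with a trylock so reclaim never blocks on a concurrent reset), and shrink_timeout then delays re-inflation. A minimal registration sketch matching the fields used above; the two-argument register_shrinker() with a name string is taken from line 1588 and applies to kernels that provide that variant:

    static int vmballoon_register_shrinker(struct vmballoon *b)
    {
            int r;

            b->shrinker.scan_objects = vmballoon_shrinker_scan;
            b->shrinker.count_objects = vmballoon_shrinker_count;
            b->shrinker.seeks = DEFAULT_SEEKS;

            r = register_shrinker(&b->shrinker, "vmw-balloon");
            if (r == 0)
                    b->shrinker_registered = true;

            return r;
    }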
1617 static int vmballoon_enable_stats(struct vmballoon *b) in vmballoon_enable_stats() argument
1621 down_write(&b->conf_sem); in vmballoon_enable_stats()
1624 if (b->stats) in vmballoon_enable_stats()
1627 b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL); in vmballoon_enable_stats()
1629 if (!b->stats) { in vmballoon_enable_stats()
1636 up_write(&b->conf_sem); in vmballoon_enable_stats()
1653 struct vmballoon *b = f->private; in vmballoon_debug_show() local
1657 if (!b->stats) { in vmballoon_debug_show()
1658 int r = vmballoon_enable_stats(b); in vmballoon_debug_show()
1667 seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities); in vmballoon_debug_show()
1669 b->reset_required ? "y" : "n"); in vmballoon_debug_show()
1672 seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target)); in vmballoon_debug_show()
1673 seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size)); in vmballoon_debug_show()
1681 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]), in vmballoon_debug_show()
1682 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT])); in vmballoon_debug_show()
1688 atomic64_read(&b->stats->general_stat[i])); in vmballoon_debug_show()
1695 atomic64_read(&b->stats->page_stat[i][j])); in vmballoon_debug_show()
1703 static void __init vmballoon_debugfs_init(struct vmballoon *b) in vmballoon_debugfs_init() argument
1705 debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, in vmballoon_debugfs_init()
1709 static void __exit vmballoon_debugfs_exit(struct vmballoon *b) in vmballoon_debugfs_exit() argument
1713 kfree(b->stats); in vmballoon_debugfs_exit()
1714 b->stats = NULL; in vmballoon_debugfs_exit()
1719 static inline void vmballoon_debugfs_init(struct vmballoon *b) in vmballoon_debugfs_init() argument
1723 static inline void vmballoon_debugfs_exit(struct vmballoon *b) in vmballoon_debugfs_exit() argument
1750 struct vmballoon *b; in vmballoon_migratepage() local
1753 b = container_of(b_dev_info, struct vmballoon, b_dev_info); in vmballoon_migratepage()
1759 if (!down_read_trylock(&b->conf_sem)) in vmballoon_migratepage()
1762 spin_lock(&b->comm_lock); in vmballoon_migratepage()
1770 vmballoon_add_page(b, 0, page); in vmballoon_migratepage()
1771 status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, in vmballoon_migratepage()
1775 status = vmballoon_status_page(b, 0, &page); in vmballoon_migratepage()
1782 spin_unlock(&b->comm_lock); in vmballoon_migratepage()
1797 vmballoon_add_page(b, 0, newpage); in vmballoon_migratepage()
1798 status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, in vmballoon_migratepage()
1802 status = vmballoon_status_page(b, 0, &newpage); in vmballoon_migratepage()
1804 spin_unlock(&b->comm_lock); in vmballoon_migratepage()
1813 atomic64_dec(&b->size); in vmballoon_migratepage()
1825 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); in vmballoon_migratepage()
1833 balloon_page_insert(&b->b_dev_info, newpage); in vmballoon_migratepage()
1841 b->b_dev_info.isolated_pages--; in vmballoon_migratepage()
1842 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); in vmballoon_migratepage()
1845 up_read(&b->conf_sem); in vmballoon_migratepage()
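Lines 1750-1845 show the shape of the balloon-compaction migration callback: grab conf_sem and comm_lock, tell the hypervisor to release the old page and to take the replacement using single-page 4 KB operations, then swap the pages in the b_dev_info bookkeeping under pages_lock and fix up isolated_pages. A compressed outline of the success path only; per-page status checks, reference counting, and all error unwinding are omitted, and the enum operation values are assumptions:

    static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
                                     struct page *newpage, struct page *page,
                                     enum migrate_mode mode)
    {
            struct vmballoon *b = container_of(b_dev_info, struct vmballoon,
                                               b_dev_info);
            unsigned long flags;

            if (!down_read_trylock(&b->conf_sem))
                    return -EAGAIN;

            spin_lock(&b->comm_lock);

            /* Release the old page back to the guest ... */
            vmballoon_add_page(b, 0, page);
            vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, VMW_BALLOON_DEFLATE);

            /* ... and hand the replacement page to the hypervisor.  The real
             * callback checks each step via vmballoon_status_page(). */
            vmballoon_add_page(b, 0, newpage);
            vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, VMW_BALLOON_INFLATE);

            spin_unlock(&b->comm_lock);

            /* Swap the pages in the balloon-compaction bookkeeping; the old
             * page is freed back to the system here (omitted). */
            spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
            balloon_page_insert(&b->b_dev_info, newpage);
            b->b_dev_info.isolated_pages--;
            spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);

            up_read(&b->conf_sem);

            return MIGRATEPAGE_SUCCESS;
    }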
1860 static __init void vmballoon_compaction_init(struct vmballoon *b) in vmballoon_compaction_init() argument
1862 b->b_dev_info.migratepage = vmballoon_migratepage; in vmballoon_compaction_init()
1866 static inline void vmballoon_compaction_init(struct vmballoon *b) in vmballoon_compaction_init() argument