
Searched refs: new_page (Results 1 – 25 of 26), sorted by relevance


/linux-6.1.9/arch/s390/mm/
vmem.c 173 void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); in modify_pte_table() local
175 if (!new_page) in modify_pte_table()
177 set_pte(pte, __pte(__pa(new_page) | prot)); in modify_pte_table()
249 void *new_page; in modify_pmd_table() local
258 new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE); in modify_pmd_table()
259 if (new_page) { in modify_pmd_table()
260 set_pmd(pmd, __pmd(__pa(new_page) | prot)); in modify_pmd_table()
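The s390 vmemmap code above backs an empty page-table entry with a freshly allocated block. A condensed sketch of the PTE-level step, taken from the lines above (the PMD-sized variant at line 258 is analogous):

    void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

    if (!new_page)
            return -ENOMEM;                          /* allocation failed, caller unwinds */
    set_pte(pte, __pte(__pa(new_page) | prot));      /* map the block's physical address */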
/linux-6.1.9/kernel/events/
uprobes.c 155 struct page *old_page, struct page *new_page) in __replace_page() argument
167 if (new_page) { in __replace_page()
168 new_folio = page_folio(new_page); in __replace_page()
183 if (new_page) { in __replace_page()
185 page_add_new_anon_rmap(new_page, vma, addr); in __replace_page()
198 if (new_page) in __replace_page()
200 mk_pte(new_page, vma->vm_page_prot)); in __replace_page()
465 struct page *old_page, *new_page; in uprobe_write_opcode() local
511 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); in uprobe_write_opcode()
512 if (!new_page) in uprobe_write_opcode()
[all …]
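The uprobes hits show the allocate-copy-swap pattern behind uprobe_write_opcode(): a fresh anonymous page is allocated for the VMA, the probed page is copied into it, the breakpoint opcode is patched in, and __replace_page() swaps the PTE. Roughly (error paths, locking and the opcode write itself elided):

    new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);   /* anon page for this VMA */
    if (!new_page)
            goto put_old;
    __SetPageUptodate(new_page);
    copy_highpage(new_page, old_page);            /* duplicate the probed page's contents */
    /* ... write the breakpoint opcode into new_page at vaddr's offset ... */
    ret = __replace_page(vma, vaddr, old_page, new_page);   /* swap the PTE for this address */
    put_page(new_page);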
/linux-6.1.9/fs/f2fs/
namei.c 955 struct page *old_page, *new_page = NULL; in f2fs_rename() local
1030 &new_page); in f2fs_rename()
1032 if (IS_ERR(new_page)) in f2fs_rename()
1033 err = PTR_ERR(new_page); in f2fs_rename()
1045 f2fs_set_link(new_dir, new_entry, new_page, old_inode); in f2fs_rename()
1046 new_page = NULL; in f2fs_rename()
1126 f2fs_put_page(new_page, 0); in f2fs_rename()
1144 struct page *old_page, *new_page; in f2fs_cross_rename() local
1179 new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); in f2fs_cross_rename()
1181 if (IS_ERR(new_page)) in f2fs_cross_rename()
[all …]
/linux-6.1.9/fs/sysv/
namei.c 219 struct page * new_page; in sysv_rename() local
227 new_de = sysv_find_entry(new_dentry, &new_page); in sysv_rename()
230 sysv_set_link(new_de, new_page, old_inode); in sysv_rename()
/linux-6.1.9/fs/minix/
namei.c 215 struct page * new_page; in minix_rename() local
223 new_de = minix_find_entry(new_dentry, &new_page); in minix_rename()
226 minix_set_link(new_de, new_page, old_inode); in minix_rename()
/linux-6.1.9/fs/ext2/
namei.c 360 struct page *new_page; in ext2_rename() local
368 &new_page, &page_addr); in ext2_rename()
373 ext2_set_link(new_dir, new_de, new_page, page_addr, old_inode, 1); in ext2_rename()
374 ext2_put_page(new_page, page_addr); in ext2_rename()
/linux-6.1.9/fs/ufs/
namei.c 273 struct page *new_page; in ufs_rename() local
281 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); in ufs_rename()
284 ufs_set_link(new_dir, new_de, new_page, old_inode, 1); in ufs_rename()
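The sysv, minix, ext2 and ufs hits (and nilfs2 further down) all follow the same shape when rename() overwrites an existing target: look up the new name to get the directory page, then rewrite that entry to point at the source inode. A minimal sketch with hypothetical foo_find_entry()/foo_set_link() helpers standing in for each filesystem's own:

    struct page *new_page;
    struct foo_dir_entry *new_de;

    new_de = foo_find_entry(new_dentry, &new_page);    /* locate the target entry, pin its page */
    if (!new_de)
            goto out;                                  /* target gone; fall back to a plain rename */
    foo_set_link(new_de, new_page, old_inode);         /* repoint the entry at the old inode */
    /* the page reference taken by the find is dropped by set_link or an explicit put,
       depending on the filesystem (ext2 also passes the kmapped page address along) */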
/linux-6.1.9/mm/
memory.c 909 struct page *new_page; in copy_present_page() local
912 new_page = *prealloc; in copy_present_page()
913 if (!new_page) in copy_present_page()
921 copy_user_highpage(new_page, page, addr, src_vma); in copy_present_page()
922 __SetPageUptodate(new_page); in copy_present_page()
923 page_add_new_anon_rmap(new_page, dst_vma, addr); in copy_present_page()
924 lru_cache_add_inactive_or_unevictable(new_page, dst_vma); in copy_present_page()
925 rss[mm_counter(new_page)]++; in copy_present_page()
928 pte = mk_pte(new_page, dst_vma->vm_page_prot); in copy_present_page()
1002 struct page *new_page; in page_copy_prealloc() local
[all …]
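The memory.c hits are fork's copy path: when an anonymous page must be duplicated for the child rather than shared, a page preallocated by page_copy_prealloc() is filled, published, and wired into the child's rmap, LRU and RSS before a new PTE is built from it. Condensed from the lines above:

    new_page = *prealloc;                               /* allocated before the page table lock */
    if (!new_page)
            return -EAGAIN;                             /* caller allocates and retries */
    copy_user_highpage(new_page, page, addr, src_vma);  /* copy the parent's data */
    __SetPageUptodate(new_page);
    page_add_new_anon_rmap(new_page, dst_vma, addr);    /* hook into the child's anon rmap */
    lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
    rss[mm_counter(new_page)]++;                        /* account in the child's RSS */
    pte = mk_pte(new_page, dst_vma->vm_page_prot);      /* PTE pointing at the private copy */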
hugetlb.c 2899 struct page *new_page; in alloc_and_dissolve_huge_page() local
2909 new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL); in alloc_and_dissolve_huge_page()
2910 if (!new_page) in alloc_and_dissolve_huge_page()
2912 __prep_new_huge_page(h, new_page); in alloc_and_dissolve_huge_page()
2954 enqueue_huge_page(h, new_page); in alloc_and_dissolve_huge_page()
2968 set_page_refcounted(new_page); in alloc_and_dissolve_huge_page()
2969 update_and_free_page(h, new_page, false); in alloc_and_dissolve_huge_page()
4941 struct page *new_page) in hugetlb_install_page() argument
4943 __SetPageUptodate(new_page); in hugetlb_install_page()
4944 hugepage_add_new_anon_rmap(new_page, vma, addr); in hugetlb_install_page()
[all …]
ksm.c 2583 struct page *new_page; in ksm_might_need_to_copy() local
2598 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in ksm_might_need_to_copy()
2599 if (new_page && in ksm_might_need_to_copy()
2600 mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) { in ksm_might_need_to_copy()
2601 put_page(new_page); in ksm_might_need_to_copy()
2602 new_page = NULL; in ksm_might_need_to_copy()
2604 if (new_page) { in ksm_might_need_to_copy()
2605 copy_user_highpage(new_page, page, address, vma); in ksm_might_need_to_copy()
2607 SetPageDirty(new_page); in ksm_might_need_to_copy()
2608 __SetPageUptodate(new_page); in ksm_might_need_to_copy()
[all …]
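ksm_might_need_to_copy() duplicates a page at swap-in when the existing (KSM) page cannot simply be reused by this VMA; the allocation is charged to the memcg and rolled back on failure. Condensed from the lines above:

    new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
    if (new_page &&
        mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
            put_page(new_page);                         /* charge failed: give the page back */
            new_page = NULL;
    }
    if (new_page) {
            copy_user_highpage(new_page, page, address, vma);   /* copy the shared page's data */
            SetPageDirty(new_page);
            __SetPageUptodate(new_page);
    }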
mempolicy.c 1208 static struct page *new_page(struct page *page, unsigned long start) in new_page() function
1250 static struct page *new_page(struct page *page, unsigned long start) in new_page() function
1337 nr_failed = migrate_pages(&pagelist, new_page, NULL, in do_mbind()
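Here new_page() is not a variable but the allocation callback do_mbind() hands to migrate_pages(): for every page on the isolation list it must return a destination page that satisfies the new policy. A stripped-down sketch of such a callback (the real one walks the VMAs from 'start' and has separate paths for hugetlb and THP):

    static struct page *new_page(struct page *page, unsigned long start)
    {
            struct vm_area_struct *vma;
            unsigned long address;

            /* simplified: assumes the first VMA at 'start' maps the page */
            vma = find_vma(current->mm, start);
            address = page_address_in_vma(page, vma);
            return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
    }

    /* used as shown above: migrate_pages(&pagelist, new_page, NULL, ...); */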
/linux-6.1.9/fs/ubifs/
budget.c 364 znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) + in calc_idx_growth()
381 if (req->new_page) in calc_data_growth()
427 ubifs_assert(c, req->new_page <= 1); in ubifs_budget_space()
514 ubifs_assert(c, req->new_page <= 1); in ubifs_release_budget()
file.c 198 struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 }; in release_new_page_budget()
223 struct ubifs_budget_req req = { .new_page = 1 }; in write_begin_slow()
364 req.new_page = 1; in allocate_budget()
1492 struct ubifs_budget_req req = { .new_page = 1 }; in ubifs_vm_page_mkwrite()
ubifs.h 897 unsigned int new_page:1; member
907 unsigned int new_page; member
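In UBIFS, new_page is a flag in struct ubifs_budget_req: a writer declares up front that it is about to dirty one brand-new page, so the budgeting code can reserve index and data space for it (hence the asserts above that the flag never exceeds 1). The usual pattern around the requests shown above, sketched:

    struct ubifs_budget_req req = { .new_page = 1 };    /* budget for one new data page */
    int err;

    err = ubifs_budget_space(c, &req);                  /* may force write-back or GC */
    if (err)
            return err;                                 /* typically -ENOSPC */

    /* ... dirty the page ... */

    /* if the operation is aborted, the reservation must be handed back: */
    ubifs_release_budget(c, &req);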
/linux-6.1.9/drivers/net/ethernet/ti/
cpsw.c 346 struct page *new_page, *page = token; in cpsw_rx_handler() local
378 new_page = page; in cpsw_rx_handler()
387 new_page = page_pool_dev_alloc_pages(pool); in cpsw_rx_handler()
388 if (unlikely(!new_page)) { in cpsw_rx_handler()
389 new_page = page; in cpsw_rx_handler()
441 xmeta = page_address(new_page) + CPSW_XMETA_OFFSET; in cpsw_rx_handler()
445 dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA; in cpsw_rx_handler()
446 ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, in cpsw_rx_handler()
450 page_pool_recycle_direct(pool, new_page); in cpsw_rx_handler()
cpsw_new.c 284 struct page *new_page, *page = token; in cpsw_rx_handler() local
322 new_page = page; in cpsw_rx_handler()
331 new_page = page_pool_dev_alloc_pages(pool); in cpsw_rx_handler()
332 if (unlikely(!new_page)) { in cpsw_rx_handler()
333 new_page = page; in cpsw_rx_handler()
385 xmeta = page_address(new_page) + CPSW_XMETA_OFFSET; in cpsw_rx_handler()
389 dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA; in cpsw_rx_handler()
390 ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, in cpsw_rx_handler()
394 page_pool_recycle_direct(pool, new_page); in cpsw_rx_handler()
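Both cpsw RX handlers use the page_pool refill pattern also referenced by Documentation/networking/page_pool.rst below: allocate a replacement page before handing the received one to the stack, fall back to reusing the old page if allocation fails, and recycle the replacement if re-arming the DMA channel fails. A condensed sketch built from the lines above:

    new_page = page_pool_dev_alloc_pages(pool);          /* replacement RX buffer */
    if (unlikely(!new_page))
            new_page = page;                             /* OOM: drop the packet, reuse old page */

    /* ... otherwise build an skb (or run XDP) from the old page ... */

    dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
    if (cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, ...) < 0)
            page_pool_recycle_direct(pool, new_page);    /* could not re-arm: return to pool */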
/linux-6.1.9/fs/nilfs2/
namei.c 377 struct page *new_page; in nilfs_rename() local
385 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); in nilfs_rename()
388 nilfs_set_link(new_dir, new_de, new_page, old_inode); in nilfs_rename()
/linux-6.1.9/drivers/net/ethernet/microsoft/mana/
mana_en.c 1197 struct page *new_page; in mana_process_rx_cqe() local
1240 new_page = rxq->xdp_save_page; in mana_process_rx_cqe()
1243 new_page = alloc_page(GFP_ATOMIC); in mana_process_rx_cqe()
1246 if (new_page) { in mana_process_rx_cqe()
1247 da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize, in mana_process_rx_cqe()
1251 __free_page(new_page); in mana_process_rx_cqe()
1252 new_page = NULL; in mana_process_rx_cqe()
1256 new_buf = new_page ? page_to_virt(new_page) : NULL; in mana_process_rx_cqe()
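mana_process_rx_cqe() either reuses the page saved from a previous XDP_TX or allocates a fresh one, DMA-maps it for receive, and frees it again if the mapping fails; a NULL new_buf tells the caller to keep the old buffer. Condensed (the DMA direction is an assumption, since the snippet truncates the call):

    new_page = alloc_page(GFP_ATOMIC);                   /* refill in softirq context */
    if (new_page) {
            da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
                              DMA_FROM_DEVICE);          /* assumed: device writes RX data */
            if (dma_mapping_error(dev, da)) {
                    __free_page(new_page);               /* mapping failed: drop the page */
                    new_page = NULL;
            }
    }
    new_buf = new_page ? page_to_virt(new_page) : NULL;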
/linux-6.1.9/drivers/tty/serial/
icom.c 607 unsigned char *new_page = NULL; in load_code() local
680 new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL); in load_code()
682 if (!new_page) { in load_code()
702 new_page[index] = fw->data[index]; in load_code()
759 if (new_page != NULL) in load_code()
760 dma_free_coherent(&dev->dev, 4096, new_page, temp_pci); in load_code()
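The icom firmware loader grabs a single coherent DMA page, copies the firmware image into it byte by byte, and releases it with the matching size and handle on the way out. The alloc/copy/free trio, roughly (the copy length is assumed to be fw->size):

    new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL);
    if (!new_page)
            goto err_out;                                /* no coherent memory available */

    for (index = 0; index < fw->size; index++)
            new_page[index] = fw->data[index];           /* copy firmware into the DMA page */

    /* ... point the adapter at temp_pci and kick off the load ... */

    dma_free_coherent(&dev->dev, 4096, new_page, temp_pci);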
/linux-6.1.9/fs/jbd2/
journal.c 345 struct page *new_page; in jbd2_journal_write_metadata_buffer() local
374 new_page = virt_to_page(jh_in->b_frozen_data); in jbd2_journal_write_metadata_buffer()
377 new_page = jh2bh(jh_in)->b_page; in jbd2_journal_write_metadata_buffer()
381 mapped_data = kmap_atomic(new_page); in jbd2_journal_write_metadata_buffer()
421 mapped_data = kmap_atomic(new_page); in jbd2_journal_write_metadata_buffer()
425 new_page = virt_to_page(tmp); in jbd2_journal_write_metadata_buffer()
442 mapped_data = kmap_atomic(new_page); in jbd2_journal_write_metadata_buffer()
447 set_bh_page(new_bh, new_page, new_offset); in jbd2_journal_write_metadata_buffer()
/linux-6.1.9/drivers/net/wireless/intel/iwlwifi/fw/
dbg.c 569 struct page *new_page; in alloc_sgtable() local
580 new_page = alloc_page(GFP_KERNEL); in alloc_sgtable()
581 if (!new_page) { in alloc_sgtable()
585 new_page = sg_page(iter); in alloc_sgtable()
586 if (new_page) in alloc_sgtable()
587 __free_page(new_page); in alloc_sgtable()
594 sg_set_page(iter, new_page, alloc_size, 0); in alloc_sgtable()
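alloc_sgtable() gives every scatterlist entry its own freshly allocated page and, if a later allocation fails, walks the table again to free the pages already attached. In outline, from the lines above:

    for_each_sg(table, iter, sg_nents(table), i) {
            new_page = alloc_page(GFP_KERNEL);
            if (!new_page) {
                    /* unwind: free every page already attached to the table */
                    for_each_sg(table, iter, sg_nents(table), i) {
                            new_page = sg_page(iter);
                            if (new_page)
                                    __free_page(new_page);
                    }
                    return NULL;
            }
            sg_set_page(iter, new_page, alloc_size, 0);  /* attach the page to this entry */
    }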
/linux-6.1.9/drivers/staging/rts5208/
xd.c 1102 u32 old_page, new_page; in xd_copy_page() local
1116 new_page = (new_blk << xd_card->block_shift) + start_page; in xd_copy_page()
1185 xd_assign_phy_addr(chip, new_page, XD_RW_ADDR); in xd_copy_page()
1208 new_page++; in xd_copy_page()
/linux-6.1.9/Documentation/networking/
page_pool.rst 197 new_page = page_pool_dev_alloc_pages(page_pool);
/linux-6.1.9/drivers/net/vmxnet3/
vmxnet3_drv.c 1433 struct page *new_page = NULL; in vmxnet3_rq_rx_complete() local
1609 new_page = alloc_page(GFP_ATOMIC); in vmxnet3_rq_rx_complete()
1615 if (unlikely(!new_page)) { in vmxnet3_rq_rx_complete()
1623 new_page, in vmxnet3_rq_rx_complete()
1628 put_page(new_page); in vmxnet3_rq_rx_complete()
1643 rbi->page = new_page; in vmxnet3_rq_rx_complete()
/linux-6.1.9/drivers/net/ethernet/freescale/
fec_main.c 1547 struct page *new_page; in fec_enet_update_cbd() local
1550 new_page = page_pool_dev_alloc_pages(rxq->page_pool); in fec_enet_update_cbd()
1551 WARN_ON(!new_page); in fec_enet_update_cbd()
1552 rxq->rx_skb_info[index].page = new_page; in fec_enet_update_cbd()
1555 phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; in fec_enet_update_cbd()
