/linux-6.6.21/mm/ |
D | truncate.c |
      31  static inline void __clear_shadow_entry(struct address_space *mapping,   in __clear_shadow_entry() argument
      34  XA_STATE(xas, &mapping->i_pages, index);   in __clear_shadow_entry()
      42  static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,   in clear_shadow_entry() argument
      45  spin_lock(&mapping->host->i_lock);   in clear_shadow_entry()
      46  xa_lock_irq(&mapping->i_pages);   in clear_shadow_entry()
      47  __clear_shadow_entry(mapping, index, entry);   in clear_shadow_entry()
      48  xa_unlock_irq(&mapping->i_pages);   in clear_shadow_entry()
      49  if (mapping_shrinkable(mapping))   in clear_shadow_entry()
      50  inode_add_lru(mapping->host);   in clear_shadow_entry()
      51  spin_unlock(&mapping->host->i_lock);   in clear_shadow_entry()
      [all …]
|
D | filemap.c |
     126  static void page_cache_delete(struct address_space *mapping,   in page_cache_delete() argument
     129  XA_STATE(xas, &mapping->i_pages, folio->index);   in page_cache_delete()
     132  mapping_set_update(&xas, mapping);   in page_cache_delete()
     145  folio->mapping = NULL;   in page_cache_delete()
     147  mapping->nrpages -= nr;   in page_cache_delete()
     150  static void filemap_unaccount_folio(struct address_space *mapping,   in filemap_unaccount_folio() argument
     163  if (mapping_exiting(mapping) && !folio_test_large(folio)) {   in filemap_unaccount_folio()
     192  filemap_nr_thps_dec(mapping);   in filemap_unaccount_folio()
     210  mapping_can_writeback(mapping)))   in filemap_unaccount_folio()
     211  folio_account_cleaned(folio, inode_to_wb(mapping->host));   in filemap_unaccount_folio()
     [all …]
|
D | readahead.c |
     139  file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)   in file_ra_state_init() argument
     141  ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;   in file_ra_state_init()
     148  const struct address_space_operations *aops = rac->mapping->a_ops;   in read_pages()
     208  struct address_space *mapping = ractl->mapping;   in page_cache_ra_unbounded() local
     210  gfp_t gfp_mask = readahead_gfp_mask(mapping);   in page_cache_ra_unbounded()
     225  filemap_invalidate_lock_shared(mapping);   in page_cache_ra_unbounded()
     230  struct folio *folio = xa_load(&mapping->i_pages, index + i);   in page_cache_ra_unbounded()
     250  if (filemap_add_folio(mapping, folio, index + i,   in page_cache_ra_unbounded()
     270  filemap_invalidate_unlock_shared(mapping);   in page_cache_ra_unbounded()
     284  struct inode *inode = ractl->mapping->host;   in do_page_cache_ra()
     [all …]
|
/linux-6.6.21/include/linux/ |
D | pagemap.h |
      21  unsigned long invalidate_mapping_pages(struct address_space *mapping,
      30  int invalidate_inode_pages2(struct address_space *mapping);
      31  int invalidate_inode_pages2_range(struct address_space *mapping,
      39  int filemap_fdatawait_keep_errors(struct address_space *mapping);
      41  int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
      44  static inline int filemap_fdatawait(struct address_space *mapping)   in filemap_fdatawait() argument
      46  return filemap_fdatawait_range(mapping, 0, LLONG_MAX);   in filemap_fdatawait()
      50  int filemap_write_and_wait_range(struct address_space *mapping,
      52  int __filemap_fdatawrite_range(struct address_space *mapping,
      54  int filemap_fdatawrite_range(struct address_space *mapping,
      [all …]
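These declarations form the standard flush-and-wait surface of an address_space. A minimal sketch of how an fsync-style path might drive it — the helper is hypothetical; only the filemap_* call is real API:

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Hypothetical helper: flush dirty page-cache pages of an inode in
     * [start, end] and wait for writeback, as an fsync path would. */
    static int example_flush_range(struct inode *inode, loff_t start, loff_t end)
    {
            struct address_space *mapping = inode->i_mapping;

            /* Starts writeback for the range, waits for it to complete,
             * and returns any writeback error recorded on the mapping. */
            return filemap_write_and_wait_range(mapping, start, end);
    }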
|
D | io-mapping.h |
      58  io_mapping_fini(struct io_mapping *mapping)   in io_mapping_fini() argument
      60  iomap_free(mapping->base, mapping->size);   in io_mapping_fini()
      65  io_mapping_map_atomic_wc(struct io_mapping *mapping,   in io_mapping_map_atomic_wc() argument
      70  BUG_ON(offset >= mapping->size);   in io_mapping_map_atomic_wc()
      71  phys_addr = mapping->base + offset;   in io_mapping_map_atomic_wc()
      77  return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);   in io_mapping_map_atomic_wc()
      92  io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)   in io_mapping_map_local_wc() argument
      96  BUG_ON(offset >= mapping->size);   in io_mapping_map_local_wc()
      97  phys_addr = mapping->base + offset;   in io_mapping_map_local_wc()
      98  return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);   in io_mapping_map_local_wc()
      [all …]
|
D | tpm_eventlog.h |
     166  void *mapping = NULL;   in __calc_tpm2_event_size() local
     186  mapping = TPM_MEMREMAP((unsigned long)marker_start,   in __calc_tpm2_event_size()
     188  if (!mapping) {   in __calc_tpm2_event_size()
     193  mapping = marker_start;   in __calc_tpm2_event_size()
     196  event = (struct tcg_pcr_event2_head *)mapping;   in __calc_tpm2_event_size()
     233  TPM_MEMUNMAP(mapping, mapping_size);   in __calc_tpm2_event_size()
     235  mapping = TPM_MEMREMAP((unsigned long)marker,   in __calc_tpm2_event_size()
     237  if (!mapping) {   in __calc_tpm2_event_size()
     242  mapping = marker;   in __calc_tpm2_event_size()
     245  memcpy(&halg, mapping, halg_size);   in __calc_tpm2_event_size()
     [all …]
|
D | secretmem.h |
      11  struct address_space *mapping;   in folio_is_secretmem() local
      22  mapping = (struct address_space *)   in folio_is_secretmem()
      23  ((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS);   in folio_is_secretmem()
      25  if (!mapping || mapping != folio->mapping)   in folio_is_secretmem()
      28  return mapping->a_ops == &secretmem_aops;   in folio_is_secretmem()
|
D | shmem_fs.h |
      97  static inline bool shmem_mapping(struct address_space *mapping)   in shmem_mapping() argument
      99  return mapping->a_ops == &shmem_aops;   in shmem_mapping()
     102  static inline bool shmem_mapping(struct address_space *mapping)   in shmem_mapping() argument
     107  extern void shmem_unlock_mapping(struct address_space *mapping);
     108  extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
     123  extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
     137  struct folio *shmem_read_folio_gfp(struct address_space *mapping,
     140  static inline struct folio *shmem_read_folio(struct address_space *mapping,   in shmem_read_folio() argument
     143  return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));   in shmem_read_folio()
     147  struct address_space *mapping, pgoff_t index)   in shmem_read_mapping_page() argument
     [all …]
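shmem_mapping() is the usual guard before calling into the shmem-specific readers declared above. A hedged sketch — the wrapper is hypothetical; shmem_read_folio() is the API quoted at line 140:

    #include <linux/err.h>
    #include <linux/shmem_fs.h>

    /* Hypothetical wrapper: only read through shmem_read_folio() when
     * the mapping really is tmpfs-backed. */
    static struct folio *example_read_shmem(struct address_space *mapping,
                                            pgoff_t index)
    {
            if (!shmem_mapping(mapping))
                    return ERR_PTR(-EINVAL);

            /* Returns a referenced, uptodate folio or an ERR_PTR();
             * the caller must folio_put() it when finished. */
            return shmem_read_folio(mapping, index);
    }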
|
/linux-6.6.21/drivers/gpu/drm/panfrost/ |
D | panfrost_gem.c |
      59  struct panfrost_gem_mapping *iter, *mapping = NULL;   in panfrost_gem_mapping_get() local
      65  mapping = iter;   in panfrost_gem_mapping_get()
      71  return mapping;   in panfrost_gem_mapping_get()
      75  panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)   in panfrost_gem_teardown_mapping() argument
      77  if (mapping->active)   in panfrost_gem_teardown_mapping()
      78  panfrost_mmu_unmap(mapping);   in panfrost_gem_teardown_mapping()
      80  spin_lock(&mapping->mmu->mm_lock);   in panfrost_gem_teardown_mapping()
      81  if (drm_mm_node_allocated(&mapping->mmnode))   in panfrost_gem_teardown_mapping()
      82  drm_mm_remove_node(&mapping->mmnode);   in panfrost_gem_teardown_mapping()
      83  spin_unlock(&mapping->mmu->mm_lock);   in panfrost_gem_teardown_mapping()
      [all …]
|
/linux-6.6.21/drivers/gpu/drm/tegra/ |
D | uapi.c |
      17  struct tegra_drm_mapping *mapping =   in tegra_drm_mapping_release() local
      20  host1x_bo_unpin(mapping->map);   in tegra_drm_mapping_release()
      21  host1x_bo_put(mapping->bo);   in tegra_drm_mapping_release()
      23  kfree(mapping);   in tegra_drm_mapping_release()
      26  void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)   in tegra_drm_mapping_put() argument
      28  kref_put(&mapping->ref, tegra_drm_mapping_release);   in tegra_drm_mapping_put()
      33  struct tegra_drm_mapping *mapping;   in tegra_drm_channel_context_close() local
      39  xa_for_each(&context->mappings, id, mapping)   in tegra_drm_channel_context_close()
      40  tegra_drm_mapping_put(mapping);   in tegra_drm_channel_context_close()
     189  struct tegra_drm_mapping *mapping;   in tegra_drm_ioctl_channel_map() local
     [all …]
|
/linux-6.6.21/drivers/media/usb/uvc/ |
D | uvc_ctrl.c |
     384  static int uvc_mapping_get_menu_value(const struct uvc_control_mapping *mapping,   in uvc_mapping_get_menu_value() argument
     387  if (!test_bit(idx, &mapping->menu_mask))   in uvc_mapping_get_menu_value()
     390  if (mapping->menu_mapping)   in uvc_mapping_get_menu_value()
     391  return mapping->menu_mapping[idx];   in uvc_mapping_get_menu_value()
     397  uvc_mapping_get_menu_name(const struct uvc_control_mapping *mapping, u32 idx)   in uvc_mapping_get_menu_name() argument
     399  if (!test_bit(idx, &mapping->menu_mask))   in uvc_mapping_get_menu_name()
     402  if (mapping->menu_names)   in uvc_mapping_get_menu_name()
     403  return mapping->menu_names[idx];   in uvc_mapping_get_menu_name()
     405  return v4l2_ctrl_get_menu(mapping->id)[idx];   in uvc_mapping_get_menu_name()
     408  static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,   in uvc_ctrl_get_zoom() argument
     [all …]
|
/linux-6.6.21/arch/arm/mm/ |
D | dma-mapping.c |
     754  static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
     756  static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,   in __alloc_iova() argument
     762  size_t mapping_size = mapping->bits << PAGE_SHIFT;   in __alloc_iova()
     773  spin_lock_irqsave(&mapping->lock, flags);   in __alloc_iova()
     774  for (i = 0; i < mapping->nr_bitmaps; i++) {   in __alloc_iova()
     775  start = bitmap_find_next_zero_area(mapping->bitmaps[i],   in __alloc_iova()
     776  mapping->bits, 0, count, align);   in __alloc_iova()
     778  if (start > mapping->bits)   in __alloc_iova()
     781  bitmap_set(mapping->bitmaps[i], start, count);   in __alloc_iova()
     790  if (i == mapping->nr_bitmaps) {   in __alloc_iova()
     [all …]
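__alloc_iova() is a bitmap allocator: find a run of clear bits under the lock, set it, and turn the bit index into a bus address. A simplified single-bitmap sketch of the same technique — struct and function names here are illustrative, not the driver's:

    #include <linux/bitmap.h>
    #include <linux/dma-mapping.h>
    #include <linux/spinlock.h>

    /* Illustrative single-bitmap IOVA pool; the real code manages an
     * array of bitmaps it can grow via extend_iommu_mapping(). */
    struct simple_iova_pool {
            spinlock_t     lock;
            unsigned long *bits;    /* one bit per PAGE_SIZE slot */
            unsigned int   nbits;
            dma_addr_t     base;
    };

    static dma_addr_t simple_alloc_iova(struct simple_iova_pool *pool,
                                        unsigned int count)
    {
            unsigned long flags, start;

            spin_lock_irqsave(&pool->lock, flags);
            start = bitmap_find_next_zero_area(pool->bits, pool->nbits,
                                               0, count, 0);
            /* On failure the returned index lies past the bitmap end,
             * the same check the driver uses above. */
            if (start > pool->nbits) {
                    spin_unlock_irqrestore(&pool->lock, flags);
                    return DMA_MAPPING_ERROR;
            }
            bitmap_set(pool->bits, start, count);
            spin_unlock_irqrestore(&pool->lock, flags);

            return pool->base + ((dma_addr_t)start << PAGE_SHIFT);
    }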
|
D | flush.c |
     199  void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)   in __flush_dcache_folio() argument
     234  if (mapping && cache_is_vipt_aliasing())   in __flush_dcache_folio()
     238  static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)   in __flush_dcache_aliases() argument
     253  flush_dcache_mmap_lock(mapping);   in __flush_dcache_aliases()
     254  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {   in __flush_dcache_aliases()
     281  flush_dcache_mmap_unlock(mapping);   in __flush_dcache_aliases()
     289  struct address_space *mapping;   in __sync_icache_dcache() local
     300  mapping = folio_flush_mapping(folio);   in __sync_icache_dcache()
     302  mapping = NULL;   in __sync_icache_dcache()
     305  __flush_dcache_folio(mapping, folio);   in __sync_icache_dcache()
     [all …]
|
/linux-6.6.21/drivers/gpu/drm/etnaviv/ |
D | etnaviv_mmu.c |
     127  struct etnaviv_vram_mapping *mapping)   in etnaviv_iommu_remove_mapping() argument
     129  struct etnaviv_gem_object *etnaviv_obj = mapping->object;   in etnaviv_iommu_remove_mapping()
     133  etnaviv_iommu_unmap(context, mapping->vram_node.start,   in etnaviv_iommu_remove_mapping()
     135  drm_mm_remove_node(&mapping->vram_node);   in etnaviv_iommu_remove_mapping()
     138  void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)   in etnaviv_iommu_reap_mapping() argument
     140  struct etnaviv_iommu_context *context = mapping->context;   in etnaviv_iommu_reap_mapping()
     143  WARN_ON(mapping->use);   in etnaviv_iommu_reap_mapping()
     145  etnaviv_iommu_remove_mapping(context, mapping);   in etnaviv_iommu_reap_mapping()
     146  etnaviv_iommu_context_put(mapping->context);   in etnaviv_iommu_reap_mapping()
     147  mapping->context = NULL;   in etnaviv_iommu_reap_mapping()
     [all …]
|
D | etnaviv_gem.c |
     219  struct etnaviv_vram_mapping *mapping;   in etnaviv_gem_get_vram_mapping() local
     221  list_for_each_entry(mapping, &obj->vram_list, obj_node) {   in etnaviv_gem_get_vram_mapping()
     222  if (mapping->context == context)   in etnaviv_gem_get_vram_mapping()
     223  return mapping;   in etnaviv_gem_get_vram_mapping()
     229  void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)   in etnaviv_gem_mapping_unreference() argument
     231  struct etnaviv_gem_object *etnaviv_obj = mapping->object;   in etnaviv_gem_mapping_unreference()
     234  WARN_ON(mapping->use == 0);   in etnaviv_gem_mapping_unreference()
     235  mapping->use -= 1;   in etnaviv_gem_mapping_unreference()
     246  struct etnaviv_vram_mapping *mapping;   in etnaviv_gem_mapping_get() local
     251  mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);   in etnaviv_gem_mapping_get()
     [all …]
|
/linux-6.6.21/drivers/gpu/drm/exynos/ |
D | exynos_drm_dma.c |
      66  ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);   in drm_iommu_attach_device()
      68  ret = iommu_attach_device(priv->mapping, subdrv_dev);   in drm_iommu_attach_device()
      92  iommu_detach_device(priv->mapping, subdrv_dev);   in drm_iommu_detach_device()
     109  if (!priv->mapping) {   in exynos_drm_register_dma()
     110  void *mapping = NULL;   in exynos_drm_register_dma() local
     113  mapping = arm_iommu_create_mapping(&platform_bus_type,   in exynos_drm_register_dma()
     116  mapping = iommu_get_domain_for_dev(priv->dma_dev);   in exynos_drm_register_dma()
     118  if (!mapping)   in exynos_drm_register_dma()
     120  priv->mapping = mapping;   in exynos_drm_register_dma()
     140  arm_iommu_release_mapping(priv->mapping);   in exynos_drm_cleanup_dma()
     [all …]
|
/linux-6.6.21/fs/gfs2/ |
D | aops.c |
      95  struct inode * const inode = folio->mapping->host;   in gfs2_write_jdata_folio()
     127  struct inode *inode = folio->mapping->host;   in __gfs2_jdata_write_folio()
     154  struct inode *inode = page->mapping->host;   in gfs2_jdata_writepage()
     178  static int gfs2_writepages(struct address_space *mapping,   in gfs2_writepages() argument
     181  struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);   in gfs2_writepages()
     191  ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);   in gfs2_writepages()
     207  static int gfs2_write_jdata_batch(struct address_space *mapping,   in gfs2_write_jdata_batch() argument
     212  struct inode *inode = mapping->host;   in gfs2_write_jdata_batch()
     235  if (unlikely(folio->mapping != mapping)) {   in gfs2_write_jdata_batch()
     307  static int gfs2_write_cache_jdata(struct address_space *mapping,   in gfs2_write_cache_jdata() argument
     [all …]
|
/linux-6.6.21/fs/ |
D | dax.c |
     325  return page->mapping == PAGE_MAPPING_DAX_SHARED;   in dax_page_is_shared()
     334  if (page->mapping != PAGE_MAPPING_DAX_SHARED) {   in dax_page_share_get()
     339  if (page->mapping)   in dax_page_share_get()
     341  page->mapping = PAGE_MAPPING_DAX_SHARED;   in dax_page_share_get()
     356  static void dax_associate_entry(void *entry, struct address_space *mapping,   in dax_associate_entry() argument
     372  WARN_ON_ONCE(page->mapping);   in dax_associate_entry()
     373  page->mapping = mapping;   in dax_associate_entry()
     379  static void dax_disassociate_entry(void *entry, struct address_space *mapping,   in dax_disassociate_entry() argument
     396  WARN_ON_ONCE(page->mapping && page->mapping != mapping);   in dax_disassociate_entry()
     397  page->mapping = NULL;   in dax_disassociate_entry()
     [all …]
|
/linux-6.6.21/Documentation/powerpc/ |
D | vmemmap_dedup.rst |
      14  With 2M PMD level mapping, we require 32 struct pages and a single 64K vmemmap
      18  With 1G PUD level mapping, we require 16384 struct pages and a single 64K
      20  require 16 64K pages in vmemmap to map the struct page for 1G PUD level mapping.
      23  +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+   [ASCII diagram]
      35  | mapping   |                     +-----------+            |           |   [ASCII diagram]
      46  With 4K page size, 2M PMD level mapping requires 512 struct pages and a single
      48  require 8 4K pages in vmemmap to map the struct page for 2M pmd level mapping.
      52  +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+   [ASCII diagram]
      64  | mapping   |                     +-----------+            |           |   [ASCII diagram]
      74  With 1G PUD level mapping, we require 262144 struct pages and a single 4K
      [all …]
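The page counts quoted above are plain size arithmetic. A small sketch, assuming sizeof(struct page) == 64 bytes as the quoted figures imply:

    /* Worked arithmetic behind the figures above; assumes
     * sizeof(struct page) == 64 bytes. */
    #define SZ_4K   (4UL << 10)
    #define SZ_64K  (64UL << 10)
    #define SZ_2M   (2UL << 20)
    #define SZ_1G   (1UL << 30)
    #define STRUCT_PAGE_SIZE 64UL

    static unsigned long vmemmap_pages_needed(unsigned long map_size,
                                              unsigned long base_page)
    {
            unsigned long nr_struct_pages = map_size / base_page;
            unsigned long vmemmap_bytes   = nr_struct_pages * STRUCT_PAGE_SIZE;

            return (vmemmap_bytes + base_page - 1) / base_page;
    }

    /* 64K base pages: 2M PMD -> 2M/64K = 32 struct pages, 32*64 = 2K,
     *                 i.e. a single 64K vmemmap page;
     *                 1G PUD -> 16384 struct pages, 1M of vmemmap,
     *                 i.e. 16 64K vmemmap pages.
     * 4K base pages:  2M PMD -> 512 struct pages, 32K of vmemmap,
     *                 i.e. 8 4K vmemmap pages;
     *                 1G PUD -> 262144 struct pages. */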
|
/linux-6.6.21/Documentation/translations/zh_CN/mm/ |
D | page_migration.rst |
     143  2. ``int (*migratepage) (struct address_space *mapping,``
     168  void __SetPageMovable(struct page *page, struct address_space *mapping)
     171  PG_movable is not a real flag of struct page. Rather, the VM reuses the low bits of page->mapping
     175  page->mapping = page->mapping | PAGE_MAPPING_MOVABLE;
     177  So a driver should not access page->mapping directly. Instead, it should use page_mapping(), which can
     178  mask off the low 2 bits of page->mapping under the page lock to obtain the correct struct address_space.
     181  non-LRU movable pages, because the page->mapping field shares a union with other variables in struct page. If
     182  the driver frees the page after the VM has isolated it, even though page->mapping has PAGE_MAPPING_MOVABLE set,
     185  it is impossible for page->mapping to have PAGE_MAPPING_MOVABLE set. With lock_page() during pfn scanning
     189  …, PageMovable() validates page->mapping under lock_page() and
     [all …]
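A minimal sketch of the low-bit tagging the text describes. PAGE_MAPPING_MOVABLE and PAGE_MAPPING_FLAGS are the real masks from <linux/page-flags.h>; the two helpers are illustrative, and real drivers go through __SetPageMovable()/page_mapping() rather than open-coding this:

    #include <linux/mm_types.h>
    #include <linux/page-flags.h>

    /* Illustrative: tag a non-LRU page as movable by reusing the low
     * bits of page->mapping, exactly as the text above describes. */
    static void example_tag_movable(struct page *page,
                                    struct address_space *mapping)
    {
            page->mapping = (void *)((unsigned long)mapping |
                                     PAGE_MAPPING_MOVABLE);
    }

    /* Illustrative: recover the real address_space by masking the low
     * two bits off; this is what page_mapping() does for callers. */
    static struct address_space *example_untag(struct page *page)
    {
            return (struct address_space *)
                    ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
    }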
|
/linux-6.6.21/drivers/net/wireless/marvell/mwifiex/ |
D | util.h |
      57  struct mwifiex_dma_mapping *mapping)   in mwifiex_store_mapping() argument
      61  memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));   in mwifiex_store_mapping()
      65  struct mwifiex_dma_mapping *mapping)   in mwifiex_get_mapping() argument
      69  memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));   in mwifiex_get_mapping()
      74  struct mwifiex_dma_mapping mapping;   in MWIFIEX_SKB_DMA_ADDR() local
      76  mwifiex_get_mapping(skb, &mapping);   in MWIFIEX_SKB_DMA_ADDR()
      78  return mapping.addr;   in MWIFIEX_SKB_DMA_ADDR()
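The helpers above round-trip a DMA descriptor through the skb control buffer. A hedged sketch of the mapping side — the caller is hypothetical, and the `len` member of struct mwifiex_dma_mapping is assumed from the getter's symmetric use:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Hypothetical transmit-path caller: DMA-map the skb data and stash
     * the result in skb->cb so the completion handler can fetch it back
     * with mwifiex_get_mapping()/MWIFIEX_SKB_DMA_ADDR(). */
    static int example_map_tx_skb(struct device *dev, struct sk_buff *skb)
    {
            struct mwifiex_dma_mapping mapping;

            mapping.addr = dma_map_single(dev, skb->data, skb->len,
                                          DMA_TO_DEVICE);
            if (dma_mapping_error(dev, mapping.addr))
                    return -ENOMEM;
            mapping.len = skb->len;   /* 'len' member assumed, see above */

            mwifiex_store_mapping(skb, &mapping);
            return 0;
    }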
|
/linux-6.6.21/drivers/sh/clk/ |
D | core.c |
     340  struct clk_mapping *mapping = clk->mapping;   in clk_establish_mapping() local
     345  if (!mapping) {   in clk_establish_mapping()
     352  clk->mapping = &dummy_mapping;   in clk_establish_mapping()
     361  mapping = clkp->mapping;   in clk_establish_mapping()
     362  BUG_ON(!mapping);   in clk_establish_mapping()
     368  if (!mapping->base && mapping->phys) {   in clk_establish_mapping()
     369  kref_init(&mapping->ref);   in clk_establish_mapping()
     371  mapping->base = ioremap(mapping->phys, mapping->len);   in clk_establish_mapping()
     372  if (unlikely(!mapping->base))   in clk_establish_mapping()
     374  } else if (mapping->base) {   in clk_establish_mapping()
     [all …]
|
/linux-6.6.21/Documentation/driver-api/ |
D | io-mapping.rst |
       8  The io_mapping functions in linux/io-mapping.h provide an abstraction for
       9  efficiently mapping small regions of an I/O device to the CPU. The initial
      14  A mapping object is created during driver initialization using::
      20  mappable, while 'size' indicates how large a mapping region to
      23  This _wc variant provides a mapping which may only be used with
      27  With this mapping object, individual pages can be mapped either temporarily
      31  void *io_mapping_map_local_wc(struct io_mapping *mapping,
      34  void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
      37  'offset' is the offset within the defined mapping region. Accessing
      46  Temporary mappings are only valid in the context of the caller. The mapping
      [all …]
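Pulling the quoted calls together, a minimal end-to-end sketch — the BAR address, size, and register write are illustrative:

    #include <linux/io.h>
    #include <linux/io-mapping.h>

    /* Minimal sketch: create a write-combining mapping of a device BAR,
     * map one page of it for a short-lived access, then tear down. */
    static int example_use_io_mapping(resource_size_t bar_phys,
                                      unsigned long bar_size)
    {
            struct io_mapping *mapping;
            void __iomem *vaddr;

            mapping = io_mapping_create_wc(bar_phys, bar_size);
            if (!mapping)
                    return -ENOMEM;

            /* Short-lived mapping, valid only in this context
             * (see the text above). */
            vaddr = io_mapping_map_local_wc(mapping, 0 /* offset */);
            writel(0x1, vaddr);
            io_mapping_unmap_local(vaddr);

            io_mapping_free(mapping);
            return 0;
    }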
|
/linux-6.6.21/fs/afs/ |
D | write.c |
      17  static int afs_writepages_region(struct address_space *mapping,
      30  bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)   in afs_dirty_folio() argument
      32  return fscache_dirty_folio(mapping, folio,   in afs_dirty_folio()
      33  afs_vnode_cache(AFS_FS_I(mapping->host)));   in afs_dirty_folio()
      50  static int afs_flush_conflicting_write(struct address_space *mapping,   in afs_flush_conflicting_write() argument
      61  return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX,   in afs_flush_conflicting_write()
      68  int afs_write_begin(struct file *file, struct address_space *mapping,   in afs_write_begin() argument
      87  ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);   in afs_write_begin()
     130  ret = afs_flush_conflicting_write(mapping, folio);   in afs_write_begin()
     153  int afs_write_end(struct file *file, struct address_space *mapping,   in afs_write_end() argument
     [all …]
|
/linux-6.6.21/arch/nios2/include/asm/ |
D | cacheflush.h |
      54  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)   argument
      55  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)   argument
      56  #define flush_dcache_mmap_lock_irqsave(mapping, flags) \   argument
      57  xa_lock_irqsave(&mapping->i_pages, flags)
      58  #define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \   argument
      59  xa_unlock_irqrestore(&mapping->i_pages, flags)
|