/linux-6.6.21/drivers/gpu/drm/nouveau/
D | nouveau_vmm.c |
      62  nouveau_vma_del(struct nouveau_vma **pvma) in nouveau_vma_del() argument
      64  struct nouveau_vma *vma = *pvma; in nouveau_vma_del()
      71  kfree(*pvma); in nouveau_vma_del()
      73  *pvma = NULL; in nouveau_vma_del()
      78  struct nouveau_vma **pvma) in nouveau_vma_new() argument
      85  if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) { in nouveau_vma_new()
      90  if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL))) in nouveau_vma_new()
     119  nouveau_vma_del(pvma); in nouveau_vma_new()
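The nouveau_vmm.c hits show the usual kernel out-parameter idiom: nouveau_vma_new() stores the mapping through *pvma (either one found by nouveau_vma_find() or one freshly kmalloc()ed, lines 85/90), and nouveau_vma_del() kfree()s it and resets *pvma to NULL (lines 71/73). A minimal caller sketch, assuming the nvbo/vmm objects implied by the nouveau_vma_find(nvbo, vmm) call above; illustrative kernel-style code, not a drop-in snippet:

    struct nouveau_vma *vma = NULL;
    int ret;

    ret = nouveau_vma_new(nvbo, vmm, &vma); /* fills *pvma on success */
    if (ret)
        return ret;

    /* ... use the mapping ... */

    nouveau_vma_del(&vma); /* kfree()s the vma and leaves the caller's pointer NULL */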
/linux-6.6.21/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
D | memgf100.c |
      35  u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma) in gf100_mem_map() argument
      55  nvkm_memory_size(memory), pvma); in gf100_mem_map()
      59  ret = nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm)); in gf100_mem_map()
      63  *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; in gf100_mem_map()
      64  *psize = (*pvma)->size; in gf100_mem_map()
D | memnv50.c |
      35  u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma) in nv50_mem_map() argument
      56  ret = nvkm_vmm_get(bar, 12, size, pvma); in nv50_mem_map()
      60  *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; in nv50_mem_map()
      61  *psize = (*pvma)->size; in nv50_mem_map()
      62  return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm)); in nv50_mem_map()
D | memnv04.c |
      32  u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma) in nv04_mem_map() argument
      46  *pvma = ERR_PTR(-ENODEV); in nv04_mem_map()
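nv04 has no MMU-backed mapping path, so nv04_mem_map() reports failure through the out-pointer itself rather than filling in a real allocation: *pvma = ERR_PTR(-ENODEV). A caller-side sketch of that convention, using the standard helpers from <linux/err.h>; the surrounding caller is hypothetical:

    if (IS_ERR(*pvma))          /* nv04 stored -ENODEV, not a real vma */
        return PTR_ERR(*pvma);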
D | vmm.c |
    1686  nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma) in nvkm_vmm_put() argument
    1688  struct nvkm_vma *vma = *pvma; in nvkm_vmm_put()
    1693  *pvma = NULL; in nvkm_vmm_put()
    1699  u8 shift, u8 align, u64 size, struct nvkm_vma **pvma) in nvkm_vmm_get_locked() argument
    1831  *pvma = vma; in nvkm_vmm_get_locked()
    1836  nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma) in nvkm_vmm_get() argument
    1840  ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma); in nvkm_vmm_get()
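vmm.c provides the get/put pair that memnv50.c above relies on: nvkm_vmm_get() forwards its page argument into the shift parameter of nvkm_vmm_get_locked() (line 1840 into line 1699) and hands the reserved range back through *pvma, while nvkm_vmm_put() tears it down and NULLs the caller's pointer (line 1693). A hedged caller sketch mirroring nv50_mem_map(), where 12 appears to request 4 KiB (1 << 12) granularity via that shift; vmm and size are assumed to be in scope:

    struct nvkm_vma *vma;
    int ret;

    ret = nvkm_vmm_get(vmm, 12, size, &vma); /* reserve VMM address space */
    if (ret)
        return ret;

    /* ... nvkm_memory_map() the backing memory into the range ... */

    nvkm_vmm_put(vmm, &vma); /* release the range; vma is reset to NULL */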
D | vmm.h |
     172  struct nvkm_vma **pvma);
/linux-6.6.21/mm/
D | rmap.c |
     335  int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) in anon_vma_fork() argument
     342  if (!pvma->anon_vma) in anon_vma_fork()
     352  error = anon_vma_clone(vma, pvma); in anon_vma_fork()
     373  anon_vma->root = pvma->anon_vma->root; in anon_vma_fork()
     374  anon_vma->parent = pvma->anon_vma; in anon_vma_fork()
D | shmem.c |
    1608  struct vm_area_struct pvma; in shmem_swapin() local
    1611  .vma = &pvma, in shmem_swapin()
    1614  shmem_pseudo_vma_init(&pvma, info, index); in shmem_swapin()
    1616  shmem_pseudo_vma_destroy(&pvma); in shmem_swapin()
    1650  struct vm_area_struct pvma; in shmem_alloc_hugefolio() local
    1660  shmem_pseudo_vma_init(&pvma, info, hindex); in shmem_alloc_hugefolio()
    1661  folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true); in shmem_alloc_hugefolio()
    1662  shmem_pseudo_vma_destroy(&pvma); in shmem_alloc_hugefolio()
    1671  struct vm_area_struct pvma; in shmem_alloc_folio() local
    1674  shmem_pseudo_vma_init(&pvma, info, index); in shmem_alloc_folio()
    [all …]
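In shmem.c, pvma is not a pointer at all but a throwaway vm_area_struct on the stack ("local"): shmem_pseudo_vma_init() dresses it up with the inode's NUMA policy so swap-in and vma_alloc_folio() can honour it, and shmem_pseudo_vma_destroy() tears the pseudo-vma back down afterwards. A sketch of the order-0 allocation path, assumed to mirror the HPAGE_PMD_ORDER call visible at line 1661, with gfp/info/index taken from the surrounding function:

    struct vm_area_struct pvma;
    struct folio *folio;

    shmem_pseudo_vma_init(&pvma, info, index);        /* temporary policy-carrying vma */
    folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);  /* order-0, no THP hint */
    shmem_pseudo_vma_destroy(&pvma);                   /* drop the policy reference */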
D | mempolicy.c |
    2800  struct vm_area_struct pvma; in mpol_shared_policy_init() local
    2818  vma_init(&pvma, NULL); in mpol_shared_policy_init()
    2819  pvma.vm_end = TASK_SIZE; /* policy covers entire file */ in mpol_shared_policy_init()
    2820  mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ in mpol_shared_policy_init()