Lines Matching refs:vma
750 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); in nvkm_vma_new() local
751 if (vma) { in nvkm_vma_new()
752 vma->addr = addr; in nvkm_vma_new()
753 vma->size = size; in nvkm_vma_new()
754 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
755 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
757 return vma; in nvkm_vma_new()
761 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) in nvkm_vma_tail() argument
765 BUG_ON(vma->size == tail); in nvkm_vma_tail()
767 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail))) in nvkm_vma_tail()
769 vma->size -= tail; in nvkm_vma_tail()
771 new->mapref = vma->mapref; in nvkm_vma_tail()
772 new->sparse = vma->sparse; in nvkm_vma_tail()
773 new->page = vma->page; in nvkm_vma_tail()
774 new->refd = vma->refd; in nvkm_vma_tail()
775 new->used = vma->used; in nvkm_vma_tail()
776 new->part = vma->part; in nvkm_vma_tail()
777 new->busy = vma->busy; in nvkm_vma_tail()
778 new->mapped = vma->mapped; in nvkm_vma_tail()
779 list_add(&new->head, &vma->head); in nvkm_vma_tail()
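nvkm_vma_tail() above splits the last `tail` bytes off an existing region into a new node that inherits the parent's state and is linked directly after it. Below is a minimal userspace sketch of that split; struct region and region_tail() are illustrative stand-ins, not the kernel's struct nvkm_vma and list_head.

#include <stdint.h>
#include <stdlib.h>

struct region {
        uint64_t addr;          /* start of the region */
        uint64_t size;          /* length in bytes */
        struct region *next;    /* crude stand-in for the list_head linkage */
};

static struct region *region_tail(struct region *r, uint64_t tail)
{
        struct region *new;

        if (tail == 0 || tail >= r->size)       /* the kernel BUG_ON()s size == tail */
                return NULL;

        new = calloc(1, sizeof(*new));
        if (!new)
                return NULL;

        new->addr = r->addr + (r->size - tail); /* new node covers the tail */
        new->size = tail;
        r->size -= tail;                        /* parent keeps the head */

        new->next = r->next;                    /* link the tail right after r */
        r->next = new;
        return new;
}

In the real function the new node also copies mapref, sparse, page, refd, used, part, busy and mapped from the parent, which is what later allows the two halves to be merged back as equals.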
784 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_remove() argument
786 rb_erase(&vma->tree, &vmm->free); in nvkm_vmm_free_remove()
790 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_delete() argument
792 nvkm_vmm_free_remove(vmm, vma); in nvkm_vmm_free_delete()
793 list_del(&vma->head); in nvkm_vmm_free_delete()
794 kfree(vma); in nvkm_vmm_free_delete()
798 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_insert() argument
806 if (vma->size < this->size) in nvkm_vmm_free_insert()
809 if (vma->size > this->size) in nvkm_vmm_free_insert()
812 if (vma->addr < this->addr) in nvkm_vmm_free_insert()
815 if (vma->addr > this->addr) in nvkm_vmm_free_insert()
821 rb_link_node(&vma->tree, parent, ptr); in nvkm_vmm_free_insert()
822 rb_insert_color(&vma->tree, &vmm->free); in nvkm_vmm_free_insert()
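nvkm_vmm_free_insert() keys the tree of free regions on size first and address second, so a best-fit allocation can descend toward the smallest region that still satisfies a request. A hypothetical comparator expressing that ordering, reusing the struct region stand-in from the sketch above:

/* Hypothetical comparator mirroring the descent in nvkm_vmm_free_insert(). */
static int free_cmp(const struct region *a, const struct region *b)
{
        if (a->size != b->size)
                return a->size < b->size ? -1 : 1;      /* primary key: size */
        if (a->addr != b->addr)
                return a->addr < b->addr ? -1 : 1;      /* tiebreak: address */
        return 0;
}

By contrast, the allocated-region tree maintained by nvkm_vmm_node_insert() below is keyed on address alone, since allocated regions never overlap.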
826 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_remove() argument
828 rb_erase(&vma->tree, &vmm->root); in nvkm_vmm_node_remove()
832 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_delete() argument
834 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_node_delete()
835 list_del(&vma->head); in nvkm_vmm_node_delete()
836 kfree(vma); in nvkm_vmm_node_delete()
840 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_insert() argument
848 if (vma->addr < this->addr) in nvkm_vmm_node_insert()
851 if (vma->addr > this->addr) in nvkm_vmm_node_insert()
857 rb_link_node(&vma->tree, parent, ptr); in nvkm_vmm_node_insert()
858 rb_insert_color(&vma->tree, &vmm->root); in nvkm_vmm_node_insert()
866 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree); in nvkm_vmm_node_search() local
867 if (addr < vma->addr) in nvkm_vmm_node_search()
870 if (addr >= vma->addr + vma->size) in nvkm_vmm_node_search()
873 return vma; in nvkm_vmm_node_search()
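nvkm_vmm_node_search() finds the allocated region containing an address by descending the address-keyed tree: go left when the address lies before the node, right when it lies at or beyond the node's end, and stop when it falls inside. The same containment test on a plain (unbalanced) binary tree, with left/right pointers standing in for the kernel's rb_node and the struct region stand-in from the first sketch:

struct node {
        struct region r;
        struct node *left, *right;      /* stand-ins for rb_node children */
};

static struct region *node_search(struct node *n, uint64_t addr)
{
        while (n) {
                if (addr < n->r.addr)
                        n = n->left;                    /* addr lies before this region */
                else if (addr >= n->r.addr + n->r.size)
                        n = n->right;                   /* addr lies past this region */
                else
                        return &n->r;                   /* addr falls inside */
        }
        return NULL;                                    /* no region contains addr */
}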
883 struct nvkm_vma *vma, struct nvkm_vma *next, u64 size) in nvkm_vmm_node_merge() argument
886 if (vma->size == size) { in nvkm_vmm_node_merge()
887 vma->size += next->size; in nvkm_vmm_node_merge()
890 prev->size += vma->size; in nvkm_vmm_node_merge()
891 nvkm_vmm_node_delete(vmm, vma); in nvkm_vmm_node_merge()
894 return vma; in nvkm_vmm_node_merge()
899 vma->size -= size; in nvkm_vmm_node_merge()
907 if (vma->size != size) { in nvkm_vmm_node_merge()
908 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_node_merge()
910 vma->addr += size; in nvkm_vmm_node_merge()
911 vma->size -= size; in nvkm_vmm_node_merge()
912 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_node_merge()
914 prev->size += vma->size; in nvkm_vmm_node_merge()
915 nvkm_vmm_node_delete(vmm, vma); in nvkm_vmm_node_merge()
920 return vma; in nvkm_vmm_node_merge()
925 struct nvkm_vma *vma, u64 addr, u64 size) in nvkm_vmm_node_split() argument
929 if (vma->addr != addr) { in nvkm_vmm_node_split()
930 prev = vma; in nvkm_vmm_node_split()
931 if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) in nvkm_vmm_node_split()
933 vma->part = true; in nvkm_vmm_node_split()
934 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_node_split()
937 if (vma->size != size) { in nvkm_vmm_node_split()
939 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) { in nvkm_vmm_node_split()
940 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size); in nvkm_vmm_node_split()
947 return vma; in nvkm_vmm_node_split()
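nvkm_vmm_node_split() carves the sub-range [addr, addr + size) out of an existing region in at most two steps: if the request does not start at the region's base, the tail beginning at addr is split off and becomes the working region (marked as a part); if the request is shorter than what remains, a second tail split trims the excess. A sketch of the same two-step carve built on the illustrative region_tail() helper from the first sketch, with error handling reduced to returning NULL (the kernel instead merges the half-finished split back together):

static struct region *region_split(struct region *r, uint64_t addr, uint64_t size)
{
        if (r->addr != addr) {
                /* Split off everything from addr onward; that tail becomes
                 * the region we keep working on. */
                r = region_tail(r, r->size + r->addr - addr);
                if (!r)
                        return NULL;
        }

        if (r->size != size) {
                /* Trim the excess beyond the requested size into its own node. */
                if (!region_tail(r, r->size - size))
                        return NULL;
        }

        return r;       /* now covers exactly [addr, addr + size) */
}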
951 nvkm_vma_dump(struct nvkm_vma *vma) in nvkm_vma_dump() argument
954 vma->addr, (u64)vma->size, in nvkm_vma_dump()
955 vma->used ? '-' : 'F', in nvkm_vma_dump()
956 vma->mapref ? 'R' : '-', in nvkm_vma_dump()
957 vma->sparse ? 'S' : '-', in nvkm_vma_dump()
958 vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-', in nvkm_vma_dump()
959 vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-', in nvkm_vma_dump()
960 vma->part ? 'P' : '-', in nvkm_vma_dump()
961 vma->busy ? 'B' : '-', in nvkm_vma_dump()
962 vma->mapped ? 'M' : '-', in nvkm_vma_dump()
963 vma->memory); in nvkm_vma_dump()
969 struct nvkm_vma *vma; in nvkm_vmm_dump() local
970 list_for_each_entry(vma, &vmm->list, head) { in nvkm_vmm_dump()
971 nvkm_vma_dump(vma); in nvkm_vmm_dump()
978 struct nvkm_vma *vma; in nvkm_vmm_dtor() local
985 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree); in nvkm_vmm_dtor() local
986 nvkm_vmm_put(vmm, &vma); in nvkm_vmm_dtor()
1000 vma = list_first_entry(&vmm->list, typeof(*vma), head); in nvkm_vmm_dtor()
1001 list_del(&vma->head); in nvkm_vmm_dtor()
1002 kfree(vma); in nvkm_vmm_dtor()
1019 struct nvkm_vma *vma; in nvkm_vmm_ctor_managed() local
1020 if (!(vma = nvkm_vma_new(addr, size))) in nvkm_vmm_ctor_managed()
1022 vma->mapref = true; in nvkm_vmm_ctor_managed()
1023 vma->sparse = false; in nvkm_vmm_ctor_managed()
1024 vma->used = true; in nvkm_vmm_ctor_managed()
1025 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_ctor_managed()
1026 list_add_tail(&vma->head, &vmm->list); in nvkm_vmm_ctor_managed()
1039 struct nvkm_vma *vma; in nvkm_vmm_ctor() local
1106 if (!(vma = nvkm_vma_new(addr, size))) in nvkm_vmm_ctor()
1108 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_ctor()
1109 list_add_tail(&vma->head, &vmm->list); in nvkm_vmm_ctor()
1126 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start))) in nvkm_vmm_ctor()
1129 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_ctor()
1130 list_add(&vma->head, &vmm->list); in nvkm_vmm_ctor()
1148 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_pfn_split_merge() argument
1154 if (vma->addr == addr && vma->part && (prev = node(vma, prev))) { in nvkm_vmm_pfn_split_merge()
1159 if (vma->addr + vma->size == addr + size && (next = node(vma, next))) { in nvkm_vmm_pfn_split_merge()
1166 return nvkm_vmm_node_merge(vmm, prev, vma, next, size); in nvkm_vmm_pfn_split_merge()
1167 return nvkm_vmm_node_split(vmm, vma, addr, size); in nvkm_vmm_pfn_split_merge()
1173 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr); in nvkm_vmm_pfn_unmap() local
1178 if (!vma) in nvkm_vmm_pfn_unmap()
1182 if (!vma->mapped || vma->memory) in nvkm_vmm_pfn_unmap()
1185 size = min(limit - start, vma->size - (start - vma->addr)); in nvkm_vmm_pfn_unmap()
1187 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd], in nvkm_vmm_pfn_unmap()
1190 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false); in nvkm_vmm_pfn_unmap()
1192 vma = next; in nvkm_vmm_pfn_unmap()
1193 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vmm_pfn_unmap()
1194 vma->mapped = false; in nvkm_vmm_pfn_unmap()
1196 } while ((vma = node(vma, next)) && (start = vma->addr) < limit); in nvkm_vmm_pfn_unmap()
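nvkm_vmm_pfn_unmap() walks consecutive regions overlapping [addr, limit), clipping the work on each region to the overlapping part and stopping once the next region starts at or beyond the limit. A loop sketch of that walk over the struct region stand-in; it assumes, as the kernel has already established via the search above, that start initially falls inside the first region:

static void range_walk(struct region *r, uint64_t start, uint64_t limit)
{
        do {
                uint64_t left  = limit - start;                 /* remaining range */
                uint64_t avail = r->size - (start - r->addr);   /* rest of this region */
                uint64_t size  = left < avail ? left : avail;

                /* ... unmap/process [start, start + size) within r ... */
                (void)size;

                r = r->next;
        } while (r && (start = r->addr) < limit);
}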
1210 struct nvkm_vma *vma, *tmp; in nvkm_vmm_pfn_map() local
1231 if (!(vma = nvkm_vmm_node_search(vmm, addr))) in nvkm_vmm_pfn_map()
1236 bool mapped = vma->mapped; in nvkm_vmm_pfn_map()
1249 size = min_t(u64, size, vma->size + vma->addr - addr); in nvkm_vmm_pfn_map()
1254 if (!vma->mapref || vma->memory) { in nvkm_vmm_pfn_map()
1271 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size, in nvkm_vmm_pfn_map()
1283 vma = tmp; in nvkm_vmm_pfn_map()
1309 if (vma->addr + vma->size == addr + size) in nvkm_vmm_pfn_map()
1310 vma = node(vma, next); in nvkm_vmm_pfn_map()
1324 } while (vma && start < limit); in nvkm_vmm_pfn_map()
1330 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap_region() argument
1335 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_unmap_region()
1336 nvkm_memory_unref(&vma->memory); in nvkm_vmm_unmap_region()
1337 vma->mapped = false; in nvkm_vmm_unmap_region()
1339 if (vma->part && (prev = node(vma, prev)) && prev->mapped) in nvkm_vmm_unmap_region()
1341 if ((next = node(vma, next)) && (!next->part || next->mapped)) in nvkm_vmm_unmap_region()
1343 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size); in nvkm_vmm_unmap_region()
1347 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn) in nvkm_vmm_unmap_locked() argument
1349 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd]; in nvkm_vmm_unmap_locked()
1351 if (vma->mapref) { in nvkm_vmm_unmap_locked()
1352 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1353 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vmm_unmap_locked()
1355 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1358 nvkm_vmm_unmap_region(vmm, vma); in nvkm_vmm_unmap_locked()
1362 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap() argument
1364 if (vma->memory) { in nvkm_vmm_unmap()
1366 nvkm_vmm_unmap_locked(vmm, vma, false); in nvkm_vmm_unmap()
1372 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_valid() argument
1394 if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) || in nvkm_vmm_map_valid()
1395 !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) || in nvkm_vmm_map_valid()
1399 vma->addr, (u64)vma->size, map->offset, map->page->shift, in nvkm_vmm_map_valid()
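nvkm_vmm_map_valid() rejects a mapping unless the region's address and size (and, in the full function, the backing-memory offset) are aligned to the page size implied by the chosen page shift. A small standalone example of that power-of-two alignment test; the ALIGNED macro mirrors the kernel's IS_ALIGNED and the values are made up:

#include <stdint.h>
#include <stdio.h>

#define ALIGNED(x, a)   (((x) & ((a) - 1)) == 0)        /* mirrors IS_ALIGNED() */

int main(void)
{
        uint64_t addr  = 0x10004000, size = 0x6000;     /* made-up values */
        unsigned shift = 14;                            /* hypothetical shift: 16 KiB pages */
        uint64_t page  = 1ULL << shift;

        printf("addr aligned: %d, size aligned: %d\n",
               (int)ALIGNED(addr, page), (int)ALIGNED(size, page));
        return 0;
}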
1408 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_choose() argument
1413 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map)) in nvkm_vmm_map_choose()
1420 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_locked() argument
1427 if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) { in nvkm_vmm_map_locked()
1430 map->offset, (u64)vma->size); in nvkm_vmm_map_locked()
1435 if (vma->page == NVKM_VMA_PAGE_NONE && in nvkm_vmm_map_locked()
1436 vma->refd == NVKM_VMA_PAGE_NONE) { in nvkm_vmm_map_locked()
1440 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1444 nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1449 if (vma->refd != NVKM_VMA_PAGE_NONE) in nvkm_vmm_map_locked()
1450 map->page = &vmm->func->page[vma->refd]; in nvkm_vmm_map_locked()
1452 map->page = &vmm->func->page[vma->page]; in nvkm_vmm_map_locked()
1454 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1487 if (vma->refd == NVKM_VMA_PAGE_NONE) { in nvkm_vmm_map_locked()
1488 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1492 vma->refd = map->page - vmm->func->page; in nvkm_vmm_map_locked()
1494 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1497 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_map_locked()
1498 nvkm_memory_unref(&vma->memory); in nvkm_vmm_map_locked()
1499 vma->memory = nvkm_memory_ref(map->memory); in nvkm_vmm_map_locked()
1500 vma->mapped = true; in nvkm_vmm_map_locked()
1501 vma->tags = map->tags; in nvkm_vmm_map_locked()
1506 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, in nvkm_vmm_map() argument
1511 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map); in nvkm_vmm_map()
1512 vma->busy = false; in nvkm_vmm_map()
1518 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_region() argument
1522 if ((prev = node(vma, prev)) && !prev->used) { in nvkm_vmm_put_region()
1523 vma->addr = prev->addr; in nvkm_vmm_put_region()
1524 vma->size += prev->size; in nvkm_vmm_put_region()
1528 if ((next = node(vma, next)) && !next->used) { in nvkm_vmm_put_region()
1529 vma->size += next->size; in nvkm_vmm_put_region()
1533 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_put_region()
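nvkm_vmm_put_region() hands a region back to the free tree, but first absorbs any unused neighbour on either side so free space stays coalesced into the largest possible nodes. A coalescing sketch with an illustrative struct; the free-tree and list bookkeeping done by the real code is reduced to comments:

#include <stdint.h>
#include <stdbool.h>

struct free_region {
        uint64_t addr;
        uint64_t size;
        bool used;
};

static void region_coalesce(struct free_region *prev, struct free_region *r,
                            struct free_region *next)
{
        if (prev && !prev->used) {
                r->addr = prev->addr;   /* grow downward over the free neighbour */
                r->size += prev->size;  /* real code also drops prev from tree and list */
        }
        if (next && !next->used) {
                r->size += next->size;  /* grow upward; real code drops next likewise */
        }
}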
1537 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_locked() argument
1540 struct nvkm_vma *next = vma; in nvkm_vmm_put_locked()
1542 BUG_ON(vma->part); in nvkm_vmm_put_locked()
1544 if (vma->mapref || !vma->sparse) { in nvkm_vmm_put_locked()
1565 size, vma->sparse, in nvkm_vmm_put_locked()
1579 next = vma; in nvkm_vmm_put_locked()
1583 } while ((next = node(vma, next)) && next->part); in nvkm_vmm_put_locked()
1585 if (vma->sparse && !vma->mapref) { in nvkm_vmm_put_locked()
1594 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size); in nvkm_vmm_put_locked()
1596 if (vma->sparse) { in nvkm_vmm_put_locked()
1605 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false); in nvkm_vmm_put_locked()
1609 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_put_locked()
1612 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vmm_put_locked()
1613 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vmm_put_locked()
1614 vma->used = false; in nvkm_vmm_put_locked()
1615 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_put_locked()
1621 struct nvkm_vma *vma = *pvma; in nvkm_vmm_put() local
1622 if (vma) { in nvkm_vmm_put()
1624 nvkm_vmm_put_locked(vmm, vma); in nvkm_vmm_put()
1636 struct nvkm_vma *vma = NULL, *tmp; in nvkm_vmm_get_locked() local
1716 vma = this; in nvkm_vmm_get_locked()
1721 if (unlikely(!vma)) in nvkm_vmm_get_locked()
1727 if (addr != vma->addr) { in nvkm_vmm_get_locked()
1728 if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) { in nvkm_vmm_get_locked()
1729 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1732 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_get_locked()
1733 vma = tmp; in nvkm_vmm_get_locked()
1736 if (size != vma->size) { in nvkm_vmm_get_locked()
1737 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) { in nvkm_vmm_get_locked()
1738 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1746 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1748 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true); in nvkm_vmm_get_locked()
1750 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1754 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1758 vma->mapref = mapref && !getref; in nvkm_vmm_get_locked()
1759 vma->sparse = sparse; in nvkm_vmm_get_locked()
1760 vma->page = page - vmm->func->page; in nvkm_vmm_get_locked()
1761 vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE; in nvkm_vmm_get_locked()
1762 vma->used = true; in nvkm_vmm_get_locked()
1763 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_get_locked()
1764 *pvma = vma; in nvkm_vmm_get_locked()