Lines Matching refs:bo

(Identifier cross-reference output: each hit shows the source line number, the matching line, and the enclosing function. The functions below all appear to come from the TTM memory manager's VM code, drivers/gpu/drm/ttm/ttm_bo_vm.c, in a pre-3.12 kernel where buffer-object address-space offsets still lived in an rb-tree of vm_node ranges.)

49 struct ttm_buffer_object *bo; in ttm_bo_vm_lookup_rb() local
53 bo = rb_entry(cur, struct ttm_buffer_object, vm_rb); in ttm_bo_vm_lookup_rb()
54 cur_offset = bo->vm_node->start; in ttm_bo_vm_lookup_rb()
57 best_bo = bo; in ttm_bo_vm_lookup_rb()
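
The ttm_bo_vm_lookup_rb() hits are the core of an rb-tree walk over the device's address space: each node's vm_node->start is compared against the requested page offset, remembering the best (rightmost not-greater) candidate. A hedged reconstruction of the whole helper follows; the tree field name (bdev->addr_space_rb) and the final bounds check are assumptions filled in around the matched lines.

	static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
							     unsigned long page_start,
							     unsigned long num_pages)
	{
		struct rb_node *cur = bdev->addr_space_rb.rb_node; /* assumed field */
		unsigned long cur_offset;
		struct ttm_buffer_object *bo;
		struct ttm_buffer_object *best_bo = NULL;

		while (likely(cur != NULL)) {
			bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
			cur_offset = bo->vm_node->start;
			if (page_start >= cur_offset) {
				/* Keep the closest node starting at or before us. */
				cur = cur->rb_right;
				best_bo = bo;
				if (page_start == cur_offset)
					break;
			} else
				cur = cur->rb_left;
		}

		if (unlikely(best_bo == NULL))
			return NULL;

		/* The requested range must fit inside the BO's VM window. */
		if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
			     (page_start + num_pages)))
			return NULL;

		return best_bo;
	}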
76 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) in ttm_bo_vm_fault() local
78 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_vm_fault()
89 &bdev->man[bo->mem.mem_type]; in ttm_bo_vm_fault()
97 ret = ttm_bo_reserve(bo, true, true, false, 0); in ttm_bo_vm_fault()
105 ret = bdev->driver->fault_reserve_notify(bo); in ttm_bo_vm_fault()
126 if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { in ttm_bo_vm_fault()
127 ret = ttm_bo_wait(bo, false, true, false); in ttm_bo_vm_fault()
142 ret = ttm_mem_io_reserve_vm(bo); in ttm_bo_vm_fault()
149 bo->vm_node->start - vma->vm_pgoff; in ttm_bo_vm_fault()
151 bo->vm_node->start - vma->vm_pgoff; in ttm_bo_vm_fault()
153 if (unlikely(page_offset >= bo->num_pages)) { in ttm_bo_vm_fault()
171 if (bo->mem.bus.is_iomem) { in ttm_bo_vm_fault()
172 vma->vm_page_prot = ttm_io_prot(bo->mem.placement, in ttm_bo_vm_fault()
175 ttm = bo->ttm; in ttm_bo_vm_fault()
176 vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ? in ttm_bo_vm_fault()
178 ttm_io_prot(bo->mem.placement, vma->vm_page_prot); in ttm_bo_vm_fault()
192 if (bo->mem.bus.is_iomem) in ttm_bo_vm_fault()
193 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; in ttm_bo_vm_fault()
226 ttm_bo_unreserve(bo); in ttm_bo_vm_fault()
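
Taken together, the ttm_bo_vm_fault() hits trace the fault path: reserve the BO (line 97), let the driver react via fault_reserve_notify() (105), wait out a pending move (126-127), reserve the I/O region (142), translate the faulting address into a page offset inside the BO (149-153), pick a page protection for iomem versus cached system memory (171-178), then insert pfns before unreserving (226). A hedged sketch of the insertion tail follows; TTM_BO_VM_NUM_PREFAULT, ttm_tt_get_page(), and the error mapping match that era's kernel but are assumptions relative to this listing.

	/*
	 * Sketch of the fault-handler tail, assuming page_offset and
	 * page_last were computed as in the hits above and `address`
	 * starts at the faulting user address.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >>
			       PAGE_SHIFT) + page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page)) {
				retval = VM_FAULT_OOM;
				goto out_unlock;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * -EBUSY means another thread raced us and already
		 * installed the pte; that is fine.
		 */
		if (unlikely((ret != 0) && (ret != -EBUSY))) {
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	out_unlock:
	ttm_bo_unreserve(bo);
	return retval;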
232 struct ttm_buffer_object *bo = in ttm_bo_vm_open() local
235 (void)ttm_bo_reference(bo); in ttm_bo_vm_open()
240 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; in ttm_bo_vm_close() local
242 ttm_bo_unref(&bo); in ttm_bo_vm_close()
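
ttm_bo_vm_open() and ttm_bo_vm_close() are the usual vm_operations_struct pair keeping a BO reference alive for the lifetime of each mapping. A hedged sketch, reconstructed around the matched lines:

	static void ttm_bo_vm_open(struct vm_area_struct *vma)
	{
		struct ttm_buffer_object *bo =
		    (struct ttm_buffer_object *)vma->vm_private_data;

		/* Each VMA holds its own reference on the buffer object. */
		(void)ttm_bo_reference(bo);
	}

	static void ttm_bo_vm_close(struct vm_area_struct *vma)
	{
		struct ttm_buffer_object *bo =
		    (struct ttm_buffer_object *)vma->vm_private_data;

		/* Drop the VMA's reference; ttm_bo_unref() NULLs the pointer. */
		ttm_bo_unref(&bo);
		vma->vm_private_data = NULL;
	}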
256 struct ttm_buffer_object *bo; in ttm_bo_mmap() local
260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, in ttm_bo_mmap()
262 if (likely(bo != NULL)) in ttm_bo_mmap()
263 ttm_bo_reference(bo); in ttm_bo_mmap()
266 if (unlikely(bo == NULL)) { in ttm_bo_mmap()
271 driver = bo->bdev->driver; in ttm_bo_mmap()
276 ret = driver->verify_access(bo, filp); in ttm_bo_mmap()
287 vma->vm_private_data = bo; in ttm_bo_mmap()
291 ttm_bo_unref(&bo); in ttm_bo_mmap()
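
The ttm_bo_mmap() hits outline the mmap entry point: look the BO up by vma->vm_pgoff under the device's VM lock, take a reference, let the driver gate access via verify_access(), then hand the reference to the VMA. A hedged sketch; the lock name (bdev->vm_lock), the TTM_PFX message, and the exact vm_flags set are assumptions around the matched lines.

	int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
			struct ttm_bo_device *bdev)
	{
		struct ttm_bo_driver *driver;
		struct ttm_buffer_object *bo;
		int ret;

		read_lock(&bdev->vm_lock);
		bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
					 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
		if (likely(bo != NULL))
			ttm_bo_reference(bo);
		read_unlock(&bdev->vm_lock);

		if (unlikely(bo == NULL)) {
			printk(KERN_ERR TTM_PFX "Could not find buffer object to map.\n");
			return -EINVAL;
		}

		driver = bo->bdev->driver;
		if (unlikely(!driver->verify_access)) {
			ret = -EPERM;
			goto out_unref;
		}
		ret = driver->verify_access(bo, filp);
		if (unlikely(ret != 0))
			goto out_unref;

		vma->vm_ops = &ttm_bo_vm_ops;
		/* The looked-up reference is transferred to the VMA here. */
		vma->vm_private_data = bo;
		vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
		return 0;
	out_unref:
		ttm_bo_unref(&bo);
		return ret;
	}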
296 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) in ttm_fbdev_mmap() argument
302 vma->vm_private_data = ttm_bo_reference(bo); in ttm_fbdev_mmap()
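
ttm_fbdev_mmap() is the same setup minus the lookup: fbdev already knows which BO backs the framebuffer, so the caller passes it in directly. A hedged sketch; the vm_pgoff check and flag set are assumptions consistent with ttm_bo_mmap() above.

	int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
	{
		/* fbdev maps a known BO directly; no offset lookup needed. */
		if (vma->vm_pgoff != 0)
			return -EACCES;

		vma->vm_ops = &ttm_bo_vm_ops;
		vma->vm_private_data = ttm_bo_reference(bo);
		vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
		return 0;
	}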
313 struct ttm_buffer_object *bo; in ttm_bo_io() local
328 bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1); in ttm_bo_io()
329 if (likely(bo != NULL)) in ttm_bo_io()
330 ttm_bo_reference(bo); in ttm_bo_io()
333 if (unlikely(bo == NULL)) in ttm_bo_io()
336 driver = bo->bdev->driver; in ttm_bo_io()
342 ret = driver->verify_access(bo, filp); in ttm_bo_io()
346 kmap_offset = dev_offset - bo->vm_node->start; in ttm_bo_io()
347 if (unlikely(kmap_offset >= bo->num_pages)) { in ttm_bo_io()
353 io_size = bo->num_pages - kmap_offset; in ttm_bo_io()
361 ret = ttm_bo_reserve(bo, true, no_wait, false, 0); in ttm_bo_io()
373 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); in ttm_bo_io()
375 ttm_bo_unreserve(bo); in ttm_bo_io()
388 ttm_bo_unreserve(bo); in ttm_bo_io()
389 ttm_bo_unref(&bo); in ttm_bo_io()
398 ttm_bo_unref(&bo); in ttm_bo_io()
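
ttm_bo_io() follows the same lookup/verify pattern as ttm_bo_mmap(), then clamps the request to the BO (lines 346-353), reserves it (361), and bounces the data through a kernel mapping (373). A hedged sketch of the copy core, assuming a `write` flag, user buffers wbuf/rbuf, a byte count `count`, and a file position *f_pos as in a classic read/write helper; the -EFAULT mapping is the conventional choice, not confirmed by the hits.

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	/* Pages needed to cover [page_offset, page_offset + io_size). */
	kmap_num = ((page_offset + io_size - 1) >> PAGE_SHIFT) + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		goto out_unref;

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFAULT;

	*f_pos += io_size;
	return io_size;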
402 ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, in ttm_bo_fbdev_io() argument
418 if (unlikely(kmap_offset >= bo->num_pages)) in ttm_bo_fbdev_io()
422 io_size = bo->num_pages - kmap_offset; in ttm_bo_fbdev_io()
430 ret = ttm_bo_reserve(bo, true, no_wait, false, 0); in ttm_bo_fbdev_io()
441 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); in ttm_bo_fbdev_io()
443 ttm_bo_unreserve(bo); in ttm_bo_fbdev_io()
456 ttm_bo_unreserve(bo); in ttm_bo_fbdev_io()
457 ttm_bo_unref(&bo); in ttm_bo_fbdev_io()
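
ttm_bo_fbdev_io() is the fbdev twin: the BO is an argument rather than the result of an address-space lookup, so the offset math is BO-relative. A hedged sketch of that difference; after this prologue the reserve/kmap/copy/unreserve sequence matches ttm_bo_io() above, per the hits on lines 430-457.

	/* fbdev variant: the caller supplies the BO, so the offset is
	 * BO-relative and no address-space lookup is needed. */
	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;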