Lines matching refs: chunk (drivers/gpu/drm/nouveau/nouveau_dmem.c)

96 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page); in page_to_drm() local
98 return chunk->drm; in page_to_drm()
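
The two matches above (lines 96 and 98) are the whole body of the page_to_drm() helper. How a struct page is translated back to its owning chunk is not visible in the matches, but it almost certainly relies on the chunk embedding its dev_pagemap so that container_of() can walk back from the page's pgmap pointer; a minimal sketch under that assumption:

static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
	/* Assumption: struct nouveau_dmem_chunk embeds its dev_pagemap,
	 * so the page's pgmap pointer leads straight back to the chunk. */
	return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

	return chunk->drm;
}
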
103 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page); in nouveau_dmem_page_addr() local
105 chunk->pagemap.range.start; in nouveau_dmem_page_addr()
107 return chunk->bo->offset + off; in nouveau_dmem_page_addr()
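
Lines 103 to 107 compute the VRAM address of a device-private page: the page's byte offset inside the chunk's pagemap range (line 105 is the tail of that subtraction) is added to the offset of the pinned buffer object backing the chunk. A sketch of how the full statement likely reads, reconstructed around the matches:

unsigned long nouveau_dmem_page_addr(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	/* Byte offset of this page within the chunk's physical range. */
	unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
			    chunk->pagemap.range.start;

	/* VRAM address = start of the backing BO + offset in the chunk. */
	return chunk->bo->offset + off;
}
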
112 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page); in nouveau_dmem_page_free() local
113 struct nouveau_dmem *dmem = chunk->drm->dmem; in nouveau_dmem_page_free()
119 WARN_ON(!chunk->callocated); in nouveau_dmem_page_free()
120 chunk->callocated--; in nouveau_dmem_page_free()
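
Lines 112 to 120 are the dev_pagemap page_free() callback. The matches show the chunk's allocation count being dropped, guarded by a WARN_ON() against underflow; the rest of the function is sketched below, where the dmem->free_pages list, the dmem->lock spinlock and the zone_device_data linkage are assumptions about fields not shown in the matches:

static void nouveau_dmem_page_free(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	struct nouveau_dmem *dmem = chunk->drm->dmem;

	spin_lock(&dmem->lock);
	/* Push the page onto a driver-private free list (assumed fields). */
	page->zone_device_data = dmem->free_pages;
	dmem->free_pages = page;

	/* Drop the chunk's count of allocated pages. */
	WARN_ON(!chunk->callocated);
	chunk->callocated--;
	spin_unlock(&dmem->lock);
}
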
229 struct nouveau_dmem_chunk *chunk; in nouveau_dmem_chunk_alloc() local
236 chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); in nouveau_dmem_chunk_alloc()
237 if (chunk == NULL) { in nouveau_dmem_chunk_alloc()
250 chunk->drm = drm; in nouveau_dmem_chunk_alloc()
251 chunk->pagemap.type = MEMORY_DEVICE_PRIVATE; in nouveau_dmem_chunk_alloc()
252 chunk->pagemap.range.start = res->start; in nouveau_dmem_chunk_alloc()
253 chunk->pagemap.range.end = res->end; in nouveau_dmem_chunk_alloc()
254 chunk->pagemap.nr_range = 1; in nouveau_dmem_chunk_alloc()
255 chunk->pagemap.ops = &nouveau_dmem_pagemap_ops; in nouveau_dmem_chunk_alloc()
256 chunk->pagemap.owner = drm->dev; in nouveau_dmem_chunk_alloc()
260 &chunk->bo); in nouveau_dmem_chunk_alloc()
264 ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); in nouveau_dmem_chunk_alloc()
268 ptr = memremap_pages(&chunk->pagemap, numa_node_id()); in nouveau_dmem_chunk_alloc()
275 list_add(&chunk->list, &drm->dmem->chunks); in nouveau_dmem_chunk_alloc()
278 pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT; in nouveau_dmem_chunk_alloc()
286 chunk->callocated++; in nouveau_dmem_chunk_alloc()
295 nouveau_bo_unpin(chunk->bo); in nouveau_dmem_chunk_alloc()
297 nouveau_bo_ref(NULL, &chunk->bo); in nouveau_dmem_chunk_alloc()
299 release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range)); in nouveau_dmem_chunk_alloc()
301 kfree(chunk); in nouveau_dmem_chunk_alloc()
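
Lines 229 to 301 trace the chunk allocator. The matches already give the backbone: allocate the chunk, describe a freshly reserved physical range as a MEMORY_DEVICE_PRIVATE dev_pagemap, back it with a pinned VRAM buffer object, hand the range to memremap_pages(), then publish the chunk on drm->dmem->chunks and hand out its first page. A condensed sketch of that flow follows; the chunk-size constants, the nouveau_bo_new() arguments, the free-list handling and the locking are assumptions filled in around the matching lines:

/* Assumed file-local constants: DMEM_CHUNK_SIZE (bytes per chunk) and
 * DMEM_CHUNK_NPAGES (pages per chunk). */
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
	struct nouveau_dmem_chunk *chunk;
	struct resource *res;
	struct page *page;
	unsigned long i, pfn_first;
	void *ptr;
	int ret;

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL)
		return -ENOMEM;

	/* Reserve a physical address range to stand in for the VRAM pages. */
	res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
				      "nouveau_dmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out_free;
	}

	chunk->drm = drm;
	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
	chunk->pagemap.range.start = res->start;
	chunk->pagemap.range.end = res->end;
	chunk->pagemap.nr_range = 1;
	chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
	chunk->pagemap.owner = drm->dev;

	/* Create and pin the VRAM buffer object that backs the chunk
	 * (the nouveau_bo_new() arguments here are an assumption). */
	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
			     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
			     &chunk->bo);
	if (ret)
		goto out_release;

	ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
	if (ret)
		goto out_bo_free;

	/* Create device-private struct pages covering the range. */
	ptr = memremap_pages(&chunk->pagemap, numa_node_id());
	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		goto out_bo_unpin;
	}

	mutex_lock(&drm->dmem->mutex);
	list_add(&chunk->list, &drm->dmem->chunks);
	mutex_unlock(&drm->dmem->mutex);

	/* Chain all but the last page onto the free list, hand the last
	 * one to the caller and count it as allocated. */
	pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
	page = pfn_to_page(pfn_first);
	spin_lock(&drm->dmem->lock);
	for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
		page->zone_device_data = drm->dmem->free_pages;
		drm->dmem->free_pages = page;
	}
	*ppage = page;
	chunk->callocated++;
	spin_unlock(&drm->dmem->lock);

	return 0;

out_bo_unpin:
	nouveau_bo_unpin(chunk->bo);
out_bo_free:
	nouveau_bo_ref(NULL, &chunk->bo);
out_release:
	release_mem_region(chunk->pagemap.range.start,
			   range_len(&chunk->pagemap.range));
out_free:
	kfree(chunk);
	return ret;
}
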
309 struct nouveau_dmem_chunk *chunk; in nouveau_dmem_page_alloc_locked() local
317 chunk = nouveau_page_to_chunk(page); in nouveau_dmem_page_alloc_locked()
318 chunk->callocated++; in nouveau_dmem_page_alloc_locked()
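
Lines 309 to 318 are the fast path of the page allocator: if a previously freed page is available it is popped off the free list and its chunk's callocated count is bumped; otherwise a whole new chunk is allocated and its first page returned. A sketch, where the free list, the spinlock and the final page initialisation are assumptions:

static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	struct page *page = NULL;
	int ret;

	spin_lock(&drm->dmem->lock);
	if (drm->dmem->free_pages) {
		/* Reuse a page that nouveau_dmem_page_free() put back. */
		page = drm->dmem->free_pages;
		drm->dmem->free_pages = page->zone_device_data;
		chunk = nouveau_page_to_chunk(page);
		chunk->callocated++;
		spin_unlock(&drm->dmem->lock);
	} else {
		/* No free pages left: carve out a fresh VRAM chunk. */
		spin_unlock(&drm->dmem->lock);
		ret = nouveau_dmem_chunk_alloc(drm, &page);
		if (ret)
			return NULL;
	}

	/* Re-initialise the device-private page before handing it out
	 * (the zone_device_page_init() call is an assumption here). */
	zone_device_page_init(page);
	return page;
}
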
341 struct nouveau_dmem_chunk *chunk; in nouveau_dmem_resume() local
348 list_for_each_entry(chunk, &drm->dmem->chunks, list) { in nouveau_dmem_resume()
349 ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); in nouveau_dmem_resume()
359 struct nouveau_dmem_chunk *chunk; in nouveau_dmem_suspend() local
365 list_for_each_entry(chunk, &drm->dmem->chunks, list) in nouveau_dmem_suspend()
366 nouveau_bo_unpin(chunk->bo); in nouveau_dmem_suspend()
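
Lines 341 to 366 cover power management. Suspend and resume only have to deal with the VRAM backing objects: every chunk's buffer object is unpinned on suspend and re-pinned on resume while walking drm->dmem->chunks. A sketch of the pair, where the surrounding mutex and the NULL checks are assumptions not visible in the matches:

void nouveau_dmem_suspend(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list)
		nouveau_bo_unpin(chunk->bo);
	mutex_unlock(&drm->dmem->mutex);
}

void nouveau_dmem_resume(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
		ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
		/* If re-pinning fails there is little to be done here. */
		WARN_ON(ret);
	}
	mutex_unlock(&drm->dmem->mutex);
}
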
374 nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk) in nouveau_dmem_evict_chunk() argument
376 unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT; in nouveau_dmem_evict_chunk()
385 migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT, in nouveau_dmem_evict_chunk()
399 nouveau_dmem_copy_one(chunk->drm, in nouveau_dmem_evict_chunk()
405 nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan); in nouveau_dmem_evict_chunk()
412 dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); in nouveau_dmem_evict_chunk()
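
Lines 374 to 412 are the eviction path used at teardown: every still-resident page of the chunk is migrated back to system RAM. migrate_device_range() collects the device pages for the chunk's pfn range, each migratable page gets a freshly allocated system page and a DMA copy via nouveau_dmem_copy_one(), a fence flushes the copies, and the migration is finalized before the DMA mappings are released. A condensed sketch; the pfn/dma array allocations, the MIGRATE_PFN_MIGRATE filtering and the nouveau_dmem_fence_done() helper are assumptions around the matching lines:

static void
nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
{
	unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
	unsigned long *src_pfns, *dst_pfns;
	dma_addr_t *dma_addrs;
	struct nouveau_fence *fence;

	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
	dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);

	/* Isolate every device page of this chunk for migration. */
	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
			     npages);

	for (i = 0; i < npages; i++) {
		if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
			struct page *dpage;

			/* Copy the VRAM page back into a system page. */
			dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
			dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
			nouveau_dmem_copy_one(chunk->drm,
					      migrate_pfn_to_page(src_pfns[i]),
					      dpage, &dma_addrs[i]);
		}
	}

	/* Wait for the copy engine, then complete the migration. */
	nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
	migrate_device_pages(src_pfns, dst_pfns, npages);
	nouveau_dmem_fence_done(&fence);
	migrate_device_finalize(src_pfns, dst_pfns, npages);

	for (i = 0; i < npages; i++)
		if (dma_addrs[i])
			dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i],
				       PAGE_SIZE, DMA_BIDIRECTIONAL);

	kvfree(src_pfns);
	kvfree(dst_pfns);
	kvfree(dma_addrs);
}
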
419 struct nouveau_dmem_chunk *chunk, *tmp; in nouveau_dmem_fini() local
426 list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) { in nouveau_dmem_fini()
427 nouveau_dmem_evict_chunk(chunk); in nouveau_dmem_fini()
428 nouveau_bo_unpin(chunk->bo); in nouveau_dmem_fini()
429 nouveau_bo_ref(NULL, &chunk->bo); in nouveau_dmem_fini()
430 WARN_ON(chunk->callocated); in nouveau_dmem_fini()
431 list_del(&chunk->list); in nouveau_dmem_fini()
432 memunmap_pages(&chunk->pagemap); in nouveau_dmem_fini()
433 release_mem_region(chunk->pagemap.range.start, in nouveau_dmem_fini()
434 range_len(&chunk->pagemap.range)); in nouveau_dmem_fini()
435 kfree(chunk); in nouveau_dmem_fini()
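
Lines 419 to 435 are the teardown path and mirror the allocator in reverse: every chunk is first evicted back to system memory, its backing buffer object unpinned and dropped, its device-private pages torn down with memunmap_pages(), and the reserved physical range released before the chunk itself is freed. A sketch of the loop as it likely reads in full, with the surrounding mutex and the NULL check being assumptions:

void nouveau_dmem_fini(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk, *tmp;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
		nouveau_dmem_evict_chunk(chunk);
		nouveau_bo_unpin(chunk->bo);
		nouveau_bo_ref(NULL, &chunk->bo);
		WARN_ON(chunk->callocated);
		list_del(&chunk->list);
		memunmap_pages(&chunk->pagemap);
		release_mem_region(chunk->pagemap.range.start,
				   range_len(&chunk->pagemap.range));
		kfree(chunk);
	}
	mutex_unlock(&drm->dmem->mutex);
}
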