Lines matching references to 'pd', the struct psb_mmu_pd page directory in the GMA500 PSB MMU code (drivers/gpu/drm/gma500/mmu.c), listed with their source line numbers and enclosing functions:
120 void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) in psb_mmu_set_pd_context() argument
122 struct drm_device *dev = pd->driver->dev; in psb_mmu_set_pd_context()
127 down_write(&pd->driver->sem); in psb_mmu_set_pd_context()
128 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset); in psb_mmu_set_pd_context()
130 psb_mmu_flush_pd_locked(pd->driver, 1); in psb_mmu_set_pd_context()
131 pd->hw_context = hw_context; in psb_mmu_set_pd_context()
132 up_write(&pd->driver->sem); in psb_mmu_set_pd_context()
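
Taken together, the psb_mmu_set_pd_context() references show how a page directory is bound to a hardware context: with the driver semaphore held in write mode, the physical address of the PD page is written into the BIF directory-list base register for that context, the directory is flushed, and the context id is recorded in the PD. A minimal sketch reconstructed from the lines above; the register-offset arithmetic for contexts other than 0 and the wmb() barrier are assumptions not visible in this listing:

void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
        struct drm_device *dev = pd->driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;    /* used by PSB_WSGX32() */
        /* Assumption: context 0 uses BASE0; later contexts are spaced
         * out from BASE1. */
        uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
                          PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 0x10;

        down_write(&pd->driver->sem);
        /* Hand the PD's physical page to the MMU. */
        PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
        wmb();
        psb_mmu_flush_pd_locked(pd->driver, 1);
        pd->hw_context = hw_context;
        up_write(&pd->driver->sem);
}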
160 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); in psb_mmu_alloc_pd() local
164 if (!pd) in psb_mmu_alloc_pd()
167 pd->p = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
168 if (!pd->p) in psb_mmu_alloc_pd()
170 pd->dummy_pt = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
171 if (!pd->dummy_pt) in psb_mmu_alloc_pd()
173 pd->dummy_page = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
174 if (!pd->dummy_page) in psb_mmu_alloc_pd()
178 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), in psb_mmu_alloc_pd()
180 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), in psb_mmu_alloc_pd()
183 pd->invalid_pde = 0; in psb_mmu_alloc_pd()
184 pd->invalid_pte = 0; in psb_mmu_alloc_pd()
187 v = kmap_local_page(pd->dummy_pt); in psb_mmu_alloc_pd()
189 v[i] = pd->invalid_pte; in psb_mmu_alloc_pd()
193 v = kmap_local_page(pd->p); in psb_mmu_alloc_pd()
195 v[i] = pd->invalid_pde; in psb_mmu_alloc_pd()
199 clear_page(kmap(pd->dummy_page)); in psb_mmu_alloc_pd()
200 kunmap(pd->dummy_page); in psb_mmu_alloc_pd()
202 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); in psb_mmu_alloc_pd()
203 if (!pd->tables) in psb_mmu_alloc_pd()
206 pd->hw_context = -1; in psb_mmu_alloc_pd()
207 pd->pd_mask = PSB_PTE_VALID; in psb_mmu_alloc_pd()
208 pd->driver = driver; in psb_mmu_alloc_pd()
210 return pd; in psb_mmu_alloc_pd()
213 __free_page(pd->dummy_page); in psb_mmu_alloc_pd()
215 __free_page(pd->dummy_pt); in psb_mmu_alloc_pd()
217 __free_page(pd->p); in psb_mmu_alloc_pd()
219 kfree(pd); in psb_mmu_alloc_pd()
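
The psb_mmu_alloc_pd() references outline the whole allocation sequence. All three pages (the PD itself, a dummy page table, and a dummy data page) come from GFP_DMA32 because the SGX MMU only deals in 32-bit physical addresses, and when page faults are not trapped the "invalid" PDE/PTE values redirect unmapped addresses to the dummies instead of faulting. A sketch reconstructed from those lines; the trap_pagefaults and invalid_type parameters, the loop bounds, and the error-exit labels are assumptions:

struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                                    int trap_pagefaults, int invalid_type)
{
        struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        uint32_t *v;
        int i;

        if (!pd)
                return NULL;

        /* PD, dummy page table and dummy page must be DMA32-reachable. */
        pd->p = alloc_page(GFP_DMA32);
        if (!pd->p)
                goto out_err1;
        pd->dummy_pt = alloc_page(GFP_DMA32);
        if (!pd->dummy_pt)
                goto out_err2;
        pd->dummy_page = alloc_page(GFP_DMA32);
        if (!pd->dummy_page)
                goto out_err3;

        if (!trap_pagefaults) {
                /* Unmapped addresses resolve to the dummy pages. */
                pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
                                                   invalid_type);
                pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
                                                   invalid_type);
        } else {
                pd->invalid_pde = 0;
                pd->invalid_pte = 0;
        }

        /* Fill the dummy PT with invalid PTEs, the PD with invalid PDEs. */
        v = kmap_local_page(pd->dummy_pt);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pte;
        kunmap_local(v);

        v = kmap_local_page(pd->p);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pde;
        kunmap_local(v);

        clear_page(kmap(pd->dummy_page));
        kunmap(pd->dummy_page);

        /* One pointer per PDE: 1024 page tables cover the 4 GiB space. */
        pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
        if (!pd->tables)
                goto out_err4;

        pd->hw_context = -1;            /* not bound to hardware yet */
        pd->pd_mask = PSB_PTE_VALID;
        pd->driver = driver;
        return pd;

out_err4:
        __free_page(pd->dummy_page);
out_err3:
        __free_page(pd->dummy_pt);
out_err2:
        __free_page(pd->p);
out_err1:
        kfree(pd);
        return NULL;
}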
229 void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) in psb_mmu_free_pagedir() argument
231 struct psb_mmu_driver *driver = pd->driver; in psb_mmu_free_pagedir()
238 if (pd->hw_context != -1) { in psb_mmu_free_pagedir()
239 PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4); in psb_mmu_free_pagedir()
247 pt = pd->tables[i]; in psb_mmu_free_pagedir()
252 vfree(pd->tables); in psb_mmu_free_pagedir()
253 __free_page(pd->dummy_page); in psb_mmu_free_pagedir()
254 __free_page(pd->dummy_pt); in psb_mmu_free_pagedir()
255 __free_page(pd->p); in psb_mmu_free_pagedir()
256 kfree(pd); in psb_mmu_free_pagedir()
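
Teardown mirrors allocation: if the PD is live in a hardware context, its directory-list base register is zeroed first so the hardware stops walking it, then every remaining page table, the shadow table array, and the three pages are released. A sketch; the psb_mmu_free_pt() helper and the write-mode semaphore are assumptions consistent with the rest of the file:

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
        struct psb_mmu_driver *driver = pd->driver;
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct psb_mmu_pt *pt;
        int i;

        down_write(&driver->sem);
        /* Detach the PD from its hardware context before freeing it. */
        if (pd->hw_context != -1) {
                PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
                psb_mmu_flush_pd_locked(driver, 1);
        }

        /* Free any page tables that are still resident. */
        for (i = 0; i < 1024; ++i) {
                pt = pd->tables[i];
                if (pt)
                        psb_mmu_free_pt(pt);
        }

        vfree(pd->tables);
        __free_page(pd->dummy_page);
        __free_page(pd->dummy_pt);
        __free_page(pd->p);
        kfree(pd);
        up_write(&driver->sem);
}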
260 static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) in psb_mmu_alloc_pt() argument
264 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; in psb_mmu_alloc_pt()
266 spinlock_t *lock = &pd->driver->lock; in psb_mmu_alloc_pt()
286 *ptes++ = pd->invalid_pte; in psb_mmu_alloc_pt()
288 if (pd->driver->has_clflush && pd->hw_context != -1) { in psb_mmu_alloc_pt()
300 pt->pd = pd; in psb_mmu_alloc_pt()
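
A freshly allocated page table starts out filled with the PD's invalid PTE, and when the PD is already live in a hardware context on a CPU with clflush, the newly written lines are pushed out of the cache before the MMU can observe them. A sketch; the clflush_count derivation and the count/index initialisation are assumptions:

static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
        struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
        uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
        uint32_t clflush_count = PAGE_SIZE / clflush_add;
        spinlock_t *lock = &pd->driver->lock;
        uint8_t *clf;
        uint32_t *ptes;
        void *v;
        int i;

        if (!pt)
                return NULL;

        pt->p = alloc_page(GFP_DMA32);
        if (!pt->p) {
                kfree(pt);
                return NULL;
        }

        spin_lock(lock);

        v = kmap_atomic(pt->p);
        clf = (uint8_t *)v;
        ptes = (uint32_t *)v;
        /* Every slot starts out pointing at the dummy page (or 0). */
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                *ptes++ = pd->invalid_pte;

        if (pd->driver->has_clflush && pd->hw_context != -1) {
                /* Push the initialised PTEs out to memory for the MMU. */
                mb();
                for (i = 0; i < clflush_count; ++i) {
                        psb_clflush(clf);
                        clf += clflush_add;
                }
                mb();
        }
        kunmap_atomic(v);
        spin_unlock(lock);

        pt->count = 0;
        pt->pd = pd;
        pt->index = 0;
        return pt;
}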
306 static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, in psb_mmu_pt_alloc_map_lock() argument
312 spinlock_t *lock = &pd->driver->lock; in psb_mmu_pt_alloc_map_lock()
315 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
318 pt = psb_mmu_alloc_pt(pd); in psb_mmu_pt_alloc_map_lock()
323 if (pd->tables[index]) { in psb_mmu_pt_alloc_map_lock()
327 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
331 v = kmap_atomic(pd->p); in psb_mmu_pt_alloc_map_lock()
332 pd->tables[index] = pt; in psb_mmu_pt_alloc_map_lock()
333 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; in psb_mmu_pt_alloc_map_lock()
337 if (pd->hw_context != -1) { in psb_mmu_pt_alloc_map_lock()
338 psb_mmu_clflush(pd->driver, (void *)&v[index]); in psb_mmu_pt_alloc_map_lock()
339 atomic_set(&pd->driver->needs_tlbflush, 1); in psb_mmu_pt_alloc_map_lock()
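
psb_mmu_pt_alloc_map_lock() follows the classic optimistic-allocation pattern: drop the spinlock to allocate a table, retake it, and if another thread installed a table at the same index in the meantime, free ours and use theirs; otherwise hook the new table into the PD and, for a live hardware context, clflush the new PDE and request a TLB flush. On success it deliberately returns with the driver spinlock held and the table page kmapped; psb_mmu_pt_unmap_unlock() is the counterpart that releases both. A sketch, with the psb_mmu_pd_index() helper and the pt->v mapping on exit assumed:

static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                                                    unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        uint32_t *v;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        while (!pt) {
                spin_unlock(lock);
                pt = psb_mmu_alloc_pt(pd);      /* allocates with lock dropped */
                if (!pt)
                        return NULL;
                spin_lock(lock);

                if (pd->tables[index]) {
                        /* Lost the race: another thread installed a table. */
                        spin_unlock(lock);
                        psb_mmu_free_pt(pt);
                        spin_lock(lock);
                        pt = pd->tables[index];
                        continue;
                }

                v = kmap_atomic(pd->p);
                pd->tables[index] = pt;
                v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
                pt->index = index;
                kunmap_atomic((void *)v);

                if (pd->hw_context != -1) {
                        /* Make the new PDE visible to the MMU. */
                        psb_mmu_clflush(pd->driver, (void *)&v[index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
        }
        pt->v = kmap_atomic(pt->p);
        return pt;      /* returns with pd->driver->lock held */
}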
346 static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, in psb_mmu_pt_map_lock() argument
351 spinlock_t *lock = &pd->driver->lock; in psb_mmu_pt_map_lock()
354 pt = pd->tables[index]; in psb_mmu_pt_map_lock()
365 struct psb_mmu_pd *pd = pt->pd; in psb_mmu_pt_unmap_unlock() local
370 v = kmap_atomic(pd->p); in psb_mmu_pt_unmap_unlock()
371 v[pt->index] = pd->invalid_pde; in psb_mmu_pt_unmap_unlock()
372 pd->tables[pt->index] = NULL; in psb_mmu_pt_unmap_unlock()
374 if (pd->hw_context != -1) { in psb_mmu_pt_unmap_unlock()
375 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]); in psb_mmu_pt_unmap_unlock()
376 atomic_set(&pd->driver->needs_tlbflush, 1); in psb_mmu_pt_unmap_unlock()
379 spin_unlock(&pd->driver->lock); in psb_mmu_pt_unmap_unlock()
383 spin_unlock(&pd->driver->lock); in psb_mmu_pt_unmap_unlock()
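
psb_mmu_pt_map_lock() is the non-allocating counterpart (it simply returns NULL when no table exists), and psb_mmu_pt_unmap_unlock() undoes either call: it drops the kmap, and once the table's use count has fallen to zero it resets the PDE to invalid_pde, clears the shadow pointer, clflushes the PDE for live contexts, and frees the table. Sketches of both; the count field is maintained by the insert/remove paths further down:

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
                                              unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        if (!pt) {
                spin_unlock(lock);
                return NULL;
        }
        pt->v = kmap_atomic(pt->p);
        return pt;      /* lock stays held, like the alloc variant */
}

static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
        struct psb_mmu_pd *pd = pt->pd;
        uint32_t *v;

        kunmap_atomic(pt->v);
        if (pt->count == 0) {
                /* Last user gone: unhook and free the page table. */
                v = kmap_atomic(pd->p);
                v[pt->index] = pd->invalid_pde;
                pd->tables[pt->index] = NULL;

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
                kunmap_atomic(v);
                spin_unlock(&pd->driver->lock);
                psb_mmu_free_pt(pt);
                return;
        }
        spin_unlock(&pd->driver->lock);
}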
395 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; in psb_mmu_invalidate_pte()
400 struct psb_mmu_pd *pd; in psb_mmu_get_default_pd() local
403 pd = driver->default_pd; in psb_mmu_get_default_pd()
406 return pd; in psb_mmu_get_default_pd()
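
The two remaining references here are small helpers: invalidating a single PTE is one store of invalid_pte through the kmapped table, and fetching the default PD is a read under the semaphore. Sketches, assuming psb_mmu_pt_index() extracts the PTE index from the low address bits:

static void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt, unsigned long addr)
{
        pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
        struct psb_mmu_pd *pd;

        down_read(&driver->sem);
        pd = driver->default_pd;
        up_read(&driver->sem);

        return pd;
}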
477 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_flush_ptes() argument
489 unsigned long clflush_add = pd->driver->clflush_add; in psb_mmu_flush_ptes()
490 unsigned long clflush_mask = pd->driver->clflush_mask; in psb_mmu_flush_ptes()
492 if (!pd->driver->has_clflush) in psb_mmu_flush_ptes()
510 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_flush_ptes()
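
psb_mmu_flush_ptes() walks a just-modified PTE range and clflushes each touched cacheline so the non-coherent MMU reads current entries; on CPUs without clflush it returns immediately and the caller falls back to a full flush. A simplified sketch of the walk for the untiled case, with the tiled rows loop omitted (see psb_mmu_remove_pages() below for the stride handling) and psb_pd_addr_end() assumed to cap each chunk at a page-table boundary:

static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
                               uint32_t num_pages, uint32_t desired_tile_stride,
                               uint32_t hw_tile_stride)
{
        unsigned long clflush_add = pd->driver->clflush_add;
        unsigned long clflush_mask = pd->driver->clflush_mask;
        unsigned long addr = address;
        unsigned long end = addr + (num_pages << PAGE_SHIFT);
        unsigned long next;
        struct psb_mmu_pt *pt;

        if (!pd->driver->has_clflush)
                return;         /* caller does a full flush instead */

        mb();
        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_map_lock(pd, addr);
                if (!pt)
                        continue;
                do {    /* one clflush per dirty PTE cacheline */
                        psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
                } while (addr += clflush_add,
                         (addr & clflush_mask) < next);
                psb_mmu_pt_unmap_unlock(pt);
        } while (addr = next, next != end);
        mb();
}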
525 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, in psb_mmu_remove_pfn_sequence() argument
534 down_read(&pd->driver->sem); in psb_mmu_remove_pfn_sequence()
541 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_remove_pfn_sequence()
553 if (pd->hw_context != -1) in psb_mmu_remove_pfn_sequence()
554 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_remove_pfn_sequence()
556 up_read(&pd->driver->sem); in psb_mmu_remove_pfn_sequence()
558 if (pd->hw_context != -1) in psb_mmu_remove_pfn_sequence()
559 psb_mmu_flush(pd->driver); in psb_mmu_remove_pfn_sequence()
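
Removal walks the range one page table at a time: psb_pd_addr_end() caps each chunk at a PDE boundary, each PTE in the chunk is reset to invalid, and the table's use count drops (the table itself is freed by psb_mmu_pt_unmap_unlock() once the count hits zero). Note that this path uses the allocating map variant, so even a previously unmapped range ends up with real tables full of invalid PTEs. For a live hardware context the dirtied PTEs are clflushed while the semaphore is still held and the MMU TLB is flushed after it is dropped. A sketch following the referenced lines:

void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                 unsigned long address, uint32_t num_pages)
{
        struct psb_mmu_pt *pt;
        unsigned long addr = address;
        unsigned long end = addr + (num_pages << PAGE_SHIFT);
        unsigned long next;
        unsigned long f_address = address;

        down_read(&pd->driver->sem);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt)
                        goto out;
                do {
                        psb_mmu_invalidate_pte(pt, addr);
                        --pt->count;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);
}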
564 void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_remove_pages() argument
586 down_read(&pd->driver->sem); in psb_mmu_remove_pages()
597 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_remove_pages()
610 if (pd->hw_context != -1) in psb_mmu_remove_pages()
611 psb_mmu_flush_ptes(pd, f_address, num_pages, in psb_mmu_remove_pages()
614 up_read(&pd->driver->sem); in psb_mmu_remove_pages()
616 if (pd->hw_context != -1) in psb_mmu_remove_pages()
617 psb_mmu_flush(pd->driver); in psb_mmu_remove_pages()
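
psb_mmu_remove_pages() is the tiled generalisation of the same loop: with a non-zero hw_tile_stride the range is treated as rows of desired_tile_stride pages, each row starting hw_tile_stride pages after the previous one, matching how tiled surfaces are laid out in the GPU address space. A sketch; the stride bookkeeping is reconstructed on the pattern of the flush call in the references above:

void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
                          uint32_t num_pages, uint32_t desired_tile_stride,
                          uint32_t hw_tile_stride)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        unsigned long addr, end, next;
        unsigned long add, row_add;
        unsigned long f_address = address;

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride << PAGE_SHIFT;        /* bytes per row */
        row_add = hw_tile_stride << PAGE_SHIFT;         /* row-to-row step */

        down_read(&pd->driver->sem);

        for (i = 0; i < rows; ++i) {
                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_map_lock(pd, addr);
                        if (!pt)
                                continue;       /* nothing mapped here */
                        do {
                                psb_mmu_invalidate_pte(pt, addr);
                                --pt->count;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);
                address += row_add;
        }
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);
}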
620 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, in psb_mmu_insert_pfn_sequence() argument
632 down_read(&pd->driver->sem); in psb_mmu_insert_pfn_sequence()
639 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pfn_sequence()
655 if (pd->hw_context != -1) in psb_mmu_insert_pfn_sequence()
656 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_insert_pfn_sequence()
658 up_read(&pd->driver->sem); in psb_mmu_insert_pfn_sequence()
660 if (pd->hw_context != -1) in psb_mmu_insert_pfn_sequence()
661 psb_mmu_flush(pd->driver); in psb_mmu_insert_pfn_sequence()
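
Insertion of a physically contiguous PFN range follows the same chunked walk: PTEs are built with psb_mmu_mask_pte() from an incrementing PFN and written through psb_mmu_set_pte(), the table's use count grows with each entry, and the clflush-then-TLB-flush choreography at the end matches the remove paths. A sketch; the psb_mmu_set_pte() and psb_mmu_mask_pte() helpers are assumed from the surrounding file:

int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
                                unsigned long address, uint32_t num_pages,
                                int type)
{
        struct psb_mmu_pt *pt;
        uint32_t pte;
        unsigned long addr = address;
        unsigned long end = addr + (num_pages << PAGE_SHIFT);
        unsigned long next;
        unsigned long f_address = address;
        int ret = -ENOMEM;

        down_read(&pd->driver->sem);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt)
                        goto out;       /* partial map still gets flushed */
                do {
                        pte = psb_mmu_mask_pte(start_pfn++, type);
                        psb_mmu_set_pte(pt, addr, pte);
                        pt->count++;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);
        ret = 0;

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        return ret;
}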
666 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, in psb_mmu_insert_pages() argument
694 down_read(&pd->driver->sem); in psb_mmu_insert_pages()
703 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pages()
721 if (pd->hw_context != -1) in psb_mmu_insert_pages()
722 psb_mmu_flush_ptes(pd, f_address, num_pages, in psb_mmu_insert_pages()
725 up_read(&pd->driver->sem); in psb_mmu_insert_pages()
727 if (pd->hw_context != -1) in psb_mmu_insert_pages()
728 psb_mmu_flush(pd->driver); in psb_mmu_insert_pages()
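
psb_mmu_insert_pages() differs from the PFN variant in two ways: the PFNs come from an array of struct page pointers rather than a contiguous run, and it supports the same row/stride tiling as psb_mmu_remove_pages(). A sketch on the same pattern; the -EINVAL check on a page count that is not a whole number of tile rows is an assumption:

int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                         unsigned long address, uint32_t num_pages,
                         uint32_t desired_tile_stride, uint32_t hw_tile_stride,
                         int type)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        uint32_t pte;
        unsigned long addr, end, next;
        unsigned long add, row_add;
        unsigned long f_address = address;
        int ret = -ENOMEM;

        if (hw_tile_stride) {
                if (num_pages % desired_tile_stride != 0)
                        return -EINVAL;
                rows = num_pages / desired_tile_stride;
        } else {
                desired_tile_stride = num_pages;
        }

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        down_read(&pd->driver->sem);

        for (i = 0; i < rows; ++i) {
                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                        if (!pt)
                                goto out;
                        do {
                                /* Each PTE comes from the next page in the array. */
                                pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
                                                       type);
                                psb_mmu_set_pte(pt, addr, pte);
                                pt->count++;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);

                address += row_add;
        }
        ret = 0;

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        return ret;
}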
733 int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, in psb_mmu_virtual_to_pfn() argument
739 spinlock_t *lock = &pd->driver->lock; in psb_mmu_virtual_to_pfn()
741 down_read(&pd->driver->sem); in psb_mmu_virtual_to_pfn()
742 pt = psb_mmu_pt_map_lock(pd, virtual); in psb_mmu_virtual_to_pfn()
747 v = kmap_atomic(pd->p); in psb_mmu_virtual_to_pfn()
752 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || in psb_mmu_virtual_to_pfn()
753 !(pd->invalid_pte & PSB_PTE_VALID)) { in psb_mmu_virtual_to_pfn()
758 *pfn = pd->invalid_pte >> PAGE_SHIFT; in psb_mmu_virtual_to_pfn()
770 up_read(&pd->driver->sem); in psb_mmu_virtual_to_pfn()
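
The lookup path reads a single PTE under the per-table lock. The interesting branch is the miss: when no page table exists, the function only reports success if the PDE equals invalid_pde and invalid_pte carries PSB_PTE_VALID, i.e. when the PD was built without fault trapping, and in that case it hands back the dummy page's PFN. A sketch following the referenced lines; psb_mmu_pd_index() and psb_mmu_pt_index() are the usual directory and table index helpers:

int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
                           unsigned long *pfn)
{
        int ret;
        struct psb_mmu_pt *pt;
        uint32_t tmp;
        spinlock_t *lock = &pd->driver->lock;

        down_read(&pd->driver->sem);
        pt = psb_mmu_pt_map_lock(pd, virtual);
        if (!pt) {
                uint32_t *v;

                spin_lock(lock);
                v = kmap_atomic(pd->p);
                tmp = v[psb_mmu_pd_index(virtual)];
                kunmap_atomic(v);
                spin_unlock(lock);

                /* Succeed only for the "redirect to dummy page" setup. */
                if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
                    !(pd->invalid_pte & PSB_PTE_VALID)) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = 0;
                *pfn = pd->invalid_pte >> PAGE_SHIFT;
                goto out;
        }
        tmp = pt->v[psb_mmu_pt_index(virtual)];
        if (!(tmp & PSB_PTE_VALID)) {
                ret = -EINVAL;
        } else {
                ret = 0;
                *pfn = tmp >> PAGE_SHIFT;
        }
        psb_mmu_pt_unmap_unlock(pt);
out:
        up_read(&pd->driver->sem);
        return ret;
}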