/*
 * arch/mips/mm/remap.c
 * A copy of mm/memory.c with modifications to handle 64-bit
 * physical addresses.
 */

/*
 * Maps a range of physical memory into the requested pages.  The old
 * mappings are removed.  Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/swapctl.h>
#include <linux/iobuf.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>

/*
 * Sanity check on the old PTE: the caller is expected to hand us only
 * unused page table slots, so a live previous mapping here is a bug.
 */
static inline void forget_pte(pte_t page)
{
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		BUG();
	}
}

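/*
 * Fill one PTE page: clear each slot and install a mapping for
 * phys_addr with the given protection.  A new PTE is written only when
 * the physical address has no valid struct page, or a reserved one, so
 * ordinary RAM is never remapped from here.
 */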
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	phys_t phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page *page;
		pte_t oldpage;
		oldpage = ptep_get_and_clear(pte);

		page = virt_to_page(__va(phys_addr));
		if ((!VALID_PAGE(page)) || PageReserved(page))
			set_pte(pte, mk_pte_phys(phys_addr, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

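/*
 * Walk the PMD entries covering [address, address + size), allocating
 * a PTE page for each and filling it via remap_pte_range().  phys_addr
 * is biased by -address on entry, so address + phys_addr always yields
 * the physical address to map.
 */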
static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	phys_t phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	do {
		pte_t * pte = pte_alloc(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_pte_range(pte, address, end - address, address + phys_addr, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

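/*
 * Platform hook: board code points fixup_bigphys_addr at a routine
 * that adjusts a raw (possibly >32-bit) physical address into the
 * value that should actually go into the page tables.
 */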
extern phys_t (*fixup_bigphys_addr)(phys_t phys_addr, phys_t size);

/*  Note: this is only safe if the mm semaphore is held when called. */
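/*
 * 64-bit capable counterpart of remap_page_range(): map the virtual
 * range [from, from + size) in the current mm to the physical range
 * starting at phys_addr, with protection prot.  Returns 0 on success
 * or -ENOMEM if a page table could not be allocated.
 */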
int remap_page_range_high(unsigned long from, phys_t phys_addr, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = current->mm;

	phys_addr = fixup_bigphys_addr(phys_addr, size);
	phys_addr -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(mm, beg, end);
	if (from >= end)
		BUG();

	spin_lock(&mm->page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (from && (from < end));
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(mm, beg, end);
	return error;
}
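
/*
 * Usage sketch, not part of this file: a hypothetical 2.4-era driver
 * mmap() handler mapping a device region that sits above 4GB physical.
 * mydev_mmap and MYDEV_PHYS_BASE are made-up names.  The VFS already
 * holds mm->mmap_sem around ->mmap, satisfying the locking note above.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *		phys_t phys = MYDEV_PHYS_BASE +
 *			((phys_t)vma->vm_pgoff << PAGE_SHIFT);
 *
 *		vma->vm_flags |= VM_IO;
 *		if (remap_page_range_high(vma->vm_start, phys, size,
 *					  vma->vm_page_prot))
 *			return -EAGAIN;
 *		return 0;
 *	}
 */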