/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>

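/*
 * Look up the kernel pte that maps 'address'. When the region is
 * covered by a 2MB large page the pmd itself is returned, cast to a
 * pte. Returns NULL when the address is not mapped.
 */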
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pmd_t *pmd;

	if (!pgd)
		return NULL;
	pmd = pmd_offset(pgd, address);
	if (!pmd)
		return NULL;
	if ((pmd_val(*pmd) & PAGE_LARGE) == PAGE_LARGE)
		return (pte_t *)pmd;

	return pte_offset(pmd, address);
}

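/*
 * Split a 2MB kernel mapping into a newly allocated page of 4K ptes.
 * The pte covering 'address' gets 'prot'; all others get PAGE_KERNEL.
 * The caller is responsible for installing the new page table into
 * the pmd and for freeing it again when the mapping is reverted.
 */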
static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = mk_pte_phys(addr,
				       addr == address ? prot : PAGE_KERNEL);
	}
	return base;
}

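/*
 * Flush caches and TLBs after a page attribute change. The CLFLUSH
 * path is deliberately disabled ("if (0 && ...)"), so this always
 * falls back to a full WBINVD followed by a global TLB flush.
 */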
static void flush_kernel_map(void *address)
{
	struct cpuinfo_x86 *cpu = &cpu_data[smp_processor_id()];

	wmb();
	if (0 && test_bit(X86_FEATURE_CLFLSH, &cpu->x86_capability)) {
		/* is this worth it? */
		int i;
		for (i = 0; i < PAGE_SIZE; i += cpu->x86_clflush_size)
			asm volatile("clflush (%0)" :: "r" (address + i));
	} else
		asm volatile("wbinvd" ::: "memory");
	__flush_tlb_all();
}

/*
 * No more special protections in this 2MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	if (!pgd)
		BUG();
	pmd = pmd_offset(pgd, address);
	if (!pmd)
		BUG();
	if ((pmd_val(*pmd) & _PAGE_GLOBAL) == 0)
		BUG();

	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, PAGE_KERNEL_LARGE);
	set_pte((pte_t *)pmd, large_pte);
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 * When the page is in highmem it must never be kmap'ed.
 */
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot,
		   struct page **oldpage)
{
	pte_t *kpte;
	struct page *kpte_page;

	kpte = lookup_address(address);
	if (!kpte)
		return 0; /* not mapped in the kernel */
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			/* Already split into 4K pages: just update the pte. */
			set_pte(kpte, mk_pte(page, prot));
		} else {
			/* Still a 2MB page: split it first. */
			struct page *split = split_large_page(address, prot);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, PAGE_KERNEL));
			kpte_page = split;
		}
		/* Count the non-default ptes in this page table. */
		atomic_inc(&kpte_page->count);
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		/* Reverting to the default protection: drop one reference. */
		set_pte(kpte, mk_pte(page, PAGE_KERNEL));
		atomic_dec(&kpte_page->count);
	}

	/* No special ptes left - collapse back into a large page. */
	if (atomic_read(&kpte_page->count) == 1) {
		*oldpage = kpte_page;
		revert_page(kpte_page, address);
	}
	return 0;
}

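/*
 * Flush on all CPUs and free the pte page that a reverted large
 * page replaced, if there is one.
 */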
static inline void flush_and_free(void *address, struct page *fpage)
{
#ifdef CONFIG_SMP
	smp_call_function(flush_kernel_map, address, 1, 1);
#endif
	flush_kernel_map(address);
	if (fpage)
		__free_page(fpage);
}

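/*
 * Exported interface: change the attributes of 'numpages' pages
 * starting at 'page' in the kernel linear map. For pages that are
 * also covered by the kernel text mapping, both aliases are updated
 * so that the caching policies stay consistent.
 */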
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	struct page *fpage, *fpage2;
	int i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, page++) {
		fpage = fpage2 = NULL;
		err = __change_page_attr((unsigned long)page_address(page),
					 page, prot, &fpage);

		/* Handle the kernel mapping too, which aliases part of lowmem */
		if (!err && page_to_phys(page) < KERNEL_TEXT_SIZE) {
			err = __change_page_attr((unsigned long)__START_KERNEL_map +
						 page_to_phys(page),
						 page, prot, &fpage2);
		}

		if (err)
			break;

		if (fpage || fpage2 || i == numpages - 1) {
			flush_and_free(page_address(page), fpage);
			if (unlikely(fpage2 != NULL))
				flush_and_free((char *)__START_KERNEL_map +
					       page_to_phys(page), fpage2);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}

EXPORT_SYMBOL(change_page_attr);
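
/*
 * Illustrative usage sketch (not part of this file): a driver that
 * needs uncached access to a run of lowmem pages could switch them to
 * PAGE_KERNEL_NOCACHE (assumed here from <asm/pgtable.h>) and restore
 * PAGE_KERNEL before freeing them:
 *
 *	if (change_page_attr(page, n, PAGE_KERNEL_NOCACHE) < 0)
 *		return -EIO;
 *	... uncached use ...
 *	change_page_attr(page, n, PAGE_KERNEL);
 */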