/* MN10300 Page table management
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	local_flush_tlb_one(vaddr);
}

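/*
 * Allocate and clear a page of PTEs for use in kernel mappings
 */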
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		clear_page(pte);
	return pte;
}

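/*
 * Allocate and clear a page of PTEs for use in userspace mappings; the page
 * may come from highmem if CONFIG_HIGHPTE is enabled
 */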
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (pte)
		clear_highpage(pte);
	return pte;
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

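/*
 * Link a pgd's page into pgd_list. The list is threaded through
 * page->index (the next pointer) and page_private() (a pointer back to the
 * previous element's next field), so removal needs no list walk.
 */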
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long) &page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long) &pgd_list);
}

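/*
 * Unlink a pgd's page from pgd_list by pointing the previous element's
 * next field at our successor
 */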
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *) page->index;
	pprev = (struct page **) page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long) pprev);
}

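/*
 * Initialise a newly allocated pgd: copy the kernel portion from
 * swapper_pg_dir, and with a two-level pagetable layout (PTRS_PER_PMD == 1)
 * also clear the user entries and link the pgd into pgd_list. The lock is
 * taken with interrupts disabled because pgd_dtor() may run from interrupt
 * context.
 */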
void pgd_ctor(void *pgd)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

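/*
 * Allocate a pgd through the quicklist allocator; pgd_ctor() initialises
 * pages that the quicklist has to obtain fresh from the page allocator
 */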
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

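/*
 * Return a pgd to the quicklist for later reuse
 */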
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	quicklist_free(0, pgd_dtor, pgd);
}

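/*
 * No architecture-specific pagetable caches need to be set up
 */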
void __init pgtable_cache_init(void)
{
}

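/*
 * Trim excess pages off the pgd quicklist, running pgd_dtor() on any that
 * are handed back to the page allocator
 */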
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}