/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

/* undef for production */
#define HIGHMEM_DEBUG 1

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

extern void kmap_init(void) __init;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define PKMAP_BASE	CONFIG_HIGHMEM_START
#define LAST_PKMAP	PTRS_PER_PTE
#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
#define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

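/*
 * Worked example (illustrative note, not part of the original header):
 * pkmap slot nr lives at PKMAP_ADDR(nr) == PKMAP_BASE + (nr << PAGE_SHIFT),
 * and PKMAP_NR() inverts that, e.g. PKMAP_NR(PKMAP_ADDR(5)) == 5.
 */
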
#define KMAP_FIX_BEGIN	(PKMAP_BASE + 0x00400000UL)

extern void *kmap_high(struct page *page, int nonblock);
extern void kunmap_high(struct page *page);

#define kmap(page) __kmap(page, 0)
#define kmap_nonblock(page) __kmap(page, 1)

static inline void *__kmap(struct page *page, int nonblock)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return page_address(page);
	return kmap_high(page, nonblock);
}

static inline void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return;
	kunmap_high(page);
}

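/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * original header): the usual kmap()/kunmap() pairing. kmap() may
 * sleep waiting for a pkmap slot, so this is only valid in process
 * context.
 */
static inline void example_highmem_fill(struct page *page, unsigned char val)
{
	unsigned char *vaddr = kmap(page);	/* map the (possibly high) page */
	unsigned long i;

	for (i = 0; i < PAGE_SIZE; i++)		/* access it through the mapping */
		vaddr[i] = val;
	kunmap(page);				/* drop the temporary mapping */
}
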
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	if (page < highmem_start_page)
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = KMAP_FIX_BEGIN + idx * PAGE_SIZE;
#if HIGHMEM_DEBUG
	if (!pte_none(*(kmap_pte+idx)))
		BUG();
#endif
	set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
	flush_tlb_page(0, vaddr);

	return (void*) vaddr;
}

static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#if HIGHMEM_DEBUG
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < KMAP_FIX_BEGIN) // FIXME
		return;

	if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
		BUG();

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remapping it
	 */
	pte_clear(kmap_pte+idx);
	flush_tlb_page(0, vaddr);
#endif
}
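
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * original header): the kmap_atomic()/kunmap_atomic() pairing. The
 * caller owns the km_type slot for the duration of the mapping and
 * must not sleep in between; KM_USER0 is used here purely for
 * illustration.
 */
static inline unsigned char example_highmem_peek(struct page *page,
						 unsigned long offset)
{
	unsigned char *vaddr = kmap_atomic(page, KM_USER0);	/* grab a per-CPU slot */
	unsigned char val = vaddr[offset & (PAGE_SIZE - 1)];	/* stay within one page */

	kunmap_atomic(vaddr, KM_USER0);				/* release the slot */
	return val;
}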

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */