#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/config.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_HIGHMEM

extern struct page *highmem_start_page;

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);

extern struct buffer_head *create_bounce(int rw, struct buffer_head * bh_orig);

/* Map the page backing @bh and return a pointer to its data. May sleep. */
static inline char *bh_kmap(struct buffer_head *bh)
{
	return kmap(bh->b_page) + bh_offset(bh);
}

/* Undo a bh_kmap(). */
static inline void bh_kunmap(struct buffer_head *bh)
{
	kunmap(bh->b_page);
}

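/*
 * Minimal usage sketch (illustrative only; assumes the caller owns @bh
 * and runs in process context, since kmap() may sleep):
 *
 *	char *data = bh_kmap(bh);
 *
 *	memset(data, 0, bh->b_size);
 *	bh_kunmap(bh);
 */
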
/*
 * Remember to add the offset!  And never, ever re-enable interrupts
 * between a bh_kmap_irq() and the matching bh_kunmap_irq(): the
 * KM_BH_IRQ slot is per-CPU, so an interrupt taking another mapping
 * would clobber it.
 */
static inline char *bh_kmap_irq(struct buffer_head *bh, unsigned long *flags)
{
	unsigned long addr;

	__save_flags(*flags);

	/*
	 * The page may be in lowmem, in which case it is permanently
	 * mapped and we can return its direct address.
	 */
	if (!PageHighMem(bh->b_page))
		return bh->b_data;

	/*
	 * It's a highmem page: disable interrupts and take an atomic kmap.
	 */
	__cli();
	addr = (unsigned long) kmap_atomic(bh->b_page, KM_BH_IRQ);

	/* atomic kmaps are always page aligned */
	if (addr & ~PAGE_MASK)
		BUG();

	return (char *) addr + bh_offset(bh);
}

static inline void bh_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BH_IRQ);
	__restore_flags(*flags);
}

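/*
 * Usage sketch (illustrative; assumes a caller that must not block and
 * may be handed highmem buffer_heads): map, copy, unmap, without
 * touching the interrupt state in between.
 *
 *	unsigned long flags;
 *	char *data = bh_kmap_irq(bh, &flags);
 *
 *	memcpy(dest, data, bh->b_size);
 *	bh_kunmap_irq(data, &flags);
 *
 * Note that bh_kunmap_irq() takes the pointer returned by bh_kmap_irq(),
 * not the buffer_head, and restores the saved interrupt flags.
 */
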
#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline void *kmap(struct page *page) { return page_address(page); }

#define kunmap(page) do { } while (0)

#define kmap_atomic(page,idx) kmap(page)
#define kunmap_atomic(page,idx) kunmap(page)

#define bh_kmap(bh) ((bh)->b_data)
#define bh_kunmap(bh) do { } while (0)
#define kmap_nonblock(page) kmap(page)
#define bh_kmap_irq(bh, flags) ((bh)->b_data)
#define bh_kunmap_irq(bh, flags) do { *(flags) = 0; } while (0)

#endif /* CONFIG_HIGHMEM */

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */

/* Zero a page that is about to be mapped into user space at @vaddr. */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr);
	kunmap_atomic(addr, KM_USER0);
}

/* Zero an entire (possibly highmem) page. */
static inline void clear_highpage(struct page *page)
{
	clear_page(kmap(page));
	kunmap(page);
}

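/*
 * Illustrative sketch (assumes an anonymous-fault style caller that has
 * just allocated @page and knows the user address it will be mapped at):
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *
 *	if (page)
 *		clear_user_highpage(page, address);
 *
 * clear_highpage() is the variant to use when the page is not (or not
 * yet) destined for a specific user mapping.
 */
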
/*
 * Same as clear_highpage(), but clears only a sub-range of the page and
 * also flushes aliased cache contents to RAM.
 */
static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
{
	char *kaddr;

	if (offset + size > PAGE_SIZE)
		out_of_line_bug();
	kaddr = kmap(page);
	memset(kaddr + offset, 0, size);
	flush_dcache_page(page);
	flush_page_to_ram(page);
	kunmap(page);
}

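/*
 * Usage sketch (an assumed scenario: a filesystem zeroing the unused
 * tail of a page, e.g. past end-of-file, so stale data never reaches
 * disk or user space; "isize" here stands for the caller's file size):
 *
 *	unsigned int partial = isize & (PAGE_SIZE - 1);
 *
 *	if (partial)
 *		memclear_highpage_flush(page, partial, PAGE_SIZE - partial);
 */
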
/* Copy a page into one that will be mapped to user space at @vaddr. */
static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

/* Copy the contents of one (possibly highmem) page into another. */
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

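/*
 * Sketch of the copy-on-write style pairing these helpers suit (assumes
 * the caller already holds references on both pages and knows the
 * faulting user address):
 *
 *	struct page *new_page = alloc_page(GFP_HIGHUSER);
 *
 *	if (new_page)
 *		copy_user_highpage(new_page, old_page, address);
 *
 * copy_highpage() is the same operation when no user virtual address is
 * involved, e.g. when duplicating a page purely inside the kernel.
 */
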
#endif /* _LINUX_HIGHMEM_H */