#ifndef __LINUX_VMALLOC_H
#define __LINUX_VMALLOC_H

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

#include <linux/highmem.h>	/* several architectures define VMALLOC_END via PKMAP_BASE */
#include <asm/pgtable.h>

/* bits in vm_struct->flags */
#define VM_IOREMAP	0x00000001	/* ioremap() and friends */
#define VM_ALLOC	0x00000002	/* vmalloc() */

struct vm_struct {
	unsigned long flags;
	void * addr;
	unsigned long size;
	struct vm_struct * next;
};

extern struct vm_struct * get_vm_area (unsigned long size, unsigned long flags);
extern void vfree(void * addr);
#define vunmap(addr)	vfree(addr)
extern void * vmap(struct page **pages, int count,
		   unsigned long flags, pgprot_t prot);
extern void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot);
extern long vread(char *buf, char *addr, unsigned long count);
extern void vmfree_area_pages(unsigned long address, unsigned long size);
extern int vmalloc_area_pages(unsigned long address, unsigned long size,
			      int gfp_mask, pgprot_t prot);
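
/*
 * Usage sketch (illustrative): a caller that already holds an array of
 * struct page pointers can map them into one virtually contiguous kernel
 * range with vmap() and drop the mapping again with vunmap(); vunmap()
 * releases only the mapping, not the pages.  The identifiers `pages' and
 * `nr_pages' are hypothetical caller state, and VM_ALLOC is simply one of
 * the vm_struct flag bits defined above.
 *
 *	void *virt = vmap(pages, nr_pages, VM_ALLOC, PAGE_KERNEL);
 *
 *	if (!virt)
 *		return -ENOMEM;
 *	...
 *	vunmap(virt);
 */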

/*
 *	Allocate any pages
 */

static inline void * vmalloc (unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
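
/*
 * Usage sketch (illustrative): vmalloc() returns page-aligned memory that
 * is virtually contiguous but possibly physically scattered, which makes
 * it suitable for large software-only buffers; it must be released with
 * vfree().  `struct foo_entry' and `nr_entries' are hypothetical names.
 *
 *	struct foo_entry *table = vmalloc(nr_entries * sizeof(*table));
 *
 *	if (!table)
 *		return -ENOMEM;
 *	memset(table, 0, nr_entries * sizeof(*table));
 *	...
 *	vfree(table);
 */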

/*
 *	Allocate ISA addressable pages for broken/legacy hardware
 */

static inline void * vmalloc_dma (unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL|GFP_DMA, PAGE_KERNEL);
}

/*
 *	vmalloc pages with 32-bit physical addresses - e.g. for 32-bit PCI devices
 */

static inline void * vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
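
/*
 * The three helpers above differ only in the gfp_mask handed to
 * __vmalloc(): __GFP_HIGHMEM lets vmalloc() draw from highmem, GFP_DMA
 * restricts vmalloc_dma() to the ISA DMA zone, and plain GFP_KERNEL keeps
 * every page behind vmalloc_32() at a 32-bit physical address.  Usage
 * sketch (illustrative, with a hypothetical `buf_size'): a driver that
 * scatter-gathers over a large buffer for a PCI device lacking 64-bit
 * addressing.
 *
 *	void *buf = vmalloc_32(buf_size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */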

/*
 * vmlist_lock is a read-write spinlock that protects vmlist.
 * Used in mm/vmalloc.c (get_vm_area() and vfree()) and fs/proc/kcore.c.
 */
extern rwlock_t vmlist_lock;

extern struct vm_struct * vmlist;
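
/*
 * Usage sketch (illustrative): walking vmlist the way fs/proc/kcore.c
 * does, holding the reader side of vmlist_lock so that concurrent
 * get_vm_area()/vfree() callers cannot modify the list underneath us.
 *
 *	struct vm_struct *vm;
 *	unsigned long total = 0;
 *
 *	read_lock(&vmlist_lock);
 *	for (vm = vmlist; vm; vm = vm->next)
 *		total += vm->size;
 *	read_unlock(&vmlist_lock);
 */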
#endif