#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
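
/*
 * Slab cache backing the per-task extended FPU state (thread.xstate).
 * xstate_size is 0 when the CPU has neither a hardware FPU nor the
 * FPU emulator, in which case no cache is created.
 */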
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;
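
/*
 * Called from dup_task_struct() at fork time: copy the whole task_struct,
 * then give the child its own xstate buffer so parent and child do not
 * share FPU state.
 */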
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}
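
/*
 * Release a task's xstate buffer back to the cache, if it has one.
 * Clearing the pointer guards against a double free.
 */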
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}
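
/*
 * Two thread_info allocators: when THREAD_SIZE is smaller than a page,
 * a dedicated slab cache avoids wasting the remainder of each page;
 * otherwise whole pages are allocated directly.
 */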
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
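
/*
 * Allocate a thread_info from the slab cache on the requested NUMA node.
 * With CONFIG_DEBUG_STACK_USAGE the allocation is zeroed so the stack
 * high-water mark can be measured.
 */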
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif

	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
	return ti;
}
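
/*
 * Free the task's xstate first (thread_info still carries the task
 * backpointer here), then return the thread_info to the cache.
 */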
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}
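
/*
 * SLAB_PANIC: the kernel cannot run without thread stacks, so failure
 * to create the cache is fatal.
 */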
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
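
/*
 * THREAD_SIZE is at least a page: allocate 2^THREAD_SIZE_ORDER pages on
 * the requested node and return their kernel virtual address.
 */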
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}
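
/*
 * Page-based counterpart of the slab free path above.
 */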
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
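
/*
 * Create the xstate cache. Relies on init_thread_xstate() having already
 * set xstate_size; when it is zero there is no FPU state to cache.
 */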
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}
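
/* Compile-time flag: is the SH FPU emulator built in? */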
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif
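
/*
 * Pick the xstate size for this system: hardware FPU state if the boot
 * CPU has an FPU, software FPU state if the emulator is built in,
 * otherwise none.
 */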
void __cpuinit init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}