// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * This file contains functions for dynamic memory pool management
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>

#include <asm/set_memory.h>

#include "atomisp_internal.h"

#include "hmm/hmm_pool.h"

/*
 * dynamic memory pool ops.
 */
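/*
 * Hand out up to @size pages from the pool's pages_list into @page_obj.
 * Returns the number of pages actually provided, which may be less than
 * @size when the pool runs empty.  @cached is unused here and only present
 * to match the pool_alloc_pages callback signature.  The list lock is
 * dropped while each entry is copied out and returned to pgptr_cache, then
 * re-taken before the next list operation.
 */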
static unsigned int get_pages_from_dynamic_pool(void *pool,
						struct hmm_page_object *page_obj,
						unsigned int size, bool cached)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	unsigned int i = 0;
	struct hmm_dynamic_pool_info *dypool_info = pool;

	if (!dypool_info)
		return 0;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (dypool_info->initialized) {
		while (!list_empty(&dypool_info->pages_list)) {
			hmm_page = list_entry(dypool_info->pages_list.next,
					      struct hmm_page, list);

			list_del(&hmm_page->list);
			dypool_info->pgnr--;
			spin_unlock_irqrestore(&dypool_info->list_lock, flags);

			page_obj[i].page = hmm_page->page;
			page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
			kmem_cache_free(dypool_info->pgptr_cache, hmm_page);

			if (i == size)
				return i;

			spin_lock_irqsave(&dypool_info->list_lock, flags);
		}
	}
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	return i;
}

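/*
 * Give one page back to the pool.  Reserved-pool pages are left alone.  If
 * the pool already holds pool_size pages, or if no tracking entry can be
 * allocated from pgptr_cache, the page is instead switched back to
 * write-back caching and released to the system.
 */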
static void free_pages_to_dynamic_pool(void *pool,
				       struct hmm_page_object *page_obj)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;
	struct hmm_dynamic_pool_info *dypool_info = pool;

	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (!dypool_info->initialized) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
		return;

	if (dypool_info->pgnr >= dypool_info->pool_size) {
		/* free page directly back to system */
		ret = set_pages_wb(page_obj->page, 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err ...ret=%d\n", ret);
		/*
		 * W/A: set_pages_wb() occasionally returns -EFAULT,
		 * indicating that the page address is outside the valid
		 * range (0xffff880000000000~0xffffc7ffffffffff), in which
		 * case __free_pages() would panic.  It is not clear why the
		 * page address becomes invalid; it may be memory corruption
		 * caused by low memory.
		 */
		if (!ret) {
			__free_pages(page_obj->page, 0);
			hmm_mem_stat.sys_size--;
		}
		return;
	}
	hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
				     GFP_KERNEL);
	if (!hmm_page) {
		/* free page directly */
		ret = set_pages_wb(page_obj->page, 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err ...ret=%d\n", ret);
		if (!ret) {
			__free_pages(page_obj->page, 0);
			hmm_mem_stat.sys_size--;
		}
		return;
	}

	hmm_page->page = page_obj->page;

	/*
	 * add to pages_list of pages_pool
	 */
	spin_lock_irqsave(&dypool_info->list_lock, flags);
	list_add_tail(&hmm_page->list, &dypool_info->pages_list);
	dypool_info->pgnr++;
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);
	hmm_mem_stat.dyc_size++;
}

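/*
 * Set up the dynamic pool bookkeeping: the page tracking cache, the (empty)
 * pages_list and its lock.  A pool_size of 0 leaves *pool untouched, which
 * effectively disables the dynamic pool.
 */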
static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
{
	struct hmm_dynamic_pool_info *dypool_info;

	if (pool_size == 0)
		return 0;

	dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info),
			      GFP_KERNEL);
	if (unlikely(!dypool_info))
		return -ENOMEM;

	dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
						     sizeof(struct hmm_page), 0,
						     SLAB_HWCACHE_ALIGN, NULL);
	if (!dypool_info->pgptr_cache) {
		kfree(dypool_info);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&dypool_info->pages_list);
	spin_lock_init(&dypool_info->list_lock);
	dypool_info->initialized = true;
	dypool_info->pool_size = pool_size;
	dypool_info->pgnr = 0;

	*pool = dypool_info;

	return 0;
}

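/*
 * Tear the pool down: mark it uninitialized under the lock, then drain
 * pages_list and return every cached page to the system.  set_pages_wb()
 * may sleep, so the lock is released around each page and re-acquired for
 * the next list operation.
 */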
static void hmm_dynamic_pool_exit(void **pool)
{
	struct hmm_dynamic_pool_info *dypool_info = *pool;
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;

	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (!dypool_info->initialized) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	dypool_info->initialized = false;

	while (!list_empty(&dypool_info->pages_list)) {
		hmm_page = list_entry(dypool_info->pages_list.next,
				      struct hmm_page, list);

		list_del(&hmm_page->list);
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);

		/* set_pages_wb() may sleep, so it cannot be called under the spinlock */
		ret = set_pages_wb(hmm_page->page, 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err...ret=%d\n", ret);
		if (!ret) {
			__free_pages(hmm_page->page, 0);
			hmm_mem_stat.dyc_size--;
			hmm_mem_stat.sys_size--;
		}
		kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
		spin_lock_irqsave(&dypool_info->list_lock, flags);
	}

	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	kmem_cache_destroy(dypool_info->pgptr_cache);

	kfree(dypool_info);

	*pool = NULL;
}

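/* Report whether the dynamic pool has been set up (pool_inited callback). */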
static int hmm_dynamic_pool_inited(void *pool)
{
	struct hmm_dynamic_pool_info *dypool_info = pool;

	if (!dypool_info)
		return 0;

	return dypool_info->initialized;
}

struct hmm_pool_ops dynamic_pops = {
	.pool_init = hmm_dynamic_pool_init,
	.pool_exit = hmm_dynamic_pool_exit,
	.pool_alloc_pages = get_pages_from_dynamic_pool,
	.pool_free_pages = free_pages_to_dynamic_pool,
	.pool_inited = hmm_dynamic_pool_inited,
};

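/*
 * Illustrative usage sketch: a caller holding an opaque pool pointer could
 * drive these callbacks roughly as follows; 'dypool', 'objs' and 'nr' are
 * hypothetical caller-side names, not identifiers from this driver.
 *
 *	void *dypool = NULL;
 *	struct hmm_page_object objs[32];
 *	unsigned int nr;
 *
 *	if (dynamic_pops.pool_init(&dypool, 1024))
 *		return -ENOMEM;
 *
 *	nr = dynamic_pops.pool_alloc_pages(dypool, objs, 32, false);
 *	... use the pages, then hand each one back ...
 *	dynamic_pops.pool_free_pages(dypool, &objs[0]);
 *
 *	dynamic_pops.pool_exit(&dypool);
 */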