// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * This file contains the entry functions for the memory management of
 * the ISP driver.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* for kmap_local_page() */
#include <linux/io.h>		/* for page_to_phys() */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include <asm/cacheflush.h>	/* for clflush_cache_range() */
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

struct hmm_bo_device bo_device;
static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
static bool hmm_initialized;

/*
 * One character per enum hmm_bo_type, used in the sysfs output below:
 *   p: private
 *   s: shared
 *   u: user
 *   i: ion
 */
static const char hmm_bo_type_string[] = "psui";

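/*
 * bo_show() - helper shared by the sysfs attributes below. It walks
 * @bo_list and, for every buffer object whose HMM_BO_ALLOCED status
 * matches @active, prints one "<type> <pgnr>" line, followed by
 * per-type summary lines. Pages are 4 KB each, hence the "total * 4"
 * KB conversion. Illustrative output (not taken from a real device):
 *
 *	type pgnr
 *	p 256
 *	p 64
 *	2 p buffer objects: 1280 KB
 */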
static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
		       char *buf, struct list_head *bo_list, bool active)
{
	ssize_t ret = 0;
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int i;
	long total[HMM_BO_LAST] = { 0 };
	long count[HMM_BO_LAST] = { 0 };
	int index1 = 0;
	int index2 = 0;

	ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
	if (ret <= 0)
		return 0;

	index1 += ret;

	spin_lock_irqsave(&bo_device.list_lock, flags);
	list_for_each_entry(bo, bo_list, list) {
		if ((active && (bo->status & HMM_BO_ALLOCED)) ||
		    (!active && !(bo->status & HMM_BO_ALLOCED))) {
			ret = scnprintf(buf + index1, PAGE_SIZE - index1,
					"%c %d\n",
					hmm_bo_type_string[bo->type], bo->pgnr);

			total[bo->type] += bo->pgnr;
			count[bo->type]++;
			if (ret > 0)
				index1 += ret;
		}
	}
	spin_unlock_irqrestore(&bo_device.list_lock, flags);

	for (i = 0; i < HMM_BO_LAST; i++) {
		if (count[i]) {
			ret = scnprintf(buf + index1 + index2,
					PAGE_SIZE - index1 - index2,
					"%ld %c buffer objects: %ld KB\n",
					count[i], hmm_bo_type_string[i],
					total[i] * 4);
			if (ret > 0)
				index2 += ret;
		}
	}

	/* Add trailing zero, not included by scnprintf */
	return index1 + index2 + 1;
}

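/*
 * sysfs show callbacks. Both walk the same entire_bo_list: active_bo
 * lists the buffer objects with HMM_BO_ALLOCED set, free_bo lists the
 * ones without it.
 */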
static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

static DEVICE_ATTR_RO(active_bo);
static DEVICE_ATTR_RO(free_bo);

static struct attribute *sysfs_attrs_ctrl[] = {
	&dev_attr_active_bo.attr,
	&dev_attr_free_bo.attr,
	NULL
};

static const struct attribute_group atomisp_attribute_group[] = {
	{ .attrs = sysfs_attrs_ctrl },
};

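/*
 * hmm_init() - bring up the hmm subsystem: initialize the bo device on
 * top of the sh_mmu_mrfld MMU driver, reserve the dummy allocation at
 * ISP address 0 and register the sysfs attributes. Returns 0 on
 * success or a negative errno.
 */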
int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	hmm_initialized = true;

	/*
	 * hmm uses 0 to mean an invalid ISP virtual address, but
	 * ISP_VM_START is defined as 0 too. Allocate one dummy page up
	 * front so that it occupies address 0 and no later hmm_alloc()
	 * call can ever return 0 for a valid allocation.
	 */
	dummy_ptr = hmm_alloc(1);

	if (!ret) {
		ret = sysfs_create_group(&atomisp_dev->kobj,
					 atomisp_attribute_group);
		if (ret)
			dev_err(atomisp_dev,
				"%s Failed to create sysfs\n", __func__);
	}

	return ret;
}

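/*
 * hmm_cleanup() - tear down everything hmm_init() set up. Safe to call
 * when hmm_init() never ran: dummy_ptr still holds the mmgr_EXCEPTION
 * sentinel in that case and we return early.
 */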
void hmm_cleanup(void)
{
	if (dummy_ptr == mmgr_EXCEPTION)
		return;

	sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	/* reset the sentinel so that a repeated cleanup is a no-op */
	dummy_ptr = mmgr_EXCEPTION;

	hmm_bo_device_exit(&bo_device);
	hmm_initialized = false;
}

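/*
 * __hmm_alloc() - common allocation path. Allocation is a three step
 * process: reserve a chunk of ISP virtual address space
 * (hmm_bo_alloc()), back it with pages of the requested @type
 * (hmm_bo_alloc_pages()), then program the ISP MMU so the virtual
 * range points at those pages (hmm_bo_bind()). Each error label below
 * unwinds the steps completed so far.
 */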
static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
			      const void __user *userptr)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/*
	 * Check if we are initialized. In the ideal world we wouldn't need
	 * this, but we can tackle it once the driver is a lot cleaner.
	 */
	if (!hmm_initialized)
		hmm_init();

	/* Get the page number from the size */
	pgnr = size_to_pgnr_ceil(bytes);

	/* Buffer object structure init */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for memory */
	ret = hmm_bo_alloc_pages(bo, type, userptr);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Combine the virtual address and pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	dev_dbg(atomisp_dev,
		"%s: pages: 0x%08x (%zu bytes), type: %d, user ptr %p\n",
		__func__, bo->start, bytes, type, userptr);

	return bo->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_unref(bo);
create_bo_err:
	return 0;
}

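/*
 * hmm_alloc() - allocate @bytes of ISP memory backed by freshly
 * allocated pages. Returns the ISP virtual address of the buffer, or
 * 0 on failure. A minimal usage sketch (illustrative only):
 *
 *	ia_css_ptr addr = hmm_alloc(4096);
 *
 *	if (addr) {
 *		hmm_set(addr, 0, 4096);
 *		hmm_free(addr);
 *	}
 */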
ia_css_ptr hmm_alloc(size_t bytes)
{
	return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL);
}

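/*
 * hmm_create_from_userdata() - like hmm_alloc(), but the buffer is
 * backed by the caller-supplied user-space range starting at @userptr
 * rather than by newly allocated pages.
 */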
ia_css_ptr hmm_create_from_userdata(size_t bytes, const void __user *userptr)
{
	return __hmm_alloc(bytes, HMM_BO_USER, userptr);
}

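/*
 * hmm_free() - release a buffer obtained from hmm_alloc() or
 * hmm_create_from_userdata(): unbind the ISP MMU mapping, free the
 * backing pages and drop the buffer object reference. @virt must be
 * the exact start address that the allocator returned.
 */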
void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt);

	if (WARN_ON(virt == mmgr_EXCEPTION))
		return;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_unref(bo);
}

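/*
 * hmm_check_bo() - sanity-check a buffer object found by an address
 * lookup: it must exist, have backing pages and have ISP address space
 * allocated. Returns 0 if usable, -EINVAL otherwise.
 */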
static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no pages allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Slow-path read: copy @bytes from ISP address @virt into @data one
 * page at a time via kmap_local_page(), flushing each chunk from the
 * CPU cache afterwards. A NULL @data performs the flush only.
 */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for the next iteration */

		if (des) {
			memcpy(des, src, len);
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap_local(src);
	}

	return 0;
}

/*
 * Fast-path read: if the buffer has (or can get) a kernel vmap, copy
 * all @bytes in one go and flush; otherwise fall back to the
 * page-by-page kmap variant above. A NULL @data performs the flush
 * only.
 */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		/* hmm_flush() passes a NULL @data: flush without copying */
		if (data)
			memcpy(data, src, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);

		vptr = vptr + (virt - bo->start);
		if (data)
			memcpy(data, vptr, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read @bytes from ISP memory at @virt into the kernel buffer @data */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!virt) {
		dev_warn(atomisp_dev, "%s: address is NULL\n", __func__);
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev, "%s: data is a NULL argument\n", __func__);
		return -EINVAL;
	}

	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

/* Write @bytes from the kernel buffer @data to ISP memory at @virt */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	void *vptr;
	int ret;

	if (!virt) {
		dev_warn(atomisp_dev, "%s: address is NULL\n", __func__);
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev, "%s: data is a NULL argument\n", __func__);
		return -EINVAL;
	}

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memcpy(dst, data, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		/* do not fall through to the kmap loop and copy twice */
		return 0;
	}

	vptr = hmm_bo_vmap(bo, true);
	if (vptr) {
		vptr = vptr + (virt - bo->start);
		memcpy(vptr, data, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
		return 0;
	}

	/* No vmap available: copy page by page via kmap_local_page() */
	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		/* kmap_local_page() cannot fail, so no NULL check is needed */
		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memcpy(des, src, len);

		src += len;

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	void *vptr;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memset(dst, c, bytes);

		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		/* do not fall through to the kmap loop and fill twice */
		return 0;
	}

	vptr = hmm_bo_vmap(bo, true);
	if (vptr) {
		vptr = vptr + (virt - bo->start);
		memset(vptr, c, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
		return 0;
	}

	/* No vmap available: fill page by page via kmap_local_page() */
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* Convert an ISP virtual address to a physical address */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

	return page_to_phys(bo->pages[idx]) + offset;
}

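/*
 * hmm_mmap() - map a whole ISP buffer into a user-space VMA. @virt
 * must be the exact start address of a buffer; mapping at an offset
 * into a buffer is not supported.
 */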
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA (kernel) virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (!ptr)
		return NULL;

	return ptr + (virt - bo->start);
}

/* Flush memory that was mapped as cached through hmm_vmap() */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

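/*
 * hmm_vunmap() - undo a kernel mapping set up by hmm_vmap(). A typical
 * cached-mapping life cycle (illustrative only):
 *
 *	void *ptr = hmm_vmap(addr, true);
 *
 *	if (ptr) {
 *		... CPU accesses through ptr ...
 *		hmm_flush_vmap(addr);
 *		hmm_vunmap(addr);
 *	}
 */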
void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}