/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * ISP MMU driver for classic two-level page tables
 */
#ifndef __ISP_MMU_H__
#define __ISP_MMU_H__

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/*
 * Do not change these values: the ISP page size must be the same as
 * the kernel's page size.
 */
#define ISP_PAGE_OFFSET		12
#define ISP_PAGE_SIZE		BIT(ISP_PAGE_OFFSET)
#define ISP_PAGE_MASK		(~(phys_addr_t)(ISP_PAGE_SIZE - 1))

#define ISP_L1PT_OFFSET		22
#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_OFFSET) - 1))

#define ISP_L2PT_OFFSET		12
#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES		1024
#define ISP_L2PT_PTES		1024

#define ISP_PTR_TO_L1_IDX(x)	(((x) & ISP_L1PT_MASK) >> ISP_L1PT_OFFSET)

#define ISP_PTR_TO_L2_IDX(x)	(((x) & ISP_L2PT_MASK) >> ISP_L2PT_OFFSET)

#define ISP_PAGE_ALIGN(x)	(((x) + (ISP_PAGE_SIZE - 1)) & ISP_PAGE_MASK)

/* Reassemble an ISP virtual address from its L1/L2 indices and page offset. */
#define ISP_PT_TO_VIRT(l1_idx, l2_idx, offset)	\
	(((l1_idx) << ISP_L1PT_OFFSET) |	\
	 ((l2_idx) << ISP_L2PT_OFFSET) |	\
	 (offset))

#define pgnr_to_size(pgnr)		((pgnr) << ISP_PAGE_OFFSET)
#define size_to_pgnr_ceil(size)		(((size) + (1 << ISP_PAGE_OFFSET) - 1) \
					 >> ISP_PAGE_OFFSET)
#define size_to_pgnr_bottom(size)	((size) >> ISP_PAGE_OFFSET)
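
/*
 * Worked example of the address layout above (illustrative values only,
 * not taken from the driver): for the 32-bit ISP virtual address
 * 0x12345678,
 *
 *	ISP_PTR_TO_L1_IDX(0x12345678) == 0x048	(bits 31..22)
 *	ISP_PTR_TO_L2_IDX(0x12345678) == 0x345	(bits 21..12)
 *	page offset                   == 0x678	(bits 11..0)
 *
 * and for the size helpers, size_to_pgnr_ceil(0x1001) == 2 while
 * size_to_pgnr_bottom(0x1001) == 1, with pgnr_to_size(2) == 0x2000.
 */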

struct isp_mmu;

struct isp_mmu_client {
	/*
	 * Constant values.
	 *
	 * @name:
	 *	client driver name
	 * @pte_valid_mask:
	 *	must have exactly one bit set, i.e. the value must be a
	 *	power of 2; a PTE is considered valid when this bit is set.
	 */
	char *name;
	unsigned int pte_valid_mask;
	unsigned int null_pte;

	/*
	 * Get the page directory base address (physical address).
	 *
	 * Must be provided.
	 */
	unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);
	/*
	 * Callbacks to flush the TLB.
	 *
	 * tlb_flush_range flushes at least the TLB entries covering the
	 * address mappings from addr to addr + size.
	 *
	 * tlb_flush_all flushes all TLB entries.
	 *
	 * tlb_flush_all must be provided. If tlb_flush_range is not
	 * provided, it is set to tlb_flush_all by default.
	 */
	void (*tlb_flush_range)(struct isp_mmu *mmu,
				unsigned int addr, unsigned int size);
	void (*tlb_flush_all)(struct isp_mmu *mmu);
	unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
				    phys_addr_t phys);
	phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
				   unsigned int pte);
};
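
/*
 * Example client definition: a minimal sketch of how the fields above
 * are typically filled in. The sh_* names below are hypothetical and
 * not part of this header.
 *
 *	static struct isp_mmu_client sh_mmu_client = {
 *		.name		 = "Example ISP MMU client",
 *		.pte_valid_mask	 = 0x1,
 *		.null_pte	 = 0,
 *		.get_pd_base	 = sh_get_pd_base,
 *		.tlb_flush_all	 = sh_tlb_flush_all,
 *		.tlb_flush_range = sh_tlb_flush_range,
 *		.phys_to_pte	 = sh_phys_to_pte,
 *		.pte_to_phys	 = sh_pte_to_phys,
 *	};
 */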

struct isp_mmu {
	struct isp_mmu_client *driver;
	unsigned int l1_pte;
	int l2_pgt_refcount[ISP_L1PT_PTES];
	phys_addr_t base_address;

	struct mutex pt_mutex;
};

/* flags for PDE and PTE */
#define ISP_PTE_VALID_MASK(mmu)	\
	((mmu)->driver->pte_valid_mask)

#define ISP_PTE_VALID(mmu, pte)	\
	((pte) & ISP_PTE_VALID_MASK(mmu))

#define NULL_PAGE		((phys_addr_t)(-1) & ISP_PAGE_MASK)
#define PAGE_VALID(page)	((page) != NULL_PAGE)
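
/*
 * Illustrative use of the helpers above (a sketch, not code taken from
 * this driver): translating a PTE back to a physical page and checking
 * the result.
 *
 *	phys_addr_t page;
 *
 *	if (!ISP_PTE_VALID(mmu, pte))
 *		return NULL_PAGE;
 *	page = mmu->driver->pte_to_phys(mmu, pte);
 *	if (!PAGE_VALID(page))
 *		return NULL_PAGE;
 *	return page;
 */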

/*
 * Initialize the MMU with a specific MMU client driver.
 */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver);
/*
 * Clean up everything related to the MMU.
 */
void isp_mmu_exit(struct isp_mmu *mmu);

/*
 * Set up/remove the address mapping between pgnr contiguous physical
 * pages and the ISP virtual address isp_virt.
 *
 * map/unmap are protected by a mutex, so the caller does not have to
 * do any locking/unlocking.
 *
 * map/unmap do not flush the TLB; the caller has to deal with that
 * itself.
 */
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr);

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr);

static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu)
{
	if (mmu->driver && mmu->driver->tlb_flush_all)
		mmu->driver->tlb_flush_all(mmu);
}

#define isp_mmu_flush_tlb	isp_mmu_flush_tlb_all

static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu,
					   unsigned int start, unsigned int size)
{
	if (mmu->driver && mmu->driver->tlb_flush_range)
		mmu->driver->tlb_flush_range(mmu, start, size);
}
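
/*
 * Typical usage: a minimal sketch, assuming a client named
 * my_mmu_client and hypothetical isp_virt/phys/pgnr values. This is
 * illustrative only, not code taken from the driver.
 *
 *	struct isp_mmu mmu;
 *	int ret;
 *
 *	ret = isp_mmu_init(&mmu, &my_mmu_client);
 *	if (ret)
 *		return ret;
 *
 *	ret = isp_mmu_map(&mmu, isp_virt, phys, pgnr);
 *	if (!ret)
 *		isp_mmu_flush_tlb_range(&mmu, isp_virt, pgnr_to_size(pgnr));
 *
 *	(use the mapping)
 *
 *	isp_mmu_unmap(&mmu, isp_virt, pgnr);
 *	isp_mmu_flush_tlb_range(&mmu, isp_virt, pgnr_to_size(pgnr));
 *	isp_mmu_exit(&mmu);
 */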

#endif /* __ISP_MMU_H__ */