// SPDX-License-Identifier: GPL-2.0
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Interval tree used to store the PAT memory type reservations.
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>

#include <asm/memtype.h>

#include "memtype.h"

/*
 * The memtype tree keeps track of the memory type for specific
 * physical memory areas. Without proper tracking, conflicting memory
 * types in different mappings can cause CPU cache corruption.
 *
 * The tree is an interval tree (augmented rbtree) ordered by the
 * starting address. It can contain multiple entries for different
 * regions which overlap. All such aliases have the same cache
 * attributes, as enforced by the PAT logic.
 *
 * memtype_lock protects the rbtree.
 */
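
/*
 * For reference, the tree node is struct memtype from memtype.h,
 * roughly:
 *
 *	struct memtype {
 *		u64			start;
 *		u64			end;
 *		u64			subtree_max_end;
 *		enum page_cache_mode	type;
 *		struct rb_node		rb;
 *	};
 *
 * Each node tracks one [start, end) physical range and its cache
 * mode; subtree_max_end and rb are the interval tree plumbing.
 */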

static inline u64 interval_start(struct memtype *entry)
{
	return entry->start;
}

static inline u64 interval_end(struct memtype *entry)
{
	return entry->end - 1;
}

INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
		     interval_start, interval_end,
		     static, interval)
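
/*
 * The INTERVAL_TREE_DEFINE() expansion above generates the helpers
 * used throughout this file: interval_insert(), interval_remove(),
 * interval_iter_first() and interval_iter_next(). The generated
 * iterators operate on *closed* intervals [start, last], which is
 * why interval_end() and every caller below pass 'end - 1'.
 */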

static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;

/*
 * How memtype_match() is asked to match an entry against [start, end):
 *
 * MEMTYPE_EXACT_MATCH: the entry covers exactly [start, end)
 * MEMTYPE_END_MATCH:   the entry ends at 'end' but starts before 'start'
 */
enum {
	MEMTYPE_EXACT_MATCH	= 0,
	MEMTYPE_END_MATCH	= 1
};

static struct memtype *memtype_match(u64 start, u64 end, int match_type)
{
	struct memtype *entry_match;

	entry_match = interval_iter_first(&memtype_rbroot, start, end-1);

	while (entry_match != NULL && entry_match->start < end) {
		if ((match_type == MEMTYPE_EXACT_MATCH) &&
		    (entry_match->start == start) && (entry_match->end == end))
			return entry_match;

		if ((match_type == MEMTYPE_END_MATCH) &&
		    (entry_match->start < start) && (entry_match->end == end))
			return entry_match;

		entry_match = interval_iter_next(entry_match, start, end-1);
	}

	return NULL; /* Returns NULL if there is no match */
}
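
/*
 * Worked example: with a single entry [0x1000, 0x3000) in the tree,
 * memtype_match(0x1000, 0x3000, MEMTYPE_EXACT_MATCH) returns it, and
 * memtype_match(0x2000, 0x3000, MEMTYPE_END_MATCH) returns it too
 * (the entry ends at 'end' but starts before 'start'). A query for
 * [0x1000, 0x2000) returns NULL with either match type.
 */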

static int memtype_check_conflict(u64 start, u64 end,
				  enum page_cache_mode reqtype,
				  enum page_cache_mode *newtype)
{
	struct memtype *entry_match;
	enum page_cache_mode found_type = reqtype;

	entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
	if (entry_match == NULL)
		goto success;

	if (entry_match->type != found_type && newtype == NULL)
		goto failure;

	dprintk("Overlap at 0x%Lx-0x%Lx\n", entry_match->start, entry_match->end);
	found_type = entry_match->type;

	entry_match = interval_iter_next(entry_match, start, end-1);
	while (entry_match) {
		if (entry_match->type != found_type)
			goto failure;

		entry_match = interval_iter_next(entry_match, start, end-1);
	}
success:
	if (newtype)
		*newtype = found_type;

	return 0;

failure:
	pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
		current->comm, current->pid, start, end,
		cattr_name(found_type), cattr_name(entry_match->type));

	return -EBUSY;
}
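
/*
 * Worked example: if [0x1000, 0x2000) is already tracked as WC and a
 * caller asks for WB on [0x1800, 0x2800) with a non-NULL 'newtype',
 * the request succeeds and *newtype is downgraded to the existing WC
 * type, provided no other overlapping entry disagrees. With
 * newtype == NULL, the first mismatching overlap fails the request
 * with -EBUSY instead.
 */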

int memtype_check_insert(struct memtype *entry_new, enum page_cache_mode *ret_type)
{
	int err = 0;

	err = memtype_check_conflict(entry_new->start, entry_new->end, entry_new->type, ret_type);
	if (err)
		return err;

	if (ret_type)
		entry_new->type = *ret_type;

	interval_insert(entry_new, &memtype_rbroot);
	return 0;
}
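
/*
 * Illustrative sketch (not compiled): how a caller along the lines of
 * memtype_reserve() in memtype.c is expected to drive
 * memtype_check_insert(). 'example_reserve' is a hypothetical name;
 * memtype_lock is the spinlock from memtype.c that serializes all
 * tree operations, and kzalloc() would need <linux/slab.h>.
 */
#if 0
static int example_reserve(u64 start, u64 end, enum page_cache_mode req,
			   enum page_cache_mode *ret)
{
	struct memtype *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int err;

	if (!entry)
		return -ENOMEM;

	entry->start	= start;
	entry->end	= end;
	entry->type	= req;

	/* Check for conflicts and insert under the tree lock: */
	spin_lock(&memtype_lock);
	err = memtype_check_insert(entry, ret);
	spin_unlock(&memtype_lock);

	if (err)
		kfree(entry);

	return err;
}
#endif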

struct memtype *memtype_erase(u64 start, u64 end)
{
	struct memtype *entry_old;

	/*
	 * Since the memtype_rbroot tree allows overlapping ranges,
	 * memtype_erase() checks with EXACT_MATCH first, i.e. free
	 * a whole node for the munmap case.  If no such entry is found,
	 * it then checks with END_MATCH, i.e. shrink the size of a node
	 * from the end for the mremap case.
	 */
	entry_old = memtype_match(start, end, MEMTYPE_EXACT_MATCH);
	if (!entry_old) {
		entry_old = memtype_match(start, end, MEMTYPE_END_MATCH);
		if (!entry_old)
			return ERR_PTR(-EINVAL);
	}

	if (entry_old->start == start) {
		/* munmap: erase this node */
		interval_remove(entry_old, &memtype_rbroot);
	} else {
		/* mremap: update the end value of this node */
		interval_remove(entry_old, &memtype_rbroot);
		entry_old->end = start;
		interval_insert(entry_old, &memtype_rbroot);

		return NULL;
	}

	return entry_old;
}
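
/*
 * Worked example: with an entry [0x1000, 0x3000) in the tree,
 * memtype_erase(0x1000, 0x3000) removes the node and returns it
 * (the munmap case), while memtype_erase(0x2000, 0x3000) shrinks
 * the node to [0x1000, 0x2000) in place and returns NULL (the
 * mremap case).
 */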

struct memtype *memtype_lookup(u64 addr)
{
	return interval_iter_first(&memtype_rbroot, addr, addr + PAGE_SIZE-1);
}
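
/*
 * Example: memtype_lookup(paddr) returns the first tracked entry
 * overlapping the page [paddr, paddr + PAGE_SIZE), or NULL if the
 * page is untracked; lookup_memtype() in memtype.c builds on this
 * to answer what cache mode a given physical address has.
 */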

/*
 * Debugging helper, copy the Nth entry of the tree into a
 * copy for printout. This allows us to print out the tree
 * via debugfs, without holding the memtype_lock too long:
 */
#ifdef CONFIG_DEBUG_FS
int memtype_copy_nth_element(struct memtype *entry_out, loff_t pos)
{
	struct memtype *entry_match;
	int i = 1;

	entry_match = interval_iter_first(&memtype_rbroot, 0, ULONG_MAX);

	while (entry_match && pos != i) {
		entry_match = interval_iter_next(entry_match, 0, ULONG_MAX);
		i++;
	}

	if (entry_match) { /* pos == i */
		*entry_out = *entry_match;
		return 0;
	} else {
		return 1;
	}
}
#endif
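
/*
 * A sketch of the expected caller pattern: the debugfs code in
 * memtype.c (memtype_get_idx() there) allocates a struct memtype,
 * takes memtype_lock, copies the pos-th entry out via the helper
 * above, and drops the lock before printing through seq_file.
 */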