/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_

#include <linux/mmzone.h>
#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @end_pfn: end of the base pfn range
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	unsigned long base_pfn;
	const unsigned long end_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};
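
/*
 * Illustrative sketch (not from the original header): the altmap carves
 * up the start of the mapping, roughly:
 *
 *	base_pfn                                              end_pfn
 *	|- reserve -|- free (memmap storage) -|- usable device pages -|
 *
 * @alloc and @align track how much of @free vmemmap_populate() has
 * consumed; vmem_altmap_offset() below reports @reserve + @free, i.e.
 * the number of pfns from @base_pfn that are not available for general
 * use.
 */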

/*
 * Specialize ZONE_DEVICE memory into multiple types, each of which has a
 * different usage.
 *
 * MEMORY_DEVICE_PRIVATE:
 * Device memory that is not directly addressable by the CPU: the CPU can
 * neither read nor write private memory. In this case, we do still have struct
 * pages backing the device memory. Doing so simplifies the implementation, but
 * it is important to remember that there are certain points at which the
 * struct page must be treated as an opaque object, rather than a "normal"
 * struct page.
 *
 * A more complete discussion of unaddressable memory may be found in
 * include/linux/hmm.h and Documentation/mm/hmm.rst.
 *
 * MEMORY_DEVICE_COHERENT:
 * Device memory that is cache coherent from both the device's and the CPU's
 * point of view. This is used on platforms that have an advanced system bus
 * (like CAPI or CXL). A driver can hotplug the device memory using ZONE_DEVICE
 * and with that memory type. Any page of a process can be migrated to such
 * memory. However, no one should be allowed to pin such memory so that it can
 * always be evicted.
 *
 * MEMORY_DEVICE_FS_DAX:
 * Host memory that has similar access semantics to System RAM, i.e. DMA
 * coherent and supporting page pinning. In support of coordinating page
 * pinning vs. other operations, MEMORY_DEVICE_FS_DAX arranges for a
 * wakeup event whenever a page is unpinned and becomes idle. This
 * wakeup is used to coordinate physical address space management (ex:
 * fs truncate/hole punch) vs. pinned pages (ex: device dma).
 *
 * MEMORY_DEVICE_GENERIC:
 * Host memory that has similar access semantics to System RAM, i.e. DMA
 * coherent and supporting page pinning. This is for example used by DAX
 * devices that expose memory using a character device.
 *
 * MEMORY_DEVICE_PCI_P2PDMA:
 * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
 * transactions.
 */
enum memory_type {
	/* 0 is reserved to catch uninitialized type fields */
	MEMORY_DEVICE_PRIVATE = 1,
	MEMORY_DEVICE_COHERENT,
	MEMORY_DEVICE_FS_DAX,
	MEMORY_DEVICE_GENERIC,
	MEMORY_DEVICE_PCI_P2PDMA,
};
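
/*
 * Example (illustrative, not from the original header): a driver exposing
 * CPU-unaddressable VRAM would tag its pagemap before registering it:
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->ops = &my_pgmap_ops;	/* hypothetical ops table */
 *
 * MEMORY_DEVICE_PRIVATE requires a migrate_to_ram() callback in the ops
 * table so that CPU faults on the unaddressable memory can be serviced.
 */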

struct dev_pagemap_ops {
	/*
	 * Called once the page refcount reaches 0. The reference count will be
	 * reset to one by the core code after the method is called to prepare
	 * for handing out the page again.
	 */
	void (*page_free)(struct page *page);

	/*
	 * Used for private (un-addressable) device memory only. Must migrate
	 * the page back to a CPU accessible page.
	 */
	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);

	/*
	 * Handle a memory failure that occurs on a range of pfns. Notify the
	 * processes that are using these pfns, and try to recover the data on
	 * them if necessary. The mf_flags is ultimately passed through the
	 * whole notification routine to the recovery function.
	 *
	 * When this is not implemented, or it returns -EOPNOTSUPP, the caller
	 * will fall back to a common handler called mf_generic_kill_procs().
	 */
	int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
			      unsigned long nr_pages, int mf_flags);
};
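
/*
 * Minimal sketch of an ops table for device-private memory (illustrative;
 * the callback names are hypothetical driver functions):
 *
 *	static const struct dev_pagemap_ops my_pgmap_ops = {
 *		.page_free	= my_page_free,
 *		.migrate_to_ram	= my_migrate_to_ram,
 *	};
 */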

#define PGMAP_ALTMAP_VALID	(1 << 0)

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @done: completion for @ref
 * @type: memory type: see MEMORY_* above
 * @flags: PGMAP_* flags to specify detailed behavior
 * @vmemmap_shift: structural definition of how the vmemmap page metadata
 *	is populated, specifically the metadata page order.
 *	A zero value (default) uses base pages as the vmemmap metadata
 *	representation. A bigger value will set up compound struct pages
 *	of the requested order value.
 * @ops: method table
 * @owner: an opaque pointer identifying the entity that manages this
 *	instance. Used by various helpers to make sure that no
 *	foreign ZONE_DEVICE memory is accessed.
 * @nr_range: number of ranges to be mapped
 * @range: range to be mapped when nr_range == 1
 * @ranges: array of ranges to be mapped when nr_range > 1
 */
struct dev_pagemap {
	struct vmem_altmap altmap;
	struct percpu_ref ref;
	struct completion done;
	enum memory_type type;
	unsigned int flags;
	unsigned long vmemmap_shift;
	const struct dev_pagemap_ops *ops;
	void *owner;
	int nr_range;
	union {
		struct range range;
		DECLARE_FLEX_ARRAY(struct range, ranges);
	};
};
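
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * header): map a single physical range and get struct pages for it:
 *
 *	pgmap->type = MEMORY_DEVICE_GENERIC;
 *	pgmap->nr_range = 1;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * On success, every pfn in the range has an initialized struct page whose
 * pgmap type is @type; the mapping is torn down automatically on device
 * removal.
 */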

static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
{
	return pgmap->ops && pgmap->ops->memory_failure;
}

static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
	if (pgmap->flags & PGMAP_ALTMAP_VALID)
		return &pgmap->altmap;
	return NULL;
}

static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
{
	return 1 << pgmap->vmemmap_shift;
}
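
/*
 * Example (illustrative): a pagemap created with
 * pgmap->vmemmap_shift = PMD_SHIFT - PAGE_SHIFT describes PMD-sized
 * compound pages, so pgmap_vmemmap_nr() returns the number of base pages
 * per compound page (512 with 4K base pages and 2M PMDs).
 */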

static inline bool is_device_private_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool folio_is_device_private(const struct folio *folio)
{
	return is_device_private_page(&folio->page);
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

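/*
 * Illustrative use of the predicates above, e.g. in a get_user_pages()
 * style path (hypothetical sketch; the gup_flags gating is simplified):
 *
 *	if (is_pci_p2pdma_page(page) && !(gup_flags & FOLL_PCI_P2PDMA))
 *		return -EOPNOTSUPP;
 */
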
static inline bool is_device_coherent_page(const struct page *page)
{
	return is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_COHERENT;
}

static inline bool folio_is_device_coherent(const struct folio *folio)
{
	return is_device_coherent_page(&folio->page);
}

#ifdef CONFIG_ZONE_DEVICE
void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
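
/*
 * Illustrative note: memremap_compat_align() reports the smallest
 * alignment that a dev_pagemap range is guaranteed to work with across
 * architectures and configurations, so a provider might validate a
 * candidate range with (hypothetical sketch):
 *
 *	if (!IS_ALIGNED(range.start, memremap_compat_align()))
 *		return -EINVAL;
 */
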
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; this requires callers to fall
	 * back to plain devm_memremap() based on config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline void devm_memunmap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
}

static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	return NULL;
}

static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	return false;
}

static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	return 0;
}

static inline void vmem_altmap_free(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
}

/* When memremap_pages() is disabled, all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
	return PAGE_SIZE;
}
#endif /* CONFIG_ZONE_DEVICE */

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(&pgmap->ref);
}
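
/*
 * Typical lookup/put pairing (illustrative sketch):
 *
 *	struct dev_pagemap *pgmap;
 *
 *	pgmap = get_dev_pagemap(pfn, NULL);
 *	if (pgmap) {
 *		... operate on the ZONE_DEVICE pfn ...
 *		put_dev_pagemap(pgmap);
 *	}
 *
 * get_dev_pagemap() takes a reference on success, which put_dev_pagemap()
 * drops.
 */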

#endif /* _LINUX_MEMREMAP_H_ */