/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __LINUX_ND_H__
#define __LINUX_ND_H__
#include <linux/fs.h>
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>
#include <linux/perf_event.h>

enum nvdimm_event {
	NVDIMM_REVALIDATE_POISON,
	NVDIMM_REVALIDATE_REGION,
};

enum nvdimm_claim_class {
	NVDIMM_CCLASS_NONE,
	NVDIMM_CCLASS_BTT,
	NVDIMM_CCLASS_BTT2,
	NVDIMM_CCLASS_PFN,
	NVDIMM_CCLASS_DAX,
	NVDIMM_CCLASS_UNKNOWN,
};

#define NVDIMM_EVENT_VAR(_id)  event_attr_##_id
#define NVDIMM_EVENT_PTR(_id)  (&event_attr_##_id.attr.attr)

#define NVDIMM_EVENT_ATTR(_name, _id)				\
	PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id,	\
			nvdimm_events_sysfs_show)

/* Event attribute array index */
#define NVDIMM_PMU_FORMAT_ATTR	0
#define NVDIMM_PMU_EVENT_ATTR	1
#define NVDIMM_PMU_CPUMASK_ATTR	2
#define NVDIMM_PMU_NULL_ATTR	3
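
/*
 * Illustrative sketch (assumption, not part of this header): a hypothetical
 * nvdimm PMU driver can declare its counters with NVDIMM_EVENT_ATTR() and
 * place the resulting groups at the indexes above in its pmu attr_groups
 * array.  All foo_* names and event codes below are placeholders; the format
 * and cpumask groups are assumed to be defined elsewhere by the driver.
 *
 *	enum { FOO_CTL_RES_CNT = 0x1, FOO_MEDIA_READS = 0x2 };
 *
 *	NVDIMM_EVENT_ATTR(ctl_res_cnt, FOO_CTL_RES_CNT);
 *	NVDIMM_EVENT_ATTR(media_reads, FOO_MEDIA_READS);
 *
 *	static struct attribute *foo_events_attr[] = {
 *		NVDIMM_EVENT_PTR(FOO_CTL_RES_CNT),
 *		NVDIMM_EVENT_PTR(FOO_MEDIA_READS),
 *		NULL,
 *	};
 *
 *	static const struct attribute_group foo_events_group = {
 *		.name = "events",
 *		.attrs = foo_events_attr,
 *	};
 *
 *	static const struct attribute_group *foo_attr_groups[] = {
 *		[NVDIMM_PMU_FORMAT_ATTR] = &foo_format_group,
 *		[NVDIMM_PMU_EVENT_ATTR] = &foo_events_group,
 *		[NVDIMM_PMU_CPUMASK_ATTR] = &foo_cpumask_group,
 *		[NVDIMM_PMU_NULL_ATTR] = NULL,
 *	};
 */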

/**
 * struct nvdimm_pmu - data structure for nvdimm perf driver
 * @pmu: pmu data structure for nvdimm performance stats.
 * @dev: nvdimm device pointer.
 * @cpu: designated cpu for counter access.
 * @node: node for cpu hotplug notifier link.
 * @cpuhp_state: state for cpu hotplug notification.
 * @arch_cpumask: cpumask to get designated cpu for counter access.
 */
struct nvdimm_pmu {
	struct pmu pmu;
	struct device *dev;
	int cpu;
	struct hlist_node node;
	enum cpuhp_state cpuhp_state;
	/* cpumask provided by arch/platform specific code */
	struct cpumask arch_cpumask;
};

struct platform_device;

#ifdef CONFIG_PERF_EVENTS
extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
					struct device_attribute *attr,
					char *page);

int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);

#else
static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev)
{
	return -ENXIO;
}

static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { }
#endif

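/*
 * Illustrative sketch (assumption, not part of this header): a hypothetical
 * platform driver fills in the embedded struct pmu (callbacks, attr_groups,
 * capabilities), optionally seeds @arch_cpumask, and then hands the
 * structure to register_nvdimm_pmu().  All foo_* names are placeholders for
 * driver-provided code.
 *
 *	static int foo_register_pmu(struct platform_device *pdev)
 *	{
 *		struct nvdimm_pmu *nd_pmu;
 *
 *		nd_pmu = devm_kzalloc(&pdev->dev, sizeof(*nd_pmu), GFP_KERNEL);
 *		if (!nd_pmu)
 *			return -ENOMEM;
 *
 *		nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
 *		nd_pmu->pmu.attr_groups = foo_attr_groups;
 *		nd_pmu->pmu.event_init = foo_pmu_event_init;
 *		nd_pmu->pmu.add = foo_pmu_add;
 *		nd_pmu->pmu.del = foo_pmu_del;
 *		nd_pmu->pmu.read = foo_pmu_read;
 *		nd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
 *					   PERF_PMU_CAP_NO_EXCLUDE;
 *
 *		return register_nvdimm_pmu(nd_pmu, pdev);
 *	}
 */
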
struct nd_device_driver {
	struct device_driver drv;
	unsigned long type;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	void (*shutdown)(struct device *dev);
	void (*notify)(struct device *dev, enum nvdimm_event event);
};

static inline struct nd_device_driver *to_nd_device_driver(
		struct device_driver *drv)
{
	return container_of(drv, struct nd_device_driver, drv);
}

/**
 * struct nd_namespace_common - core infrastructure of a namespace
 * @force_raw: ignore other personalities for the namespace (e.g. btt)
 * @dev: device model node
 * @claim: when set, another personality has taken ownership of the namespace
 * @claim_class: restrict claim type to a given class
 * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
 */
struct nd_namespace_common {
	int force_raw;
	struct device dev;
	struct device *claim;
	enum nvdimm_claim_class claim_class;
	int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
			void *buf, size_t size, int rw, unsigned long flags);
};

static inline struct nd_namespace_common *to_ndns(struct device *dev)
{
	return container_of(dev, struct nd_namespace_common, dev);
}

/**
 * struct nd_namespace_io - device representation of a persistent memory range
 * @common: core namespace fields; the namespace device created by the nd region driver
 * @res: struct resource conversion of a NFIT SPA table
 * @size: cached resource_size(@res) for fast path size checks
 * @addr: virtual address to access the namespace range
 * @bb: badblocks list for the namespace range
 */
struct nd_namespace_io {
	struct nd_namespace_common common;
	struct resource res;
	resource_size_t size;
	void *addr;
	struct badblocks bb;
};

/**
 * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
 * @nsio: device and system physical address range to drive
 * @lbasize: logical sector size for the namespace in block-device-mode
 * @alt_name: namespace name supplied in the dimm label
 * @uuid: namespace uuid supplied in the dimm label
 * @id: ida allocated id
 */
struct nd_namespace_pmem {
	struct nd_namespace_io nsio;
	unsigned long lbasize;
	char *alt_name;
	uuid_t *uuid;
	int id;
};

static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
	return container_of(dev, struct nd_namespace_io, common.dev);
}

static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	return container_of(nsio, struct nd_namespace_pmem, nsio);
}
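
/*
 * Illustrative sketch (assumption, not part of this header): an nd bus
 * driver's probe routine typically converts the generic struct device it is
 * handed into the namespace type it drives via the helpers above.  foo_probe
 * is a placeholder name.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
 *		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 *
 *		dev_dbg(dev, "size: %llu lbasize: %lu\n",
 *			(unsigned long long)nsio->size, nspm->lbasize);
 *		return 0;
 *	}
 */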

/**
 * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
 * @ndns: device to read
 * @offset: namespace-relative starting offset
 * @buf: buffer to fill
 * @size: transfer length
 * @flags: transfer flags, passed through to the namespace's rw_bytes() op
 *
 * @buf is up-to-date upon return from this routine.
 */
static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, READ, flags);
}

/**
 * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
 * @ndns: device to write
 * @offset: namespace-relative starting offset
 * @buf: buffer to drain
 * @size: transfer length
 * @flags: transfer flags, passed through to the namespace's rw_bytes() op
 *
 * NVDIMM namespace disks do not implement sectors internally.  Depending on
 * the @ndns, the contents of @buf may be in cpu cache, platform buffers,
 * or on backing memory media upon return from this routine.  Flushing
 * to media is handled internal to the @ndns driver, if at all.
 */
static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags);
}
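
/*
 * Illustrative sketch (assumption, not part of this header): a namespace
 * "personality" such as BTT or PFN accesses its on-media metadata through
 * these helpers.  struct foo_info, the 4K offset, and the foo_* functions
 * are placeholders, not an actual on-media layout.
 *
 *	static int foo_read_info(struct nd_namespace_common *ndns,
 *			struct foo_info *info)
 *	{
 *		int rc;
 *
 *		rc = nvdimm_read_bytes(ndns, SZ_4K, info, sizeof(*info), 0);
 *		if (rc)
 *			return rc;
 *
 *		if (!foo_info_valid(info))
 *			return -ENODEV;
 *
 *		return 0;
 *	}
 */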

#define MODULE_ALIAS_ND_DEVICE(type) \
	MODULE_ALIAS("nd:t" __stringify(type) "*")
#define ND_DEVICE_MODALIAS_FMT "nd:t%d"

struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
		struct module *module, const char *mod_name);
static inline void nd_driver_unregister(struct nd_device_driver *drv)
{
	driver_unregister(&drv->drv);
}
#define nd_driver_register(driver) \
	__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define module_nd_driver(driver) \
	module_driver(driver, nd_driver_register, nd_driver_unregister)
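
/*
 * Illustrative sketch (assumption, not part of this header): a minimal nd bus
 * driver declares which device types it binds to, provides probe/remove, and
 * registers itself with module_nd_driver().  The foo_* names are placeholders;
 * the ND_DRIVER_ and ND_DEVICE_ constants come from <linux/ndctl.h>.
 *
 *	static struct nd_device_driver foo_driver = {
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.drv = {
 *			.name = "foo",
 *		},
 *		.type = ND_DRIVER_NAMESPACE_PMEM,
 *	};
 *	module_nd_driver(foo_driver);
 *	MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
 */
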
#endif /* __LINUX_ND_H__ */