# SPDX-License-Identifier: GPL-2.0-only
menu "Xen driver support"
	depends on XEN

config XEN_BALLOON
	bool "Xen memory balloon driver"
	default y
	help
	  The balloon driver allows the Xen domain to request more memory from
	  the system to expand the domain's memory allocation, or alternatively
	  return unneeded memory to the system.

config XEN_BALLOON_MEMORY_HOTPLUG
	bool "Memory hotplug support for Xen balloon driver"
	depends on XEN_BALLOON && MEMORY_HOTPLUG
	default y
	help
	  Memory hotplug support for the Xen balloon driver allows expanding
	  the memory available to the system above the limit declared at
	  system startup. It is very useful on critical systems which
	  require long uptimes without rebooting.

	  It is also very useful for non-PV domains to obtain unpopulated
	  physical memory ranges to use in order to map foreign memory or
	  grants.

	  Memory can be hotplugged in the following steps:

	    1) target domain: ensure that the memory auto online policy is
	       in effect by checking the
	       /sys/devices/system/memory/auto_online_blocks file (should
	       be 'online'),

	    2) control domain: xl mem-max <target-domain> <maxmem>
	       where <maxmem> is >= requested memory size,

	    3) control domain: xl mem-set <target-domain> <memory>
	       where <memory> is the requested memory size; alternatively,
	       memory can be added by writing the proper value to
	       /sys/devices/system/xen_memory/xen_memory0/target or
	       /sys/devices/system/xen_memory/xen_memory0/target_kb on the
	       target domain, as in the example below.
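
	  For example, the following commands (run in the target domain)
	  enable memory auto onlining and then request growing the domain
	  to 8 GiB, an arbitrary example size, via the target_kb file,
	  which takes a value in KiB:

		echo online > /sys/devices/system/memory/auto_online_blocks
		echo $((8 * 1024 * 1024)) > \
		  /sys/devices/system/xen_memory/xen_memory0/target_kb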

	  Alternatively, if memory auto onlining was not requested at step 1,
	  the newly added memory can be manually onlined in the target domain
	  by doing the following:

		for i in /sys/devices/system/memory/memory*/state; do \
		  [ "$(cat "$i")" = offline ] && echo online > "$i"; done

	  or by adding the following line to udev rules:

	  SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"

config XEN_MEMORY_HOTPLUG_LIMIT
	int "Hotplugged memory limit (in GiB) for a PV guest"
	default 512
	depends on XEN_HAVE_PVMMU
	depends on MEMORY_HOTPLUG
	help
	  Maximum amount of memory (in GiB) that a PV guest can be
	  expanded to when using memory hotplug.

	  A PV guest can have more memory than this limit if it is
	  started with a larger maximum.

	  This value is used to allocate enough space in internal
	  tables needed for physical memory administration.

config XEN_SCRUB_PAGES_DEFAULT
	bool "Scrub pages before returning them to system by default"
	depends on XEN_BALLOON
	default y
	help
	  Scrub pages before returning them to the system for reuse by
	  other domains.  This makes sure that any confidential data
	  is not accidentally visible to other domains.  It is more
	  secure, but slightly less efficient. This behavior can be
	  controlled with the xen_scrub_pages=0 boot parameter and via
	  /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
	  This option only sets the default value.
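
	  For example, scrubbing could be disabled at runtime with a
	  command like:

		echo 0 > /sys/devices/system/xen_memory/xen_memory0/scrub_pages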

	  If in doubt, say yes.

config XEN_DEV_EVTCHN
	tristate "Xen /dev/xen/evtchn device"
	default y
	help
	  The evtchn driver allows a userspace process to trigger event
	  channels and to receive notification of an event channel
	  firing.
	  If in doubt, say yes.

config XEN_BACKEND
	bool "Backend driver support"
	default XEN_DOM0
	help
	  Support for backend device drivers that provide I/O services
	  to other virtual machines.

config XENFS
	tristate "Xen filesystem"
	select XEN_PRIVCMD
	default y
	help
	  The xen filesystem provides a way for domains to share
	  information with each other and with the hypervisor.
	  For example, by reading and writing the "xenbus" file, guests
	  may pass arbitrary information to the initial domain.
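
	  The filesystem is conventionally mounted on /proc/xen, for
	  example with:

		mount -t xenfs xenfs /proc/xen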

	  If in doubt, say yes.

config XEN_COMPAT_XENFS
	bool "Create compatibility mount point /proc/xen"
	depends on XENFS
	default y
	help
	  The old xenstore userspace tools expect to find "xenbus"
	  under /proc/xen, but "xenbus" is now found at the root of the
	  xenfs filesystem.  Selecting this causes the kernel to create
	  the compatibility mount point /proc/xen if it is running on
	  a Xen platform.
	  If in doubt, say yes.

config XEN_SYS_HYPERVISOR
	bool "Create xen entries under /sys/hypervisor"
	depends on SYSFS
	select SYS_HYPERVISOR
	default y
	help
	  Create entries under /sys/hypervisor describing the Xen
	  hypervisor environment.  When running native or in another
	  virtual environment, /sys/hypervisor will still be present,
	  but will have no Xen contents.

config XEN_XENBUS_FRONTEND
	tristate

config XEN_GNTDEV
	tristate "userspace grant access device driver"
	depends on XEN
	default m
	select MMU_NOTIFIER
	help
	  Allows userspace processes to map and use grant references
	  from other domains.

config XEN_GNTDEV_DMABUF
	bool "Add support for dma-buf grant access device driver extension"
	depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC
	select DMA_SHARED_BUFFER
	help
	  Allows userspace processes and kernel modules to use the Xen-backed
	  dma-buf implementation. With this extension, grant references to
	  the pages of an imported dma-buf can be exported for use by another
	  domain, and grant references coming from a foreign domain can be
	  converted into a local dma-buf for local export.

config XEN_GRANT_DEV_ALLOC
	tristate "User-space grant reference allocator driver"
	depends on XEN
	default m
	help
	  Allows userspace processes to create pages with access granted
	  to other domains. This can be used to implement frontend drivers
	  or as part of an inter-domain shared memory channel.

config XEN_GRANT_DMA_ALLOC
	bool "Allow allocating DMA capable buffers with grant reference module"
	depends on XEN && HAS_DMA
	help
	  Extends the grant table module API to allow allocating DMA capable
	  buffers and mapping foreign grant references on top of them.
	  The resulting buffer is similar to one allocated by the balloon
	  driver in that a proper memory reservation is made
	  ({increase|decrease}_reservation) and VA mappings are updated if
	  needed.
	  This is useful for sharing foreign buffers with HW drivers which
	  cannot work with scattered buffers provided by the balloon driver,
	  but require DMAable memory instead.

config SWIOTLB_XEN
	def_bool y
	depends on XEN_PV || ARM || ARM64
	select DMA_OPS
	select SWIOTLB

config XEN_PCI_STUB
	bool

config XEN_PCIDEV_STUB
	tristate "Xen PCI-device stub driver"
	depends on PCI && !X86 && XEN
	depends on XEN_BACKEND
	select XEN_PCI_STUB
	default m
	help
	  The PCI device stub driver provides a limited version of the PCI
	  device backend driver without para-virtualized support for guests.
	  If you select this to be a module, you will need to make sure no
	  other driver has bound to the device(s) you want to make visible to
	  other guests.

	  The "hide" parameter (only applicable if the driver is compiled
	  into the kernel) allows you to bind PCI devices to this module
	  instead of their default drivers. The argument is a list of PCI
	  BDFs, for example:

		xen-pciback.hide=(03:00.0)(04:00.0)

	  If in doubt, say m.

config XEN_PCIDEV_BACKEND
	tristate "Xen PCI-device backend driver"
	depends on PCI && X86 && XEN
	depends on XEN_BACKEND
	select XEN_PCI_STUB
	default m
	help
	  The PCI device backend driver allows the kernel to export arbitrary
	  PCI devices to other guests. If you select this to be a module, you
	  will need to make sure no other driver has bound to the device(s)
	  you want to make visible to other guests.

	  The "passthrough" parameter allows you to specify how you want the
	  PCI devices to appear in the guest. You can choose the default (0),
	  where the PCI topology starts at 00.00.0, or (1) for passthrough,
	  if you want the PCI device topology to appear the same as in the
	  host.
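
	  For example, a built-in backend could be put into passthrough
	  mode with the kernel command line option:

		xen-pciback.passthrough=1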

	  The "hide" parameter (only applicable if the backend driver is
	  compiled into the kernel) allows you to bind PCI devices to this
	  module instead of their default drivers. The argument is a list of
	  PCI BDFs, for example:

		xen-pciback.hide=(03:00.0)(04:00.0)

	  If in doubt, say m.

config XEN_PVCALLS_FRONTEND
	tristate "XEN PV Calls frontend driver"
	depends on INET && XEN
	select XEN_XENBUS_FRONTEND
	help
	  Experimental frontend for the Xen PV Calls protocol
	  (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
	  sends a small set of POSIX calls to the backend, which
	  implements them.

config XEN_PVCALLS_BACKEND
	tristate "XEN PV Calls backend driver"
	depends on INET && XEN && XEN_BACKEND
	help
	  Experimental backend for the Xen PV Calls protocol
	  (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
	  allows PV Calls frontends to send POSIX calls to the backend,
	  which implements them.

	  If in doubt, say n.

config XEN_SCSI_BACKEND
	tristate "XEN SCSI backend driver"
	depends on XEN && XEN_BACKEND && TARGET_CORE
	help
	  The SCSI backend driver allows the kernel to export its SCSI
	  devices to other guests via a high-performance shared-memory
	  interface. It is only needed on systems running as Xen driver
	  domains (e.g. Dom0) and only if guests need generic access to
	  SCSI devices.

config XEN_PRIVCMD
	tristate "Xen hypercall passthrough driver"
	depends on XEN
	default m
	help
	  The hypercall passthrough driver allows privileged user programs to
	  perform Xen hypercalls. This driver is normally required for systems
	  running as Dom0 to perform privileged operations, but in some
	  disaggregated Xen setups this driver might be needed for other
	  domains, too.

config XEN_PRIVCMD_IRQFD
	bool "Xen irqfd support"
	depends on XEN_PRIVCMD && XEN_VIRTIO && EVENTFD
	help
	  Using the irqfd mechanism, a virtio backend running in a daemon can
	  speed up interrupt injection into a guest.

config XEN_ACPI_PROCESSOR
	tristate "Xen ACPI processor"
	depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
	default m
	help
	  This ACPI processor driver uploads Power Management information to
	  the Xen hypervisor.

	  To do that, the driver parses the Power Management data and uploads
	  said information to the Xen hypervisor. The Xen hypervisor can then
	  select the proper Cx and Px states. The driver also registers itself
	  as the SMM so that other drivers (such as the ACPI cpufreq scaling
	  driver) will not load.

	  To compile this driver as a module, choose M here: the module will
	  be called xen_acpi_processor. If you do not know what to choose,
	  select M here. If the CPUFREQ drivers are built in, select Y here.

config XEN_MCE_LOG
	bool "Xen platform mcelog"
	depends on XEN_PV_DOM0 && X86_MCE
	help
	  Allow the kernel to fetch MCE errors from the Xen platform and
	  convert them into Linux mcelog format for mcelog tools.

config XEN_HAVE_PVMMU
	bool

config XEN_EFI
	def_bool y
	depends on (ARM || ARM64 || X86_64) && EFI

config XEN_AUTO_XLATE
	def_bool y
	depends on ARM || ARM64 || XEN_PVHVM
	help
	  Support for auto-translated physmap guests.

config XEN_ACPI
	def_bool y
	depends on X86 && ACPI

config XEN_SYMS
	bool "Xen symbols"
	depends on X86 && XEN_DOM0 && XENFS
	default y if KALLSYMS
	help
	  Exports hypervisor symbols (along with their types and addresses)
	  via the /proc/xen/xensyms file, similar to /proc/kallsyms.

config XEN_HAVE_VPMU
	bool

config XEN_FRONT_PGDIR_SHBUF
	tristate

config XEN_UNPOPULATED_ALLOC
	bool "Use unpopulated memory ranges for guest mappings"
	depends on ZONE_DEVICE
	default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
	help
	  Use unpopulated memory ranges in order to create mappings for guest
	  memory regions, including grant maps and foreign pages. This avoids
	  having to balloon out RAM regions in order to obtain physical memory
	  space to create such mappings.

config XEN_GRANT_DMA_IOMMU
	bool
	select IOMMU_API

config XEN_GRANT_DMA_OPS
	bool
	select DMA_OPS

config XEN_VIRTIO
	bool "Xen virtio support"
	depends on VIRTIO
	select XEN_GRANT_DMA_OPS
	select XEN_GRANT_DMA_IOMMU if OF
	help
	  Enable virtio support for running as a Xen guest. Depending on the
	  guest type, this will require special support on the backend side
	  (qemu or kernel, depending on the virtio device types used).

	  If in doubt, say n.

config XEN_VIRTIO_FORCE_GRANT
	bool "Require Xen virtio support to use grants"
	depends on XEN_VIRTIO
	help
	  Require virtio for Xen guests to use grant mappings.
	  This avoids the need to give the backend the right to map all
	  of the guest memory, but it needs support on the backend side
	  (e.g. qemu or kernel, depending on the virtio device types used).

endmenu
