/******************************************************************************
 * Talks to Xen Store to figure out what devices we have.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 * Copyright (C) 2005 Mike Wray, Hewlett-Packard
 * Copyright (C) 2005, 2006 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#define DPRINTK(fmt, args...)				\
	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
		 __func__, __LINE__, ##args)

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/xen-ops.h>
#include <xen/page.h>

#include <xen/hvm.h>

#include "xenbus.h"


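/* IRQ bound for late XenStore initialisation; freed again in xenbus_probe(). */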
static int xs_init_irq;
int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);

struct xenstore_domain_interface *xen_store_interface;
EXPORT_SYMBOL_GPL(xen_store_interface);

enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);

static unsigned long xen_store_gfn;

static BLOCKING_NOTIFIER_HEAD(xenstore_chain);

/* If something in array of ids matches this device, return it. */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
{
	for (; *arr->devicetype != '\0'; arr++) {
		if (!strcmp(arr->devicetype, dev->devicetype))
			return arr;
	}
	return NULL;
}

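/*
 * Bus ->match() callback: a driver can handle a device iff one of its ids
 * matches the device's devicetype.
 */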
int xenbus_match(struct device *_dev, struct device_driver *_drv)
{
	struct xenbus_driver *drv = to_xenbus_driver(_drv);

	if (!drv->ids)
		return 0;

	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
}
EXPORT_SYMBOL_GPL(xenbus_match);


static void free_otherend_details(struct xenbus_device *dev)
{
	kfree(dev->otherend);
	dev->otherend = NULL;
}


static void free_otherend_watch(struct xenbus_device *dev)
{
	if (dev->otherend_watch.node) {
		unregister_xenbus_watch(&dev->otherend_watch);
		kfree(dev->otherend_watch.node);
		dev->otherend_watch.node = NULL;
	}
}


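/*
 * Drop any stale watch and cached details, then re-read the other end's
 * domid and XenStore path via the driver's read_otherend_details() hook.
 */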
static int talk_to_otherend(struct xenbus_device *dev)
{
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);

	free_otherend_watch(dev);
	free_otherend_details(dev);

	return drv->read_otherend_details(dev);
}



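/*
 * Watch the other end's "state" node so that the bus' otherend_changed()
 * callback runs whenever the peer changes its XenBus state.
 */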
static int watch_otherend(struct xenbus_device *dev)
{
	struct xen_bus_type *bus =
		container_of(dev->dev.bus, struct xen_bus_type, bus);

	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
				    bus->otherend_will_handle,
				    bus->otherend_changed,
				    "%s/%s", dev->otherend, "state");
}


int xenbus_read_otherend_details(struct xenbus_device *xendev,
				 char *id_node, char *path_node)
{
	int err = xenbus_gather(XBT_NIL, xendev->nodename,
				id_node, "%i", &xendev->otherend_id,
				path_node, NULL, &xendev->otherend,
				NULL);
	if (err) {
		xenbus_dev_fatal(xendev, err,
				 "reading other end details from %s",
				 xendev->nodename);
		return err;
	}
	if (strlen(xendev->otherend) == 0 ||
	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
		xenbus_dev_fatal(xendev, -ENOENT,
				 "unable to read other end from %s.  "
				 "missing or inaccessible.",
				 xendev->nodename);
		free_otherend_details(xendev);
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);

void xenbus_otherend_changed(struct xenbus_watch *watch,
			     const char *path, const char *token,
			     int ignore_on_shutdown)
{
	struct xenbus_device *dev =
		container_of(watch, struct xenbus_device, otherend_watch);
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
	enum xenbus_state state;

	/* Protect us against watches firing on old details when the otherend
	   details change, say immediately after a resume. */
	if (!dev->otherend ||
	    strncmp(dev->otherend, path, strlen(dev->otherend))) {
		dev_dbg(&dev->dev, "Ignoring watch at %s\n", path);
		return;
	}

	state = xenbus_read_driver_state(dev->otherend);

	dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
		state, xenbus_strstate(state), dev->otherend_watch.node, path);

	/*
	 * Ignore xenbus transitions during shutdown. This prevents us doing
	 * work that can fail e.g., when the rootfs is gone.
	 */
	if (system_state > SYSTEM_RUNNING) {
		if (ignore_on_shutdown && (state == XenbusStateClosing))
			xenbus_frontend_closed(dev);
		return;
	}

	if (drv->otherend_changed)
		drv->otherend_changed(dev, state);
}
EXPORT_SYMBOL_GPL(xenbus_otherend_changed);

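/*
 * Generate a read-only sysfs attribute exposing one of the per-device
 * atomic event counters.
 */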
#define XENBUS_SHOW_STAT(name)						\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	struct xenbus_device *dev = to_xenbus_device(_dev);		\
									\
	return sprintf(buf, "%d\n", atomic_read(&dev->name));		\
}									\
static DEVICE_ATTR_RO(name)

XENBUS_SHOW_STAT(event_channels);
XENBUS_SHOW_STAT(events);
XENBUS_SHOW_STAT(spurious_events);
XENBUS_SHOW_STAT(jiffies_eoi_delayed);

static ssize_t spurious_threshold_show(struct device *_dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);

	return sprintf(buf, "%d\n", dev->spurious_threshold);
}

static ssize_t spurious_threshold_store(struct device *_dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;

	dev->spurious_threshold = val;

	return count;
}

static DEVICE_ATTR_RW(spurious_threshold);

static struct attribute *xenbus_attrs[] = {
	&dev_attr_event_channels.attr,
	&dev_attr_events.attr,
	&dev_attr_spurious_events.attr,
	&dev_attr_jiffies_eoi_delayed.attr,
	&dev_attr_spurious_threshold.attr,
	NULL
};

static const struct attribute_group xenbus_group = {
	.name = "xenbus",
	.attrs = xenbus_attrs,
};

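/*
 * Bus ->probe() callback: read the other end's details, call the driver's
 * probe hook under the reclaim semaphore, then start watching the other
 * end's state and publish the per-device statistics in sysfs.
 */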
int xenbus_dev_probe(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
	const struct xenbus_device_id *id;
	int err;

	DPRINTK("%s", dev->nodename);

	if (!drv->probe) {
		err = -ENODEV;
		goto fail;
	}

	id = match_device(drv->ids, dev);
	if (!id) {
		err = -ENODEV;
		goto fail;
	}

	err = talk_to_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
			 dev->nodename);
		return err;
	}

	if (!try_module_get(drv->driver.owner)) {
		dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
			 drv->driver.name);
		err = -ESRCH;
		goto fail;
	}

	down(&dev->reclaim_sem);
	err = drv->probe(dev, id);
	up(&dev->reclaim_sem);
	if (err)
		goto fail_put;

	err = watch_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
		       dev->nodename);
		return err;
	}

	dev->spurious_threshold = 1;
	if (sysfs_create_group(&dev->dev.kobj, &xenbus_group))
		dev_warn(&dev->dev, "sysfs_create_group on %s failed.\n",
			 dev->nodename);

	return 0;
fail_put:
	module_put(drv->driver.owner);
fail:
	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_dev_probe);

void xenbus_dev_remove(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);

	DPRINTK("%s", dev->nodename);

	sysfs_remove_group(&dev->dev.kobj, &xenbus_group);

	free_otherend_watch(dev);

	if (drv->remove) {
		down(&dev->reclaim_sem);
		drv->remove(dev);
		up(&dev->reclaim_sem);
	}

	module_put(drv->driver.owner);

	free_otherend_details(dev);

	/*
	 * If the toolstack has forced the device state to closing then set
	 * the state to closed now to allow it to be cleaned up.
	 * Similarly, if the driver does not support re-bind, set the state
	 * to closed as well.
	 */
	if (!drv->allow_rebind ||
	    xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
		xenbus_switch_state(dev, XenbusStateClosed);
}
EXPORT_SYMBOL_GPL(xenbus_dev_remove);

int xenbus_register_driver_common(struct xenbus_driver *drv,
				  struct xen_bus_type *bus,
				  struct module *owner, const char *mod_name)
{
	drv->driver.name = drv->name ? drv->name : drv->ids[0].devicetype;
	drv->driver.bus = &bus->bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_register_driver_common);

void xenbus_unregister_driver(struct xenbus_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_unregister_driver);

struct xb_find_info {
	struct xenbus_device *dev;
	const char *nodename;
};

static int cmp_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;

	if (!strcmp(xendev->nodename, info->nodename)) {
		info->dev = xendev;
		get_device(dev);
		return 1;
	}
	return 0;
}

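/*
 * Look up a xenbus device by its exact node name.  On success a reference
 * is taken which the caller must drop with put_device().
 */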
static struct xenbus_device *xenbus_device_find(const char *nodename,
						struct bus_type *bus)
{
	struct xb_find_info info = { .dev = NULL, .nodename = nodename };

	bus_for_each_dev(bus, NULL, &info, cmp_dev);
	return info.dev;
}

static int cleanup_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;
	int len = strlen(info->nodename);

	DPRINTK("%s", info->nodename);

	/* Match the info->nodename path, or any subdirectory of that path. */
	if (strncmp(xendev->nodename, info->nodename, len))
		return 0;

	/* If the node name is longer, ensure it really is a subdirectory. */
	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
		return 0;

	info->dev = xendev;
	get_device(dev);
	return 1;
}

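/* Unregister every device whose node lives at or below the given path. */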
static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
{
	struct xb_find_info info = { .nodename = path };

	do {
		info.dev = NULL;
		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
		if (info.dev) {
			device_unregister(&info.dev->dev);
			put_device(&info.dev->dev);
		}
	} while (info.dev);
}

static void xenbus_dev_release(struct device *dev)
{
	if (dev)
		kfree(to_xenbus_device(dev));
}

static ssize_t nodename_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
static DEVICE_ATTR_RO(nodename);

static ssize_t devtype_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(devtype);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s:%s\n", dev->bus->name,
		       to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t state_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n",
			xenbus_strstate(to_xenbus_device(dev)->state));
}
static DEVICE_ATTR_RO(state);

static struct attribute *xenbus_dev_attrs[] = {
	&dev_attr_nodename.attr,
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_state.attr,
	NULL,
};

static const struct attribute_group xenbus_dev_group = {
	.attrs = xenbus_dev_attrs,
};

const struct attribute_group *xenbus_dev_groups[] = {
	&xenbus_dev_group,
	NULL,
};
EXPORT_SYMBOL_GPL(xenbus_dev_groups);

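/*
 * Create and register a xenbus_device for the given XenStore node.  Only
 * nodes still in the Initialising state are treated as new devices.
 */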
int xenbus_probe_node(struct xen_bus_type *bus,
		      const char *type,
		      const char *nodename)
{
	char devname[XEN_BUS_ID_SIZE];
	int err;
	struct xenbus_device *xendev;
	size_t stringlen;
	char *tmpstring;

	enum xenbus_state state = xenbus_read_driver_state(nodename);

	if (state != XenbusStateInitialising) {
		/* Device is not new, so ignore it.  This can happen if a
		   device is going away after switching to Closed.  */
		return 0;
	}

	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
	if (!xendev)
		return -ENOMEM;

	xendev->state = XenbusStateInitialising;

	/* Copy the strings into the extra space. */

	tmpstring = (char *)(xendev + 1);
	strcpy(tmpstring, nodename);
	xendev->nodename = tmpstring;

	tmpstring += strlen(tmpstring) + 1;
	strcpy(tmpstring, type);
	xendev->devicetype = tmpstring;
	init_completion(&xendev->down);

	xendev->dev.bus = &bus->bus;
	xendev->dev.release = xenbus_dev_release;

	err = bus->get_bus_id(devname, xendev->nodename);
	if (err)
		goto fail;

	dev_set_name(&xendev->dev, "%s", devname);
	sema_init(&xendev->reclaim_sem, 1);

	/* Register with generic device framework. */
	err = device_register(&xendev->dev);
	if (err) {
		put_device(&xendev->dev);
		xendev = NULL;
		goto fail;
	}

	return 0;
fail:
	kfree(xendev);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_node);

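/* Probe every existing device of a single type below the bus root. */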
static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
{
	int err = 0;
	char **dir;
	unsigned int dir_n = 0;
	int i;

	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	for (i = 0; i < dir_n; i++) {
		err = bus->probe(bus, type, dir[i]);
		if (err)
			break;
	}

	kfree(dir);
	return err;
}

int xenbus_probe_devices(struct xen_bus_type *bus)
{
	int err = 0;
	char **dir;
	unsigned int i, dir_n;

	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	for (i = 0; i < dir_n; i++) {
		err = xenbus_probe_device_type(bus, dir[i]);
		if (err)
			break;
	}

	kfree(dir);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_devices);

static unsigned int char_count(const char *str, char c)
{
	unsigned int i, ret = 0;

	for (i = 0; str[i]; i++)
		if (str[i] == c)
			ret++;
	return ret;
}

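/*
 * Return the index of the (len + 1)th occurrence of c in str, the string
 * length if c occurs exactly len times, or -ERANGE if it occurs fewer than
 * len times.
 */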
static int strsep_len(const char *str, char c, unsigned int len)
{
	unsigned int i;

	for (i = 0; str[i]; i++)
		if (str[i] == c) {
			if (len == 0)
				return i;
			len--;
		}
	return (len == 0) ? i : -ERANGE;
}

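/*
 * Watch callback for the bus root: when a node appears, register the
 * corresponding device; when it disappears, unregister any devices that
 * lived under it.
 */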
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
{
	int exists, rootlen;
	struct xenbus_device *dev;
	char type[XEN_BUS_ID_SIZE];
	const char *p, *root;

	if (char_count(node, '/') < 2)
		return;

	exists = xenbus_exists(XBT_NIL, node, "");
	if (!exists) {
		xenbus_cleanup_devices(node, &bus->bus);
		return;
	}

	/* backend/<type>/... or device/<type>/... */
	p = strchr(node, '/') + 1;
	snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
	type[XEN_BUS_ID_SIZE-1] = '\0';

	rootlen = strsep_len(node, '/', bus->levels);
	if (rootlen < 0)
		return;
	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
	if (!root)
		return;

	dev = xenbus_device_find(root, &bus->bus);
	if (!dev)
		xenbus_probe_node(bus, type, root);
	else
		put_device(&dev->dev);

	kfree(root);
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);

int xenbus_dev_suspend(struct device *dev)
{
	int err = 0;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);

	DPRINTK("%s", xdev->nodename);

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	if (drv->suspend)
		err = drv->suspend(xdev);
	if (err)
		dev_warn(dev, "suspend failed: %i\n", err);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_suspend);

int xenbus_dev_resume(struct device *dev)
{
	int err;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);

	DPRINTK("%s", xdev->nodename);

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	err = talk_to_otherend(xdev);
	if (err) {
		dev_warn(dev, "resume (talk_to_otherend) failed: %i\n", err);
		return err;
	}

	xdev->state = XenbusStateInitialising;

	if (drv->resume) {
		err = drv->resume(xdev);
		if (err) {
			dev_warn(dev, "resume failed: %i\n", err);
			return err;
		}
	}

	err = watch_otherend(xdev);
	if (err) {
		dev_warn(dev, "resume (watch_otherend) failed: %d\n", err);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_resume);

int xenbus_dev_cancel(struct device *dev)
{
	/* Do nothing */
	DPRINTK("cancel");
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_cancel);

/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready;


int register_xenstore_notifier(struct notifier_block *nb)
{
	int ret = 0;

	if (xenstored_ready > 0)
		ret = nb->notifier_call(nb, 0, NULL);
	else
		blocking_notifier_chain_register(&xenstore_chain, nb);

	return ret;
}
EXPORT_SYMBOL_GPL(register_xenstore_notifier);

void unregister_xenstore_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&xenstore_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);

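/*
 * XenStore is now usable: map the ring if that has not happened yet, run
 * the deferred xs_init() in the HVM case and notify everyone on the
 * xenstore_chain.
 */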
static void xenbus_probe(void)
{
	xenstored_ready = 1;

	if (!xen_store_interface) {
		xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
					       XEN_PAGE_SIZE, MEMREMAP_WB);
		/*
		 * Now it is safe to free the IRQ used for xenstore late
		 * initialization. No need to unbind: it is about to be
		 * bound again from xb_init_comms. Note that calling
		 * unbind_from_irqhandler now would result in xen_evtchn_close()
		 * being called and the event channel not being enabled again
		 * afterwards, resulting in missed event notifications.
		 */
		free_irq(xs_init_irq, &xb_waitq);
	}

	/*
	 * In the HVM case, xenbus_init() deferred its call to
	 * xs_init() in case callbacks were not operational yet.
	 * So do it now.
	 */
	if (xen_store_domain_type == XS_HVM)
		xs_init();

	/* Notify others that xenstore is up */
	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}

/*
 * Returns true when XenStore init must be deferred in order to
 * allow the PCI platform device to be initialised, before we
 * can actually have event channel interrupts working.
 */
static bool xs_hvm_defer_init_for_callback(void)
{
#ifdef CONFIG_XEN_PVHVM
	return xen_store_domain_type == XS_HVM &&
		!xen_have_vector_callback;
#else
	return false;
#endif
}

static int xenbus_probe_thread(void *unused)
{
	DEFINE_WAIT(w);

	/*
	 * We actually just want to wait for *any* trigger of xb_waitq,
	 * and run xenbus_probe() the moment it occurs.
	 */
	prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
	schedule();
	finish_wait(&xb_waitq, &w);

	DPRINTK("probing");
	xenbus_probe();
	return 0;
}

static int __init xenbus_probe_initcall(void)
{
	/*
	 * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
	 * need to wait for the platform PCI device to come up or
	 * xen_store_interface is not ready.
	 */
	if (xen_store_domain_type == XS_PV ||
	    (xen_store_domain_type == XS_HVM &&
	     !xs_hvm_defer_init_for_callback() &&
	     xen_store_interface != NULL))
		xenbus_probe();

	/*
	 * For XS_LOCAL or when xen_store_interface is not ready, spawn a
	 * thread which will wait for xenstored or a xenstore-stubdom to be
	 * started, then probe.  It will be triggered when communication
	 * starts happening, by waiting on xb_waitq.
	 */
	if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) {
		struct task_struct *probe_task;

		probe_task = kthread_run(xenbus_probe_thread, NULL,
					 "xenbus_probe");
		if (IS_ERR(probe_task))
			return PTR_ERR(probe_task);
	}
	return 0;
}
device_initcall(xenbus_probe_initcall);

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	int ret;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;

	ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
	if (ret)
		return ret;

	/*
	 * If xenbus_probe_initcall() deferred the xenbus_probe()
	 * due to the callback not functioning yet, we can do it now.
	 */
	if (!xenstored_ready && xs_hvm_defer_init_for_callback())
		xenbus_probe();

	return ret;
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

/* Set up event channel for xenstored which is run as a local process
 * (this is normally used only in dom0)
 */
static int __init xenstored_local_init(void)
{
	int err = -ENOMEM;
	unsigned long page = 0;
	struct evtchn_alloc_unbound alloc_unbound;

	/* Allocate Xenstore page */
	page = get_zeroed_page(GFP_KERNEL);
	if (!page)
		goto out_err;

	xen_store_gfn = virt_to_gfn((void *)page);

	/* Next allocate a local port which xenstored can bind to */
	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = DOMID_SELF;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err == -ENOSYS)
		goto out_err;

	BUG_ON(err);
	xen_store_evtchn = alloc_unbound.port;

	return 0;

 out_err:
	if (page != 0)
		free_page(page);
	return err;
}

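/*
 * Resume notifier: the event channel used to talk to xenstored may have
 * changed across suspend/resume, so re-read it from the hypervisor (HVM)
 * or from start_info (PV).
 */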
static int xenbus_resume_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	int err = 0;

	if (xen_hvm_domain()) {
		uint64_t v = 0;

		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (!err && v)
			xen_store_evtchn = v;
		else
			pr_warn("Cannot update xenstore event channel: %d\n",
				err);
	} else
		xen_store_evtchn = xen_start_info->store_evtchn;

	return err;
}

static struct notifier_block xenbus_resume_nb = {
	.notifier_call = xenbus_resume_cb,
};

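/*
 * Fires on the "xenstore_late_init" event channel: once xenstored has
 * published a valid store PFN, record it and wake the probe thread waiting
 * on xb_waitq.
 */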
static irqreturn_t xenbus_late_init(int irq, void *unused)
{
	int err;
	uint64_t v = 0;

	err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
	if (err || !v || !~v)
		return IRQ_HANDLED;
	xen_store_gfn = (unsigned long)v;

	wake_up(&xb_waitq);
	return IRQ_HANDLED;
}

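/*
 * Work out where the XenStore ring and event channel live for this kind of
 * domain, map the ring if possible and initialise communications; HVM
 * guests without a working callback defer part of this to xenbus_probe().
 */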
static int __init xenbus_init(void)
{
	int err;
	uint64_t v = 0;
	bool wait = false;

	xen_store_domain_type = XS_UNKNOWN;

	if (!xen_domain())
		return -ENODEV;

	xenbus_ring_ops_init();

	if (xen_pv_domain())
		xen_store_domain_type = XS_PV;
	if (xen_hvm_domain())
		xen_store_domain_type = XS_HVM;
	if (xen_hvm_domain() && xen_initial_domain())
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && !xen_start_info->store_evtchn)
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && xen_start_info->store_evtchn)
		xenstored_ready = 1;

	switch (xen_store_domain_type) {
	case XS_LOCAL:
		err = xenstored_local_init();
		if (err)
			goto out_error;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_PV:
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_gfn = xen_start_info->store_mfn;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_HVM:
		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;
		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		/*
		 * Uninitialized hvm_params are zero and return no error.
		 * Although it is theoretically possible to have
		 * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
		 * not zero when valid. If zero, it means that Xenstore hasn't
		 * been properly initialized. Instead of attempting to map a
		 * wrong guest physical address return error.
		 *
		 * Also recognize all bits set as an invalid/uninitialized value.
		 */
		if (!v) {
			err = -ENOENT;
			goto out_error;
		}
		if (v == ~0ULL) {
			wait = true;
		} else {
			/* Avoid truncation on 32-bit. */
#if BITS_PER_LONG == 32
			if (v > ULONG_MAX) {
				pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
				       __func__, v);
				err = -EINVAL;
				goto out_error;
			}
#endif
			xen_store_gfn = (unsigned long)v;
			xen_store_interface =
				memremap(xen_store_gfn << XEN_PAGE_SHIFT,
					 XEN_PAGE_SIZE, MEMREMAP_WB);
			if (xen_store_interface->connection != XENSTORE_CONNECTED)
				wait = true;
		}
		if (wait) {
			err = bind_evtchn_to_irqhandler(xen_store_evtchn,
							xenbus_late_init,
							0, "xenstore_late_init",
							&xb_waitq);
			if (err < 0) {
				pr_err("xenstore_late_init couldn't bind irq err=%d\n",
				       err);
				return err;
			}

			xs_init_irq = err;
		}
		break;
	default:
		pr_warn("Xenstore state unknown\n");
		break;
	}

	/*
	 * HVM domains may not have a functional callback yet. In that
	 * case let xs_init() be called from xenbus_probe(), which will
	 * get invoked at an appropriate time.
	 */
	if (xen_store_domain_type != XS_HVM) {
		err = xs_init();
		if (err) {
			pr_warn("Error initializing xenstore comms: %i\n", err);
			goto out_error;
		}
	}

	if ((xen_store_domain_type != XS_LOCAL) &&
	    (xen_store_domain_type != XS_UNKNOWN))
		xen_resume_notifier_register(&xenbus_resume_nb);

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_create_mount_point("xen");
#endif
	return 0;

out_error:
	xen_store_domain_type = XS_UNKNOWN;
	return err;
}

postcore_initcall(xenbus_init);

MODULE_LICENSE("GPL");