/*
 * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pty.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include "kern_constants.h"
#include "kern_util.h"
#include "init.h"
#include "os.h"
#include "process.h"
#include "sigio.h"
#include "um_malloc.h"
#include "user.h"

/*
 * Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;

/*
 * These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed. So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;

struct pollfds {
	struct pollfd *poll;
	int size;
	int used;
};

/*
 * Protected by sigio_lock(). Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;

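/*
 * Body of the workaround thread.  It poll()s the descriptors in
 * current_poll; when a watched descriptor becomes ready it drops it from
 * the set and writes a byte to write_sigio_fds[1] (the other end of that
 * pipe is handed to write_sigio_irq() by write_sigio_workaround()).  When
 * poked through sigio_private[1] it swaps current_poll and next_poll and
 * acknowledges on the same descriptor.
 */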
static int write_sigio_thread(void *unused)
{
	struct pollfds *fds, tmp;
	struct pollfd *p;
	int i, n, respond_fd;
	char c;

	signal(SIGWINCH, SIG_IGN);
	fds = &current_poll;
	while (1) {
		n = poll(fds->poll, fds->used, -1);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			printk(UM_KERN_ERR "write_sigio_thread : poll returned "
			       "%d, errno = %d\n", n, errno);
		}
		for (i = 0; i < fds->used; i++) {
			p = &fds->poll[i];
			if (p->revents == 0)
				continue;
			if (p->fd == sigio_private[1]) {
				CATCH_EINTR(n = read(sigio_private[1], &c,
						     sizeof(c)));
				if (n != sizeof(c))
					printk(UM_KERN_ERR
					       "write_sigio_thread : "
					       "read on socket failed, "
					       "err = %d\n", errno);
				tmp = current_poll;
				current_poll = next_poll;
				next_poll = tmp;
				respond_fd = sigio_private[1];
			}
			else {
				respond_fd = write_sigio_fds[1];
				fds->used--;
				memmove(&fds->poll[i], &fds->poll[i + 1],
					(fds->used - i) * sizeof(*fds->poll));
			}

			CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
			if (n != sizeof(c))
				printk(UM_KERN_ERR "write_sigio_thread : "
				       "write on socket failed, err = %d\n",
				       errno);
		}
	}

	return 0;
}

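/*
 * Grow *polls so that it can hold at least n entries.  Returns 0 on
 * success or -ENOMEM.  The pollfds structures are protected by
 * sigio_lock(), which all callers hold.
 */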
static int need_poll(struct pollfds *polls, int n)
{
	struct pollfd *new;

	if (n <= polls->size)
		return 0;

	new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
	if (new == NULL) {
		printk(UM_KERN_ERR "need_poll : failed to allocate new "
		       "pollfds\n");
		return -ENOMEM;
	}

	memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
	kfree(polls->poll);

	polls->poll = new;
	polls->size = n;
	return 0;
}

/*
 * Must be called with sigio_lock held, because it's needed by the marked
 * critical section.
 */
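/*
 * Poke the workaround thread through sigio_private so that it swaps in
 * next_poll, then wait for its acknowledgement.  If either the write or
 * the read fails, the thread is assumed dead and everything is torn down.
 */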
static void update_thread(void)
{
	unsigned long flags;
	int n;
	char c;

	flags = set_signals(0);
	CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
		       errno);
		goto fail;
	}

	CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
		       errno);
		goto fail;
	}

	set_signals(flags);
	return;
fail:
	/* Critical section start */
	if (write_sigio_pid != -1) {
		os_kill_process(write_sigio_pid, 1);
		free_stack(write_sigio_stack, 0);
	}
	write_sigio_pid = -1;
	close(sigio_private[0]);
	close(sigio_private[1]);
	close(write_sigio_fds[0]);
	close(write_sigio_fds[1]);
	/* Critical section end */
	set_signals(flags);
}

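/*
 * Start watching a descriptor previously registered by sigio_broken():
 * build next_poll as the current set plus the new entry and ask the
 * workaround thread to switch over to it.
 */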
int add_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n;

	sigio_lock();
	for (i = 0; i < all_sigio_fds.used; i++) {
		if (all_sigio_fds.poll[i].fd == fd)
			break;
	}
	if (i == all_sigio_fds.used)
		goto out;

	p = &all_sigio_fds.poll[i];

	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			goto out;
	}

	n = current_poll.used;
	err = need_poll(&next_poll, n + 1);
	if (err)
		goto out;

	memcpy(next_poll.poll, current_poll.poll,
	       current_poll.used * sizeof(struct pollfd));
	next_poll.poll[n] = *p;
	next_poll.used = n + 1;
	update_thread();
out:
	sigio_unlock();
	return err;
}

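/*
 * Stop watching a descriptor: build next_poll as the current set minus
 * the given fd and ask the workaround thread to switch over to it.
 */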
int ignore_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n = 0;

	/*
	 * This is called from exitcalls elsewhere in UML - if
	 * sigio_cleanup has already run, then update_thread will hang
	 * or fail because the thread is no longer running.
	 */
	if (write_sigio_pid == -1)
		return -EIO;

	sigio_lock();
	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			break;
	}
	if (i == current_poll.used)
		goto out;

	err = need_poll(&next_poll, current_poll.used - 1);
	if (err)
		goto out;

	for (i = 0; i < current_poll.used; i++) {
		p = &current_poll.poll[i];
		if (p->fd != fd)
			next_poll.poll[n++] = *p;
	}
	next_poll.used = current_poll.used - 1;

	update_thread();
out:
	sigio_unlock();
	return err;
}

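/*
 * Allocate the initial, single-entry poll set for the workaround thread,
 * which watches only its end of the sigio_private pipe until descriptors
 * are added with add_sigio_fd().
 */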
static struct pollfd *setup_initial_poll(int fd)
{
	struct pollfd *p;

	p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
	if (p == NULL) {
		printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
		       "poll\n");
		return NULL;
	}
	*p = ((struct pollfd) { .fd = fd,
				.events = POLLIN,
				.revents = 0 });
	return p;
}

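/*
 * Set up the write-SIGIO workaround: create the two pipes, register
 * write_sigio_fds[0] via write_sigio_irq() and start the helper thread.
 * Returns quietly if the thread is already running.
 */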
static void write_sigio_workaround(void)
{
	struct pollfd *p;
	int err;
	int l_write_sigio_fds[2];
	int l_sigio_private[2];
	int l_write_sigio_pid;

	/* We call this *tons* of times - most of the time we just bail out. */
	sigio_lock();
	l_write_sigio_pid = write_sigio_pid;
	sigio_unlock();

	if (l_write_sigio_pid != -1)
		return;

	err = os_pipe(l_write_sigio_fds, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
		       "err = %d\n", -err);
		return;
	}
	err = os_pipe(l_sigio_private, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
		       "err = %d\n", -err);
		goto out_close1;
	}

	p = setup_initial_poll(l_sigio_private[1]);
	if (!p)
		goto out_close2;

	sigio_lock();

	/*
	 * Did we race? Don't try to optimize this, please, it's not so likely
	 * to happen, and no more than once per boot.
	 */
	if (write_sigio_pid != -1)
		goto out_free;

	current_poll = ((struct pollfds) { .poll = p,
					   .used = 1,
					   .size = 1 });

	if (write_sigio_irq(l_write_sigio_fds[0]))
		goto out_clear_poll;

	memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
	memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

	write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
					    CLONE_FILES | CLONE_VM,
					    &write_sigio_stack);

	if (write_sigio_pid < 0)
		goto out_clear;

	sigio_unlock();
	return;

out_clear:
	write_sigio_pid = -1;
	write_sigio_fds[0] = -1;
	write_sigio_fds[1] = -1;
	sigio_private[0] = -1;
	sigio_private[1] = -1;
out_clear_poll:
	current_poll = ((struct pollfds) { .poll = NULL,
					   .size = 0,
					   .used = 0 });
out_free:
	sigio_unlock();
	kfree(p);
out_close2:
	close(l_sigio_private[0]);
	close(l_sigio_private[1]);
out_close1:
	close(l_write_sigio_fds[0]);
	close(l_write_sigio_fds[1]);
}

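/*
 * Called for a descriptor that the host can't generate SIGIO for: make
 * sure the workaround thread exists and record the descriptor in
 * all_sigio_fds so that add_sigio_fd() can start polling it later.
 */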
void sigio_broken(int fd, int read)
{
	int err;

	write_sigio_workaround();

	sigio_lock();
	err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
	if (err) {
		printk(UM_KERN_ERR "sigio_broken - failed to add pollfd "
		       "for descriptor %d\n", fd);
		goto out;
	}

	all_sigio_fds.poll[all_sigio_fds.used++] =
		((struct pollfd) { .fd = fd,
				   .events = read ? POLLIN : POLLOUT,
				   .revents = 0 });
out:
	sigio_unlock();
}

/* Changed during early boot */
static int pty_output_sigio;
static int pty_close_sigio;

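/*
 * Use the workaround only for ttys, and only when the relevant host pty
 * capability (SIGIO on output for write interest, SIGIO on close for
 * read interest) was found missing by the boot-time checks.
 */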
void maybe_sigio_broken(int fd, int read)
{
	if (!isatty(fd))
		return;

	if ((read || pty_output_sigio) && (!read || pty_close_sigio))
		return;

	sigio_broken(fd, read);
}

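/*
 * Exitcall: kill the workaround thread and free its stack (see the
 * comment above write_sigio_pid).
 */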
static void sigio_cleanup(void)
{
	if (write_sigio_pid == -1)
		return;

	os_kill_process(write_sigio_pid, 1);
	free_stack(write_sigio_stack, 0);
	write_sigio_pid = -1;
}

__uml_exitcall(sigio_cleanup);

/* Used as a flag during SIGIO testing early in boot */
static int got_sigio;

static void __init handler(int sig)
{
	got_sigio = 1;
}

struct openpty_arg {
	int master;
	int slave;
	int err;
};

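/* initial_thread_cb() callback that opens the pty pair used by the checks */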
static void openpty_cb(void *arg)
{
	struct openpty_arg *info = arg;

	info->err = 0;
	if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
		info->err = -errno;
}

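/*
 * Switch the master to non-blocking, SIGIO-generating mode owned by this
 * process, and the slave to non-blocking mode.
 */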
static int async_pty(int master, int slave)
{
	int flags;

	flags = fcntl(master, F_GETFL);
	if (flags < 0)
		return -errno;

	if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
	    (fcntl(master, F_SETOWN, os_getpid()) < 0))
		return -errno;

	if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
		return -errno;

	return 0;
}

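/*
 * Run one boot-time SIGIO test: open a pty pair, put it into raw,
 * asynchronous mode, install a temporary SIGIO handler and let the given
 * procedure (tty_output or tty_close) decide whether SIGIO arrived.
 */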
static void __init check_one_sigio(void (*proc)(int, int))
{
	struct sigaction old, new;
	struct openpty_arg pty = { .master = -1, .slave = -1 };
	int master, slave, err;

	initial_thread_cb(openpty_cb, &pty);
	if (pty.err) {
		printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
		       -pty.err);
		return;
	}

	master = pty.master;
	slave = pty.slave;

	if ((master == -1) || (slave == -1)) {
		printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
		       "pty\n");
		return;
	}

	/* Not now, but complain so we know where we failed. */
	err = raw(master);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
		       -err);
		return;
	}

	err = async_pty(master, slave);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : async_pty failed, "
		       "err = %d\n", -err);
		return;
	}

	if (sigaction(SIGIO, NULL, &old) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
		       "errno = %d\n", errno);
		return;
	}

	new = old;
	new.sa_handler = handler;
	if (sigaction(SIGIO, &new, NULL) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
		       "errno = %d\n", errno);
		return;
	}

	got_sigio = 0;
	(*proc)(master, slave);

	close(master);
	close(slave);

	if (sigaction(SIGIO, &old, NULL) < 0)
		printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
		       "errno = %d\n", errno);
}

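/*
 * Check whether the host delivers SIGIO when the master side of a pty
 * becomes writable again: fill the master's buffer, drain it from the
 * slave and see whether the handler ran.
 */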
static void tty_output(int master, int slave)
{
	int n;
	char buf[512];

	printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");

	memset(buf, 0, sizeof(buf));

	while (write(master, buf, sizeof(buf)) > 0) ;
	if (errno != EAGAIN)
		printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
		       errno);
	while (((n = read(slave, buf, sizeof(buf))) > 0) &&
	       !({ barrier(); got_sigio; }))
		;

	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_output_sigio = 1;
	} else if (n < 0 && errno == EAGAIN)
		printk(UM_KERN_CONT "No, enabling workaround\n");
	else
		printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n);
}

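/*
 * Check whether the host delivers SIGIO to the master when the slave
 * side of the pty is closed.
 */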
static void tty_close(int master, int slave)
{
	printk(UM_KERN_INFO "Checking that host ptys support SIGIO on "
	       "close...");

	close(slave);
	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_close_sigio = 1;
	} else
		printk(UM_KERN_CONT "No, enabling workaround\n");
}

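/*
 * Boot-time check of host pty SIGIO behaviour; skipped if no
 * pseudo-terminals are available.
 */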
static void __init check_sigio(void)
{
	if ((access("/dev/ptmx", R_OK) < 0) &&
	    (access("/dev/ptyp0", R_OK) < 0)) {
		printk(UM_KERN_WARNING "No pseudo-terminals available - "
		       "skipping pty SIGIO check\n");
		return;
	}
	check_one_sigio(tty_output);
	check_one_sigio(tty_close);
}

/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
	check_sigio();
}