1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4  * Copyright © 2009 Canonical Ltd.
5  * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
6  */
7 
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <getopt.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <sys/epoll.h>
16 #include <sys/file.h>
17 #include <sys/inotify.h>
18 #include <sys/ioctl.h>
19 #include <sys/mount.h>
20 #include <sys/prctl.h>
21 #include <sys/signalfd.h>
22 #include <sys/stat.h>
23 #include <sys/time.h>
24 #include <sys/wait.h>
25 #include <unistd.h>
26 
27 #include "sd-daemon.h"
28 #include "sd-event.h"
29 
30 #include "alloc-util.h"
31 #include "cgroup-setup.h"
32 #include "cgroup-util.h"
33 #include "cpu-set-util.h"
34 #include "dev-setup.h"
35 #include "device-monitor-private.h"
36 #include "device-private.h"
37 #include "device-util.h"
38 #include "errno-list.h"
39 #include "event-util.h"
40 #include "fd-util.h"
41 #include "fileio.h"
42 #include "format-util.h"
43 #include "fs-util.h"
44 #include "hashmap.h"
45 #include "inotify-util.h"
46 #include "io-util.h"
47 #include "limits-util.h"
48 #include "list.h"
49 #include "main-func.h"
50 #include "mkdir.h"
51 #include "netlink-util.h"
52 #include "parse-util.h"
53 #include "path-util.h"
54 #include "pretty-print.h"
55 #include "proc-cmdline.h"
56 #include "process-util.h"
57 #include "selinux-util.h"
58 #include "signal-util.h"
59 #include "socket-util.h"
60 #include "string-util.h"
61 #include "strv.h"
62 #include "strxcpyx.h"
63 #include "syslog-util.h"
64 #include "udevd.h"
65 #include "udev-builtin.h"
66 #include "udev-ctrl.h"
67 #include "udev-event.h"
68 #include "udev-util.h"
69 #include "udev-watch.h"
70 #include "user-util.h"
71 #include "version.h"
72 
73 #define WORKER_NUM_MAX 2048U
74 #define EVENT_RETRY_INTERVAL_USEC (200 * USEC_PER_MSEC)
75 #define EVENT_RETRY_TIMEOUT_USEC  (3 * USEC_PER_MINUTE)
76 
77 static bool arg_debug = false;
78 static int arg_daemonize = false;
79 static ResolveNameTiming arg_resolve_name_timing = RESOLVE_NAME_EARLY;
80 static unsigned arg_children_max = 0;
81 static usec_t arg_exec_delay_usec = 0;
82 static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
83 static int arg_timeout_signal = SIGKILL;
84 static bool arg_blockdev_read_only = false;
85 
86 typedef struct Event Event;
87 typedef struct Worker Worker;
88 
89 typedef struct Manager {
90         sd_event *event;
91         Hashmap *workers;
92         LIST_HEAD(Event, events);
93         char *cgroup;
94         pid_t pid; /* the process that originally allocated the manager object */
95         int log_level;
96 
97         UdevRules *rules;
98         Hashmap *properties;
99 
100         sd_netlink *rtnl;
101 
102         sd_device_monitor *monitor;
103         UdevCtrl *ctrl;
104         int worker_watch[2];
105 
106         /* used by udev-watch */
107         int inotify_fd;
108         sd_event_source *inotify_event;
109 
110         sd_event_source *kill_workers_event;
111 
112         usec_t last_usec;
113 
114         bool stop_exec_queue;
115         bool exit;
116 } Manager;
117 
118 typedef enum EventState {
119         EVENT_UNDEF,
120         EVENT_QUEUED,
121         EVENT_RUNNING,
122 } EventState;
123 
124 typedef struct Event {
125         Manager *manager;
126         Worker *worker;
127         EventState state;
128 
129         sd_device *dev;
130 
131         sd_device_action_t action;
132         uint64_t seqnum;
133         uint64_t blocker_seqnum;
134         usec_t retry_again_next_usec;
135         usec_t retry_again_timeout_usec;
136 
137         sd_event_source *timeout_warning_event;
138         sd_event_source *timeout_event;
139 
140         LIST_FIELDS(Event, event);
141 } Event;
142 
143 typedef enum WorkerState {
144         WORKER_UNDEF,
145         WORKER_RUNNING,
146         WORKER_IDLE,
147         WORKER_KILLED,
148         WORKER_KILLING,
149 } WorkerState;
150 
151 typedef struct Worker {
152         Manager *manager;
153         pid_t pid;
154         sd_event_source *child_event_source;
155         sd_device_monitor *monitor;
156         WorkerState state;
157         Event *event;
158 } Worker;
159 
160 /* passed from worker to main process */
161 typedef enum EventResult {
162         EVENT_RESULT_NERRNO_MIN       = -ERRNO_MAX,
163         EVENT_RESULT_NERRNO_MAX       = -1,
164         EVENT_RESULT_SUCCESS          = 0,
165         EVENT_RESULT_EXIT_STATUS_BASE = 0,
166         EVENT_RESULT_EXIT_STATUS_MAX  = 255,
167         EVENT_RESULT_TRY_AGAIN        = 256, /* when the block device is locked by another process. */
168         EVENT_RESULT_SIGNAL_BASE      = 257,
169         EVENT_RESULT_SIGNAL_MAX       = EVENT_RESULT_SIGNAL_BASE + _NSIG,
170         _EVENT_RESULT_MAX,
171         _EVENT_RESULT_INVALID         = -EINVAL,
172 } EventResult;
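
/*
 * Illustrative sketch, not part of the code shown in this excerpt: one way a
 * worker's wait status could map onto this encoding. The actual mapping lives
 * in the SIGCHLD handling; the helper name below is hypothetical.
 *
 *     static EventResult event_result_from_siginfo(const siginfo_t *si) {
 *             if (si->si_code == CLD_EXITED)
 *                     return EVENT_RESULT_EXIT_STATUS_BASE + si->si_status;   // exit status 0..255
 *             return EVENT_RESULT_SIGNAL_BASE + si->si_status;                // killed or dumped by a signal
 *     }
 */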
173 
174 static Event *event_free(Event *event) {
175         if (!event)
176                 return NULL;
177 
178         assert(event->manager);
179 
180         LIST_REMOVE(event, event->manager->events, event);
181         sd_device_unref(event->dev);
182 
183         /* Do not use sd_event_source_disable_unref() here, as this is called by both workers and the
184          * main process. */
185         sd_event_source_unref(event->timeout_warning_event);
186         sd_event_source_unref(event->timeout_event);
187 
188         if (event->worker)
189                 event->worker->event = NULL;
190 
191         return mfree(event);
192 }
193 
194 static void event_queue_cleanup(Manager *manager, EventState match_state) {
195         LIST_FOREACH(event, event, manager->events) {
196                 if (match_state != EVENT_UNDEF && match_state != event->state)
197                         continue;
198 
199                 event_free(event);
200         }
201 }
202 
203 static Worker *worker_free(Worker *worker) {
204         if (!worker)
205                 return NULL;
206 
207         if (worker->manager)
208                 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
209 
210         sd_event_source_unref(worker->child_event_source);
211         sd_device_monitor_unref(worker->monitor);
212         event_free(worker->event);
213 
214         return mfree(worker);
215 }
216 
217 DEFINE_TRIVIAL_CLEANUP_FUNC(Worker*, worker_free);
218 DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(worker_hash_op, void, trivial_hash_func, trivial_compare_func, Worker, worker_free);
219 
220 static void manager_clear_for_worker(Manager *manager) {
221         assert(manager);
222 
223         /* Do not use sd_event_source_disable_unref() here, as this is called by both workers and the
224          * main process. */
225         manager->inotify_event = sd_event_source_unref(manager->inotify_event);
226         manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
227 
228         manager->event = sd_event_unref(manager->event);
229 
230         manager->workers = hashmap_free(manager->workers);
231         event_queue_cleanup(manager, EVENT_UNDEF);
232 
233         manager->monitor = sd_device_monitor_unref(manager->monitor);
234         manager->ctrl = udev_ctrl_unref(manager->ctrl);
235 
236         manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
237 }
238 
239 static Manager* manager_free(Manager *manager) {
240         if (!manager)
241                 return NULL;
242 
243         udev_builtin_exit();
244 
245         manager_clear_for_worker(manager);
246 
247         sd_netlink_unref(manager->rtnl);
248 
249         hashmap_free_free_free(manager->properties);
250         udev_rules_free(manager->rules);
251 
252         safe_close(manager->inotify_fd);
253         safe_close_pair(manager->worker_watch);
254 
255         free(manager->cgroup);
256         return mfree(manager);
257 }
258 
259 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
260 
261 static int on_sigchld(sd_event_source *s, const siginfo_t *si, void *userdata);
262 
263 static int worker_new(Worker **ret, Manager *manager, sd_device_monitor *worker_monitor, pid_t pid) {
264         _cleanup_(worker_freep) Worker *worker = NULL;
265         int r;
266 
267         assert(ret);
268         assert(manager);
269         assert(worker_monitor);
270         assert(pid > 1);
271 
272         /* close monitor, but keep address around */
273         device_monitor_disconnect(worker_monitor);
274 
275         worker = new(Worker, 1);
276         if (!worker)
277                 return -ENOMEM;
278 
279         *worker = (Worker) {
280                 .monitor = sd_device_monitor_ref(worker_monitor),
281                 .pid = pid,
282         };
283 
284         r = sd_event_add_child(manager->event, &worker->child_event_source, pid, WEXITED, on_sigchld, worker);
285         if (r < 0)
286                 return r;
287 
288         r = hashmap_ensure_put(&manager->workers, &worker_hash_op, PID_TO_PTR(pid), worker);
289         if (r < 0)
290                 return r;
291 
292         worker->manager = manager;
293 
294         *ret = TAKE_PTR(worker);
295         return 0;
296 }
297 
298 static void manager_kill_workers(Manager *manager, bool force) {
299         Worker *worker;
300 
301         assert(manager);
302 
303         HASHMAP_FOREACH(worker, manager->workers) {
304                 if (worker->state == WORKER_KILLED)
305                         continue;
306 
307                 if (worker->state == WORKER_RUNNING && !force) {
308                         worker->state = WORKER_KILLING;
309                         continue;
310                 }
311 
312                 worker->state = WORKER_KILLED;
313                 (void) kill(worker->pid, SIGTERM);
314         }
315 }
316 
317 static void manager_exit(Manager *manager) {
318         assert(manager);
319 
320         manager->exit = true;
321 
322         sd_notify(false,
323                   "STOPPING=1\n"
324                   "STATUS=Starting shutdown...");
325 
326         /* close sources of new events and discard buffered events */
327         manager->ctrl = udev_ctrl_unref(manager->ctrl);
328 
329         manager->inotify_event = sd_event_source_disable_unref(manager->inotify_event);
330         manager->inotify_fd = safe_close(manager->inotify_fd);
331 
332         manager->monitor = sd_device_monitor_unref(manager->monitor);
333 
334         /* discard queued events and kill workers */
335         event_queue_cleanup(manager, EVENT_QUEUED);
336         manager_kill_workers(manager, true);
337 }
338 
339 static void notify_ready(void) {
340         int r;
341 
342         r = sd_notifyf(false,
343                        "READY=1\n"
344                        "STATUS=Processing with %u children at max", arg_children_max);
345         if (r < 0)
346                 log_warning_errno(r, "Failed to send readiness notification, ignoring: %m");
347 }
348 
349 /* reload requested, HUP signal received, rules changed, builtin changed */
350 static void manager_reload(Manager *manager) {
351         assert(manager);
352 
353         sd_notify(false,
354                   "RELOADING=1\n"
355                   "STATUS=Flushing configuration...");
356 
357         manager_kill_workers(manager, false);
358         manager->rules = udev_rules_free(manager->rules);
359         udev_builtin_exit();
360 
361         notify_ready();
362 }
363 
364 static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
365         Manager *manager = userdata;
366 
367         assert(manager);
368 
369         log_debug("Cleanup idle workers");
370         manager_kill_workers(manager, false);
371 
372         return 1;
373 }
374 
375 static void device_broadcast(sd_device_monitor *monitor, sd_device *dev, EventResult result) {
376         int r;
377 
378         assert(dev);
379 
380         /* On exit, manager->monitor is already NULL. */
381         if (!monitor)
382                 return;
383 
384         if (result != EVENT_RESULT_SUCCESS) {
385                 (void) device_add_property(dev, "UDEV_WORKER_FAILED", "1");
386 
387                 switch (result) {
388                 case EVENT_RESULT_NERRNO_MIN ... EVENT_RESULT_NERRNO_MAX: {
389                         const char *str;
390 
391                         (void) device_add_propertyf(dev, "UDEV_WORKER_ERRNO", "%i", -result);
392 
393                         str = errno_to_name(result);
394                         if (str)
395                                 (void) device_add_property(dev, "UDEV_WORKER_ERRNO_NAME", str);
396                         break;
397                 }
398                 case EVENT_RESULT_EXIT_STATUS_BASE ... EVENT_RESULT_EXIT_STATUS_MAX:
399                         (void) device_add_propertyf(dev, "UDEV_WORKER_EXIT_STATUS", "%i", result - EVENT_RESULT_EXIT_STATUS_BASE);
400                         break;
401 
402                 case EVENT_RESULT_TRY_AGAIN:
403                         assert_not_reached();
404                         break;
405 
406                 case EVENT_RESULT_SIGNAL_BASE ... EVENT_RESULT_SIGNAL_MAX: {
407                         const char *str;
408 
409                         (void) device_add_propertyf(dev, "UDEV_WORKER_SIGNAL", "%i", result - EVENT_RESULT_SIGNAL_BASE);
410 
411                         str = signal_to_string(result - EVENT_RESULT_SIGNAL_BASE);
412                         if (str)
413                                 (void) device_add_property(dev, "UDEV_WORKER_SIGNAL_NAME", str);
414                         break;
415                 }
416                 default:
417                         log_device_warning(dev, "Unknown event result \"%i\", ignoring.", result);
418                 }
419         }
420 
421         r = device_monitor_send_device(monitor, NULL, dev);
422         if (r < 0)
423                 log_device_warning_errno(dev, r,
424                                          "Failed to broadcast event to libudev listeners, ignoring: %m");
425 }
426 
427 static int worker_send_result(Manager *manager, EventResult result) {
428         assert(manager);
429         assert(manager->worker_watch[WRITE_END] >= 0);
430 
431         return loop_write(manager->worker_watch[WRITE_END], &result, sizeof(result), false);
432 }
433 
434 static int device_get_whole_disk(sd_device *dev, sd_device **ret_device, const char **ret_devname) {
435         const char *val;
436         int r;
437 
438         assert(dev);
439 
440         if (device_for_action(dev, SD_DEVICE_REMOVE))
441                 goto irrelevant;
442 
443         r = sd_device_get_subsystem(dev, &val);
444         if (r < 0)
445                 return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");
446 
447         if (!streq(val, "block"))
448                 goto irrelevant;
449 
450         r = sd_device_get_sysname(dev, &val);
451         if (r < 0)
452                 return log_device_debug_errno(dev, r, "Failed to get sysname: %m");
453 
454         /* Exclude the following devices:
455          * For "dm-", see the comment added by e918a1b5a94f270186dca59156354acd2a596494.
456          * For "md", see the commit message of 2e5b17d01347d3c3118be2b8ad63d20415dbb1f0,
457          * though it is not clear the assumption still holds when partitions are created on md
458          * devices, which surprisingly seems to be possible; see PR #22973.
459          * For "drbd", see the commit message of fee854ee8ccde0cd28e0f925dea18cce35f3993d. */
460         if (STARTSWITH_SET(val, "dm-", "md", "drbd"))
461                 goto irrelevant;
462 
463         r = sd_device_get_devtype(dev, &val);
464         if (r < 0 && r != -ENOENT)
465                 return log_device_debug_errno(dev, r, "Failed to get devtype: %m");
466         if (r >= 0 && streq(val, "partition")) {
467                 r = sd_device_get_parent(dev, &dev);
468                 if (r == -ENOENT) /* The device may be already removed. */
469                         goto irrelevant;
470                 if (r < 0)
471                         return log_device_debug_errno(dev, r, "Failed to get parent device: %m");
472         }
473 
474         r = sd_device_get_devname(dev, &val);
475         if (r == -ENOENT)
476                 goto irrelevant;
477         if (r < 0)
478                 return log_device_debug_errno(dev, r, "Failed to get devname: %m");
479 
480         if (ret_device)
481                 *ret_device = dev;
482         if (ret_devname)
483                 *ret_devname = val;
484         return 1;
485 
486 irrelevant:
487         if (ret_device)
488                 *ret_device = NULL;
489         if (ret_devname)
490                 *ret_devname = NULL;
491         return 0;
492 }
493 
494 static int worker_lock_whole_disk(sd_device *dev, int *ret_fd) {
495         _cleanup_close_ int fd = -1;
496         sd_device *dev_whole_disk;
497         const char *val;
498         int r;
499 
500         assert(dev);
501         assert(ret_fd);
502 
503         /* Take a shared lock on the device node; this establishes a concept of device "ownership" to
504          * serialize device access. External processes holding an exclusive lock will cause udev to skip the
505          * event handling; if udev has acquired the lock, the external process can block until udev has
506          * finished its event handling. */
507 
508         r = device_get_whole_disk(dev, &dev_whole_disk, &val);
509         if (r < 0)
510                 return r;
511         if (r == 0)
512                 goto nolock;
513 
514         fd = sd_device_open(dev_whole_disk, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
515         if (fd < 0) {
516                 bool ignore = ERRNO_IS_DEVICE_ABSENT(fd);
517 
518                 log_device_debug_errno(dev, fd, "Failed to open '%s'%s: %m", val, ignore ? ", ignoring" : "");
519                 if (!ignore)
520                         return fd;
521 
522                 goto nolock;
523         }
524 
525         if (flock(fd, LOCK_SH|LOCK_NB) < 0)
526                 return log_device_debug_errno(dev, errno, "Failed to flock(%s): %m", val);
527 
528         *ret_fd = TAKE_FD(fd);
529         return 1;
530 
531 nolock:
532         *ret_fd = -1;
533         return 0;
534 }
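
/*
 * Illustrative sketch, not part of udevd: the "external process" side described
 * in https://systemd.io/BLOCK_DEVICE_LOCKING. A tool that wants exclusive access
 * takes LOCK_EX on the whole-disk node, which makes the LOCK_SH|LOCK_NB above
 * fail with EAGAIN and the event be requeued. The device name is hypothetical.
 *
 *     int fd = open("/dev/sda", O_RDONLY|O_CLOEXEC|O_NONBLOCK);
 *     if (fd >= 0 && flock(fd, LOCK_EX) >= 0) {
 *             // ... repartition/format/write the device ...
 *             close(fd);   // dropping the lock lets udev retry the deferred events
 *     }
 */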
535 
536 static int worker_mark_block_device_read_only(sd_device *dev) {
537         _cleanup_close_ int fd = -1;
538         const char *val;
539         int state = 1, r;
540 
541         assert(dev);
542 
543         if (!arg_blockdev_read_only)
544                 return 0;
545 
546         /* Do this only once, when the block device is new. If the device is later retriggered let's not
547          * toggle the bit again, so that people can boot up with full read-only mode and then unset the bit
548          * for specific devices only. */
549         if (!device_for_action(dev, SD_DEVICE_ADD))
550                 return 0;
551 
552         r = sd_device_get_subsystem(dev, &val);
553         if (r < 0)
554                 return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");
555 
556         if (!streq(val, "block"))
557                 return 0;
558 
559         r = sd_device_get_sysname(dev, &val);
560         if (r < 0)
561                 return log_device_debug_errno(dev, r, "Failed to get sysname: %m");
562 
563         /* Exclude synthetic devices for now; this is supposed to be a safety feature to avoid modification
564          * of physical devices, and what sits on top of those doesn't really matter if we don't allow the
565          * underlying block devices to receive changes. */
566         if (STARTSWITH_SET(val, "dm-", "md", "drbd", "loop", "nbd", "zram"))
567                 return 0;
568 
569         fd = sd_device_open(dev, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
570         if (fd < 0)
571                 return log_device_debug_errno(dev, fd, "Failed to open '%s', ignoring: %m", val);
572 
573         if (ioctl(fd, BLKROSET, &state) < 0)
574                 return log_device_warning_errno(dev, errno, "Failed to mark block device '%s' read-only: %m", val);
575 
576         log_device_info(dev, "Successfully marked block device '%s' read-only.", val);
577         return 0;
578 }
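
/*
 * Illustrative note: the read-only bit set via BLKROSET above can be cleared
 * again for a specific device after boot, e.g. (hypothetical device name):
 *
 *     int state = 0;
 *     int fd = open("/dev/sda", O_RDONLY|O_CLOEXEC);
 *     if (fd >= 0)
 *             (void) ioctl(fd, BLKROSET, &state);
 *
 * or, equivalently, "blockdev --setrw /dev/sda" from a shell.
 */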
579 
580 static int worker_process_device(Manager *manager, sd_device *dev) {
581         _cleanup_(udev_event_freep) UdevEvent *udev_event = NULL;
582         _cleanup_close_ int fd_lock = -1;
583         int r;
584 
585         assert(manager);
586         assert(dev);
587 
588         log_device_uevent(dev, "Processing device");
589 
590         udev_event = udev_event_new(dev, arg_exec_delay_usec, manager->rtnl, manager->log_level);
591         if (!udev_event)
592                 return -ENOMEM;
593 
594         /* If this is a block device and it is currently locked via BSD advisory locks, someone
595          * else is using it exclusively. We don't run our udev rules now, so as not to interfere.
596          * Instead of processing the event, we requeue it and try again after a delay.
597          *
598          * The user-facing side of this: https://systemd.io/BLOCK_DEVICE_LOCKING */
599         r = worker_lock_whole_disk(dev, &fd_lock);
600         if (r == -EAGAIN)
601                 return EVENT_RESULT_TRY_AGAIN;
602         if (r < 0)
603                 return r;
604 
605         (void) worker_mark_block_device_read_only(dev);
606 
607         /* apply rules, create node, symlinks */
608         r = udev_event_execute_rules(
609                           udev_event,
610                           manager->inotify_fd,
611                           arg_event_timeout_usec,
612                           arg_timeout_signal,
613                           manager->properties,
614                           manager->rules);
615         if (r < 0)
616                 return r;
617 
618         udev_event_execute_run(udev_event, arg_event_timeout_usec, arg_timeout_signal);
619 
620         if (!manager->rtnl)
621                 /* take a reference in case rtnl was initialized during event processing */
622                 manager->rtnl = sd_netlink_ref(udev_event->rtnl);
623 
624         r = udev_event_process_inotify_watch(udev_event, manager->inotify_fd);
625         if (r < 0)
626                 return r;
627 
628         log_device_uevent(dev, "Device processed");
629         return 0;
630 }
631 
632 static int worker_device_monitor_handler(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
633         Manager *manager = userdata;
634         int r;
635 
636         assert(dev);
637         assert(manager);
638 
639         r = worker_process_device(manager, dev);
640         if (r == EVENT_RESULT_TRY_AGAIN)
641                 /* if we couldn't acquire the flock(), then requeue the event */
642                 log_device_debug(dev, "Block device is currently locked, requeueing the event.");
643         else {
644                 if (r < 0)
645                         log_device_warning_errno(dev, r, "Failed to process device, ignoring: %m");
646 
647                 /* send processed event back to libudev listeners */
648                 device_broadcast(monitor, dev, r);
649         }
650 
651         /* send udevd the result of the event execution */
652         r = worker_send_result(manager, r);
653         if (r < 0)
654                 log_device_warning_errno(dev, r, "Failed to send signal to main daemon, ignoring: %m");
655 
656         /* Reset the log level, as it might be changed by "OPTIONS=log_level=". */
657         log_set_max_level(manager->log_level);
658 
659         return 1;
660 }
661 
662 static int worker_main(Manager *_manager, sd_device_monitor *monitor, sd_device *first_device) {
663         _cleanup_(sd_device_unrefp) sd_device *dev = first_device;
664         _cleanup_(manager_freep) Manager *manager = _manager;
665         int r;
666 
667         assert(manager);
668         assert(monitor);
669         assert(dev);
670 
671         assert_se(unsetenv("NOTIFY_SOCKET") == 0);
672 
673         assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, -1) >= 0);
674 
675         /* Reset OOM score, we only protect the main daemon. */
676         r = set_oom_score_adjust(0);
677         if (r < 0)
678                 log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");
679 
680         /* Clear unnecessary data in Manager object. */
681         manager_clear_for_worker(manager);
682 
683         r = sd_event_new(&manager->event);
684         if (r < 0)
685                 return log_error_errno(r, "Failed to allocate event loop: %m");
686 
687         r = sd_event_add_signal(manager->event, NULL, SIGTERM, NULL, NULL);
688         if (r < 0)
689                 return log_error_errno(r, "Failed to set SIGTERM event: %m");
690 
691         r = sd_device_monitor_attach_event(monitor, manager->event);
692         if (r < 0)
693                 return log_error_errno(r, "Failed to attach event loop to device monitor: %m");
694 
695         r = sd_device_monitor_start(monitor, worker_device_monitor_handler, manager);
696         if (r < 0)
697                 return log_error_errno(r, "Failed to start device monitor: %m");
698 
699         (void) sd_event_source_set_description(sd_device_monitor_get_event_source(monitor), "worker-device-monitor");
700 
701         /* Process first device */
702         (void) worker_device_monitor_handler(monitor, dev, manager);
703 
704         r = sd_event_loop(manager->event);
705         if (r < 0)
706                 return log_error_errno(r, "Event loop failed: %m");
707 
708         return 0;
709 }
710 
711 static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
712         Event *event = userdata;
713 
714         assert(event);
715         assert(event->worker);
716 
717         kill_and_sigcont(event->worker->pid, arg_timeout_signal);
718         event->worker->state = WORKER_KILLED;
719 
720         log_device_error(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" killed", event->worker->pid, event->seqnum);
721 
722         return 1;
723 }
724 
725 static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
726         Event *event = userdata;
727 
728         assert(event);
729         assert(event->worker);
730 
731         log_device_warning(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" is taking a long time", event->worker->pid, event->seqnum);
732 
733         return 1;
734 }
735 
736 static void worker_attach_event(Worker *worker, Event *event) {
737         sd_event *e;
738 
739         assert(worker);
740         assert(worker->manager);
741         assert(event);
742         assert(!event->worker);
743         assert(!worker->event);
744 
745         worker->state = WORKER_RUNNING;
746         worker->event = event;
747         event->state = EVENT_RUNNING;
748         event->worker = worker;
749 
750         e = worker->manager->event;
751 
752         (void) sd_event_add_time_relative(e, &event->timeout_warning_event, CLOCK_MONOTONIC,
753                                           udev_warn_timeout(arg_event_timeout_usec), USEC_PER_SEC,
754                                           on_event_timeout_warning, event);
755 
756         (void) sd_event_add_time_relative(e, &event->timeout_event, CLOCK_MONOTONIC,
757                                           arg_event_timeout_usec, USEC_PER_SEC,
758                                           on_event_timeout, event);
759 }
760 
761 static int worker_spawn(Manager *manager, Event *event) {
762         _cleanup_(sd_device_monitor_unrefp) sd_device_monitor *worker_monitor = NULL;
763         Worker *worker;
764         pid_t pid;
765         int r;
766 
767         /* listen for new events */
768         r = device_monitor_new_full(&worker_monitor, MONITOR_GROUP_NONE, -1);
769         if (r < 0)
770                 return r;
771 
772         /* allow the main daemon netlink address to send devices to the worker */
773         r = device_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
774         if (r < 0)
775                 return log_error_errno(r, "Worker: Failed to set unicast sender: %m");
776 
777         r = device_monitor_enable_receiving(worker_monitor);
778         if (r < 0)
779                 return log_error_errno(r, "Worker: Failed to enable receiving of device: %m");
780 
781         r = safe_fork(NULL, FORK_DEATHSIG, &pid);
782         if (r < 0) {
783                 event->state = EVENT_QUEUED;
784                 return log_error_errno(r, "Failed to fork() worker: %m");
785         }
786         if (r == 0) {
787                 DEVICE_TRACE_POINT(worker_spawned, event->dev, getpid());
788 
789                 /* Worker process */
790                 r = worker_main(manager, worker_monitor, sd_device_ref(event->dev));
791                 log_close();
792                 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
793         }
794 
795         r = worker_new(&worker, manager, worker_monitor, pid);
796         if (r < 0)
797                 return log_error_errno(r, "Failed to create worker object: %m");
798 
799         worker_attach_event(worker, event);
800 
801         log_device_debug(event->dev, "Worker ["PID_FMT"] is forked for processing SEQNUM=%"PRIu64".", pid, event->seqnum);
802         return 0;
803 }
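
/*
 * Note on FORK_DEATHSIG above (illustrative): it roughly corresponds to the
 * child arranging
 *
 *     prctl(PR_SET_PDEATHSIG, SIGTERM);
 *
 * so a worker is terminated automatically if the main udevd process dies.
 */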
804 
805 static int event_run(Event *event) {
806         static bool log_children_max_reached = true;
807         Manager *manager;
808         Worker *worker;
809         int r;
810 
811         assert(event);
812         assert(event->manager);
813 
814         log_device_uevent(event->dev, "Device ready for processing");
815 
816         manager = event->manager;
817         HASHMAP_FOREACH(worker, manager->workers) {
818                 if (worker->state != WORKER_IDLE)
819                         continue;
820 
821                 r = device_monitor_send_device(manager->monitor, worker->monitor, event->dev);
822                 if (r < 0) {
823                         log_device_error_errno(event->dev, r, "Worker ["PID_FMT"] did not accept message, killing the worker: %m",
824                                                worker->pid);
825                         (void) kill(worker->pid, SIGKILL);
826                         worker->state = WORKER_KILLED;
827                         continue;
828                 }
829                 worker_attach_event(worker, event);
830                 return 1; /* event is now processing. */
831         }
832 
833         if (hashmap_size(manager->workers) >= arg_children_max) {
834                 /* Avoid spamming the debug logs if the limit is already reached and
835                  * many events still need to be processed */
836                 if (log_children_max_reached && arg_children_max > 1) {
837                         log_debug("Maximum number (%u) of children reached.", hashmap_size(manager->workers));
838                         log_children_max_reached = false;
839                 }
840                 return 0; /* no free worker */
841         }
842 
843         /* Re-enable the debug message for the next batch of events */
844         log_children_max_reached = true;
845 
846         /* start new worker and pass initial device */
847         r = worker_spawn(manager, event);
848         if (r < 0)
849                 return r;
850 
851         return 1; /* event is now processing. */
852 }
853 
854 static int event_is_blocked(Event *event) {
855         const char *subsystem, *devpath, *devpath_old = NULL;
856         dev_t devnum = makedev(0, 0);
857         Event *loop_event = NULL;
858         size_t devpath_len;
859         int r, ifindex = 0;
860         bool is_block;
861 
862         /* look up queued events for an identical, parent, or child device */
863 
864         assert(event);
865         assert(event->manager);
866         assert(event->blocker_seqnum <= event->seqnum);
867 
868         if (event->retry_again_next_usec > 0) {
869                 usec_t now_usec;
870 
871                 r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
872                 if (r < 0)
873                         return r;
874 
875                 if (event->retry_again_next_usec <= now_usec)
876                         return true;
877         }
878 
879         if (event->blocker_seqnum == event->seqnum)
880                 /* we have checked previously and no blocker found */
881                 return false;
882 
883         LIST_FOREACH(event, e, event->manager->events) {
884                 loop_event = e;
885 
886                 /* we already found a later event, earlier cannot block us, no need to check again */
887                 if (loop_event->seqnum < event->blocker_seqnum)
888                         continue;
889 
890                 /* event we checked earlier still exists, no need to check again */
891                 if (loop_event->seqnum == event->blocker_seqnum)
892                         return true;
893 
894                 /* found ourself, no later event can block us */
895                 if (loop_event->seqnum >= event->seqnum)
896                         goto no_blocker;
897 
898                 /* found event we have not checked */
899                 break;
900         }
901 
902         assert(loop_event);
903         assert(loop_event->seqnum > event->blocker_seqnum &&
904                loop_event->seqnum < event->seqnum);
905 
906         r = sd_device_get_subsystem(event->dev, &subsystem);
907         if (r < 0)
908                 return r;
909 
910         is_block = streq(subsystem, "block");
911 
912         r = sd_device_get_devpath(event->dev, &devpath);
913         if (r < 0)
914                 return r;
915 
916         devpath_len = strlen(devpath);
917 
918         r = sd_device_get_property_value(event->dev, "DEVPATH_OLD", &devpath_old);
919         if (r < 0 && r != -ENOENT)
920                 return r;
921 
922         r = sd_device_get_devnum(event->dev, &devnum);
923         if (r < 0 && r != -ENOENT)
924                 return r;
925 
926         r = sd_device_get_ifindex(event->dev, &ifindex);
927         if (r < 0 && r != -ENOENT)
928                 return r;
929 
930         /* check if queue contains events we depend on */
931         LIST_FOREACH(event, e, loop_event) {
932                 size_t loop_devpath_len, common;
933                 const char *loop_devpath;
934 
935                 loop_event = e;
936 
937                 /* found ourself, no later event can block us */
938                 if (loop_event->seqnum >= event->seqnum)
939                         goto no_blocker;
940 
941                 /* check major/minor */
942                 if (major(devnum) != 0) {
943                         const char *s;
944                         dev_t d;
945 
946                         if (sd_device_get_subsystem(loop_event->dev, &s) < 0)
947                                 continue;
948 
949                         if (sd_device_get_devnum(loop_event->dev, &d) >= 0 &&
950                             devnum == d && is_block == streq(s, "block"))
951                                 break;
952                 }
953 
954                 /* check network device ifindex */
955                 if (ifindex > 0) {
956                         int i;
957 
958                         if (sd_device_get_ifindex(loop_event->dev, &i) >= 0 &&
959                             ifindex == i)
960                                 break;
961                 }
962 
963                 if (sd_device_get_devpath(loop_event->dev, &loop_devpath) < 0)
964                         continue;
965 
966                 /* check our old name */
967                 if (devpath_old && streq(devpath_old, loop_devpath))
968                         break;
969 
970                 loop_devpath_len = strlen(loop_devpath);
971 
972                 /* compare devpath */
973                 common = MIN(devpath_len, loop_devpath_len);
974 
975                 /* one devpath is contained in the other? */
976                 if (!strneq(devpath, loop_devpath, common))
977                         continue;
978 
979                 /* identical device event found */
980                 if (devpath_len == loop_devpath_len)
981                         break;
982 
983                 /* parent device event found */
984                 if (devpath[common] == '/')
985                         break;
986 
987                 /* child device event found */
988                 if (loop_devpath[common] == '/')
989                         break;
990         }
991 
992         assert(loop_event);
993 
994         log_device_debug(event->dev, "SEQNUM=%" PRIu64 " blocked by SEQNUM=%" PRIu64,
995                          event->seqnum, loop_event->seqnum);
996 
997         event->blocker_seqnum = loop_event->seqnum;
998         return true;
999 
1000 no_blocker:
1001         event->blocker_seqnum = event->seqnum;
1002         return false;
1003 }
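
/*
 * Worked example for the devpath comparison above (paths abbreviated): with an
 * earlier queued event for the disk ".../block/sda" and a new event for its
 * partition ".../block/sda/sda1", the common prefix is the disk's whole devpath
 * and devpath[common] == '/', so the disk event is found as a blocking parent.
 * The loop_devpath[common] == '/' branch handles the reverse case, where the
 * earlier queued event is for a child (partition) of the new event's device.
 */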
1004 
1005 static int event_queue_start(Manager *manager) {
1006         usec_t usec;
1007         int r;
1008 
1009         assert(manager);
1010 
1011         if (LIST_IS_EMPTY(manager->events) ||
1012             manager->exit || manager->stop_exec_queue)
1013                 return 0;
1014 
1015         assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
1016         /* check for changed config, at most once every 3 seconds */
1017         if (manager->last_usec == 0 ||
1018             usec > usec_add(manager->last_usec, 3 * USEC_PER_SEC)) {
1019                 if (udev_rules_check_timestamp(manager->rules) ||
1020                     udev_builtin_validate())
1021                         manager_reload(manager);
1022 
1023                 manager->last_usec = usec;
1024         }
1025 
1026         r = event_source_disable(manager->kill_workers_event);
1027         if (r < 0)
1028                 log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
1029 
1030         udev_builtin_init();
1031 
1032         if (!manager->rules) {
1033                 r = udev_rules_load(&manager->rules, arg_resolve_name_timing);
1034                 if (r < 0)
1035                         return log_warning_errno(r, "Failed to read udev rules: %m");
1036         }
1037 
1038         /* fork with an up-to-date SELinux label database, so the child inherits the up-to-date db
1039          * and, until the SELinux policy changes next, we are spared further reloads in future children */
1040         mac_selinux_maybe_reload();
1041 
1042         LIST_FOREACH(event, event, manager->events) {
1043                 if (event->state != EVENT_QUEUED)
1044                         continue;
1045 
1046                 /* do not start event if parent or child event is still running or queued */
1047                 r = event_is_blocked(event);
1048                 if (r > 0)
1049                         continue;
1050                 if (r < 0)
1051                         log_device_warning_errno(event->dev, r,
1052                                                  "Failed to check dependencies for event (SEQNUM=%"PRIu64", ACTION=%s), "
1053                                                  "assuming there is no blocking event, ignoring: %m",
1054                                                  event->seqnum,
1055                                                  strna(device_action_to_string(event->action)));
1056 
1057                 r = event_run(event);
1058                 if (r <= 0) /* 0 means there are no idle workers. Let's escape from the loop. */
1059                         return r;
1060         }
1061 
1062         return 0;
1063 }
1064 
1065 static int event_requeue(Event *event) {
1066         usec_t now_usec;
1067         int r;
1068 
1069         assert(event);
1070         assert(event->manager);
1071         assert(event->manager->event);
1072 
1073         event->timeout_warning_event = sd_event_source_disable_unref(event->timeout_warning_event);
1074         event->timeout_event = sd_event_source_disable_unref(event->timeout_event);
1075 
1076         /* add a short delay to suppress busy loop */
1077         r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
1078         if (r < 0)
1079                 return log_device_warning_errno(event->dev, r,
1080                                                 "Failed to get current time, "
1081                                                 "skipping event (SEQNUM=%"PRIu64", ACTION=%s): %m",
1082                                                 event->seqnum, strna(device_action_to_string(event->action)));
1083 
1084         if (event->retry_again_timeout_usec > 0 && event->retry_again_timeout_usec <= now_usec)
1085                 return log_device_warning_errno(event->dev, SYNTHETIC_ERRNO(ETIMEDOUT),
1086                                                 "The underlying block device has been locked by another process for more than %s, "
1087                                                 "skipping event (SEQNUM=%"PRIu64", ACTION=%s).",
1088                                                 FORMAT_TIMESPAN(EVENT_RETRY_TIMEOUT_USEC, USEC_PER_MINUTE),
1089                                                 event->seqnum, strna(device_action_to_string(event->action)));
1090 
1091         event->retry_again_next_usec = usec_add(now_usec, EVENT_RETRY_INTERVAL_USEC);
1092         if (event->retry_again_timeout_usec == 0)
1093                 event->retry_again_timeout_usec = usec_add(now_usec, EVENT_RETRY_TIMEOUT_USEC);
1094 
1095         if (event->worker && event->worker->event == event)
1096                 event->worker->event = NULL;
1097         event->worker = NULL;
1098 
1099         event->state = EVENT_QUEUED;
1100         return 0;
1101 }
1102 
1103 static int event_queue_assume_block_device_unlocked(Manager *manager, sd_device *dev) {
1104         const char *devname;
1105         int r;
1106 
1107         /* When a new event for a block device is queued or we get an inotify event, assume that the
1108          * device is not locked anymore. The assumption may not be true, but that should not cause any
1109          * issues, as in that case events will be requeued soon. */
1110 
1111         r = device_get_whole_disk(dev, NULL, &devname);
1112         if (r <= 0)
1113                 return r;
1114 
1115         LIST_FOREACH(event, event, manager->events) {
1116                 const char *event_devname;
1117 
1118                 if (event->state != EVENT_QUEUED)
1119                         continue;
1120 
1121                 if (event->retry_again_next_usec == 0)
1122                         continue;
1123 
1124                 if (device_get_whole_disk(event->dev, NULL, &event_devname) <= 0)
1125                         continue;
1126 
1127                 if (!streq(devname, event_devname))
1128                         continue;
1129 
1130                 event->retry_again_next_usec = 0;
1131         }
1132 
1133         return 0;
1134 }
1135 
1136 static int event_queue_insert(Manager *manager, sd_device *dev) {
1137         sd_device_action_t action;
1138         uint64_t seqnum;
1139         Event *event;
1140         int r;
1141 
1142         assert(manager);
1143         assert(dev);
1144 
1145         /* only one process can add events to the queue */
1146         assert(manager->pid == getpid_cached());
1147 
1148         /* We only accept devices received via the device monitor. */
1149         r = sd_device_get_seqnum(dev, &seqnum);
1150         if (r < 0)
1151                 return r;
1152 
1153         r = sd_device_get_action(dev, &action);
1154         if (r < 0)
1155                 return r;
1156 
1157         event = new(Event, 1);
1158         if (!event)
1159                 return -ENOMEM;
1160 
1161         *event = (Event) {
1162                 .manager = manager,
1163                 .dev = sd_device_ref(dev),
1164                 .seqnum = seqnum,
1165                 .action = action,
1166                 .state = EVENT_QUEUED,
1167         };
1168 
1169         if (LIST_IS_EMPTY(manager->events)) {
1170                 r = touch("/run/udev/queue");
1171                 if (r < 0)
1172                         log_warning_errno(r, "Failed to touch /run/udev/queue, ignoring: %m");
1173         }
1174 
1175         LIST_APPEND(event, manager->events, event);
1176 
1177         log_device_uevent(dev, "Device is queued");
1178 
1179         return 0;
1180 }
1181 
1182 static int on_uevent(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
1183         Manager *manager = userdata;
1184         int r;
1185 
1186         assert(manager);
1187 
1188         DEVICE_TRACE_POINT(kernel_uevent_received, dev);
1189 
1190         device_ensure_usec_initialized(dev, NULL);
1191 
1192         r = event_queue_insert(manager, dev);
1193         if (r < 0) {
1194                 log_device_error_errno(dev, r, "Failed to insert device into event queue: %m");
1195                 return 1;
1196         }
1197 
1198         (void) event_queue_assume_block_device_unlocked(manager, dev);
1199 
1200         /* we have fresh events, try to schedule them */
1201         event_queue_start(manager);
1202 
1203         return 1;
1204 }
1205 
1206 static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1207         Manager *manager = userdata;
1208 
1209         assert(manager);
1210 
1211         for (;;) {
1212                 EventResult result;
1213                 struct iovec iovec = IOVEC_MAKE(&result, sizeof(result));
1214                 CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred))) control;
1215                 struct msghdr msghdr = {
1216                         .msg_iov = &iovec,
1217                         .msg_iovlen = 1,
1218                         .msg_control = &control,
1219                         .msg_controllen = sizeof(control),
1220                 };
1221                 ssize_t size;
1222                 struct ucred *ucred;
1223                 Worker *worker;
1224 
1225                 size = recvmsg_safe(fd, &msghdr, MSG_DONTWAIT);
1226                 if (size == -EINTR)
1227                         continue;
1228                 if (size == -EAGAIN)
1229                         /* nothing more to read */
1230                         break;
1231                 if (size < 0)
1232                         return log_error_errno(size, "Failed to receive message: %m");
1233 
1234                 cmsg_close_all(&msghdr);
1235 
1236                 if (size != sizeof(result)) {
1237                         log_warning("Ignoring worker message with invalid size %zi bytes", size);
1238                         continue;
1239                 }
1240 
1241                 ucred = CMSG_FIND_DATA(&msghdr, SOL_SOCKET, SCM_CREDENTIALS, struct ucred);
1242                 if (!ucred || ucred->pid <= 0) {
1243                         log_warning("Ignoring worker message without valid PID");
1244                         continue;
1245                 }
1246 
1247                 /* lookup worker who sent the signal */
1248                 worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
1249                 if (!worker) {
1250                         log_debug("Worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
1251                         continue;
1252                 }
1253 
1254                 if (worker->state == WORKER_KILLING) {
1255                         worker->state = WORKER_KILLED;
1256                         (void) kill(worker->pid, SIGTERM);
1257                 } else if (worker->state != WORKER_KILLED)
1258                         worker->state = WORKER_IDLE;
1259 
1260                 /* worker returned */
1261                 if (result == EVENT_RESULT_TRY_AGAIN &&
1262                     event_requeue(worker->event) < 0)
1263                         device_broadcast(manager->monitor, worker->event->dev, -ETIMEDOUT);
1264 
1265                 /* When event_requeue() succeeds, worker->event is NULL, and event_free() handles NULL gracefully. */
1266                 event_free(worker->event);
1267         }
1268 
1269         /* we have free workers, try to schedule events */
1270         event_queue_start(manager);
1271 
1272         return 1;
1273 }
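
/*
 * Sketch, assuming the socketpair setup done elsewhere: for the SCM_CREDENTIALS
 * lookup above to yield the worker's PID, the receiving end of worker_watch
 * must have credential passing enabled, e.g.
 *
 *     int on = 1;
 *     (void) setsockopt(manager->worker_watch[READ_END], SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 */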
1274 
1275 /* receive the udevd message from userspace */
1276 static int on_ctrl_msg(UdevCtrl *uctrl, UdevCtrlMessageType type, const UdevCtrlMessageValue *value, void *userdata) {
1277         Manager *manager = userdata;
1278         int r;
1279 
1280         assert(value);
1281         assert(manager);
1282 
1283         switch (type) {
1284         case UDEV_CTRL_SET_LOG_LEVEL:
1285                 log_debug("Received udev control message (SET_LOG_LEVEL), setting log_level=%i", value->intval);
1286                 log_set_max_level(value->intval);
1287                 manager->log_level = value->intval;
1288                 manager_kill_workers(manager, false);
1289                 break;
1290         case UDEV_CTRL_STOP_EXEC_QUEUE:
1291                 log_debug("Received udev control message (STOP_EXEC_QUEUE)");
1292                 manager->stop_exec_queue = true;
1293                 break;
1294         case UDEV_CTRL_START_EXEC_QUEUE:
1295                 log_debug("Received udev control message (START_EXEC_QUEUE)");
1296                 manager->stop_exec_queue = false;
1297                 event_queue_start(manager);
1298                 break;
1299         case UDEV_CTRL_RELOAD:
1300                 log_debug("Received udev control message (RELOAD)");
1301                 manager_reload(manager);
1302                 break;
1303         case UDEV_CTRL_SET_ENV: {
1304                 _unused_ _cleanup_free_ char *old_val = NULL;
1305                 _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL;
1306                 const char *eq;
1307 
1308                 eq = strchr(value->buf, '=');
1309                 if (!eq) {
1310                         log_error("Invalid key format '%s'", value->buf);
1311                         return 1;
1312                 }
1313 
1314                 key = strndup(value->buf, eq - value->buf);
1315                 if (!key) {
1316                         log_oom();
1317                         return 1;
1318                 }
1319 
1320                 old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);
1321 
1322                 r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
1323                 if (r < 0) {
1324                         log_oom();
1325                         return 1;
1326                 }
1327 
1328                 eq++;
1329                 if (isempty(eq)) {
1330                         log_debug("Received udev control message (ENV), unsetting '%s'", key);
1331 
1332                         r = hashmap_put(manager->properties, key, NULL);
1333                         if (r < 0) {
1334                                 log_oom();
1335                                 return 1;
1336                         }
1337                 } else {
1338                         val = strdup(eq);
1339                         if (!val) {
1340                                 log_oom();
1341                                 return 1;
1342                         }
1343 
1344                         log_debug("Received udev control message (ENV), setting '%s=%s'", key, val);
1345 
1346                         r = hashmap_put(manager->properties, key, val);
1347                         if (r < 0) {
1348                                 log_oom();
1349                                 return 1;
1350                         }
1351                 }
1352 
1353                 key = val = NULL;
1354                 manager_kill_workers(manager, false);
1355                 break;
1356         }
1357         case UDEV_CTRL_SET_CHILDREN_MAX:
1358                 if (value->intval <= 0) {
1359                         log_debug("Received invalid udev control message (SET_MAX_CHILDREN, %i), ignoring.", value->intval);
1360                         return 0;
1361                 }
1362 
1363                 log_debug("Received udev control message (SET_MAX_CHILDREN), setting children_max=%i", value->intval);
1364                 arg_children_max = value->intval;
1365 
1366                 notify_ready();
1367                 break;
1368         case UDEV_CTRL_PING:
1369                 log_debug("Received udev control message (PING)");
1370                 break;
1371         case UDEV_CTRL_EXIT:
1372                 log_debug("Received udev control message (EXIT)");
1373                 manager_exit(manager);
1374                 break;
1375         default:
1376                 log_debug("Received unknown udev control message, ignoring");
1377         }
1378 
1379         return 1;
1380 }
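
/*
 * For orientation (illustrative, see udevadm(8)): these control messages are
 * what "udevadm control" sends over the control socket, e.g.
 *
 *     udevadm control --log-level=debug      ->  UDEV_CTRL_SET_LOG_LEVEL
 *     udevadm control --stop-exec-queue      ->  UDEV_CTRL_STOP_EXEC_QUEUE
 *     udevadm control --start-exec-queue     ->  UDEV_CTRL_START_EXEC_QUEUE
 *     udevadm control --reload               ->  UDEV_CTRL_RELOAD
 *     udevadm control --property=FOO=bar     ->  UDEV_CTRL_SET_ENV
 *     udevadm control --children-max=16      ->  UDEV_CTRL_SET_CHILDREN_MAX
 *     udevadm control --exit                 ->  UDEV_CTRL_EXIT
 */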
1381 
1382 static int synthesize_change_one(sd_device *dev, sd_device *target) {
1383         int r;
1384 
1385         if (DEBUG_LOGGING) {
1386                 const char *syspath = NULL;
1387                 (void) sd_device_get_syspath(target, &syspath);
1388                 log_device_debug(dev, "device is closed, synthesising 'change' on %s", strna(syspath));
1389         }
1390 
1391         r = sd_device_trigger(target, SD_DEVICE_CHANGE);
1392         if (r < 0)
1393                 return log_device_debug_errno(target, r, "Failed to trigger 'change' uevent: %m");
1394 
1395         DEVICE_TRACE_POINT(synthetic_change_event, dev);
1396 
1397         return 0;
1398 }
1399 
1400 static int synthesize_change(sd_device *dev) {
1401         const char *subsystem, *sysname, *devtype;
1402         int r;
1403 
1404         r = sd_device_get_subsystem(dev, &subsystem);
1405         if (r < 0)
1406                 return r;
1407 
1408         r = sd_device_get_devtype(dev, &devtype);
1409         if (r < 0)
1410                 return r;
1411 
1412         r = sd_device_get_sysname(dev, &sysname);
1413         if (r < 0)
1414                 return r;
1415 
1416         if (streq_ptr(subsystem, "block") &&
1417             streq_ptr(devtype, "disk") &&
1418             !startswith(sysname, "dm-")) {
1419                 _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
1420                 bool part_table_read = false, has_partitions = false;
1421                 sd_device *d;
1422                 int fd;
1423 
1424                 /* Try to re-read the partition table. This only succeeds if none of the devices is
1425                  * busy. The kernel returns 0 if no partition table is found, and we will not get an
1426                  * event for the disk. */
1427                 fd = sd_device_open(dev, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
1428                 if (fd >= 0) {
1429                         r = flock(fd, LOCK_EX|LOCK_NB);
1430                         if (r >= 0)
1431                                 r = ioctl(fd, BLKRRPART, 0);
1432 
1433                         close(fd);
1434                         if (r >= 0)
1435                                 part_table_read = true;
1436                 }
1437 
1438                 /* search for partitions */
1439                 r = sd_device_enumerator_new(&e);
1440                 if (r < 0)
1441                         return r;
1442 
1443                 r = sd_device_enumerator_allow_uninitialized(e);
1444                 if (r < 0)
1445                         return r;
1446 
1447                 r = sd_device_enumerator_add_match_parent(e, dev);
1448                 if (r < 0)
1449                         return r;
1450 
1451                 r = sd_device_enumerator_add_match_subsystem(e, "block", true);
1452                 if (r < 0)
1453                         return r;
1454 
1455                 FOREACH_DEVICE(e, d) {
1456                         const char *t;
1457 
1458                         if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
1459                                 continue;
1460 
1461                         has_partitions = true;
1462                         break;
1463                 }
1464 
1465                 /* We have partitions and re-read the table, the kernel already sent out a "change"
1466                  * event for the disk, and "remove/add" for all partitions. */
1467                 if (part_table_read && has_partitions)
1468                         return 0;
1469 
1470                 /* We have partitions but re-reading the partition table did not work, synthesize
1471                  * "change" for the disk and all partitions. */
1472                 (void) synthesize_change_one(dev, dev);
1473 
1474                 FOREACH_DEVICE(e, d) {
1475                         const char *t;
1476 
1477                         if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
1478                                 continue;
1479 
1480                         (void) synthesize_change_one(dev, d);
1481                 }
1482 
1483         } else
1484                 (void) synthesize_change_one(dev, dev);
1485 
1486         return 0;
1487 }
1488 
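/* Handler for the inotify fd backing the device-node watches (OPTIONS+="watch"). An IN_CLOSE_WRITE
 * event means a process closed a watched node it had open for writing, so the device contents may
 * have changed behind our back: assume any lock on the block device is gone so queued events can be
 * retried, and synthesize a "change" uevent. */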
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        ssize_t l;
        int r;

        assert(manager);

        r = event_source_disable(manager->kill_workers_event);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (ERRNO_IS_TRANSIENT(errno))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT_WARN(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                r = device_new_from_watch_handle(&dev, e->wd);
                if (r < 0) {
                        log_debug_errno(r, "Failed to create sd_device object from watch handle, ignoring: %m");
                        continue;
                }

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE) {
                        (void) event_queue_assume_block_device_unlocked(manager, dev);
                        (void) synthesize_change(dev);
                }

                /* Do not handle IN_IGNORED here. It should be handled by worker in 'remove' uevent;
                 * udev_event_execute_rules() -> event_execute_rules_on_remove() -> udev_watch_end(). */
        }

        return 1;
}

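/* SIGTERM/SIGINT: request an orderly shutdown; on_post() exits the event loop once the queue has
 * drained and all workers have finished. */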
static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

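/* SIGHUP: re-read configuration and udev rules (see manager_reload()). */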
static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

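/* Called when a worker process exits; userdata is the per-worker state. Record how the event the
 * worker was processing ended, and if it failed, drop the device's on-disk state and broadcast the
 * result to libudev listeners so they are not left waiting for an event that will never arrive.
 * Then try to schedule pending events on the now-free worker slot. */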
static int on_sigchld(sd_event_source *s, const siginfo_t *si, void *userdata) {
        Worker *worker = ASSERT_PTR(userdata);
        Manager *manager = ASSERT_PTR(worker->manager);
        sd_device *dev = worker->event ? ASSERT_PTR(worker->event->dev) : NULL;
        EventResult result;
        int r;

        assert(si);

        switch (si->si_code) {
        case CLD_EXITED:
                if (si->si_status == 0)
                        log_device_debug(dev, "Worker ["PID_FMT"] exited.", si->si_pid);
                else
                        log_device_warning(dev, "Worker ["PID_FMT"] exited with return code %i.",
                                           si->si_pid, si->si_status);
                result = EVENT_RESULT_EXIT_STATUS_BASE + si->si_status;
                break;

        case CLD_KILLED:
        case CLD_DUMPED:
                log_device_warning(dev, "Worker ["PID_FMT"] terminated by signal %i (%s).",
                                   si->si_pid, si->si_status, signal_to_string(si->si_status));
                result = EVENT_RESULT_SIGNAL_BASE + si->si_status;
                break;

        default:
                assert_not_reached();
        }

        if (result != EVENT_RESULT_SUCCESS && dev) {
                /* delete state from disk */
                device_delete_db(dev);
                device_tag_index(dev, NULL, false);

                /* Forward kernel event to libudev listeners */
                device_broadcast(manager->monitor, dev, result);
        }

        worker_free(worker);

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        /* Disable unnecessary cleanup event */
        if (hashmap_isempty(manager->workers)) {
                r = event_source_disable(manager->kill_workers_event);
                if (r < 0)
                        log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
        }

        return 1;
}

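/* Post event source: runs after every event loop iteration, once all other handlers have been
 * dispatched. It retries queued events, removes /run/udev/queue when the queue is empty, arms the
 * idle-worker cleanup timer, and finally exits the loop once a requested shutdown has fully
 * drained. */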
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        if (!LIST_IS_EMPTY(manager->events)) {
                /* Try to process pending events if idle workers exist. Why is this necessary?
                 * When a worker finishes an event and becomes idle, a pending event may still be
                 * held back because the corresponding device was locked at the time, so the worker
                 * could not pick it up immediately. By now the device may have been unlocked, so
                 * let's try again! */
                event_queue_start(manager);
                return 1;
        }

        /* There are no queued events. Let's remove /run/udev/queue and clean up the idle processes. */

        if (unlink("/run/udev/queue") < 0) {
                if (errno != ENOENT)
                        log_warning_errno(errno, "Failed to unlink /run/udev/queue, ignoring: %m");
        } else
                log_debug("No events are queued, removing /run/udev/queue.");

        if (!hashmap_isempty(manager->workers)) {
                /* There are idle workers */
                (void) event_reset_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
                                        now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC,
                                        on_kill_workers_event, manager, 0, "kill-workers-event", false);
                return 1;
        }

        /* There are no idle workers. */

        if (manager->exit)
                return sd_event_exit(manager->event, 0);

        if (manager->cgroup)
                /* cleanup possible left-over processes in our cgroup */
                (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);

        return 1;
}

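/* Collect the socket-activation fds passed in by the service manager: at most one
 * AF_UNIX/SOCK_SEQPACKET socket (the udev control socket) and at most one AF_NETLINK socket (the
 * kernel uevent socket). Either fd is returned as -1 when it was not passed, so the caller can
 * create the socket itself. */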
static int listen_fds(int *ret_ctrl, int *ret_netlink) {
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n;

        assert(ret_ctrl);
        assert(ret_netlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_UNIX, SOCK_SEQPACKET, -1) > 0) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        *ret_ctrl = ctrl_fd;
        *ret_netlink = netlink_fd;

        return 0;
}

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_level=<level>                    syslog priority
 *   udev.children_max=<number of workers>     events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>       delay execution of every executed program
 *   udev.event_timeout=<number of seconds>    seconds to wait before terminating an event
 *   udev.timeout_signal=<signal>              signal used to kill worker processes that exceed the event timeout
 *   udev.blockdev_read_only[=<bool>]          mark all block devices read-only when they appear
 */
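/* For example, booting with "udev.children_max=1 udev.log_level=debug" serializes all event
 * processing and enables debug logging, while "udev.event_timeout=300 udev.timeout_signal=SIGABRT"
 * gives slow events five minutes and sends SIGABRT instead of SIGKILL when they still time out. */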
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r;

        assert(key);

        if (proc_cmdline_key_streq(key, "udev.log_level") ||
            proc_cmdline_key_streq(key, "udev.log_priority")) { /* kept for backward compatibility */

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = log_level_from_string(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = parse_sec(value, &arg_event_timeout_usec);

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = parse_sec(value, &arg_exec_delay_usec);

        } else if (proc_cmdline_key_streq(key, "udev.timeout_signal")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = signal_from_string(value);
                if (r > 0)
                        arg_timeout_signal = r;

        } else if (proc_cmdline_key_streq(key, "udev.blockdev_read_only")) {

                if (!value)
                        arg_blockdev_read_only = true;
                else {
                        r = parse_boolean(value);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse udev.blockdev_read_only argument, ignoring: %s", value);
                        else
                                arg_blockdev_read_only = r;
                }

                if (arg_blockdev_read_only)
                        log_notice("All physical block devices will be marked read-only.");

                return 0;

        } else {
                if (startswith(key, "udev."))
                        log_warning("Unknown udev kernel command line option \"%s\", ignoring.", key);

                return 0;
        }

        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}

static int help(void) {
        _cleanup_free_ char *link = NULL;
        int r;

        r = terminal_urlify_man("systemd-udevd.service", "8", &link);
        if (r < 0)
                return log_oom();

        printf("%s [OPTIONS...]\n\n"
               "Rule-based manager for device events and files.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               "\nSee the %s for details.\n",
               program_invocation_short_name,
               link);

        return 0;
}

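/* Parse command-line options. Most of them have udev.* kernel command-line equivalents handled by
 * parse_proc_cmdline_item() above. */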
static int parse_argv(int argc, char *argv[]) {
        enum {
                ARG_TIMEOUT_SIGNAL = 0x100,
        };

        static const struct option options[] = {
                { "daemon",             no_argument,            NULL, 'd'                 },
                { "debug",              no_argument,            NULL, 'D'                 },
                { "children-max",       required_argument,      NULL, 'c'                 },
                { "exec-delay",         required_argument,      NULL, 'e'                 },
                { "event-timeout",      required_argument,      NULL, 't'                 },
                { "resolve-names",      required_argument,      NULL, 'N'                 },
                { "help",               no_argument,            NULL, 'h'                 },
                { "version",            no_argument,            NULL, 'V'                 },
                { "timeout-signal",     required_argument,      NULL,  ARG_TIMEOUT_SIGNAL },
                {}
        };

        int c, r;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --children-max= value '%s', ignoring: %m", optarg);
                        break;
                case 'e':
                        r = parse_sec(optarg, &arg_exec_delay_usec);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg);
                        break;
                case ARG_TIMEOUT_SIGNAL:
                        r = signal_from_string(optarg);
                        if (r <= 0)
                                log_warning_errno(r, "Failed to parse --timeout-signal= value '%s', ignoring: %m", optarg);
                        else
                                arg_timeout_signal = r;

                        break;
                case 't':
                        r = parse_sec(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --event-timeout= value '%s', ignoring: %m", optarg);
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N': {
                        ResolveNameTiming t;

                        t = resolve_name_timing_from_string(optarg);
                        if (t < 0)
                                log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg);
                        else
                                arg_resolve_name_timing = t;
                        break;
                }
                case 'h':
                        return help();
                case 'V':
                        printf("%s\n", GIT_VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached();

                }
        }

        return 1;
}

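/* Try to move the daemon into a dedicated "udev" subcgroup of the cgroup PID 1 delegated to us.
 * Returns a negative errno (logged at debug level) when that is not possible, e.g. when we were not
 * started by PID 1, systemd is not running, or the cgroup is not delegated. */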
static int create_subcgroup(char **ret) {
        _cleanup_free_ char *cgroup = NULL, *subcgroup = NULL;
        int r;

        if (getppid() != 1)
                return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "Not invoked by PID1.");

        r = sd_booted();
        if (r < 0)
                return log_debug_errno(r, "Failed to check if systemd is running: %m");
        if (r == 0)
                return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "systemd is not running.");

        /* Get our own cgroup, we regularly kill everything udev has left behind.
         * We only do this on systemd systems, and only if we are directly spawned
         * by PID1. Otherwise we are not guaranteed to have a dedicated cgroup. */

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
        if (r < 0) {
                if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                        return log_debug_errno(r, "Dedicated cgroup not found: %m");
                return log_debug_errno(r, "Failed to get cgroup: %m");
        }

        r = cg_get_xattr_bool(SYSTEMD_CGROUP_CONTROLLER, cgroup, "trusted.delegate");
        if (IN_SET(r, 0, -ENODATA))
                return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "The cgroup %s is not delegated to us.", cgroup);
        if (r < 0)
                return log_debug_errno(r, "Failed to read trusted.delegate attribute: %m");

        /* We are invoked with our own delegated cgroup tree, let's move us one level down, so that we
         * don't collide with the "no processes in inner nodes" rule of cgroups, when the service
         * manager invokes the ExecReload= job in the .control/ subcgroup. */

        subcgroup = path_join(cgroup, "/udev");
        if (!subcgroup)
                return log_oom_debug();

        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, subcgroup, 0);
        if (r < 0)
                return log_debug_errno(r, "Failed to create %s subcgroup: %m", subcgroup);

        log_debug("Created %s subcgroup.", subcgroup);
        if (ret)
                *ret = TAKE_PTR(subcgroup);
        return 0;
}

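/* Allocate the Manager and set up the udev control socket and the kernel uevent monitor, reusing
 * socket-activation fds when they were passed in (fd_ctrl/fd_uevent >= 0). */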
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(ret);

        (void) create_subcgroup(&cgroup);

        manager = new(Manager, 1);
        if (!manager)
                return log_oom();

        *manager = (Manager) {
                .inotify_fd = -1,
                .worker_watch = { -1, -1 },
                .cgroup = TAKE_PTR(cgroup),
        };

        r = udev_ctrl_new_from_fd(&manager->ctrl, fd_ctrl);
        if (r < 0)
                return log_error_errno(r, "Failed to initialize udev control socket: %m");

        r = udev_ctrl_enable_receiving(manager->ctrl);
        if (r < 0)
                return log_error_errno(r, "Failed to bind udev control socket: %m");

        r = device_monitor_new_full(&manager->monitor, MONITOR_GROUP_KERNEL, fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to initialize device monitor: %m");

        /* Bump receiver buffer, but only if we are not called via socket activation, as in that
         * case systemd sets the receive buffer size for us, and the value in the .socket unit
         * should take full effect. */
        if (fd_uevent < 0) {
                r = sd_device_monitor_set_receive_buffer_size(manager->monitor, 128 * 1024 * 1024);
                if (r < 0)
                        log_warning_errno(r, "Failed to set receive buffer size for device monitor, ignoring: %m");
        }

        r = device_monitor_enable_receiving(manager->monitor);
        if (r < 0)
                return log_error_errno(r, "Failed to bind netlink socket: %m");

        manager->log_level = log_get_max_level();

        *ret = TAKE_PTR(manager);

        return 0;
}

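/* Wire up the sd-event loop: signal handlers, the control socket, the uevent monitor, the inotify
 * watch fd, the worker notification socket and the post source, then load the rules and run until a
 * shutdown is requested. */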
static int main_loop(Manager *manager) {
        int fd_worker, r;

        manager->pid = getpid_cached();

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "Failed to create socketpair for communicating with workers: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
        if (r < 0)
                return log_error_errno(r, "Failed to enable SO_PASSCRED: %m");

        manager->inotify_fd = inotify_init1(IN_CLOEXEC);
        if (manager->inotify_fd < 0)
                return log_error_errno(errno, "Failed to create inotify descriptor: %m");

        udev_watch_restore(manager->inotify_fd);

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to create SIGINT event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to create SIGTERM event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to create SIGHUP event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "Failed to create watchdog event source: %m");

        r = udev_ctrl_attach_event(manager->ctrl, manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to attach event to udev control: %m");

        r = udev_ctrl_start(manager->ctrl, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to start udev control: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(udev_ctrl_get_event_source(manager->ctrl), SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "Failed to set IDLE event priority for udev control event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->inotify_fd, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to create inotify event source: %m");

        r = sd_device_monitor_attach_event(manager->monitor, manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to attach event to device monitor: %m");

        r = sd_device_monitor_start(manager->monitor, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to start device monitor: %m");

        (void) sd_event_source_set_description(sd_device_monitor_get_event_source(manager->monitor), "device-monitor");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to create worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to create post event source: %m");

        udev_builtin_init();

        r = udev_rules_load(&manager->rules, arg_resolve_name_timing);
        if (!manager->rules)
                return log_error_errno(r, "Failed to read udev rules: %m");

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "Failed to apply permissions on static device nodes: %m");

        notify_ready();

        r = sd_event_loop(manager->event);
        if (r < 0)
                log_error_errno(r, "Event loop failed: %m");

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        return r;
}

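/* Entry point of the daemon: apply configuration from udev.conf, the environment, the command line
 * and the kernel command line, derive the worker limit, create the manager, optionally daemonize,
 * and enter the main loop. */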
int run_udevd(int argc, char *argv[]) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        log_open();
        udev_parse_config_full(&arg_children_max, &arg_exec_delay_usec, &arg_event_timeout_usec, &arg_resolve_name_timing, &arg_timeout_signal);
        log_parse_environment();
        log_open(); /* Done again to update after reading configuration. */

        r = parse_argv(argc, argv);
        if (r <= 0)
                return r;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                return r;

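        /* If no explicit limit was configured, derive one from the hardware: two workers per CPU in
         * our affinity mask plus 16, but no more than one worker per 128 MiB of RAM (at least 10),
         * and never more than WORKER_NUM_MAX. */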
        if (arg_children_max == 0) {
                unsigned long cpu_limit, mem_limit, cpu_count = 1;

                r = cpus_in_affinity_mask();
                if (r < 0)
                        log_warning_errno(r, "Failed to determine number of local CPUs, ignoring: %m");
                else
                        cpu_count = r;

                cpu_limit = cpu_count * 2 + 16;
                mem_limit = MAX(physical_memory() / (128UL*1024*1024), 10U);

                arg_children_max = MIN(cpu_limit, mem_limit);
                arg_children_max = MIN(WORKER_NUM_MAX, arg_children_max);

                log_debug("Set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        umask(022);

        r = mac_selinux_init();
        if (r < 0)
                return r;

        r = RET_NERRNO(mkdir("/run/udev", 0755));
        if (r < 0 && r != -EEXIST)
                return log_error_errno(r, "Failed to create /run/udev: %m");

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to listen on fds: %m");

        r = manager_new(&manager, fd_ctrl, fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to create manager: %m");

        if (arg_daemonize) {
                pid_t pid;

                log_info("Starting version " GIT_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                if (pid < 0)
                        return log_error_errno(errno, "Failed to fork daemon: %m");
                if (pid > 0)
                        /* parent */
                        return 0;

                /* child */
                (void) setsid();
        }

        return main_loop(manager);
}