// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
 */

#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
#include "lvb_table.h"
#include "user.h"
#include "ast.h"
#include "config.h"

static const char name_prefix[] = "dlm";
static const struct file_operations device_fops;
static atomic_t dlm_monitor_opened;
static int dlm_monitor_unused = 1;

#ifdef CONFIG_COMPAT

struct dlm_lock_params32 {
        __u8 mode;
        __u8 namelen;
        __u16 unused;
        __u32 flags;
        __u32 lkid;
        __u32 parent;
        __u64 xid;
        __u64 timeout;
        __u32 castparam;
        __u32 castaddr;
        __u32 bastparam;
        __u32 bastaddr;
        __u32 lksb;
        char lvb[DLM_USER_LVB_LEN];
        char name[];
};

struct dlm_write_request32 {
        __u32 version[3];
        __u8 cmd;
        __u8 is64bit;
        __u8 unused[2];

        union {
                struct dlm_lock_params32 lock;
                struct dlm_lspace_params lspace;
                struct dlm_purge_params purge;
        } i;
};

struct dlm_lksb32 {
        __u32 sb_status;
        __u32 sb_lkid;
        __u8 sb_flags;
        __u32 sb_lvbptr;
};

struct dlm_lock_result32 {
        __u32 version[3];
        __u32 length;
        __u32 user_astaddr;
        __u32 user_astparam;
        __u32 user_lksb;
        struct dlm_lksb32 lksb;
        __u8 bast_mode;
        __u8 unused[3];
        /* Offsets may be zero if no data is present */
        __u32 lvb_offset;
};

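/* Copy a write request made by a 32-bit task into the native layout;
   pointer-sized fields are widened from __u32. */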
static void compat_input(struct dlm_write_request *kb,
                         struct dlm_write_request32 *kb32,
                         int namelen)
{
        kb->version[0] = kb32->version[0];
        kb->version[1] = kb32->version[1];
        kb->version[2] = kb32->version[2];

        kb->cmd = kb32->cmd;
        kb->is64bit = kb32->is64bit;
        if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
            kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
                kb->i.lspace.flags = kb32->i.lspace.flags;
                kb->i.lspace.minor = kb32->i.lspace.minor;
                memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
        } else if (kb->cmd == DLM_USER_PURGE) {
                kb->i.purge.nodeid = kb32->i.purge.nodeid;
                kb->i.purge.pid = kb32->i.purge.pid;
        } else {
                kb->i.lock.mode = kb32->i.lock.mode;
                kb->i.lock.namelen = kb32->i.lock.namelen;
                kb->i.lock.flags = kb32->i.lock.flags;
                kb->i.lock.lkid = kb32->i.lock.lkid;
                kb->i.lock.parent = kb32->i.lock.parent;
                kb->i.lock.xid = kb32->i.lock.xid;
                kb->i.lock.timeout = kb32->i.lock.timeout;
                kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam;
                kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr;
                kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam;
                kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr;
                kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb;
                memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
                memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
        }
}

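/* Convert a native lock result back to the 32-bit layout before it is
   copied out to a compat task. */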
static void compat_output(struct dlm_lock_result *res,
                          struct dlm_lock_result32 *res32)
{
        memset(res32, 0, sizeof(*res32));

        res32->version[0] = res->version[0];
        res32->version[1] = res->version[1];
        res32->version[2] = res->version[2];

        res32->user_astaddr = (__u32)(__force long)res->user_astaddr;
        res32->user_astparam = (__u32)(__force long)res->user_astparam;
        res32->user_lksb = (__u32)(__force long)res->user_lksb;
        res32->bast_mode = res->bast_mode;

        res32->lvb_offset = res->lvb_offset;
        res32->length = res->length;

        res32->lksb.sb_status = res->lksb.sb_status;
        res32->lksb.sb_flags = res->lksb.sb_flags;
        res32->lksb.sb_lkid = res->lksb.sb_lkid;
        res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
}
#endif

/* Figure out if this lock is at the end of its life and no longer
   available for the application to use.  The lkb still exists until
   the final ast is read.  A lock becomes EOL in three situations:
     1. a noqueue request fails with EAGAIN
     2. an unlock completes with EUNLOCK
     3. a cancel of a waiting request completes with ECANCEL/EDEADLK
   An EOL lock needs to be removed from the process's list of locks.
   And we can't allow any new operation on an EOL lock.  This is
   not related to the lifetime of the lkb struct which is managed
   entirely by refcount. */

static int lkb_is_endoflife(int mode, int status)
{
        switch (status) {
        case -DLM_EUNLOCK:
                return 1;
        case -DLM_ECANCEL:
        case -ETIMEDOUT:
        case -EDEADLK:
        case -EAGAIN:
                if (mode == DLM_LOCK_IV)
                        return 1;
                break;
        }
        return 0;
}

/* we could possibly check if the cancel of an orphan has resulted in the lkb
   being removed and then remove that lkb from the orphans list and free it */

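/* Called by the dlm to deliver a completion or blocking ast to the process
   that owns the lkb: the callback is queued on proc->asts and any reader
   sleeping in device_read() is woken. */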
void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
                      int status, uint32_t sbflags, uint64_t seq)
{
        struct dlm_ls *ls;
        struct dlm_user_args *ua;
        struct dlm_user_proc *proc;
        int rv;

        if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
                return;

        ls = lkb->lkb_resource->res_ls;
        spin_lock(&ls->ls_clear_proc_locks);

        /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
           can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
           lkb->ua so we can't try to use it.  This second check is necessary
           for cases where a completion ast is received for an operation that
           began before clear_proc_locks did its cancel/unlock. */

        if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
                goto out;

        DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
        ua = lkb->lkb_ua;
        proc = ua->proc;

        if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
                goto out;

        if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
                lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;

        spin_lock(&proc->asts_spin);

        rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
        if (rv < 0) {
                spin_unlock(&proc->asts_spin);
                goto out;
        }

        if (list_empty(&lkb->lkb_cb_list)) {
                kref_get(&lkb->lkb_ref);
                list_add_tail(&lkb->lkb_cb_list, &proc->asts);
                wake_up_interruptible(&proc->wait);
        }
        spin_unlock(&proc->asts_spin);

        if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
                /* N.B. spin_lock locks_spin, not asts_spin */
                spin_lock(&proc->locks_spin);
                if (!list_empty(&lkb->lkb_ownqueue)) {
                        list_del_init(&lkb->lkb_ownqueue);
                        dlm_put_lkb(lkb);
                }
                spin_unlock(&proc->locks_spin);
        }
 out:
        spin_unlock(&ls->ls_clear_proc_locks);
}

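/* Handle a DLM_USER_LOCK write: depending on the flags this is a new
   request, a conversion, or the adoption of an orphan lock. */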
static int device_user_lock(struct dlm_user_proc *proc,
                            struct dlm_lock_params *params)
{
        struct dlm_ls *ls;
        struct dlm_user_args *ua;
        uint32_t lkid;
        int error = -ENOMEM;

        ls = dlm_find_lockspace_local(proc->lockspace);
        if (!ls)
                return -ENOENT;

        if (!params->castaddr || !params->lksb) {
                error = -EINVAL;
                goto out;
        }

#ifdef CONFIG_DLM_DEPRECATED_API
        if (params->timeout)
                pr_warn_once("========================================================\n"
                             "WARNING: the lkb timeout feature is being deprecated and\n"
                             "         will be removed in v6.2!\n"
                             "========================================================\n");
#endif

        ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
        if (!ua)
                goto out;
        ua->proc = proc;
        ua->user_lksb = params->lksb;
        ua->castparam = params->castparam;
        ua->castaddr = params->castaddr;
        ua->bastparam = params->bastparam;
        ua->bastaddr = params->bastaddr;
        ua->xid = params->xid;

        if (params->flags & DLM_LKF_CONVERT) {
#ifdef CONFIG_DLM_DEPRECATED_API
                error = dlm_user_convert(ls, ua,
                                         params->mode, params->flags,
                                         params->lkid, params->lvb,
                                         (unsigned long) params->timeout);
#else
                error = dlm_user_convert(ls, ua,
                                         params->mode, params->flags,
                                         params->lkid, params->lvb);
#endif
        } else if (params->flags & DLM_LKF_ORPHAN) {
                error = dlm_user_adopt_orphan(ls, ua,
                                              params->mode, params->flags,
                                              params->name, params->namelen,
                                              &lkid);
                if (!error)
                        error = lkid;
        } else {
#ifdef CONFIG_DLM_DEPRECATED_API
                error = dlm_user_request(ls, ua,
                                         params->mode, params->flags,
                                         params->name, params->namelen,
                                         (unsigned long) params->timeout);
#else
                error = dlm_user_request(ls, ua,
                                         params->mode, params->flags,
                                         params->name, params->namelen);
#endif
                if (!error)
                        error = ua->lksb.sb_lkid;
        }
 out:
        dlm_put_lockspace(ls);
        return error;
}

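/* Handle a DLM_USER_UNLOCK write: either a cancel or an unlock of the
   lkid given in the params. */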
static int device_user_unlock(struct dlm_user_proc *proc,
                              struct dlm_lock_params *params)
{
        struct dlm_ls *ls;
        struct dlm_user_args *ua;
        int error = -ENOMEM;

        ls = dlm_find_lockspace_local(proc->lockspace);
        if (!ls)
                return -ENOENT;

        ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
        if (!ua)
                goto out;
        ua->proc = proc;
        ua->user_lksb = params->lksb;
        ua->castparam = params->castparam;
        ua->castaddr = params->castaddr;

        if (params->flags & DLM_LKF_CANCEL)
                error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
        else
                error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
                                        params->lvb);
 out:
        dlm_put_lockspace(ls);
        return error;
}

static int device_user_deadlock(struct dlm_user_proc *proc,
                                struct dlm_lock_params *params)
{
        struct dlm_ls *ls;
        int error;

        ls = dlm_find_lockspace_local(proc->lockspace);
        if (!ls)
                return -ENOENT;

        error = dlm_user_deadlock(ls, params->flags, params->lkid);

        dlm_put_lockspace(ls);
        return error;
}

static int dlm_device_register(struct dlm_ls *ls, char *name)
{
        int error, len;

        /* The device is already registered.  This happens when the
           lockspace is created multiple times from userspace. */
        if (ls->ls_device.name)
                return 0;

        error = -ENOMEM;
        len = strlen(name) + strlen(name_prefix) + 2;
        ls->ls_device.name = kzalloc(len, GFP_NOFS);
        if (!ls->ls_device.name)
                goto fail;

        snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
                 name);
        ls->ls_device.fops = &device_fops;
        ls->ls_device.minor = MISC_DYNAMIC_MINOR;

        error = misc_register(&ls->ls_device);
        if (error) {
                kfree(ls->ls_device.name);
                /* this has to be set to NULL
                 * to avoid a double-free in dlm_device_deregister
                 */
                ls->ls_device.name = NULL;
        }
 fail:
        return error;
}

int dlm_device_deregister(struct dlm_ls *ls)
{
        /* The device is not registered.  This happens when the lockspace
           was never used from userspace, or when device_create_lockspace()
           calls dlm_release_lockspace() after the register fails. */
        if (!ls->ls_device.name)
                return 0;

        misc_deregister(&ls->ls_device);
        kfree(ls->ls_device.name);
        return 0;
}

static int device_user_purge(struct dlm_user_proc *proc,
                             struct dlm_purge_params *params)
{
        struct dlm_ls *ls;
        int error;

        ls = dlm_find_lockspace_local(proc->lockspace);
        if (!ls)
                return -ENOENT;

        error = dlm_user_purge(ls, proc, params->nodeid, params->pid);

        dlm_put_lockspace(ls);
        return error;
}

static int device_create_lockspace(struct dlm_lspace_params *params)
{
        dlm_lockspace_t *lockspace;
        struct dlm_ls *ls;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name,
                                       params->flags, DLM_USER_LVB_LEN, NULL,
                                       NULL, NULL, &lockspace);
        if (error)
                return error;

        ls = dlm_find_lockspace_local(lockspace);
        if (!ls)
                return -ENOENT;

        error = dlm_device_register(ls, params->name);
        dlm_put_lockspace(ls);

        if (error)
                dlm_release_lockspace(lockspace, 0);
        else
                error = ls->ls_device.minor;

        return error;
}

static int device_remove_lockspace(struct dlm_lspace_params *params)
{
        dlm_lockspace_t *lockspace;
        struct dlm_ls *ls;
        int error, force = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        ls = dlm_find_lockspace_device(params->minor);
        if (!ls)
                return -ENOENT;

        if (params->flags & DLM_USER_LSFLG_FORCEFREE)
                force = 2;

        lockspace = ls->ls_local_handle;
        dlm_put_lockspace(ls);

        /* The final dlm_release_lockspace waits for references to go to
           zero, so all processes will need to close their device for the
           ls before the release will proceed.  release also calls the
           device_deregister above.  Converting a positive return value
           from release to zero means that userspace won't know when its
           release was the final one, but it shouldn't need to know. */

        error = dlm_release_lockspace(lockspace, force);
        if (error > 0)
                error = 0;
        return error;
}

/* Check the user's version matches ours */
static int check_version(struct dlm_write_request *req)
{
        if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
            (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
             req->version[1] > DLM_DEVICE_VERSION_MINOR)) {

                printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
                       "user (%d.%d.%d) kernel (%d.%d.%d)\n",
                       current->comm,
                       task_pid_nr(current),
                       req->version[0],
                       req->version[1],
                       req->version[2],
                       DLM_DEVICE_VERSION_MAJOR,
                       DLM_DEVICE_VERSION_MINOR,
                       DLM_DEVICE_VERSION_PATCH);
                return -EINVAL;
        }
        return 0;
}

/*
 * device_write
 *
 *   device_user_lock
 *     dlm_user_request -> request_lock
 *     dlm_user_convert -> convert_lock
 *
 *   device_user_unlock
 *     dlm_user_unlock -> unlock_lock
 *     dlm_user_cancel -> cancel_lock
 *
 *   device_create_lockspace
 *     dlm_new_lockspace
 *
 *   device_remove_lockspace
 *     dlm_release_lockspace
 */

/* a write to a lockspace device is a lock or unlock request, a write
   to the control device is to create/remove a lockspace */

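/* For illustration only (not part of the kernel build): a minimal userspace
 * sketch of the write-side protocol, assuming the control device node is
 * /dev/dlm-control; real applications normally go through libdlm rather
 * than hand-crafting the request.
 *
 *	len = sizeof(struct dlm_write_request) + strlen("example");
 *	req = calloc(1, len + 1);
 *	req->version[0] = DLM_DEVICE_VERSION_MAJOR;
 *	req->version[1] = DLM_DEVICE_VERSION_MINOR;
 *	req->version[2] = DLM_DEVICE_VERSION_PATCH;
 *	req->cmd = DLM_USER_CREATE_LOCKSPACE;
 *	req->is64bit = (sizeof(long) == 8);
 *	strcpy(req->i.lspace.name, "example");
 *
 *	fd = open("/dev/dlm-control", O_RDWR);
 *	minor = write(fd, req, len);	// returns the new device minor
 */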
static ssize_t device_write(struct file *file, const char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct dlm_user_proc *proc = file->private_data;
        struct dlm_write_request *kbuf;
        int error;

#ifdef CONFIG_COMPAT
        if (count < sizeof(struct dlm_write_request32))
#else
        if (count < sizeof(struct dlm_write_request))
#endif
                return -EINVAL;

        /*
         * can't compare against COMPAT/dlm_write_request32 because
         * we don't yet know if is64bit is zero
         */
        if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
                return -EINVAL;

        kbuf = memdup_user_nul(buf, count);
        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);

        if (check_version(kbuf)) {
                error = -EBADE;
                goto out_free;
        }

#ifdef CONFIG_COMPAT
        if (!kbuf->is64bit) {
                struct dlm_write_request32 *k32buf;
                int namelen = 0;

                if (count > sizeof(struct dlm_write_request32))
                        namelen = count - sizeof(struct dlm_write_request32);

                k32buf = (struct dlm_write_request32 *)kbuf;

                /* add 1 after namelen so that the name string is terminated */
                kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
                               GFP_NOFS);
                if (!kbuf) {
                        kfree(k32buf);
                        return -ENOMEM;
                }

                if (proc)
                        set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);

                compat_input(kbuf, k32buf, namelen);
                kfree(k32buf);
        }
#endif

        /* do we really need this? can a write happen after a close? */
        if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
            (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
                error = -EINVAL;
                goto out_free;
        }

        error = -EINVAL;

        switch (kbuf->cmd)
        {
        case DLM_USER_LOCK:
                if (!proc) {
                        log_print("no locking on control device");
                        goto out_free;
                }
                error = device_user_lock(proc, &kbuf->i.lock);
                break;

        case DLM_USER_UNLOCK:
                if (!proc) {
                        log_print("no locking on control device");
                        goto out_free;
                }
                error = device_user_unlock(proc, &kbuf->i.lock);
                break;

        case DLM_USER_DEADLOCK:
                if (!proc) {
                        log_print("no locking on control device");
                        goto out_free;
                }
                error = device_user_deadlock(proc, &kbuf->i.lock);
                break;

        case DLM_USER_CREATE_LOCKSPACE:
                if (proc) {
                        log_print("create/remove only on control device");
                        goto out_free;
                }
                error = device_create_lockspace(&kbuf->i.lspace);
                break;

        case DLM_USER_REMOVE_LOCKSPACE:
                if (proc) {
                        log_print("create/remove only on control device");
                        goto out_free;
                }
                error = device_remove_lockspace(&kbuf->i.lspace);
                break;

        case DLM_USER_PURGE:
                if (!proc) {
                        log_print("no locking on control device");
                        goto out_free;
                }
                error = device_user_purge(proc, &kbuf->i.purge);
                break;

        default:
                log_print("Unknown command passed to DLM device : %d\n",
                          kbuf->cmd);
        }

 out_free:
        kfree(kbuf);
        return error;
}

/* Every process that opens the lockspace device has its own "proc" structure
   hanging off the open file that's used to keep track of locks owned by the
   process and asts that need to be delivered to the process. */

static int device_open(struct inode *inode, struct file *file)
{
        struct dlm_user_proc *proc;
        struct dlm_ls *ls;

        ls = dlm_find_lockspace_device(iminor(inode));
        if (!ls)
                return -ENOENT;

        proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
        if (!proc) {
                dlm_put_lockspace(ls);
                return -ENOMEM;
        }

        proc->lockspace = ls->ls_local_handle;
        INIT_LIST_HEAD(&proc->asts);
        INIT_LIST_HEAD(&proc->locks);
        INIT_LIST_HEAD(&proc->unlocking);
        spin_lock_init(&proc->asts_spin);
        spin_lock_init(&proc->locks_spin);
        init_waitqueue_head(&proc->wait);
        file->private_data = proc;

        return 0;
}

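/* Closing the lockspace device cancels/unlocks everything the process still
   owns (dlm_clear_proc_locks) and frees its proc struct. */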
static int device_close(struct inode *inode, struct file *file)
{
        struct dlm_user_proc *proc = file->private_data;
        struct dlm_ls *ls;

        ls = dlm_find_lockspace_local(proc->lockspace);
        if (!ls)
                return -ENOENT;

        set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);

        dlm_clear_proc_locks(ls, proc);

        /* at this point no more lkb's should exist for this lockspace,
           so there's no chance of dlm_user_add_ast() being called and
           looking for lkb->ua->proc */

        kfree(proc);
        file->private_data = NULL;

        dlm_put_lockspace(ls);
        dlm_put_lockspace(ls);  /* for the find in device_open() */

        /* FIXME: AUTOFREE: if this ls is no longer used do
           device_remove_lockspace() */

        return 0;
}

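/* Build a struct dlm_lock_result for one callback and copy it, plus the lvb
   when requested and there is room, to the user's read buffer; returns the
   number of bytes written or -EFAULT. */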
static int copy_result_to_user(struct dlm_user_args *ua, int compat,
                               uint32_t flags, int mode, int copy_lvb,
                               char __user *buf, size_t count)
{
#ifdef CONFIG_COMPAT
        struct dlm_lock_result32 result32;
#endif
        struct dlm_lock_result result;
        void *resultptr;
        int error = 0;
        int len;
        int struct_len;

        memset(&result, 0, sizeof(struct dlm_lock_result));
        result.version[0] = DLM_DEVICE_VERSION_MAJOR;
        result.version[1] = DLM_DEVICE_VERSION_MINOR;
        result.version[2] = DLM_DEVICE_VERSION_PATCH;
        memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
        result.user_lksb = ua->user_lksb;

        /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
           in a conversion unless the conversion is successful.  See code
           in dlm_user_convert() for updating ua from ua_tmp.  OpenVMS, though,
           notes that a new blocking AST address and parameter are set even if
           the conversion fails, so maybe we should just do that. */

        if (flags & DLM_CB_BAST) {
                result.user_astaddr = ua->bastaddr;
                result.user_astparam = ua->bastparam;
                result.bast_mode = mode;
        } else {
                result.user_astaddr = ua->castaddr;
                result.user_astparam = ua->castparam;
        }

#ifdef CONFIG_COMPAT
        if (compat)
                len = sizeof(struct dlm_lock_result32);
        else
#endif
                len = sizeof(struct dlm_lock_result);
        struct_len = len;

        /* copy lvb to userspace if there is one, it's been updated, and
           the user buffer has space for it */

        if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
                if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
                                 DLM_USER_LVB_LEN)) {
                        error = -EFAULT;
                        goto out;
                }

                result.lvb_offset = len;
                len += DLM_USER_LVB_LEN;
        }

        result.length = len;
        resultptr = &result;
#ifdef CONFIG_COMPAT
        if (compat) {
                compat_output(&result, &result32);
                resultptr = &result32;
        }
#endif

        if (copy_to_user(buf, resultptr, struct_len))
                error = -EFAULT;
        else
                error = len;
 out:
        return error;
}

static int copy_version_to_user(char __user *buf, size_t count)
{
        struct dlm_device_version ver;

        memset(&ver, 0, sizeof(struct dlm_device_version));
        ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
        ver.version[1] = DLM_DEVICE_VERSION_MINOR;
        ver.version[2] = DLM_DEVICE_VERSION_PATCH;

        if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
                return -EFAULT;
        return sizeof(struct dlm_device_version);
}

/* a read returns a single ast described in a struct dlm_lock_result */

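/* For illustration only (not part of this file): a userspace reader would
 * typically hand in a buffer of at least sizeof(struct dlm_lock_result) +
 * DLM_USER_LVB_LEN bytes, e.g.
 *
 *	char buf[sizeof(struct dlm_lock_result) + DLM_USER_LVB_LEN];
 *	struct dlm_lock_result *res = (struct dlm_lock_result *)buf;
 *	ssize_t n = read(ls_fd, buf, sizeof(buf));
 *
 * where ls_fd is an open lockspace device; res->user_astaddr and
 * res->user_astparam then identify the callback to run, and a nonzero
 * res->lvb_offset locates the returned lvb within the buffer.
 */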
static ssize_t device_read(struct file *file, char __user *buf, size_t count,
                           loff_t *ppos)
{
        struct dlm_user_proc *proc = file->private_data;
        struct dlm_lkb *lkb;
        DECLARE_WAITQUEUE(wait, current);
        struct dlm_callback cb;
        int rv, resid, copy_lvb = 0;
        int old_mode, new_mode;

        if (count == sizeof(struct dlm_device_version)) {
                rv = copy_version_to_user(buf, count);
                return rv;
        }

        if (!proc) {
                log_print("non-version read from control device %zu", count);
                return -EINVAL;
        }

#ifdef CONFIG_COMPAT
        if (count < sizeof(struct dlm_lock_result32))
#else
        if (count < sizeof(struct dlm_lock_result))
#endif
                return -EINVAL;

 try_another:

        /* do we really need this? can a read happen after a close? */
        if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
                return -EINVAL;

        spin_lock(&proc->asts_spin);
        if (list_empty(&proc->asts)) {
                if (file->f_flags & O_NONBLOCK) {
                        spin_unlock(&proc->asts_spin);
                        return -EAGAIN;
                }

                add_wait_queue(&proc->wait, &wait);

        repeat:
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&proc->asts) && !signal_pending(current)) {
                        spin_unlock(&proc->asts_spin);
                        schedule();
                        spin_lock(&proc->asts_spin);
                        goto repeat;
                }
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&proc->wait, &wait);

                if (signal_pending(current)) {
                        spin_unlock(&proc->asts_spin);
                        return -ERESTARTSYS;
                }
        }

        /* if we empty lkb_callbacks, we don't want to unlock the spinlock
           without removing lkb_cb_list; so empty lkb_cb_list is always
           consistent with empty lkb_callbacks */

        lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);

        /* rem_lkb_callback sets a new lkb_last_cast */
        old_mode = lkb->lkb_last_cast.mode;

        rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
        if (rv < 0) {
                /* this shouldn't happen; lkb should have been removed from
                   list when resid was zero */
                log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
                list_del_init(&lkb->lkb_cb_list);
                spin_unlock(&proc->asts_spin);
                /* removes ref for proc->asts, may cause lkb to be freed */
                dlm_put_lkb(lkb);
                goto try_another;
        }
        if (!resid)
                list_del_init(&lkb->lkb_cb_list);
        spin_unlock(&proc->asts_spin);

        if (cb.flags & DLM_CB_SKIP) {
                /* removes ref for proc->asts, may cause lkb to be freed */
                if (!resid)
                        dlm_put_lkb(lkb);
                goto try_another;
        }

        if (cb.flags & DLM_CB_BAST) {
                trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb.mode);
        } else if (cb.flags & DLM_CB_CAST) {
                new_mode = cb.mode;

                if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
                    dlm_lvb_operations[old_mode + 1][new_mode + 1])
                        copy_lvb = 1;

                lkb->lkb_lksb->sb_status = cb.sb_status;
                lkb->lkb_lksb->sb_flags = cb.sb_flags;
                trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
        }

        rv = copy_result_to_user(lkb->lkb_ua,
                                 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
                                 cb.flags, cb.mode, copy_lvb, buf, count);

        /* removes ref for proc->asts, may cause lkb to be freed */
        if (!resid)
                dlm_put_lkb(lkb);

        return rv;
}

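/* Poll support for the lockspace device: readable whenever callbacks are
   queued on proc->asts. */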
static __poll_t device_poll(struct file *file, poll_table *wait)
{
        struct dlm_user_proc *proc = file->private_data;

        poll_wait(file, &proc->wait, wait);

        spin_lock(&proc->asts_spin);
        if (!list_empty(&proc->asts)) {
                spin_unlock(&proc->asts_spin);
                return EPOLLIN | EPOLLRDNORM;
        }
        spin_unlock(&proc->asts_spin);
        return 0;
}

int dlm_user_daemon_available(void)
{
        /* dlm_controld hasn't started (or, has started, but not
           properly populated configfs) */

        if (!dlm_our_nodeid())
                return 0;

        /* This is to deal with versions of dlm_controld that don't
           know about the monitor device.  We assume that if the
           dlm_controld was started (above), but the monitor device
           was never opened, that it's an old version.  dlm_controld
           should open the monitor device before populating configfs. */

        if (dlm_monitor_unused)
                return 1;

        return atomic_read(&dlm_monitor_opened) ? 1 : 0;
}

static int ctl_device_open(struct inode *inode, struct file *file)
{
        file->private_data = NULL;
        return 0;
}

static int ctl_device_close(struct inode *inode, struct file *file)
{
        return 0;
}

static int monitor_device_open(struct inode *inode, struct file *file)
{
        atomic_inc(&dlm_monitor_opened);
        dlm_monitor_unused = 0;
        return 0;
}

static int monitor_device_close(struct inode *inode, struct file *file)
{
        if (atomic_dec_and_test(&dlm_monitor_opened))
                dlm_stop_lockspaces();
        return 0;
}

static const struct file_operations device_fops = {
        .open    = device_open,
        .release = device_close,
        .read    = device_read,
        .write   = device_write,
        .poll    = device_poll,
        .owner   = THIS_MODULE,
        .llseek  = noop_llseek,
};

static const struct file_operations ctl_device_fops = {
        .open    = ctl_device_open,
        .release = ctl_device_close,
        .read    = device_read,
        .write   = device_write,
        .owner   = THIS_MODULE,
        .llseek  = noop_llseek,
};

static struct miscdevice ctl_device = {
        .name  = "dlm-control",
        .fops  = &ctl_device_fops,
        .minor = MISC_DYNAMIC_MINOR,
};

static const struct file_operations monitor_device_fops = {
        .open    = monitor_device_open,
        .release = monitor_device_close,
        .owner   = THIS_MODULE,
        .llseek  = noop_llseek,
};

static struct miscdevice monitor_device = {
        .name  = "dlm-monitor",
        .fops  = &monitor_device_fops,
        .minor = MISC_DYNAMIC_MINOR,
};

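/* Register the dlm-control and dlm-monitor misc devices when the dlm
   module loads. */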
int __init dlm_user_init(void)
{
        int error;

        atomic_set(&dlm_monitor_opened, 0);

        error = misc_register(&ctl_device);
        if (error) {
                log_print("misc_register failed for control device");
                goto out;
        }

        error = misc_register(&monitor_device);
        if (error) {
                log_print("misc_register failed for monitor device");
                misc_deregister(&ctl_device);
        }
 out:
        return error;
}

void dlm_user_exit(void)
{
        misc_deregister(&ctl_device);
        misc_deregister(&monitor_device);
}