1 /*
2  * linux/ipc/shm.c
3  * Copyright (C) 1992, 1993 Krishna Balasubramanian
4  *	 Many improvements/fixes by Bruno Haible.
5  * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
6  * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
7  *
8  * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
9  * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
10  * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
11  * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
12  * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
13  * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
14  * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
15  *
16  */
17 
18 #include <linux/config.h>
19 #include <linux/slab.h>
20 #include <linux/shm.h>
21 #include <linux/init.h>
22 #include <linux/file.h>
23 #include <linux/mman.h>
24 #include <linux/proc_fs.h>
25 #include <asm/uaccess.h>
26 
27 #include "util.h"
28 
struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm	shm_perm;	/* permissions; the shm_lock()/shm_rmid() macros cast between this and shmid_kernel, so it stays first */
	struct file *		shm_file;	/* backing shmem file that provides the pages */
	int			id;		/* full ipc id (slot + sequence), built by shm_buildid() */
	unsigned long		shm_nattch;	/* number of current attaches */
	unsigned long		shm_segsz;	/* requested segment size in bytes */
	time_t			shm_atim;	/* last attach time */
	time_t			shm_dtim;	/* last detach time */
	time_t			shm_ctim;	/* last change (create/IPC_SET) time */
	pid_t			shm_cprid;	/* pid of creator */
	pid_t			shm_lprid;	/* pid of last attach/detach */
};
42 
/* The segment's mode bits live inside the generic ipc permission struct. */
#define shm_flags	shm_perm.mode

static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

/* Global id table for all shm segments. */
static struct ipc_ids shm_ids;

/* Thin wrappers mapping shm operations onto the generic ipc_* helpers. */
#define shm_lock(id)	((struct shmid_kernel*)ipc_lock(&shm_ids,id))
#define shm_unlock(id)	ipc_unlock(&shm_ids,id)
#define shm_lockall()	ipc_lockall(&shm_ids)
#define shm_unlockall()	ipc_unlockall(&shm_ids)
#define shm_get(id)	((struct shmid_kernel*)ipc_get(&shm_ids,id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)

static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#endif

/* Tunables (sysctl'able): max segment size, total page quota, max segments. */
size_t	shm_ctlmax = SHMMAX;
size_t 	shm_ctlall = SHMALL;
int 	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */
70 
/*
 * shm_init - boot-time setup: initialise the shm id table and
 * register the /proc/sysvipc/shm read handler.
 */
void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/shm", 0, 0, sysvipc_shm_read_proc, NULL);
#endif
}
78 
shm_checkid(struct shmid_kernel * s,int id)79 static inline int shm_checkid(struct shmid_kernel *s, int id)
80 {
81 	if (ipc_checkid(&shm_ids,&s->shm_perm,id))
82 		return -EIDRM;
83 	return 0;
84 }
85 
shm_rmid(int id)86 static inline struct shmid_kernel *shm_rmid(int id)
87 {
88 	return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);
89 }
90 
shm_addid(struct shmid_kernel * shp)91 static inline int shm_addid(struct shmid_kernel *shp)
92 {
93 	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni+1);
94 }
95 
96 
97 
shm_inc(int id)98 static inline void shm_inc (int id) {
99 	struct shmid_kernel *shp;
100 
101 	if(!(shp = shm_lock(id)))
102 		BUG();
103 	shp->shm_atim = CURRENT_TIME;
104 	shp->shm_lprid = current->pid;
105 	shp->shm_nattch++;
106 	shm_unlock(id);
107 }
108 
109 /* This is called by fork, once for every shm attach. */
shm_open(struct vm_area_struct * shmd)110 static void shm_open (struct vm_area_struct *shmd)
111 {
112 	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
113 }
114 
115 /*
116  * shm_destroy - free the struct shmid_kernel
117  *
118  * @shp: struct to free
119  *
120  * It has to be called with shp and shm_ids.sem locked,
121  * but returns with shp unlocked and freed.
122  */
shm_destroy(struct shmid_kernel * shp)123 static void shm_destroy (struct shmid_kernel *shp)
124 {
125 	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
126 	shm_rmid (shp->id);
127 	shm_unlock(shp->id);
128 	shmem_lock(shp->shm_file, 0);
129 	fput (shp->shm_file);
130 	kfree (shp);
131 }
132 
133 /*
134  * remove the attach descriptor shmd.
135  * free memory for segment if it is marked destroyed.
136  * The descriptor has already been removed from the current->mm->mmap list
137  * and will later be kfree()d.
138  */
shm_close(struct vm_area_struct * shmd)139 static void shm_close (struct vm_area_struct *shmd)
140 {
141 	struct file * file = shmd->vm_file;
142 	int id = file->f_dentry->d_inode->i_ino;
143 	struct shmid_kernel *shp;
144 
145 	down (&shm_ids.sem);
146 	/* remove from the list of attaches of the shm segment */
147 	if(!(shp = shm_lock(id)))
148 		BUG();
149 	shp->shm_lprid = current->pid;
150 	shp->shm_dtim = CURRENT_TIME;
151 	shp->shm_nattch--;
152 	if(shp->shm_nattch == 0 &&
153 	   shp->shm_flags & SHM_DEST)
154 		shm_destroy (shp);
155 	else
156 		shm_unlock(id);
157 	up (&shm_ids.sem);
158 }
159 
shm_mmap(struct file * file,struct vm_area_struct * vma)160 static int shm_mmap(struct file * file, struct vm_area_struct * vma)
161 {
162 	UPDATE_ATIME(file->f_dentry->d_inode);
163 	vma->vm_ops = &shm_vm_ops;
164 	if (!(vma->vm_flags & VM_WRITE))
165 		vma->vm_flags &= ~VM_MAYWRITE;
166 	shm_inc(file->f_dentry->d_inode->i_ino);
167 	return 0;
168 }
169 
/* Only mmap is provided; the shmem layer handles everything else. */
static struct file_operations shm_file_operations = {
	mmap:	shm_mmap
};

static struct vm_operations_struct shm_vm_ops = {
	open:	shm_open,	/* callback for a new vm-area open */
	close:	shm_close,	/* callback for when the vm-area is released */
	nopage:	shmem_nopage,	/* faults are served from the backing shmem file */
};
179 
/*
 * newseg - create a new shm segment.
 *
 * @key:    the ipc key (lookup already done by the caller)
 * @shmflg: mode bits for the new segment
 * @size:   requested size in bytes
 *
 * Called with shm_ids.sem held.  Allocates the descriptor and the
 * backing shmem file, stashes the ipc id in the file's inode number
 * (shm_open/shm_close recover it from there), and charges the pages
 * against the global shm_tot quota.
 *
 * Returns the new segment id, or a negative error code.
 */
static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];		/* "SYSV" + 8 hex digits + NUL */
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	/*
	 * System-wide page quota.  Use '>' so a request that fills the
	 * quota exactly is still granted; the previous '>=' was off by
	 * one and rejected it.
	 */
	if (shm_tot + numpages > shm_ctlall)
		return -ENOSPC;

	shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_USER);
	if (!shp)
		return -ENOMEM;

	/* Back the segment with an unlinked shmem file named after the key. */
	sprintf (name, "SYSV%08x", key);
	file = shmem_file_setup(name, size);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if(id == -1)
		goto no_id;
	shp->shm_perm.key = key;
	shp->shm_flags = (shmflg & S_IRWXUGO);
	shp->shm_cprid = current->pid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = CURRENT_TIME;
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id,shp->shm_perm.seq);
	shp->shm_file = file;
	/* The inode number doubles as the ipc id (read back in shm_close). */
	file->f_dentry->d_inode->i_ino = shp->id;
	file->f_op = &shm_file_operations;
	shm_tot += numpages;
	shm_unlock (id);
	return shp->id;

no_id:
	fput(file);
no_file:
	kfree(shp);
	return error;
}
230 
/*
 * sys_shmget - shmget(2): find or create the segment for @key.
 *
 * All lookup and creation happens under shm_ids.sem, so the key
 * search and newseg() are atomic with respect to each other.
 * Returns the segment id, or a negative error code.
 */
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	down(&shm_ids.sem);
	if (key == IPC_PRIVATE) {
		/* IPC_PRIVATE always creates a fresh segment. */
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		/* Key not present: create only if the caller asked for it. */
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		/* Existing segment: check requested size and permissions. */
		shp = shm_lock(id);
		if(shp==NULL)
			BUG();
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else
			err = shm_buildid(id, shp->shm_perm.seq);
		shm_unlock(id);
	}
	up(&shm_ids.sem);
	return err;
}
261 
copy_shmid_to_user(void * buf,struct shmid64_ds * in,int version)262 static inline unsigned long copy_shmid_to_user(void *buf, struct shmid64_ds *in, int version)
263 {
264 	switch(version) {
265 	case IPC_64:
266 		return copy_to_user(buf, in, sizeof(*in));
267 	case IPC_OLD:
268 	    {
269 		struct shmid_ds out;
270 
271 		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
272 		out.shm_segsz	= in->shm_segsz;
273 		out.shm_atime	= in->shm_atime;
274 		out.shm_dtime	= in->shm_dtime;
275 		out.shm_ctime	= in->shm_ctime;
276 		out.shm_cpid	= in->shm_cpid;
277 		out.shm_lpid	= in->shm_lpid;
278 		out.shm_nattch	= in->shm_nattch;
279 
280 		return copy_to_user(buf, &out, sizeof(out));
281 	    }
282 	default:
283 		return -EINVAL;
284 	}
285 }
286 
/* The subset of the user-supplied shmid_ds that IPC_SET actually uses. */
struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};
292 
copy_shmid_from_user(struct shm_setbuf * out,void * buf,int version)293 static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void *buf, int version)
294 {
295 	switch(version) {
296 	case IPC_64:
297 	    {
298 		struct shmid64_ds tbuf;
299 
300 		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
301 			return -EFAULT;
302 
303 		out->uid	= tbuf.shm_perm.uid;
304 		out->gid	= tbuf.shm_perm.gid;
305 		out->mode	= tbuf.shm_flags;
306 
307 		return 0;
308 	    }
309 	case IPC_OLD:
310 	    {
311 		struct shmid_ds tbuf_old;
312 
313 		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
314 			return -EFAULT;
315 
316 		out->uid	= tbuf_old.shm_perm.uid;
317 		out->gid	= tbuf_old.shm_perm.gid;
318 		out->mode	= tbuf_old.shm_flags;
319 
320 		return 0;
321 	    }
322 	default:
323 		return -EINVAL;
324 	}
325 }
326 
copy_shminfo_to_user(void * buf,struct shminfo64 * in,int version)327 static inline unsigned long copy_shminfo_to_user(void *buf, struct shminfo64 *in, int version)
328 {
329 	switch(version) {
330 	case IPC_64:
331 		return copy_to_user(buf, in, sizeof(*in));
332 	case IPC_OLD:
333 	    {
334 		struct shminfo out;
335 
336 		if(in->shmmax > INT_MAX)
337 			out.shmmax = INT_MAX;
338 		else
339 			out.shmmax = (int)in->shmmax;
340 
341 		out.shmmin	= in->shmmin;
342 		out.shmmni	= in->shmmni;
343 		out.shmseg	= in->shmseg;
344 		out.shmall	= in->shmall;
345 
346 		return copy_to_user(buf, &out, sizeof(out));
347 	    }
348 	default:
349 		return -EINVAL;
350 	}
351 }
352 
shm_get_stat(unsigned long * rss,unsigned long * swp)353 static void shm_get_stat (unsigned long *rss, unsigned long *swp)
354 {
355 	struct shmem_inode_info *info;
356 	int i;
357 
358 	*rss = 0;
359 	*swp = 0;
360 
361 	for(i = 0; i <= shm_ids.max_id; i++) {
362 		struct shmid_kernel* shp;
363 		struct inode * inode;
364 
365 		shp = shm_get(i);
366 		if(shp == NULL)
367 			continue;
368 		inode = shp->shm_file->f_dentry->d_inode;
369 		info = SHMEM_I(inode);
370 		spin_lock (&info->lock);
371 		*rss += inode->i_mapping->nrpages;
372 		*swp += info->swapped;
373 		spin_unlock (&info->lock);
374 	}
375 }
376 
/*
 * sys_shmctl - shmctl(2): segment control operations.
 *
 * @shmid: segment id (index for SHM_STAT; unused by IPC_INFO/SHM_INFO)
 * @cmd:   operation, possibly ORed with the IPC_64 version flag
 * @buf:   user buffer whose layout depends on @cmd and the ABI version
 *
 * Info/stat commands only copy data out.  SHM_LOCK/SHM_UNLOCK flip the
 * segment's lock state; IPC_RMID and IPC_SET also take shm_ids.sem
 * because they can destroy or re-key the segment.
 */
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	/* Strips the IPC_64 bit from cmd and reports which layout to use. */
	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		/* Report the system-wide limits; no segment is involved. */
		struct shminfo64 shminfo;

		memset(&shminfo,0,sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading a integer is always atomic */
		err= shm_ids.max_id;
		if(err<0)
			err = 0;
		return err;
	}
	case SHM_INFO:
	{
		/* Current usage: both global locks give a consistent snapshot. */
		struct shm_info shm_info;

		memset(&shm_info,0,sizeof(shm_info));
		down(&shm_ids.sem);
		shm_lockall();
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		shm_unlockall();
		up(&shm_ids.sem);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info)))
			return -EFAULT;

		return err < 0 ? 0 : err;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;
		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if(shp==NULL)
			return -EINVAL;
		if(cmd==SHM_STAT) {
			/* SHM_STAT takes a raw table index; hand back the full id. */
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			/* IPC_STAT takes a full id; verify its sequence number. */
			err = shm_checkid(shp,shmid);
			if(err)
				goto out_unlock;
			result = 0;
		}
		err=-EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shmid);
		if(copy_shmid_to_user (buf, &tbuf, version))
			return -EFAULT;
		return result;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
		if (!capable(CAP_IPC_LOCK))
			return -EPERM;

		shp = shm_lock(shmid);
		if(shp==NULL)
			return -EINVAL;
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock;
		if(cmd==SHM_LOCK) {
			shmem_lock(shp->shm_file, 1);
			shp->shm_flags |= SHM_LOCKED;
		} else {
			shmem_lock(shp->shm_file, 0);
			shp->shm_flags &= ~SHM_LOCKED;
		}
		shm_unlock(shmid);
		return err;
	}
	case IPC_RMID:
	{
		/*
		 *	We cannot simply remove the file. The SVID states
		 *	that the block remains until the last person
		 *	detaches from it, then is deleted. A shmat() on
		 *	an RMID segment is legal in older Linux and if
		 *	we change it apps break...
		 *
		 *	Instead we set a destroyed flag, and then blow
		 *	the name away when the usage hits zero.
		 */
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if(err)
			goto out_unlock_up;
		/* Only the owner, the creator, or CAP_SYS_ADMIN may remove. */
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err=-EPERM;
			goto out_unlock_up;
		}
		if (shp->shm_nattch){
			/* Still attached: defer destruction to the last detach. */
			shp->shm_flags |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shmid);
		} else
			shm_destroy (shp);
		up(&shm_ids.sem);
		return err;
	}

	case IPC_SET:
	{
		if(copy_shmid_from_user (&setbuf, buf, version))
			return -EFAULT;
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err=-EINVAL;
		if(shp==NULL)
			goto out_up;
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock_up;
		err=-EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		/* Only the permission bits may be changed. */
		shp->shm_flags = (shp->shm_flags & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = CURRENT_TIME;
		break;
	}

	default:
		return -EINVAL;
	}

	/* Only IPC_SET's break reaches here: success, drop both locks. */
	err = 0;
out_unlock_up:
	shm_unlock(shmid);
out_up:
	up(&shm_ids.sem);
	return err;
out_unlock:
	shm_unlock(shmid);
	return err;
}
566 
567 /*
568  * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
569  */
sys_shmat(int shmid,char * shmaddr,int shmflg,ulong * raddr)570 asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
571 {
572 	struct shmid_kernel *shp;
573 	unsigned long addr;
574 	unsigned long size;
575 	struct file * file;
576 	int    err;
577 	unsigned long flags;
578 	unsigned long prot;
579 	unsigned long o_flags;
580 	int acc_mode;
581 	void *user_addr;
582 
583 	if (shmid < 0)
584 		return -EINVAL;
585 
586 	if ((addr = (ulong)shmaddr)) {
587 		if (addr & (SHMLBA-1)) {
588 			if (shmflg & SHM_RND)
589 				addr &= ~(SHMLBA-1);	   /* round down */
590 			else
591 				return -EINVAL;
592 		}
593 		flags = MAP_SHARED | MAP_FIXED;
594 	} else {
595 		if ((shmflg & SHM_REMAP))
596 			return -EINVAL;
597 
598 		flags = MAP_SHARED;
599 	}
600 
601 	if (shmflg & SHM_RDONLY) {
602 		prot = PROT_READ;
603 		o_flags = O_RDONLY;
604 		acc_mode = S_IRUGO;
605 	} else {
606 		prot = PROT_READ | PROT_WRITE;
607 		o_flags = O_RDWR;
608 		acc_mode = S_IRUGO | S_IWUGO;
609 	}
610 
611 	/*
612 	 * We cannot rely on the fs check since SYSV IPC does have an
613 	 * additional creator id...
614 	 */
615 	shp = shm_lock(shmid);
616 	if(shp == NULL)
617 		return -EINVAL;
618 	err = shm_checkid(shp,shmid);
619 	if (err) {
620 		shm_unlock(shmid);
621 		return err;
622 	}
623 	if (ipcperms(&shp->shm_perm, acc_mode)) {
624 		shm_unlock(shmid);
625 		return -EACCES;
626 	}
627 	file = shp->shm_file;
628 	size = file->f_dentry->d_inode->i_size;
629 	shp->shm_nattch++;
630 	shm_unlock(shmid);
631 
632 	down_write(&current->mm->mmap_sem);
633 	if (addr && !(shmflg & SHM_REMAP)) {
634 		user_addr = ERR_PTR(-EINVAL);
635 		if (find_vma_intersection(current->mm, addr, addr + size))
636 			goto invalid;
637 		/*
638 		 * If shm segment goes below stack, make sure there is some
639 		 * space left for the stack to grow (at least 4 pages).
640 		 */
641 		if (addr < current->mm->start_stack &&
642 		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
643 			goto invalid;
644 	}
645 
646 	user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);
647 
648 invalid:
649 	up_write(&current->mm->mmap_sem);
650 
651 	down (&shm_ids.sem);
652 	if(!(shp = shm_lock(shmid)))
653 		BUG();
654 	shp->shm_nattch--;
655 	if(shp->shm_nattch == 0 &&
656 	   shp->shm_flags & SHM_DEST)
657 		shm_destroy (shp);
658 	else
659 		shm_unlock(shmid);
660 	up (&shm_ids.sem);
661 
662 	*raddr = (unsigned long) user_addr;
663 	err = 0;
664 	if (IS_ERR(user_addr))
665 		err = PTR_ERR(user_addr);
666 	return err;
667 
668 }
669 
670 /*
671  * detach and kill segment if marked destroyed.
672  * The work is done in shm_close.
673  */
sys_shmdt(char * shmaddr)674 asmlinkage long sys_shmdt (char *shmaddr)
675 {
676 	struct mm_struct *mm = current->mm;
677 	struct vm_area_struct *shmd, *shmdnext;
678 	int retval = -EINVAL;
679 
680 	down_write(&mm->mmap_sem);
681 	for (shmd = mm->mmap; shmd; shmd = shmdnext) {
682 		shmdnext = shmd->vm_next;
683 		if (shmd->vm_ops == &shm_vm_ops
684 		    && shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) {
685 			do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start);
686 			retval = 0;
687 		}
688 	}
689 	up_write(&mm->mmap_sem);
690 	return retval;
691 }
692 
#ifdef CONFIG_PROC_FS
/*
 * sysvipc_shm_read_proc - read handler for /proc/sysvipc/shm.
 *
 * Standard proc_read protocol: produce one text line per segment,
 * track how much of the output falls before @offset (begin/pos
 * bookkeeping), and return via *start/len the slice the caller asked
 * for.  Takes shm_ids.sem so the table cannot change underneath us.
 */
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	down(&shm_ids.sem);
	len += sprintf(buffer, "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n");

	for(i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel* shp;

		shp = shm_lock(i);
		if(shp!=NULL) {
#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
			char *format;

			/* Wider size column on 64-bit size_t platforms. */
			if (sizeof(size_t) <= sizeof(int))
				format = SMALL_STRING;
			else
				format = BIG_STRING;
			len += sprintf(buffer + len, format,
				shp->shm_perm.key,
				shm_buildid(i, shp->shm_perm.seq),
				shp->shm_flags,
				shp->shm_segsz,
				shp->shm_cprid,
				shp->shm_lprid,
				shp->shm_nattch,
				shp->shm_perm.uid,
				shp->shm_perm.gid,
				shp->shm_perm.cuid,
				shp->shm_perm.cgid,
				shp->shm_atim,
				shp->shm_dtim,
				shp->shm_ctim);
			shm_unlock(i);

			/* Discard output that lies entirely before @offset. */
			pos += len;
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			/* Enough produced to satisfy this read: stop early. */
			if(pos > offset + length)
				goto done;
		}
	}
	/* Walked the whole table: signal end-of-file to the proc layer. */
	*eof = 1;
done:
	up(&shm_ids.sem);
	/* Trim the buffer to exactly the [offset, offset+length) window. */
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif
754