/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * Check that we have indeed attached to the thing..
 */
/*
 * Verify that @child is actually being traced by us and is in a state
 * where the tracer may safely inspect or modify it.
 *
 * @child: the task the tracer wants to operate on
 * @kill:  non-zero when the caller intends to kill the child; in that
 *         case the TASK_STOPPED requirement (and the wait for it to
 *         leave its CPU) is skipped.
 *
 * Returns 0 when it is safe to proceed, -ESRCH otherwise.
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	/* Must actually be traced .. */
	if (!(child->ptrace & PT_PTRACED))
		return -ESRCH;

	/* .. and traced by the current task. */
	if (child->p_pptr != current)
		return -ESRCH;

	if (!kill) {
		if (child->state != TASK_STOPPED)
			return -ESRCH;
#ifdef CONFIG_SMP
		/* Make sure the child gets off its CPU.. */
		for (;;) {
			task_lock(child);
			if (!task_has_cpu(child))
				break;	/* breaks out with the task lock held */
			task_unlock(child);
			do {
				/* The child may have been woken while we spun. */
				if (child->state != TASK_STOPPED)
					return -ESRCH;
				barrier();
				cpu_relax();
			} while (task_has_cpu(child));
		}
		/* Drop the lock taken by the successful break above. */
		task_unlock(child);
#endif
	}

	/* All systems go.. */
	return 0;
}
ptrace_attach(struct task_struct * task)56 int ptrace_attach(struct task_struct *task)
57 {
58 	task_lock(task);
59 	if (task->pid <= 1)
60 		goto bad;
61 	if (task->tgid == current->tgid)
62 		goto bad;
63 	if (!task->mm)
64 		goto bad;
65 	if(((current->uid != task->euid) ||
66 	    (current->uid != task->suid) ||
67 	    (current->uid != task->uid) ||
68  	    (current->gid != task->egid) ||
69  	    (current->gid != task->sgid) ||
70  	    (!cap_issubset(task->cap_permitted, current->cap_permitted)) ||
71  	    (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
72 		goto bad;
73 	rmb();
74 	if (!is_dumpable(task) && !capable(CAP_SYS_PTRACE))
75 		goto bad;
76 	/* the same process cannot be attached many times */
77 	if (task->ptrace & PT_PTRACED)
78 		goto bad;
79 
80 	/* Go */
81 	task->ptrace |= PT_PTRACED;
82 	if (capable(CAP_SYS_PTRACE))
83 		task->ptrace |= PT_PTRACE_CAP;
84 	task_unlock(task);
85 
86 	write_lock_irq(&tasklist_lock);
87 	if (task->p_pptr != current) {
88 		REMOVE_LINKS(task);
89 		task->p_pptr = current;
90 		SET_LINKS(task);
91 	}
92 	write_unlock_irq(&tasklist_lock);
93 
94 	send_sig(SIGSTOP, task, 1);
95 	return 0;
96 
97 bad:
98 	task_unlock(task);
99 	return -EPERM;
100 }
ptrace_detach(struct task_struct * child,unsigned int data)102 int ptrace_detach(struct task_struct *child, unsigned int data)
103 {
104 	if ((unsigned long) data > _NSIG)
105 		return	-EIO;
106 
107 	/* Architecture-specific hardware disable .. */
108 	ptrace_disable(child);
109 
110 	/* .. re-parent .. */
111 	child->ptrace = 0;
112 	child->exit_code = data;
113 	write_lock_irq(&tasklist_lock);
114 	REMOVE_LINKS(child);
115 	child->p_pptr = child->p_opptr;
116 	SET_LINKS(child);
117 	write_unlock_irq(&tasklist_lock);
118 
119 	/* .. and wake it up. */
120 	wake_up_process(child);
121 	return 0;
122 }

/*
 * Access another process' address space.
 * Source/target buffer must be kernel space.
 * Do not walk the page table directly; use get_user_pages.
 */

access_process_vm(struct task_struct * tsk,unsigned long addr,void * buf,int len,int write)130 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
131 {
132 	struct mm_struct *mm;
133 	struct vm_area_struct *vma;
134 	struct page *page;
135 	void *old_buf = buf;
136 
137 	/* Worry about races with exit() */
138 	task_lock(tsk);
139 	mm = tsk->mm;
140 	if (mm)
141 		atomic_inc(&mm->mm_users);
142 	task_unlock(tsk);
143 	if (!mm)
144 		return 0;
145 
146 	down_read(&mm->mmap_sem);
147 	/* ignore errors, just check how much was sucessfully transfered */
148 	while (len) {
149 		int bytes, ret, offset;
150 		void *maddr;
151 
152 		ret = get_user_pages(current, mm, addr, 1,
153 				write, 1, &page, &vma);
154 		if (ret <= 0)
155 			break;
156 
157 		bytes = len;
158 		offset = addr & (PAGE_SIZE-1);
159 		if (bytes > PAGE_SIZE-offset)
160 			bytes = PAGE_SIZE-offset;
161 
162 		flush_cache_page(vma, addr);
163 
164 		maddr = kmap(page);
165 		if (write) {
166 			memcpy(maddr + offset, buf, bytes);
167 			flush_page_to_ram(page);
168 			flush_icache_user_range(vma, page, addr, len);
169 			set_page_dirty(page);
170 		} else {
171 			memcpy(buf, maddr + offset, bytes);
172 			flush_page_to_ram(page);
173 		}
174 		kunmap(page);
175 		put_page(page);
176 		len -= bytes;
177 		buf += bytes;
178 		addr += bytes;
179 	}
180 	up_read(&mm->mmap_sem);
181 	mmput(mm);
182 
183 	return buf - old_buf;
184 }
ptrace_readdata(struct task_struct * tsk,unsigned long src,char * dst,int len)186 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len)
187 {
188 	int copied = 0;
189 
190 	while (len > 0) {
191 		char buf[128];
192 		int this_len, retval;
193 
194 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
195 		retval = access_process_vm(tsk, src, buf, this_len, 0);
196 		if (!retval) {
197 			if (copied)
198 				break;
199 			return -EIO;
200 		}
201 		if (copy_to_user(dst, buf, retval))
202 			return -EFAULT;
203 		copied += retval;
204 		src += retval;
205 		dst += retval;
206 		len -= retval;
207 	}
208 	return copied;
209 }
ptrace_writedata(struct task_struct * tsk,char * src,unsigned long dst,int len)211 int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len)
212 {
213 	int copied = 0;
214 
215 	while (len > 0) {
216 		char buf[128];
217 		int this_len, retval;
218 
219 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
220 		if (copy_from_user(buf, src, this_len))
221 			return -EFAULT;
222 		retval = access_process_vm(tsk, dst, buf, this_len, 1);
223 		if (!retval) {
224 			if (copied)
225 				break;
226 			return -EIO;
227 		}
228 		copied += retval;
229 		src += retval;
230 		dst += retval;
231 		len -= retval;
232 	}
233 	return copied;
234 }