xref: /DragonOS/kernel/src/process/syscall.rs (revision 7a29d4fcbcd89a226289c7bf541c2c78623de3ad)
use core::ffi::c_void;

use alloc::{
    string::{String, ToString},
    sync::Arc,
    vec::Vec,
};
use system_error::SystemError;

use super::{
    abi::WaitOption,
    exit::kernel_wait4,
    fork::{CloneFlags, KernelCloneArgs},
    resource::{RLimit64, RLimitID, RUsage, RUsageWho},
    KernelStack, Pid, ProcessManager,
};
use crate::{
    arch::{interrupt::TrapFrame, MMArch},
    filesystem::{
        procfs::procfs_register_pid,
        vfs::{file::FileDescriptorVec, MAX_PATHLEN},
    },
    mm::{ucontext::UserStack, verify_area, MemoryManagementArch, VirtAddr},
    process::ProcessControlBlock,
    sched::completion::Completion,
    syscall::{
        user_access::{check_and_clone_cstr, check_and_clone_cstr_array, UserBufferWriter},
        Syscall,
    },
};

impl Syscall {
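    /// Create a new child process.
    ///
    /// This is the plain `fork(2)` path: the child is created with an empty set of
    /// `CloneFlags`, and on success the child's PID is returned to the parent.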
    pub fn fork(frame: &mut TrapFrame) -> Result<usize, SystemError> {
        let r = ProcessManager::fork(frame, CloneFlags::empty()).map(|pid| pid.into());
        return r;
    }

    pub fn vfork(frame: &mut TrapFrame) -> Result<usize, SystemError> {
        // Linux vfork must guarantee that the child runs first (unless the child calls
        // execve or exit). We have not implemented that guarantee yet, so for now fork is
        // used in place of vfork (the Linux documentation states this is also acceptable).
        Self::fork(frame)

        // The previous implementation is kept below. Do not use it until the "child runs
        // first" behaviour is implemented, otherwise the parent's data will be corrupted.
        // ProcessManager::fork(
        //     frame,
        //     CloneFlags::CLONE_VM | CloneFlags::CLONE_FS | CloneFlags::CLONE_SIGNAL,
        // )
        // .map(|pid| pid.into())
    }

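    /// Replace the current process image with the program at `path`.
    ///
    /// `path`, `argv` and `envp` are raw user-space pointers to the program path, the
    /// argument array and the environment array; they are validated and copied into
    /// kernel memory before `do_execve` runs, and descriptors marked `O_CLOEXEC` are
    /// closed once the new image is in place.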
    pub fn execve(
        path: *const u8,
        argv: *const *const u8,
        envp: *const *const u8,
        frame: &mut TrapFrame,
    ) -> Result<(), SystemError> {
        // kdebug!(
        //     "execve path: {:?}, argv: {:?}, envp: {:?}\n",
        //     path,
        //     argv,
        //     envp
        // );
        // kdebug!(
        //     "before execve: strong count: {}",
        //     Arc::strong_count(&ProcessManager::current_pcb())
        // );

        if path.is_null() {
            return Err(SystemError::EINVAL);
        }

        let x = || {
            let path: String = check_and_clone_cstr(path, Some(MAX_PATHLEN))?;
            let argv: Vec<String> = check_and_clone_cstr_array(argv)?;
            let envp: Vec<String> = check_and_clone_cstr_array(envp)?;
            Ok((path, argv, envp))
        };
        let r: Result<(String, Vec<String>, Vec<String>), SystemError> = x();
        // Propagate the error to the caller instead of panicking: an invalid user
        // pointer must not bring down the kernel.
        let (path, argv, envp) = r?;
        ProcessManager::current_pcb()
            .basic_mut()
            .set_name(ProcessControlBlock::generate_name(&path, &argv));

        Self::do_execve(path, argv, envp, frame)?;

        // Close the file descriptors that have O_CLOEXEC set.
        let fd_table = ProcessManager::current_pcb().fd_table();
        fd_table.write().close_on_exec();
        // kdebug!(
        //     "after execve: strong count: {}",
        //     Arc::strong_count(&ProcessManager::current_pcb())
        // );

        return Ok(());
    }

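    /// Wait for a child process to change state (`wait4(2)`).
    ///
    /// `wstatus` and `rusage` are optional user-space out-pointers: when non-null, the
    /// exit status and the resource usage reported by `kernel_wait4` are copied back
    /// to user space before returning.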
    pub fn wait4(
        pid: i64,
        wstatus: *mut i32,
        options: i32,
        rusage: *mut c_void,
    ) -> Result<usize, SystemError> {
        let options = WaitOption::from_bits(options as u32).ok_or(SystemError::EINVAL)?;

        let wstatus_buf = if wstatus.is_null() {
            None
        } else {
            Some(UserBufferWriter::new(
                wstatus,
                core::mem::size_of::<i32>(),
                true,
            )?)
        };

        let mut tmp_rusage = if rusage.is_null() {
            None
        } else {
            Some(RUsage::default())
        };

        let r = kernel_wait4(pid, wstatus_buf, options, tmp_rusage.as_mut())?;

        if !rusage.is_null() {
            let mut rusage_buf = UserBufferWriter::new::<RUsage>(
                rusage as *mut RUsage,
                core::mem::size_of::<RUsage>(),
                true,
            )?;
            rusage_buf.copy_one_to_user(&tmp_rusage.unwrap(), 0)?;
        }
        return Ok(r);
    }

    /// # Exit the current process
    ///
    /// ## Parameters
    ///
    /// - status: exit status
    pub fn exit(status: usize) -> ! {
        ProcessManager::exit(status);
    }

    /// @brief Get the pid of the current process (its thread-group id)
    pub fn getpid() -> Result<Pid, SystemError> {
        let current_pcb = ProcessManager::current_pcb();
        return Ok(current_pcb.tgid());
    }

    /// @brief Get the process group id (pgid) of the specified process
    ///
    /// @param pid the target process id; 0 means the calling process
    ///
    /// @return on success, the process group id of the specified process
    /// @return on error, the process does not exist (ESRCH)
    pub fn getpgid(mut pid: Pid) -> Result<Pid, SystemError> {
        if pid == Pid(0) {
            let current_pcb = ProcessManager::current_pcb();
            pid = current_pcb.pid();
        }
        let target_proc = ProcessManager::find(pid).ok_or(SystemError::ESRCH)?;
        return Ok(target_proc.basic().pgid());
    }

    /// @brief Get the parent process id (ppid) of the current process
    ///
    /// If the current process is initproc, the reported ppid is 0.
    pub fn getppid() -> Result<Pid, SystemError> {
        let current_pcb = ProcessManager::current_pcb();
        return Ok(current_pcb.basic().ppid());
    }

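    /// Create a new process or thread according to `clone_args` (`clone(2)`).
    ///
    /// The new PCB is copied from the caller, registered with procfs, optionally linked
    /// to a vfork completion, and then woken up. With `CLONE_VFORK` the caller blocks
    /// until the child exits or calls exec.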
    pub fn clone(
        current_trapframe: &mut TrapFrame,
        clone_args: KernelCloneArgs,
    ) -> Result<usize, SystemError> {
        let flags = clone_args.flags;

        let vfork = Arc::new(Completion::new());

        if flags.contains(CloneFlags::CLONE_PIDFD)
            && flags.contains(CloneFlags::CLONE_PARENT_SETTID)
        {
            return Err(SystemError::EINVAL);
        }

        let current_pcb = ProcessManager::current_pcb();
        let new_kstack = KernelStack::new()?;
        let name = current_pcb.basic().name().to_string();
        let pcb = ProcessControlBlock::new(name, new_kstack);
        // Clone the current PCB into the new one.
        ProcessManager::copy_process(&current_pcb, &pcb, clone_args, current_trapframe)?;
        ProcessManager::add_pcb(pcb.clone());

        // Register the new process with procfs.
        procfs_register_pid(pcb.pid()).unwrap_or_else(|e| {
            panic!(
                "fork: Failed to register pid to procfs, pid: [{:?}]. Error: {:?}",
                pcb.pid(),
                e
            )
        });

        if flags.contains(CloneFlags::CLONE_VFORK) {
            pcb.thread.write().vfork_done = Some(vfork.clone());
        }

        if pcb.thread.read().set_child_tid.is_some() {
            let addr = pcb.thread.read().set_child_tid.unwrap();
            let mut writer =
                UserBufferWriter::new(addr.as_ptr::<i32>(), core::mem::size_of::<i32>(), true)?;
            writer.copy_one_to_user(&(pcb.pid().data() as i32), 0)?;
        }

        ProcessManager::wakeup(&pcb).unwrap_or_else(|e| {
            panic!(
                "fork: Failed to wakeup new process, pid: [{:?}]. Error: {:?}",
                pcb.pid(),
                e
            )
        });

        if flags.contains(CloneFlags::CLONE_VFORK) {
            // Wait until the child exits or calls exec.
            vfork.wait_for_completion_interruptible()?;
        }

        return Ok(pcb.pid().0);
    }

    /// Set the `clear_child_tid` address of the calling thread (set_tid_address(2))
    /// and return the caller's thread id.
    pub fn set_tid_address(ptr: usize) -> Result<usize, SystemError> {
        verify_area(VirtAddr::new(ptr), core::mem::size_of::<i32>())
            .map_err(|_| SystemError::EFAULT)?;

        let pcb = ProcessManager::current_pcb();
        pcb.thread.write().clear_child_tid = Some(VirtAddr::new(ptr));
        Ok(pcb.pid.0)
    }

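    /// Get the thread id (tid) of the calling thread.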
    pub fn gettid() -> Result<Pid, SystemError> {
        let pcb = ProcessManager::current_pcb();
        Ok(pcb.pid)
    }

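    /// Get the real user id of the calling process.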
    pub fn getuid() -> Result<usize, SystemError> {
        // TODO: revisit once credential (cred) support is implemented.
        return Ok(0);
    }

    pub fn getgid() -> Result<usize, SystemError> {
        // TODO: revisit once credential (cred) support is implemented.
        return Ok(0);
    }

    pub fn geteuid() -> Result<usize, SystemError> {
        // TODO: revisit once credential (cred) support is implemented.
        return Ok(0);
    }

    pub fn getegid() -> Result<usize, SystemError> {
        // TODO: revisit once credential (cred) support is implemented.
        return Ok(0);
    }

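    /// Get resource-usage statistics for the target selected by `who` and copy them
    /// into the user-provided `rusage` buffer.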
    pub fn get_rusage(who: i32, rusage: *mut RUsage) -> Result<usize, SystemError> {
        let who = RUsageWho::try_from(who)?;
        let mut writer = UserBufferWriter::new(rusage, core::mem::size_of::<RUsage>(), true)?;
        let pcb = ProcessManager::current_pcb();
        let rusage = pcb.get_rusage(who).ok_or(SystemError::EINVAL)?;

        let ubuf = writer.buffer::<RUsage>(0).unwrap();
        ubuf.copy_from_slice(&[rusage]);

        return Ok(0);
    }

    /// # Set resource limits
    ///
    /// TODO: setting resource limits is not supported yet; only reading the default values is provided.
    ///
    /// ## Parameters
    ///
    /// - pid: process id
    /// - resource: resource type
    /// - new_limit: the new resource limit
    /// - old_limit: the old resource limit
    ///
    /// ## Return value
    ///
    /// - 0 on success
    /// - if old_limit is not NULL, the old resource limit is written back through old_limit
    ///
    pub fn prlimit64(
        _pid: Pid,
        resource: usize,
        _new_limit: *const RLimit64,
        old_limit: *mut RLimit64,
    ) -> Result<usize, SystemError> {
        let resource = RLimitID::try_from(resource)?;
        let mut writer = None;

        if !old_limit.is_null() {
            writer = Some(UserBufferWriter::new(
                old_limit,
                core::mem::size_of::<RLimit64>(),
                true,
            )?);
        }

        match resource {
            RLimitID::Stack => {
                if let Some(mut writer) = writer {
                    // Write the default limits directly into the user buffer; modifying a
                    // local copy of the element would never reach user space.
                    let rlimit = &mut writer.buffer::<RLimit64>(0).unwrap()[0];
                    rlimit.rlim_cur = UserStack::DEFAULT_USER_STACK_SIZE as u64;
                    rlimit.rlim_max = UserStack::DEFAULT_USER_STACK_SIZE as u64;
                }
                return Ok(0);
            }

            RLimitID::Nofile => {
                if let Some(mut writer) = writer {
                    let rlimit = &mut writer.buffer::<RLimit64>(0).unwrap()[0];
                    rlimit.rlim_cur = FileDescriptorVec::PROCESS_MAX_FD as u64;
                    rlimit.rlim_max = FileDescriptorVec::PROCESS_MAX_FD as u64;
                }
                return Ok(0);
            }

            RLimitID::As | RLimitID::Rss => {
                if let Some(mut writer) = writer {
                    let rlimit = &mut writer.buffer::<RLimit64>(0).unwrap()[0];
                    rlimit.rlim_cur = MMArch::USER_END_VADDR.data() as u64;
                    rlimit.rlim_max = MMArch::USER_END_VADDR.data() as u64;
                }
                return Ok(0);
            }

            _ => {
                return Err(SystemError::ENOSYS);
            }
        }
    }
}