use core::ffi::c_void;

use alloc::{
    string::{String, ToString},
    sync::Arc,
    vec::Vec,
};

use super::{
    abi::WaitOption,
    exit::kernel_wait4,
    fork::{CloneFlags, KernelCloneArgs},
    resource::{RLimit64, RLimitID, RUsage, RUsageWho},
    KernelStack, Pid, ProcessManager,
};
use crate::{
    arch::{interrupt::TrapFrame, MMArch},
    filesystem::{
        procfs::procfs_register_pid,
        vfs::{file::FileDescriptorVec, MAX_PATHLEN},
    },
    mm::{ucontext::UserStack, verify_area, MemoryManagementArch, VirtAddr},
    process::ProcessControlBlock,
    sched::completion::Completion,
    syscall::{
        user_access::{check_and_clone_cstr, check_and_clone_cstr_array, UserBufferWriter},
        Syscall, SystemError,
    },
};

impl Syscall {
    pub fn fork(frame: &mut TrapFrame) -> Result<usize, SystemError> {
        let r = ProcessManager::fork(frame, CloneFlags::empty()).map(|pid| pid.into());
        return r;
    }

    pub fn vfork(frame: &mut TrapFrame) -> Result<usize, SystemError> {
        // Linux vfork guarantees that the child runs first (until it calls
        // execve or exit). We have not implemented that guarantee yet, so fall
        // back to fork for now (the Linux man page states this is acceptable).
        Self::fork(frame)

        // The previous implementation is kept below. Do NOT re-enable it until
        // the child-runs-first guarantee is implemented; otherwise the
        // parent's data will be corrupted.
        // ProcessManager::fork(
        //     frame,
        //     CloneFlags::CLONE_VM | CloneFlags::CLONE_FS | CloneFlags::CLONE_SIGNAL,
        // )
        // .map(|pid| pid.into())
    }

    pub fn execve(
        path: *const u8,
        argv: *const *const u8,
        envp: *const *const u8,
        frame: &mut TrapFrame,
    ) -> Result<(), SystemError> {
        // kdebug!(
        //     "execve path: {:?}, argv: {:?}, envp: {:?}\n",
        //     path,
        //     argv,
        //     envp
        // );
        // kdebug!(
        //     "before execve: strong count: {}",
        //     Arc::strong_count(&ProcessManager::current_pcb())
        // );

        if path.is_null() {
            return Err(SystemError::EINVAL);
        }

        let x = || {
            let path: String = check_and_clone_cstr(path, Some(MAX_PATHLEN))?;
            let argv: Vec<String> = check_and_clone_cstr_array(argv)?;
            let envp: Vec<String> = check_and_clone_cstr_array(envp)?;
            Ok((path, argv, envp))
        };
        let r: Result<(String, Vec<String>, Vec<String>), SystemError> = x();
        // A bad user pointer is a caller error, not a kernel bug: propagate
        // the error instead of panicking.
        let (path, argv, envp) = r?;
        ProcessManager::current_pcb()
            .basic_mut()
            .set_name(ProcessControlBlock::generate_name(&path, &argv));

        Self::do_execve(path, argv, envp, frame)?;

        // Close all file descriptors that have O_CLOEXEC set.
        let fd_table = ProcessManager::current_pcb().fd_table();
        fd_table.write().close_on_exec();
        // kdebug!(
        //     "after execve: strong count: {}",
        //     Arc::strong_count(&ProcessManager::current_pcb())
        // );

        return Ok(());
    }

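    /// # Wait for a child process to change state
    ///
    /// Writes the child's exit status to `wstatus` and its resource usage to
    /// `rusage` when those pointers are non-null; a null pointer skips the
    /// corresponding write-back.
    ///
    /// A hedged usage sketch (illustrative only; it assumes `WaitOption` in
    /// `super::abi` defines the standard `WNOHANG` bit):
    ///
    /// ```ignore
    /// // Reap any child (pid == -1) without blocking; both out-pointers are optional.
    /// let reaped = Syscall::wait4(
    ///     -1,
    ///     core::ptr::null_mut(),
    ///     WaitOption::WNOHANG.bits() as i32,
    ///     core::ptr::null_mut(),
    /// )?;
    /// ```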
    pub fn wait4(
        pid: i64,
        wstatus: *mut i32,
        options: i32,
        rusage: *mut c_void,
    ) -> Result<usize, SystemError> {
        let options = WaitOption::from_bits(options as u32).ok_or(SystemError::EINVAL)?;

        let wstatus_buf = if wstatus.is_null() {
            None
        } else {
            Some(UserBufferWriter::new(
                wstatus,
                core::mem::size_of::<i32>(),
                true,
            )?)
        };

        let mut tmp_rusage = if rusage.is_null() {
            None
        } else {
            Some(RUsage::default())
        };

        let r = kernel_wait4(pid, wstatus_buf, options, tmp_rusage.as_mut())?;

        if !rusage.is_null() {
            let mut rusage_buf = UserBufferWriter::new::<RUsage>(
                rusage as *mut RUsage,
                core::mem::size_of::<RUsage>(),
                true,
            )?;
            rusage_buf.copy_one_to_user(&tmp_rusage.unwrap(), 0)?;
        }
        return Ok(r);
    }

    /// # Exit the current process
    ///
    /// ## Arguments
    ///
    /// - status: exit status
    pub fn exit(status: usize) -> ! {
        ProcessManager::exit(status);
    }

    /// @brief Get the pid of the current process (its tgid, i.e. the process
    /// id visible to userspace)
    pub fn getpid() -> Result<Pid, SystemError> {
        let current_pcb = ProcessManager::current_pcb();
        return Ok(current_pcb.tgid());
    }

    /// @brief Get the pgid of the specified process
    ///
    /// @param pid the target process id; 0 means the current process
    ///
    /// @return on success, the process group id of the target process
    /// @return on failure, no such process exists (ESRCH)
    pub fn getpgid(mut pid: Pid) -> Result<Pid, SystemError> {
        if pid == Pid(0) {
            let current_pcb = ProcessManager::current_pcb();
            pid = current_pcb.pid();
        }
        let target_proc = ProcessManager::find(pid).ok_or(SystemError::ESRCH)?;
        return Ok(target_proc.basic().pgid());
    }

    /// @brief Get the parent process id of the current process
    ///
    /// For initproc, the ppid is 0.
    pub fn getppid() -> Result<Pid, SystemError> {
        let current_pcb = ProcessManager::current_pcb();
        return Ok(current_pcb.basic().ppid());
    }

    pub fn clone(
        current_trapframe: &mut TrapFrame,
        clone_args: KernelCloneArgs,
    ) -> Result<usize, SystemError> {
        let flags = clone_args.flags;

        let vfork = Arc::new(Completion::new());

        // CLONE_PIDFD and CLONE_PARENT_SETTID share the parent_tid pointer,
        // so they are mutually exclusive.
        if flags.contains(CloneFlags::CLONE_PIDFD)
            && flags.contains(CloneFlags::CLONE_PARENT_SETTID)
        {
            return Err(SystemError::EINVAL);
        }

        let current_pcb = ProcessManager::current_pcb();
        let new_kstack = KernelStack::new()?;
        let name = current_pcb.basic().name().to_string();
        let pcb = ProcessControlBlock::new(name, new_kstack);
        // Clone the PCB
        ProcessManager::copy_process(&current_pcb, &pcb, clone_args, current_trapframe)?;
        ProcessManager::add_pcb(pcb.clone());

        // Register the new process with procfs
        procfs_register_pid(pcb.pid()).unwrap_or_else(|e| {
            panic!(
                "fork: Failed to register pid to procfs, pid: [{:?}]. Error: {:?}",
                pcb.pid(),
                e
            )
        });

        if flags.contains(CloneFlags::CLONE_VFORK) {
            pcb.thread.write().vfork_done = Some(vfork.clone());
        }

        // Read the address once instead of taking the thread read lock twice.
        let set_child_tid = pcb.thread.read().set_child_tid;
        if let Some(addr) = set_child_tid {
            let mut writer =
                UserBufferWriter::new(addr.as_ptr::<i32>(), core::mem::size_of::<i32>(), true)?;
            writer.copy_one_to_user(&(pcb.pid().data() as i32), 0)?;
        }

        ProcessManager::wakeup(&pcb).unwrap_or_else(|e| {
            panic!(
                "fork: Failed to wakeup new process, pid: [{:?}]. Error: {:?}",
                pcb.pid(),
                e
            )
        });

        if flags.contains(CloneFlags::CLONE_VFORK) {
            // Wait until the child exits or calls exec.
            vfork.wait_for_completion_interruptible()?;
        }

        return Ok(pcb.pid().0);
    }

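    // Illustrative sketch (not executable kernel code): the CLONE_VFORK
    // handshake that `clone` above takes part in. It assumes a
    // `Completion::complete()` method in crate::sched::completion, and that
    // the child's exec/exit path takes `vfork_done` out of its thread struct:
    //
    //   parent (in `clone`):  vfork.wait_for_completion_interruptible()?;  // blocks
    //   child  (exit/exec):   if let Some(done) = thread.vfork_done.take() {
    //                             done.complete();  // wakes the vforked parent
    //                         }
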
    /// Set the `clear_child_tid` address for the current thread and return
    /// its thread id.
    pub fn set_tid_address(ptr: usize) -> Result<usize, SystemError> {
        verify_area(VirtAddr::new(ptr), core::mem::size_of::<i32>())
            .map_err(|_| SystemError::EFAULT)?;

        let pcb = ProcessManager::current_pcb();
        pcb.thread.write().clear_child_tid = Some(VirtAddr::new(ptr));
        Ok(pcb.pid.0)
    }

    pub fn gettid() -> Result<Pid, SystemError> {
        let pcb = ProcessManager::current_pcb();
        Ok(pcb.pid)
    }

    pub fn getuid() -> Result<usize, SystemError> {
        // TODO: revisit once credential support is implemented
        return Ok(0);
    }

    pub fn getgid() -> Result<usize, SystemError> {
        // TODO: revisit once credential support is implemented
        return Ok(0);
    }

    pub fn geteuid() -> Result<usize, SystemError> {
        // TODO: revisit once credential support is implemented
        return Ok(0);
    }

    pub fn getegid() -> Result<usize, SystemError> {
        // TODO: revisit once credential support is implemented
        return Ok(0);
    }

    pub fn get_rusage(who: i32, rusage: *mut RUsage) -> Result<usize, SystemError> {
        let who = RUsageWho::try_from(who)?;
        let mut writer = UserBufferWriter::new(rusage, core::mem::size_of::<RUsage>(), true)?;
        let pcb = ProcessManager::current_pcb();
        let rusage = pcb.get_rusage(who).ok_or(SystemError::EINVAL)?;

        let ubuf = writer.buffer::<RUsage>(0).unwrap();
        ubuf.copy_from_slice(&[rusage]);

        return Ok(0);
    }

    /// # Set resource limits
    ///
    /// TODO: setting limits is not supported yet; only the default values can
    /// be read back.
    ///
    /// ## Arguments
    ///
    /// - pid: process id
    /// - resource: resource type
    /// - new_limit: the new limit
    /// - old_limit: the old limit
    ///
    /// ## Returns
    ///
    /// - 0 on success
    /// - if old_limit is not NULL, the old limit is written back through it
    pub fn prlimit64(
        _pid: Pid,
        resource: usize,
        _new_limit: *const RLimit64,
        old_limit: *mut RLimit64,
    ) -> Result<usize, SystemError> {
        let resource = RLimitID::try_from(resource)?;
        let mut writer = None;

        if !old_limit.is_null() {
            writer = Some(UserBufferWriter::new(
                old_limit,
                core::mem::size_of::<RLimit64>(),
                true,
            )?);
        }

        match resource {
            RLimitID::Stack => {
                if let Some(mut writer) = writer {
                    // Write through the user buffer; copying the element out
                    // would only modify a local value.
                    let rlimit = &mut writer.buffer::<RLimit64>(0).unwrap()[0];
                    rlimit.rlim_cur = UserStack::DEFAULT_USER_STACK_SIZE as u64;
                    rlimit.rlim_max = UserStack::DEFAULT_USER_STACK_SIZE as u64;
                }
                return Ok(0);
            }

            RLimitID::Nofile => {
                if let Some(mut writer) = writer {
                    let rlimit = &mut writer.buffer::<RLimit64>(0).unwrap()[0];
                    rlimit.rlim_cur = FileDescriptorVec::PROCESS_MAX_FD as u64;
                    rlimit.rlim_max = FileDescriptorVec::PROCESS_MAX_FD as u64;
                }
                return Ok(0);
            }

            RLimitID::As | RLimitID::Rss => {
                if let Some(mut writer) = writer {
                    let rlimit = &mut writer.buffer::<RLimit64>(0).unwrap()[0];
                    rlimit.rlim_cur = MMArch::USER_END_VADDR.data() as u64;
                    rlimit.rlim_max = MMArch::USER_END_VADDR.data() as u64;
                }
                return Ok(0);
            }

            _ => {
                return Err(SystemError::ENOSYS);
            }
        }
    }
}
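
// Illustrative sketch (not executable kernel code): the read-only contract of
// `prlimit64` above, as seen through a hypothetical userspace wrapper
// `sys_prlimit64`. `RLimit64::default()` and the numeric cast of
// `RLimitID::Stack` are assumptions made for the example.
//
//   let mut old = RLimit64::default();
//   sys_prlimit64(0, RLimitID::Stack as usize, core::ptr::null(), &mut old)?;
//   assert_eq!(old.rlim_cur, UserStack::DEFAULT_USER_STACK_SIZE as u64);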