use core::{any::Any, fmt::Debug, sync::atomic::AtomicUsize};

use alloc::{
    boxed::Box,
    collections::LinkedList,
    string::String,
    sync::{Arc, Weak},
    vec::Vec,
};
use hashbrown::HashMap;
use smoltcp::{
    iface::{SocketHandle, SocketSet},
    socket::{self, tcp, udp},
};
use system_error::SystemError;

use crate::{
    arch::rand::rand,
    filesystem::vfs::{
        file::FileMode, syscall::ModeType, FilePrivateData, FileSystem, FileType, IndexNode,
        Metadata,
    },
    libs::{
        rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
        spinlock::{SpinLock, SpinLockGuard},
        wait_queue::EventWaitQueue,
    },
    sched::{schedule, SchedMode},
};

use self::{
    inet::{RawSocket, TcpSocket, UdpSocket},
    unix::{SeqpacketSocket, StreamSocket},
};

use super::{
    event_poll::{EPollEventType, EPollItem, EventPoll},
    net_core::poll_ifaces,
    Endpoint, Protocol, ShutdownType,
};

pub mod inet;
pub mod unix;

lazy_static! {
    /// The global set of all sockets.
    /// TODO: Optimize this by implementing our own SocketSet! As it stands, no matter how many
    /// NICs exist, only one process can access the sockets at any point in time.
    pub static ref SOCKET_SET: SpinLock<SocketSet<'static>> = SpinLock::new(SocketSet::new(vec![]));
    /// SocketHandle table: each SocketHandle maps to one SocketHandleItem.
    /// Note: this table is also taken in NIC interrupt context, so interrupts must be disabled
    /// while holding the read lock to avoid deadlock.
    pub static ref HANDLE_MAP: RwLock<HashMap<SocketHandle, SocketHandleItem>> = RwLock::new(HashMap::new());
    /// The port manager.
    pub static ref PORT_MANAGER: PortManager = PortManager::new();
}

/* For setsockopt(2) */
// See: linux-5.19.10/include/uapi/asm-generic/socket.h#9
pub const SOL_SOCKET: u8 = 1;

/// Create a socket from the given address family, socket type and protocol.
pub(super) fn new_socket(
    address_family: AddressFamily,
    socket_type: PosixSocketType,
    protocol: Protocol,
) -> Result<Box<dyn Socket>, SystemError> {
    let socket: Box<dyn Socket> = match address_family {
        AddressFamily::Unix => match socket_type {
            PosixSocketType::Stream => Box::new(StreamSocket::new(SocketOptions::default())),
            PosixSocketType::SeqPacket => Box::new(SeqpacketSocket::new(SocketOptions::default())),
            _ => {
                return Err(SystemError::EINVAL);
            }
        },
        AddressFamily::INet => match socket_type {
            PosixSocketType::Stream => Box::new(TcpSocket::new(SocketOptions::default())),
            PosixSocketType::Datagram => Box::new(UdpSocket::new(SocketOptions::default())),
            PosixSocketType::Raw => Box::new(RawSocket::new(protocol, SocketOptions::default())),
            _ => {
                return Err(SystemError::EINVAL);
            }
        },
        _ => {
            return Err(SystemError::EAFNOSUPPORT);
        }
    };
    Ok(socket)
}
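
// Usage sketch (illustrative only, not called by the kernel build): the socket(2) syscall layer
// is expected to resolve the numeric syscall arguments into the enums below, call `new_socket`,
// and wrap the result in a `SocketInode` (defined later in this file) so it can be installed in
// the file descriptor table. `protocol` here is a hypothetical, already-parsed Protocol value.
//
//     let socket = new_socket(AddressFamily::INet, PosixSocketType::Stream, protocol)?;
//     let inode = SocketInode::new(socket);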

pub trait Socket: Sync + Send + Debug + Any {
    /// @brief Read data from the socket. If the socket is blocking, this does not return until
    /// data has been read.
    ///
    /// @param buf Buffer that receives the data.
    ///
    /// @return - On success: (number of bytes read, endpoint the data was read from).
    ///         - On failure: error code.
    fn read(&self, buf: &mut [u8]) -> (Result<usize, SystemError>, Endpoint);

    /// @brief Write data to the socket. If the socket is blocking, this does not return until
    /// all of the data has been written to the socket.
    ///
    /// @param buf Data to write.
    /// @param to Destination endpoint. If None, the written data is discarded.
    ///
    /// @return Number of bytes written.
    fn write(&self, buf: &[u8], to: Option<Endpoint>) -> Result<usize, SystemError>;

    /// @brief Equivalent of the POSIX connect function; connects to the given remote endpoint.
    ///
    /// It is used to establish a connection to a remote server.
    /// When a socket is connected to a remote server,
    /// the operating system will establish a network connection with the server
    /// and allow data to be sent and received between the local socket and the remote server.
    ///
    /// @param endpoint Endpoint to connect to.
    ///
    /// @return Whether the connection succeeded.
    fn connect(&mut self, _endpoint: Endpoint) -> Result<(), SystemError>;

    /// @brief Equivalent of the POSIX bind function; binds the socket to a local endpoint.
    ///
    /// The bind() function is used to associate a socket with a particular IP address and port number on the local machine.
    ///
    /// @param endpoint Endpoint to bind to.
    ///
    /// @return Whether the bind succeeded.
    fn bind(&mut self, _endpoint: Endpoint) -> Result<(), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Equivalent of the POSIX shutdown function; shuts the socket down.
    ///
    /// shutdown() initiates an orderly shutdown of a network connection.
    /// Once a connection is established between two endpoints, either endpoint can start the
    /// shutdown sequence by calling shutdown() on its socket object.
    /// This sends a shutdown message to the remote endpoint to indicate that the local endpoint
    /// will no longer accept new data.
    ///
    /// @return Whether the shutdown succeeded.
    fn shutdown(&mut self, _type: ShutdownType) -> Result<(), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Equivalent of the POSIX listen function; listens on the bound endpoint.
    ///
    /// @param backlog Maximum number of pending connections.
    ///
    /// @return Whether listening succeeded.
    fn listen(&mut self, _backlog: usize) -> Result<(), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Equivalent of the POSIX accept function; accepts an incoming connection.
    ///
    /// @return On success, the newly created socket and the peer's endpoint.
    fn accept(&mut self) -> Result<(Box<dyn Socket>, Endpoint), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Get the local endpoint of the socket.
    ///
    /// @return The local endpoint, if any.
    fn endpoint(&self) -> Option<Endpoint> {
        None
    }

    /// @brief Get the peer endpoint of the socket.
    ///
    /// @return The peer endpoint, if any.
    fn peer_endpoint(&self) -> Option<Endpoint> {
        None
    }

    /// @brief
    /// The purpose of the poll function is to provide
    /// a non-blocking way to check if a socket is ready for reading or writing,
    /// so that you can efficiently handle multiple sockets in a single thread or event loop.
    ///
    /// @return (in, out, err)
    ///
    /// The first boolean value indicates whether the socket is ready for reading. If it is true, then there is data available to be read from the socket without blocking.
    /// The second boolean value indicates whether the socket is ready for writing. If it is true, then data can be written to the socket without blocking.
    /// The third boolean value indicates whether the socket has encountered an error condition. If it is true, then the socket is in an error state and should be closed or reset.
    fn poll(&self) -> EPollEventType {
        EPollEventType::empty()
    }

    /// @brief The socket's ioctl handler.
    ///
    /// @param cmd ioctl command
    /// @param arg0 First argument of the ioctl command
    /// @param arg1 Second argument of the ioctl command
    /// @param arg2 Third argument of the ioctl command
    ///
    /// @return Return value of the ioctl command.
    fn ioctl(
        &self,
        _cmd: usize,
        _arg0: usize,
        _arg1: usize,
        _arg2: usize,
    ) -> Result<usize, SystemError> {
        Ok(0)
    }

    /// @brief Get the socket's metadata.
    fn metadata(&self) -> SocketMetadata;

    fn box_clone(&self) -> Box<dyn Socket>;

    /// @brief Set a socket option.
    ///
    /// @param level Level of the option
    /// @param optname Name of the option
    /// @param optval Value of the option
    ///
    /// @return Whether setting the option succeeded; returns ENOSYS if the option is not supported.
    fn setsockopt(
        &self,
        _level: usize,
        _optname: usize,
        _optval: &[u8],
    ) -> Result<(), SystemError> {
        kwarn!("setsockopt is not implemented");
        Ok(())
    }

    fn socket_handle(&self) -> SocketHandle {
        todo!()
    }

    fn write_buffer(&self, _buf: &[u8]) -> Result<usize, SystemError> {
        todo!()
    }

    fn as_any_ref(&self) -> &dyn Any;

    fn as_any_mut(&mut self) -> &mut dyn Any;

    fn add_epoll(&mut self, epitem: Arc<EPollItem>) -> Result<(), SystemError> {
        HANDLE_MAP
            .write_irqsave()
            .get_mut(&self.socket_handle())
            .unwrap()
            .add_epoll(epitem);
        Ok(())
    }

    fn remove_epoll(&mut self, epoll: &Weak<SpinLock<EventPoll>>) -> Result<(), SystemError> {
        HANDLE_MAP
            .write_irqsave()
            .get_mut(&self.socket_handle())
            .unwrap()
            .remove_epoll(epoll)?;

        Ok(())
    }

    fn clear_epoll(&mut self) -> Result<(), SystemError> {
        let mut handle_map_guard = HANDLE_MAP.write_irqsave();
        let handle_item = handle_map_guard.get_mut(&self.socket_handle()).unwrap();

        for epitem in handle_item.epitems.lock_irqsave().iter() {
            let epoll = epitem.epoll();
            if epoll.upgrade().is_some() {
                EventPoll::ep_remove(
                    &mut epoll.upgrade().unwrap().lock_irqsave(),
                    epitem.fd(),
                    None,
                )?;
            }
        }

        Ok(())
    }
}

impl Clone for Box<dyn Socket> {
    fn clone(&self) -> Box<dyn Socket> {
        self.box_clone()
    }
}

/// # Inode wrapper that exposes a socket to the filesystem.
#[derive(Debug)]
pub struct SocketInode(SpinLock<Box<dyn Socket>>, AtomicUsize);

impl SocketInode {
    pub fn new(socket: Box<dyn Socket>) -> Arc<Self> {
        Arc::new(Self(SpinLock::new(socket), AtomicUsize::new(0)))
    }

    #[inline]
    pub fn inner(&self) -> SpinLockGuard<Box<dyn Socket>> {
        self.0.lock()
    }

    pub unsafe fn inner_no_preempt(&self) -> SpinLockGuard<Box<dyn Socket>> {
        self.0.lock_no_preempt()
    }
}
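
// Usage sketch (illustrative only): a freshly created socket is wrapped in a SocketInode so it
// can live in the file descriptor table. The second field is an open counter: open() increments
// it and close() only tears the socket down on the last close, releasing the bound port, the
// epoll items and the HANDLE_MAP entry (see the IndexNode impl below).
//
//     let inode: Arc<SocketInode> = SocketInode::new(new_socket(family, sock_type, protocol)?);
//     // The file layer calls inode.open(..) once per file table entry and inode.close(..) when
//     // that entry is dropped.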

impl IndexNode for SocketInode {
    fn open(
        &self,
        _data: SpinLockGuard<FilePrivateData>,
        _mode: &FileMode,
    ) -> Result<(), SystemError> {
        self.1.fetch_add(1, core::sync::atomic::Ordering::SeqCst);
        Ok(())
    }

    fn close(&self, _data: SpinLockGuard<FilePrivateData>) -> Result<(), SystemError> {
        let prev_ref_count = self.1.fetch_sub(1, core::sync::atomic::Ordering::SeqCst);
        if prev_ref_count == 1 {
            // This was the last close, so release the socket's resources.
            let mut socket = self.0.lock_irqsave();

            if socket.metadata().socket_type == SocketType::Unix {
                return Ok(());
            }

            if let Some(Endpoint::Ip(Some(ip))) = socket.endpoint() {
                PORT_MANAGER.unbind_port(socket.metadata().socket_type, ip.port)?;
            }

            socket.clear_epoll()?;

            HANDLE_MAP
                .write_irqsave()
                .remove(&socket.socket_handle())
                .unwrap();
        }

        Ok(())
    }

    fn read_at(
        &self,
        _offset: usize,
        len: usize,
        buf: &mut [u8],
        data: SpinLockGuard<FilePrivateData>,
    ) -> Result<usize, SystemError> {
        drop(data);
        self.0.lock_no_preempt().read(&mut buf[0..len]).0
    }

    fn write_at(
        &self,
        _offset: usize,
        len: usize,
        buf: &[u8],
        data: SpinLockGuard<FilePrivateData>,
    ) -> Result<usize, SystemError> {
        drop(data);
        self.0.lock_no_preempt().write(&buf[0..len], None)
    }

    fn poll(&self, _private_data: &FilePrivateData) -> Result<usize, SystemError> {
        let events = self.0.lock_irqsave().poll();
        return Ok(events.bits() as usize);
    }

    fn fs(&self) -> Arc<dyn FileSystem> {
        todo!()
    }

    fn as_any_ref(&self) -> &dyn Any {
        self
    }

    fn list(&self) -> Result<Vec<String>, SystemError> {
        return Err(SystemError::ENOTDIR);
    }

    fn metadata(&self) -> Result<Metadata, SystemError> {
        let meta = Metadata {
            mode: ModeType::from_bits_truncate(0o755),
            file_type: FileType::Socket,
            ..Default::default()
        };

        return Ok(meta);
    }

    fn resize(&self, _len: usize) -> Result<(), SystemError> {
        return Ok(());
    }
}

#[derive(Debug)]
pub struct SocketHandleItem {
    /// Shutdown state.
    pub shutdown_type: RwLock<ShutdownType>,
    /// The socket's wait queue.
    pub wait_queue: EventWaitQueue,
    /// epitems. Is keeping them here really the best place for them?
    pub epitems: SpinLock<LinkedList<Arc<EPollItem>>>,
}

impl SocketHandleItem {
    pub fn new() -> Self {
        Self {
            shutdown_type: RwLock::new(ShutdownType::empty()),
            wait_queue: EventWaitQueue::new(),
            epitems: SpinLock::new(LinkedList::new()),
        }
    }

    /// ## Sleep on the socket's wait queue.
    pub fn sleep(
        socket_handle: SocketHandle,
        events: u64,
        handle_map_guard: RwLockReadGuard<'_, HashMap<SocketHandle, SocketHandleItem>>,
    ) {
        unsafe {
            handle_map_guard
                .get(&socket_handle)
                .unwrap()
                .wait_queue
                .sleep_without_schedule(events)
        };
        drop(handle_map_guard);
        schedule(SchedMode::SM_NONE);
    }

    pub fn shutdown_type(&self) -> ShutdownType {
        *self.shutdown_type.read()
    }

    pub fn shutdown_type_writer(&mut self) -> RwLockWriteGuard<ShutdownType> {
        self.shutdown_type.write_irqsave()
    }

    pub fn add_epoll(&mut self, epitem: Arc<EPollItem>) {
        self.epitems.lock_irqsave().push_back(epitem)
    }

    pub fn remove_epoll(&mut self, epoll: &Weak<SpinLock<EventPoll>>) -> Result<(), SystemError> {
        let is_remove = !self
            .epitems
            .lock_irqsave()
            .extract_if(|x| x.epoll().ptr_eq(epoll))
            .collect::<Vec<_>>()
            .is_empty();

        if is_remove {
            return Ok(());
        }

        Err(SystemError::ENOENT)
    }
}
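
// Blocking-wait sketch (illustrative only): a blocking socket operation typically retries and,
// when nothing is ready, parks itself on the wait queue until the NIC poll path wakes it up.
// `sleep` enqueues the task before dropping the read guard and calling schedule(); taking the
// guard with interrupts saved/disabled follows the HANDLE_MAP note near the top of this file,
// and `read_irqsave()` is assumed to be the matching RwLock accessor.
//
//     loop {
//         poll_ifaces();
//         // ... try the operation under SOCKET_SET; break out on success ...
//         SocketHandleItem::sleep(
//             handle,
//             EPollEventType::EPOLLIN.bits() as u64,
//             HANDLE_MAP.read_irqsave(),
//         );
//     }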

/// # Port manager for TCP and UDP.
/// When a TCP/UDP socket binds a port, the binding is recorded in the corresponding table so
/// that port conflicts can be detected.
pub struct PortManager {
    // TCP port table
    tcp_port_table: SpinLock<HashMap<u16, Arc<dyn Socket>>>,
    // UDP port table
    udp_port_table: SpinLock<HashMap<u16, Arc<dyn Socket>>>,
}

impl PortManager {
    pub fn new() -> Self {
        return Self {
            tcp_port_table: SpinLock::new(HashMap::new()),
            udp_port_table: SpinLock::new(HashMap::new()),
        };
    }

    /// @brief Automatically allocate a currently unused port for the given protocol. If all
    /// dynamic ports are taken, return EADDRINUSE.
    pub fn get_ephemeral_port(&self, socket_type: SocketType) -> Result<u16, SystemError> {
        // TODO: selects non-conflict high port

        static mut EPHEMERAL_PORT: u16 = 0;
        unsafe {
            if EPHEMERAL_PORT == 0 {
                EPHEMERAL_PORT = (49152 + rand() % (65536 - 49152)) as u16;
            }
        }

        let mut remaining = 65536 - 49152; // Number of allocation attempts remaining
        let mut port: u16;
        while remaining > 0 {
            unsafe {
                if EPHEMERAL_PORT == 65535 {
                    EPHEMERAL_PORT = 49152;
                } else {
                    EPHEMERAL_PORT += 1;
                }
                port = EPHEMERAL_PORT;
            }

            // Check whether the port is already taken, using the per-protocol port table
            let listen_table_guard = match socket_type {
                SocketType::Udp => self.udp_port_table.lock(),
                SocketType::Tcp => self.tcp_port_table.lock(),
                _ => panic!("{:?} can't get a port", socket_type),
            };
            if listen_table_guard.get(&port).is_none() {
                drop(listen_table_guard);
                return Ok(port);
            }
            remaining -= 1;
        }
        return Err(SystemError::EADDRINUSE);
    }

    /// @brief Check whether the given port is already taken; if it is free, record the binding
    /// in the corresponding TCP/UDP table.
    ///
    /// TODO: add support for port reuse.
    pub fn bind_port(
        &self,
        socket_type: SocketType,
        port: u16,
        socket: impl Socket,
    ) -> Result<(), SystemError> {
        if port > 0 {
            let mut listen_table_guard = match socket_type {
                SocketType::Udp => self.udp_port_table.lock(),
                SocketType::Tcp => self.tcp_port_table.lock(),
                _ => panic!("{:?} can't bind a port", socket_type),
            };
            match listen_table_guard.get(&port) {
                Some(_) => return Err(SystemError::EADDRINUSE),
                None => listen_table_guard.insert(port, Arc::new(socket)),
            };
            drop(listen_table_guard);
        }
        return Ok(());
    }

    /// @brief Unbind the port from the socket in the corresponding port table.
    pub fn unbind_port(&self, socket_type: SocketType, port: u16) -> Result<(), SystemError> {
        let mut listen_table_guard = match socket_type {
            SocketType::Udp => self.udp_port_table.lock(),
            SocketType::Tcp => self.tcp_port_table.lock(),
            _ => return Ok(()),
        };
        listen_table_guard.remove(&port);
        drop(listen_table_guard);
        return Ok(());
    }
}
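
// Usage sketch (illustrative only): binding a TCP socket to port 0 asks the port manager for an
// ephemeral port first and then records the binding; unbind_port is the matching cleanup done on
// the socket's final close (see SocketInode::close above). `requested_port` and `socket` are
// hypothetical values supplied by the caller.
//
//     let port = if requested_port == 0 {
//         PORT_MANAGER.get_ephemeral_port(SocketType::Tcp)?
//     } else {
//         requested_port
//     };
//     PORT_MANAGER.bind_port(SocketType::Tcp, port, socket)?;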

/// # Handle-management component for sockets.
/// It adds a layer on top of smoltcp's SocketHandle with extra functionality,
/// e.g. automatically releasing the socket's resources and notifying other parts of the system
/// when the socket is closed.
#[derive(Debug)]
pub struct GlobalSocketHandle(SocketHandle);

impl GlobalSocketHandle {
    pub fn new(handle: SocketHandle) -> Arc<Self> {
        return Arc::new(Self(handle));
    }
}

impl Clone for GlobalSocketHandle {
    fn clone(&self) -> Self {
        Self(self.0)
    }
}

impl Drop for GlobalSocketHandle {
    fn drop(&mut self) {
        let mut socket_set_guard = SOCKET_SET.lock_irqsave();
        socket_set_guard.remove(self.0); // Does removal send a FIN message?
        drop(socket_set_guard);
        poll_ifaces();
    }
}

/// @brief The kind of a socket.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SocketType {
    /// A raw socket
    Raw,
    /// A socket used for TCP communication
    Tcp,
    /// A socket used for UDP communication
    Udp,
    /// A Unix-domain socket
    Unix,
}

bitflags! {
    /// @brief Socket options.
    #[derive(Default)]
    pub struct SocketOptions: u32 {
        /// Whether the socket is blocking
        const BLOCK = 1 << 0;
        /// Whether broadcast is allowed
        const BROADCAST = 1 << 1;
        /// Whether multicast is allowed
        const MULTICAST = 1 << 2;
        /// Whether address reuse is allowed
        const REUSEADDR = 1 << 3;
        /// Whether port reuse is allowed
        const REUSEPORT = 1 << 4;
    }
}

#[derive(Debug, Clone)]
/// @brief Returned by the metadata function of trait Socket for external use.
pub struct SocketMetadata {
    /// The kind of the socket
    pub socket_type: SocketType,
    /// Size of the receive buffer
    pub rx_buf_size: usize,
    /// Size of the send buffer
    pub tx_buf_size: usize,
    /// Size of the metadata buffer
    pub metadata_buf_size: usize,
    /// The socket's options
    pub options: SocketOptions,
}

impl SocketMetadata {
    fn new(
        socket_type: SocketType,
        rx_buf_size: usize,
        tx_buf_size: usize,
        metadata_buf_size: usize,
        options: SocketOptions,
    ) -> Self {
        Self {
            socket_type,
            rx_buf_size,
            tx_buf_size,
            metadata_buf_size,
            options,
        }
    }
}
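
// Construction sketch (illustrative only): a concrete socket implementation combines the
// bitflags above and describes itself through SocketMetadata. The buffer sizes below are
// made-up placeholder values, not the ones used by the real TCP/UDP implementations.
//
//     let options = SocketOptions::BLOCK | SocketOptions::REUSEADDR;
//     let metadata = SocketMetadata::new(SocketType::Tcp, 64 * 1024, 64 * 1024, 0, options);
//     assert!(metadata.options.contains(SocketOptions::BLOCK));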

/// @brief Address family enum.
///
/// Reference: https://code.dragonos.org.cn/xref/linux-5.19.10/include/linux/socket.h#180
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
pub enum AddressFamily {
    /// AF_UNSPEC: unspecified address family
    Unspecified = 0,
    /// AF_UNIX: Unix-domain sockets (same as AF_LOCAL)
    Unix = 1,
    /// AF_INET: IPv4 sockets
    INet = 2,
    /// AF_AX25: AMPR AX.25 sockets
    AX25 = 3,
    /// AF_IPX: IPX sockets
    IPX = 4,
    /// AF_APPLETALK: Appletalk sockets
    Appletalk = 5,
    /// AF_NETROM: AMPR NET/ROM sockets
    Netrom = 6,
    /// AF_BRIDGE: multiprotocol bridge sockets
    Bridge = 7,
    /// AF_ATMPVC: ATM PVC sockets
    Atmpvc = 8,
    /// AF_X25: X.25 sockets
    X25 = 9,
    /// AF_INET6: IPv6 sockets
    INet6 = 10,
    /// AF_ROSE: AMPR ROSE sockets
    Rose = 11,
    /// AF_DECnet: Reserved for DECnet project
    Decnet = 12,
    /// AF_NETBEUI: Reserved for 802.2LLC project
    Netbeui = 13,
    /// AF_SECURITY: pseudo-AF for security callbacks
    Security = 14,
    /// AF_KEY: Key management API
    Key = 15,
    /// AF_NETLINK: Netlink sockets
    Netlink = 16,
    /// AF_PACKET: Low level packet interface
    Packet = 17,
    /// AF_ASH: Ash
    Ash = 18,
    /// AF_ECONET: Acorn Econet
    Econet = 19,
    /// AF_ATMSVC: ATM SVCs
    Atmsvc = 20,
    /// AF_RDS: Reliable Datagram Sockets
    Rds = 21,
    /// AF_SNA: Linux SNA Project
    Sna = 22,
    /// AF_IRDA: IRDA sockets
    Irda = 23,
    /// AF_PPPOX: PPPoX sockets
    Pppox = 24,
    /// AF_WANPIPE: WANPIPE API sockets
    WanPipe = 25,
    /// AF_LLC: Linux LLC
    Llc = 26,
    /// AF_IB: Native InfiniBand address
    /// See: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/configuring_infiniband_and_rdma_networks/index#understanding-infiniband-and-rdma_configuring-infiniband-and-rdma-networks
    Ib = 27,
    /// AF_MPLS: MPLS
    Mpls = 28,
    /// AF_CAN: Controller Area Network
    Can = 29,
    /// AF_TIPC: TIPC sockets
    Tipc = 30,
    /// AF_BLUETOOTH: Bluetooth sockets
    Bluetooth = 31,
    /// AF_IUCV: IUCV sockets
    Iucv = 32,
    /// AF_RXRPC: RxRPC sockets
    Rxrpc = 33,
    /// AF_ISDN: mISDN sockets
    Isdn = 34,
    /// AF_PHONET: Phonet sockets
    Phonet = 35,
    /// AF_IEEE802154: IEEE 802.15.4 sockets
    Ieee802154 = 36,
    /// AF_CAIF: CAIF sockets
    Caif = 37,
    /// AF_ALG: Algorithm sockets
    Alg = 38,
    /// AF_NFC: NFC sockets
    Nfc = 39,
    /// AF_VSOCK: vSockets
    Vsock = 40,
    /// AF_KCM: Kernel Connection Multiplexor
    Kcm = 41,
    /// AF_QIPCRTR: Qualcomm IPC Router
    Qipcrtr = 42,
    /// AF_SMC: SMC-R sockets.
    /// Reserve number for PF_SMC protocol family that reuses AF_INET address family.
    Smc = 43,
    /// AF_XDP: XDP sockets
    Xdp = 44,
    /// AF_MCTP: Management Component Transport Protocol
    Mctp = 45,
    /// AF_MAX: the highest address family number
    Max = 46,
}

impl TryFrom<u16> for AddressFamily {
    type Error = SystemError;
    fn try_from(x: u16) -> Result<Self, Self::Error> {
        use num_traits::FromPrimitive;
        return <Self as FromPrimitive>::from_u16(x).ok_or(SystemError::EINVAL);
    }
}

/// @brief POSIX socket type enum (the values match those used in the Linux kernel).
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
pub enum PosixSocketType {
    Stream = 1,
    Datagram = 2,
    Raw = 3,
    Rdm = 4,
    SeqPacket = 5,
    Dccp = 6,
    Packet = 10,
}

impl TryFrom<u8> for PosixSocketType {
    type Error = SystemError;
    fn try_from(x: u8) -> Result<Self, Self::Error> {
        use num_traits::FromPrimitive;
        return <Self as FromPrimitive>::from_u8(x).ok_or(SystemError::EINVAL);
    }
}
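
// Conversion sketch (illustrative only): the raw integers passed to socket(2) map onto these
// enums via the TryFrom impls above, and unknown values surface as EINVAL. `SOCK_TYPE_MASK` is
// an assumed 0xf mask, mirroring Linux, to strip flags such as SOCK_NONBLOCK/SOCK_CLOEXEC.
//
//     const SOCK_TYPE_MASK: u32 = 0xf;
//     let family = AddressFamily::try_from(raw_family as u16)?;
//     let sock_type = PosixSocketType::try_from((raw_type & SOCK_TYPE_MASK) as u8)?;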

/// ### A lock-free poll method for sockets.
///
/// In a NIC interrupt we need to poll the socket state, and polling through the socket file or
/// its inode would inevitably deadlock under the current design, so this type is introduced to
/// provide a poll that does not take those locks.
pub struct SocketPollMethod;

impl SocketPollMethod {
    pub fn poll(socket: &socket::Socket, shutdown: ShutdownType) -> EPollEventType {
        match socket {
            socket::Socket::Udp(udp) => Self::udp_poll(udp, shutdown),
            socket::Socket::Tcp(tcp) => Self::tcp_poll(tcp, shutdown),
            _ => todo!(),
        }
    }

    pub fn tcp_poll(socket: &tcp::Socket, shutdown: ShutdownType) -> EPollEventType {
        let mut events = EPollEventType::empty();
        if socket.is_listening() && socket.is_active() {
            events.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
            return events;
        }

        // The socket has been closed
        if !socket.is_open() {
            events.insert(EPollEventType::EPOLLHUP)
        }
        if shutdown.contains(ShutdownType::RCV_SHUTDOWN) {
            events.insert(
                EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM | EPollEventType::EPOLLRDHUP,
            );
        }

        let state = socket.state();
        if state != tcp::State::SynSent && state != tcp::State::SynReceived {
            // The socket has readable data
            if socket.can_recv() {
                events.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
            }

            if !(shutdown.contains(ShutdownType::SEND_SHUTDOWN)) {
                // The send buffer has room
                if socket.send_queue() < socket.send_capacity() {
                    events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM);
                } else {
                    // TODO: raise a signal indicating that the buffer is full
                    todo!("A signal that the buffer is full needs to be sent");
                }
            } else {
                // If SEND_SHUTDOWN has been applied to our socket, the epoll event is EPOLLOUT
                events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM);
            }
        } else if state == tcp::State::SynSent {
            events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM);
        }

        // The socket encountered an error
        if !socket.is_active() {
            events.insert(EPollEventType::EPOLLERR);
        }

        events
    }

    pub fn udp_poll(socket: &udp::Socket, shutdown: ShutdownType) -> EPollEventType {
        let mut event = EPollEventType::empty();

        if shutdown.contains(ShutdownType::RCV_SHUTDOWN) {
            event.insert(
                EPollEventType::EPOLLRDHUP | EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM,
            );
        }
        if shutdown.contains(ShutdownType::SHUTDOWN_MASK) {
            event.insert(EPollEventType::EPOLLHUP);
        }

        if socket.can_recv() {
            event.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
        }

        if socket.can_send() {
            event.insert(
                EPollEventType::EPOLLOUT
                    | EPollEventType::EPOLLWRNORM
                    | EPollEventType::EPOLLWRBAND,
            );
        } else {
            // TODO: not enough buffer space; this needs to be handled with a signal
            todo!()
        }

        return event;
    }
}
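
// Wakeup sketch (illustrative only): the interrupt/poll path is expected to walk SOCKET_SET,
// compute the ready events with SocketPollMethod::poll, and wake sleepers on the matching
// SocketHandleItem's wait queue. `wakeup_any` is an assumed EventWaitQueue method name; the
// real wakeup API may differ.
//
//     let sets = SOCKET_SET.lock_irqsave();
//     let handle_map = HANDLE_MAP.read_irqsave();
//     for (handle, socket) in sets.iter() {
//         if let Some(item) = handle_map.get(&handle) {
//             let events = SocketPollMethod::poll(socket, item.shutdown_type());
//             item.wait_queue.wakeup_any(events.bits() as u64);
//         }
//     }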