xref: /DragonOS/kernel/src/net/socket/mod.rs (revision 2eab6dd743e94a86a685f1f3c01e599adf86610a)
use core::{any::Any, fmt::Debug, sync::atomic::AtomicUsize};

use alloc::{
    boxed::Box,
    collections::LinkedList,
    string::String,
    sync::{Arc, Weak},
    vec::Vec,
};
use hashbrown::HashMap;
use log::warn;
use smoltcp::{
    iface::SocketSet,
    socket::{self, raw, tcp, udp},
};
use system_error::SystemError;

use crate::{
    arch::rand::rand,
    filesystem::vfs::{
        file::FileMode, syscall::ModeType, FilePrivateData, FileSystem, FileType, IndexNode,
        Metadata,
    },
    libs::{
        rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
        spinlock::{SpinLock, SpinLockGuard},
        wait_queue::EventWaitQueue,
    },
    process::{Pid, ProcessManager},
    sched::{schedule, SchedMode},
};

use self::{
    handle::GlobalSocketHandle,
    inet::{RawSocket, TcpSocket, UdpSocket},
    unix::{SeqpacketSocket, StreamSocket},
};

use super::{
    event_poll::{EPollEventType, EPollItem, EventPoll},
    Endpoint, Protocol, ShutdownType,
};

pub mod handle;
pub mod inet;
pub mod unix;

lazy_static! {
    /// The global set of all sockets.
    /// TODO: optimize this by implementing our own SocketSet! As it stands, no matter how many
    /// NICs exist, only one process can access the sockets at any point in time.
    pub static ref SOCKET_SET: SpinLock<SocketSet<'static >> = SpinLock::new(SocketSet::new(vec![]));
    /// Socket handle table: each GlobalSocketHandle maps to one SocketHandleItem.
    /// Note: the NIC interrupt handler needs to take this table's lock, so interrupts must be
    /// disabled while acquiring the read lock to avoid deadlock.
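    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original source) of the locking discipline described
    /// above; `handle` is assumed to be an existing `GlobalSocketHandle`:
    /// ```ignore
    /// // Take the write lock with interrupts disabled, then release it promptly.
    /// let mut guard = HANDLE_MAP.write_irqsave();
    /// guard.insert(handle, SocketHandleItem::new(None));
    /// drop(guard);
    /// ```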
    pub static ref HANDLE_MAP: RwLock<HashMap<GlobalSocketHandle, SocketHandleItem>> = RwLock::new(HashMap::new());
    /// The port manager.
    pub static ref PORT_MANAGER: PortManager = PortManager::new();
}

/* For setsockopt(2) */
// See: linux-5.19.10/include/uapi/asm-generic/socket.h#9
pub const SOL_SOCKET: u8 = 1;

/// Create a socket from the given address family, socket type and protocol.
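///
/// # Example
///
/// A minimal usage sketch (not part of the original file); the `protocol` value is assumed to
/// come from the caller's decoded syscall arguments:
/// ```ignore
/// // Create a blocking IPv4 TCP socket, roughly as a sys_socket() handler would:
/// let sock = new_socket(AddressFamily::INet, PosixSocketType::Stream, protocol)?;
/// let inode = SocketInode::new(sock);
/// ```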
pub(super) fn new_socket(
    address_family: AddressFamily,
    socket_type: PosixSocketType,
    protocol: Protocol,
) -> Result<Box<dyn Socket>, SystemError> {
    let socket: Box<dyn Socket> = match address_family {
        AddressFamily::Unix => match socket_type {
            PosixSocketType::Stream => Box::new(StreamSocket::new(SocketOptions::default())),
            PosixSocketType::SeqPacket => Box::new(SeqpacketSocket::new(SocketOptions::default())),
            _ => {
                return Err(SystemError::EINVAL);
            }
        },
        AddressFamily::INet => match socket_type {
            PosixSocketType::Stream => Box::new(TcpSocket::new(SocketOptions::default())),
            PosixSocketType::Datagram => Box::new(UdpSocket::new(SocketOptions::default())),
            PosixSocketType::Raw => Box::new(RawSocket::new(protocol, SocketOptions::default())),
            _ => {
                return Err(SystemError::EINVAL);
            }
        },
        _ => {
            return Err(SystemError::EAFNOSUPPORT);
        }
    };

    let handle_item = SocketHandleItem::new(None);
    HANDLE_MAP
        .write_irqsave()
        .insert(socket.socket_handle(), handle_item);
    Ok(socket)
}

pub trait Socket: Sync + Send + Debug + Any {
    /// @brief Read data from the socket. If the socket is blocking, this does not return until
    /// some data has been read.
    ///
    /// @param buf Buffer that receives the data.
    ///
    /// @return - On success: (the number of bytes read, the endpoint the data came from).
    ///         - On failure: an error code.
    fn read(&self, buf: &mut [u8]) -> (Result<usize, SystemError>, Endpoint);

    /// @brief Write data to the socket. If the socket is blocking, this does not return until
    /// all of the data has been written.
    ///
    /// @param buf Data to write.
    /// @param to Destination endpoint. If None, the written data will be discarded.
    ///
    /// @return The number of bytes written.
    fn write(&self, buf: &[u8], to: Option<Endpoint>) -> Result<usize, SystemError>;

    /// @brief Equivalent of the POSIX connect function: connect to the specified remote server endpoint.
    ///
    /// It is used to establish a connection to a remote server.
    /// When a socket is connected to a remote server,
    /// the operating system will establish a network connection with the server
    /// and allow data to be sent and received between the local socket and the remote server.
    ///
    /// @param endpoint The endpoint to connect to.
    ///
    /// @return Whether the connection succeeded.
    fn connect(&mut self, _endpoint: Endpoint) -> Result<(), SystemError>;

    /// @brief Equivalent of the POSIX bind function: bind to the given local endpoint.
    ///
    /// The bind() function is used to associate a socket with a particular IP address and port number on the local machine.
    ///
    /// @param endpoint The endpoint to bind to.
    ///
    /// @return Whether the bind succeeded.
    fn bind(&mut self, _endpoint: Endpoint) -> Result<(), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Equivalent of the POSIX shutdown function: shut down the socket.
    ///
    /// shutdown() initiates a graceful close of a network connection.
    /// Once a connection has been established between two endpoints, either side can start the
    /// close sequence by calling shutdown() on its endpoint object.
    /// This sends a shutdown message to the remote endpoint, indicating that the local endpoint
    /// will no longer accept new data.
    ///
    /// @return Whether the shutdown succeeded.
    fn shutdown(&mut self, _type: ShutdownType) -> Result<(), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Equivalent of the POSIX listen function: listen on the bound endpoint.
    ///
    /// @param backlog Maximum number of pending connections.
    ///
    /// @return Whether listening succeeded.
    fn listen(&mut self, _backlog: usize) -> Result<(), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Equivalent of the POSIX accept function: accept an incoming connection.
    ///
    /// @return On success, the accepted socket together with the peer's endpoint.
    fn accept(&mut self) -> Result<(Box<dyn Socket>, Endpoint), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Get the socket's local endpoint.
    ///
    /// @return The socket's endpoint, if any.
    fn endpoint(&self) -> Option<Endpoint> {
        None
    }

    /// @brief Get the socket's peer endpoint.
    ///
    /// @return The peer's endpoint, if any.
    fn peer_endpoint(&self) -> Option<Endpoint> {
        None
    }

    /// @brief
    ///     The purpose of the poll function is to provide
    ///     a non-blocking way to check if a socket is ready for reading or writing,
    ///     so that you can efficiently handle multiple sockets in a single thread or event loop.
    ///
    /// @return An EPollEventType bit set describing the socket's readiness:
    ///
    ///     EPOLLIN-type bits are set when data can be read from the socket without blocking.
    ///     EPOLLOUT-type bits are set when data can be written to the socket without blocking.
    ///     EPOLLERR is set when the socket is in an error state and should be closed or reset.
    ///
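    /// # Example
    ///
    /// A minimal sketch (not from the original source) of how a caller might interpret the
    /// returned bits:
    /// ```ignore
    /// let events = socket.poll();
    /// let readable = events.contains(EPollEventType::EPOLLIN);
    /// let writable = events.contains(EPollEventType::EPOLLOUT);
    /// let errored = events.contains(EPollEventType::EPOLLERR);
    /// ```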
    fn poll(&self) -> EPollEventType {
        EPollEventType::empty()
    }

    /// @brief The socket's ioctl handler.
    ///
    /// @param cmd The ioctl command.
    /// @param arg0 The command's first argument.
    /// @param arg1 The command's second argument.
    /// @param arg2 The command's third argument.
    ///
    /// @return The ioctl command's return value.
    fn ioctl(
        &self,
        _cmd: usize,
        _arg0: usize,
        _arg1: usize,
        _arg2: usize,
    ) -> Result<usize, SystemError> {
        Ok(0)
    }

    /// @brief Get the socket's metadata.
    fn metadata(&self) -> SocketMetadata;

    fn box_clone(&self) -> Box<dyn Socket>;

    /// @brief Set a socket option.
    ///
    /// @param level The option's level.
    /// @param optname The option's name.
    /// @param optval The option's value.
    ///
    /// @return Whether setting the option succeeded; ENOSYS if the option is not supported.
    fn setsockopt(
        &self,
        _level: usize,
        _optname: usize,
        _optval: &[u8],
    ) -> Result<(), SystemError> {
        warn!("setsockopt is not implemented");
        Ok(())
    }

    fn socket_handle(&self) -> GlobalSocketHandle;

    fn write_buffer(&self, _buf: &[u8]) -> Result<usize, SystemError> {
        todo!()
    }

    fn as_any_ref(&self) -> &dyn Any;

    fn as_any_mut(&mut self) -> &mut dyn Any;

    fn add_epoll(&mut self, epitem: Arc<EPollItem>) -> Result<(), SystemError> {
        HANDLE_MAP
            .write_irqsave()
            .get_mut(&self.socket_handle())
            .unwrap()
            .add_epoll(epitem);
        Ok(())
    }

    fn remove_epoll(&mut self, epoll: &Weak<SpinLock<EventPoll>>) -> Result<(), SystemError> {
        HANDLE_MAP
            .write_irqsave()
            .get_mut(&self.socket_handle())
            .unwrap()
            .remove_epoll(epoll)?;

        Ok(())
    }

    fn clear_epoll(&mut self) -> Result<(), SystemError> {
        let mut handle_map_guard = HANDLE_MAP.write_irqsave();
        let handle_item = handle_map_guard.get_mut(&self.socket_handle()).unwrap();

        for epitem in handle_item.epitems.lock_irqsave().iter() {
            let epoll = epitem.epoll();
            if epoll.upgrade().is_some() {
                EventPoll::ep_remove(
                    &mut epoll.upgrade().unwrap().lock_irqsave(),
                    epitem.fd(),
                    None,
                )?;
            }
        }

        Ok(())
    }

    fn close(&mut self);
}

impl Clone for Box<dyn Socket> {
    fn clone(&self) -> Box<dyn Socket> {
        self.box_clone()
    }
}

/// # Inode wrapper that exposes a socket to the filesystem
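///
/// # Example
///
/// A minimal sketch (not from the original source) of how a socket ends up behind an inode;
/// `protocol` is assumed to come from the syscall arguments, and the file-table wiring is omitted:
/// ```ignore
/// let socket = new_socket(AddressFamily::INet, PosixSocketType::Datagram, protocol)?;
/// let inode: Arc<SocketInode> = SocketInode::new(socket);
/// // The inode is then wrapped in a File object and installed into the fd table.
/// ```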
#[derive(Debug)]
pub struct SocketInode(SpinLock<Box<dyn Socket>>, AtomicUsize);

impl SocketInode {
    pub fn new(socket: Box<dyn Socket>) -> Arc<Self> {
        Arc::new(Self(SpinLock::new(socket), AtomicUsize::new(0)))
    }

    #[inline]
    pub fn inner(&self) -> SpinLockGuard<Box<dyn Socket>> {
        self.0.lock()
    }

    pub unsafe fn inner_no_preempt(&self) -> SpinLockGuard<Box<dyn Socket>> {
        self.0.lock_no_preempt()
    }

    fn do_close(&self) -> Result<(), SystemError> {
        let prev_ref_count = self.1.fetch_sub(1, core::sync::atomic::Ordering::SeqCst);
        if prev_ref_count == 1 {
            // This was the last close, so release the socket's resources.
            let mut socket = self.0.lock_irqsave();

            if socket.metadata().socket_type == SocketType::Unix {
                return Ok(());
            }

            if let Some(Endpoint::Ip(Some(ip))) = socket.endpoint() {
                PORT_MANAGER.unbind_port(socket.metadata().socket_type, ip.port);
            }

            socket.clear_epoll()?;

            HANDLE_MAP
                .write_irqsave()
                .remove(&socket.socket_handle())
                .unwrap();
            socket.close();
        }

        Ok(())
    }
}

impl Drop for SocketInode {
    fn drop(&mut self) {
        for _ in 0..self.1.load(core::sync::atomic::Ordering::SeqCst) {
            let _ = self.do_close();
        }
    }
}

impl IndexNode for SocketInode {
    fn open(
        &self,
        _data: SpinLockGuard<FilePrivateData>,
        _mode: &FileMode,
    ) -> Result<(), SystemError> {
        self.1.fetch_add(1, core::sync::atomic::Ordering::SeqCst);
        Ok(())
    }

    fn close(&self, _data: SpinLockGuard<FilePrivateData>) -> Result<(), SystemError> {
        self.do_close()
    }

    fn read_at(
        &self,
        _offset: usize,
        len: usize,
        buf: &mut [u8],
        data: SpinLockGuard<FilePrivateData>,
    ) -> Result<usize, SystemError> {
        drop(data);
        self.0.lock_no_preempt().read(&mut buf[0..len]).0
    }

    fn write_at(
        &self,
        _offset: usize,
        len: usize,
        buf: &[u8],
        data: SpinLockGuard<FilePrivateData>,
    ) -> Result<usize, SystemError> {
        drop(data);
        self.0.lock_no_preempt().write(&buf[0..len], None)
    }

    fn poll(&self, _private_data: &FilePrivateData) -> Result<usize, SystemError> {
        let events = self.0.lock_irqsave().poll();
        return Ok(events.bits() as usize);
    }

    fn fs(&self) -> Arc<dyn FileSystem> {
        todo!()
    }

    fn as_any_ref(&self) -> &dyn Any {
        self
    }

    fn list(&self) -> Result<Vec<String>, SystemError> {
        return Err(SystemError::ENOTDIR);
    }

    fn metadata(&self) -> Result<Metadata, SystemError> {
        let meta = Metadata {
            mode: ModeType::from_bits_truncate(0o755),
            file_type: FileType::Socket,
            ..Default::default()
        };

        return Ok(meta);
    }

    fn resize(&self, _len: usize) -> Result<(), SystemError> {
        return Ok(());
    }
}

#[derive(Debug)]
pub struct SocketHandleItem {
    /// The socket's shutdown state
    pub shutdown_type: RwLock<ShutdownType>,
    /// The socket's wait queue
    pub wait_queue: Arc<EventWaitQueue>,
    /// epoll items; TODO: reconsider whether keeping them here is the best design
    pub epitems: SpinLock<LinkedList<Arc<EPollItem>>>,
}

impl SocketHandleItem {
    pub fn new(wait_queue: Option<Arc<EventWaitQueue>>) -> Self {
        Self {
            shutdown_type: RwLock::new(ShutdownType::empty()),
            wait_queue: wait_queue.unwrap_or(Arc::new(EventWaitQueue::new())),
            epitems: SpinLock::new(LinkedList::new()),
        }
    }

    /// ## Sleep on the socket's wait queue
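    ///
    /// ## Example
    ///
    /// A minimal sketch (not from the original source); `handle` is assumed to be the caller's
    /// `GlobalSocketHandle`, and the event mask is illustrative:
    /// ```ignore
    /// // Per the note on HANDLE_MAP, interrupt-facing paths should take this read
    /// // lock with interrupts disabled.
    /// let guard = HANDLE_MAP.read();
    /// SocketHandleItem::sleep(handle, EPollEventType::EPOLLIN.bits() as u64, guard);
    /// // sleep() drops the guard and calls schedule(); execution resumes once the
    /// // NIC interrupt path wakes this socket's wait queue.
    /// ```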
    pub fn sleep(
        socket_handle: GlobalSocketHandle,
        events: u64,
        handle_map_guard: RwLockReadGuard<'_, HashMap<GlobalSocketHandle, SocketHandleItem>>,
    ) {
        unsafe {
            handle_map_guard
                .get(&socket_handle)
                .unwrap()
                .wait_queue
                .sleep_without_schedule(events)
        };
        drop(handle_map_guard);
        schedule(SchedMode::SM_NONE);
    }

    pub fn shutdown_type(&self) -> ShutdownType {
        *self.shutdown_type.read()
    }

    pub fn shutdown_type_writer(&mut self) -> RwLockWriteGuard<ShutdownType> {
        self.shutdown_type.write_irqsave()
    }

    pub fn add_epoll(&mut self, epitem: Arc<EPollItem>) {
        self.epitems.lock_irqsave().push_back(epitem)
    }

    pub fn remove_epoll(&mut self, epoll: &Weak<SpinLock<EventPoll>>) -> Result<(), SystemError> {
        let is_remove = !self
            .epitems
            .lock_irqsave()
            .extract_if(|x| x.epoll().ptr_eq(epoll))
            .collect::<Vec<_>>()
            .is_empty();

        if is_remove {
            return Ok(());
        }

        Err(SystemError::ENOENT)
    }
}

/// # Port manager for TCP and UDP.
/// When a TCP/UDP socket binds a port, the binding is recorded in the corresponding table so
/// that port conflicts can be detected.
pub struct PortManager {
    // TCP port table
    tcp_port_table: SpinLock<HashMap<u16, Pid>>,
    // UDP port table
    udp_port_table: SpinLock<HashMap<u16, Pid>>,
}

impl PortManager {
    pub fn new() -> Self {
        return Self {
            tcp_port_table: SpinLock::new(HashMap::new()),
            udp_port_table: SpinLock::new(HashMap::new()),
        };
    }

    /// @brief Automatically allocate an unused port for the given protocol. If every dynamic
    /// port is already in use, return EADDRINUSE.
    pub fn get_ephemeral_port(&self, socket_type: SocketType) -> Result<u16, SystemError> {
        // TODO: select a non-conflicting high port

        static mut EPHEMERAL_PORT: u16 = 0;
        unsafe {
            if EPHEMERAL_PORT == 0 {
                EPHEMERAL_PORT = (49152 + rand() % (65536 - 49152)) as u16;
            }
        }

        let mut remaining = 65536 - 49152; // number of allocation attempts left
        let mut port: u16;
        while remaining > 0 {
            unsafe {
                if EPHEMERAL_PORT == 65535 {
                    EPHEMERAL_PORT = 49152;
                } else {
                    EPHEMERAL_PORT += 1;
                }
                port = EPHEMERAL_PORT;
            }

            // Check the corresponding port table to see whether the port is already in use.
            let listen_table_guard = match socket_type {
                SocketType::Udp => self.udp_port_table.lock(),
                SocketType::Tcp => self.tcp_port_table.lock(),
                _ => panic!("{:?} cannot get a port", socket_type),
            };
            if listen_table_guard.get(&port).is_none() {
                drop(listen_table_guard);
                return Ok(port);
            }
            remaining -= 1;
        }
        return Err(SystemError::EADDRINUSE);
    }

    /// @brief Check whether the given port is already in use; if it is free, record it in the
    /// corresponding TCP/UDP table.
    ///
    /// TODO: add support for port reuse.
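    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original source) of how a TCP socket implementation might
    /// combine this with `get_ephemeral_port`; `requested_port` is a hypothetical local variable:
    /// ```ignore
    /// let mut port = requested_port;
    /// if port == 0 {
    ///     port = PORT_MANAGER.get_ephemeral_port(SocketType::Tcp)?;
    /// }
    /// PORT_MANAGER.bind_port(SocketType::Tcp, port)?;
    /// // ... and on close:
    /// PORT_MANAGER.unbind_port(SocketType::Tcp, port);
    /// ```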
    pub fn bind_port(&self, socket_type: SocketType, port: u16) -> Result<(), SystemError> {
        if port > 0 {
            let mut listen_table_guard = match socket_type {
                SocketType::Udp => self.udp_port_table.lock(),
                SocketType::Tcp => self.tcp_port_table.lock(),
                _ => panic!("{:?} cannot bind a port", socket_type),
            };
            match listen_table_guard.get(&port) {
                Some(_) => return Err(SystemError::EADDRINUSE),
                None => listen_table_guard.insert(port, ProcessManager::current_pid()),
            };
            drop(listen_table_guard);
        }
        return Ok(());
    }

    /// @brief Unbind the port from the socket in the corresponding port table.
    /// This should be called when the socket is closed or aborted.
    pub fn unbind_port(&self, socket_type: SocketType, port: u16) {
        let mut listen_table_guard = match socket_type {
            SocketType::Udp => self.udp_port_table.lock(),
            SocketType::Tcp => self.tcp_port_table.lock(),
            _ => {
                return;
            }
        };
        listen_table_guard.remove(&port);
        drop(listen_table_guard);
    }
}

/// @brief The socket's type
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SocketType {
    /// Raw socket
    Raw,
    /// Socket used for TCP communication
    Tcp,
    /// Socket used for UDP communication
    Udp,
    /// Unix-domain socket
    Unix,
}

bitflags! {
    /// @brief Socket options
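    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original source) showing how these flags combine:
    /// ```ignore
    /// // A blocking socket that also allows address reuse:
    /// let opts = SocketOptions::BLOCK | SocketOptions::REUSEADDR;
    /// assert!(opts.contains(SocketOptions::BLOCK));
    /// ```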
    #[derive(Default)]
    pub struct SocketOptions: u32 {
        /// Whether the socket is blocking
        const BLOCK = 1 << 0;
        /// Whether broadcast is allowed
        const BROADCAST = 1 << 1;
        /// Whether multicast is allowed
        const MULTICAST = 1 << 2;
        /// Whether address reuse is allowed
        const REUSEADDR = 1 << 3;
        /// Whether port reuse is allowed
        const REUSEPORT = 1 << 4;
    }
}

#[derive(Debug, Clone)]
/// @brief Returned by the Socket trait's metadata() function for use by external callers
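///
/// # Example
///
/// A minimal construction sketch (not from the original source); the buffer sizes shown are
/// illustrative only:
/// ```ignore
/// let metadata = SocketMetadata::new(
///     SocketType::Udp,
///     64 * 1024,               // rx_buf_size
///     64 * 1024,               // tx_buf_size
///     1024,                    // metadata_buf_size
///     SocketOptions::default(),
/// );
/// ```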
pub struct SocketMetadata {
    /// The socket's type
    pub socket_type: SocketType,
    /// Size of the receive buffer
    pub rx_buf_size: usize,
    /// Size of the send buffer
    pub tx_buf_size: usize,
    /// Size of the metadata buffer
    pub metadata_buf_size: usize,
    /// The socket's options
    pub options: SocketOptions,
}

impl SocketMetadata {
    fn new(
        socket_type: SocketType,
        rx_buf_size: usize,
        tx_buf_size: usize,
        metadata_buf_size: usize,
        options: SocketOptions,
    ) -> Self {
        Self {
            socket_type,
            rx_buf_size,
            tx_buf_size,
            metadata_buf_size,
            options,
        }
    }
}

/// @brief Address family enumeration
///
/// Reference: https://code.dragonos.org.cn/xref/linux-5.19.10/include/linux/socket.h#180
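///
/// # Example
///
/// A minimal sketch (not from the original source) of decoding the address-family argument of
/// socket(2) using the `TryFrom<u16>` impl below:
/// ```ignore
/// let family = AddressFamily::try_from(2u16)?; // 2 == AF_INET
/// assert_eq!(family, AddressFamily::INet);
/// ```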
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
pub enum AddressFamily {
    /// AF_UNSPEC: unspecified address family
    Unspecified = 0,
    /// AF_UNIX: Unix domain socket (same as AF_LOCAL)
    Unix = 1,
    /// AF_INET: IPv4 socket
    INet = 2,
    /// AF_AX25: AMPR AX.25 socket
    AX25 = 3,
    /// AF_IPX: IPX socket
    IPX = 4,
    /// AF_APPLETALK: AppleTalk socket
    Appletalk = 5,
    /// AF_NETROM: AMPR NET/ROM socket
    Netrom = 6,
    /// AF_BRIDGE: multiprotocol bridge socket
    Bridge = 7,
    /// AF_ATMPVC: ATM PVC socket
    Atmpvc = 8,
    /// AF_X25: X.25 socket
    X25 = 9,
    /// AF_INET6: IPv6 socket
    INet6 = 10,
    /// AF_ROSE: AMPR ROSE socket
    Rose = 11,
    /// AF_DECnet: reserved for the DECnet project
    Decnet = 12,
    /// AF_NETBEUI: reserved for the 802.2LLC project
    Netbeui = 13,
    /// AF_SECURITY: pseudo address family for security callbacks
    Security = 14,
    /// AF_KEY: key management API
    Key = 15,
    /// AF_NETLINK: netlink socket
    Netlink = 16,
    /// AF_PACKET: low-level packet interface
    Packet = 17,
    /// AF_ASH: Ash
    Ash = 18,
    /// AF_ECONET: Acorn Econet
    Econet = 19,
    /// AF_ATMSVC: ATM SVCs
    Atmsvc = 20,
    /// AF_RDS: Reliable Datagram Sockets
    Rds = 21,
    /// AF_SNA: Linux SNA project
    Sna = 22,
    /// AF_IRDA: IrDA sockets
    Irda = 23,
    /// AF_PPPOX: PPPoX sockets
    Pppox = 24,
    /// AF_WANPIPE: WANPIPE API sockets
    WanPipe = 25,
    /// AF_LLC: Linux LLC
    Llc = 26,
    /// AF_IB: native InfiniBand address
    /// See: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/configuring_infiniband_and_rdma_networks/index#understanding-infiniband-and-rdma_configuring-infiniband-and-rdma-networks
    Ib = 27,
    /// AF_MPLS: MPLS
    Mpls = 28,
    /// AF_CAN: Controller Area Network
    Can = 29,
    /// AF_TIPC: TIPC sockets
    Tipc = 30,
    /// AF_BLUETOOTH: Bluetooth sockets
    Bluetooth = 31,
    /// AF_IUCV: IUCV sockets
    Iucv = 32,
    /// AF_RXRPC: RxRPC sockets
    Rxrpc = 33,
    /// AF_ISDN: mISDN sockets
    Isdn = 34,
    /// AF_PHONET: Phonet sockets
    Phonet = 35,
    /// AF_IEEE802154: IEEE 802.15.4 sockets
    Ieee802154 = 36,
    /// AF_CAIF: CAIF sockets
    Caif = 37,
    /// AF_ALG: algorithm sockets
    Alg = 38,
    /// AF_NFC: NFC sockets
    Nfc = 39,
    /// AF_VSOCK: vSockets
    Vsock = 40,
    /// AF_KCM: Kernel Connection Multiplexor
    Kcm = 41,
    /// AF_QIPCRTR: Qualcomm IPC Router
    Qipcrtr = 42,
    /// AF_SMC: SMC-R sockets.
    /// reserve number for PF_SMC protocol family that reuses AF_INET address family
    Smc = 43,
    /// AF_XDP: XDP sockets
    Xdp = 44,
    /// AF_MCTP: Management Component Transport Protocol
    Mctp = 45,
    /// AF_MAX: the highest address family number
    Max = 46,
}

impl TryFrom<u16> for AddressFamily {
    type Error = SystemError;
    fn try_from(x: u16) -> Result<Self, Self::Error> {
        use num_traits::FromPrimitive;
        return <Self as FromPrimitive>::from_u16(x).ok_or(SystemError::EINVAL);
    }
}

/// @brief POSIX socket type enumeration (the values match those used by the Linux kernel)
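///
/// # Example
///
/// A minimal sketch (not from the original source) of decoding the type argument of socket(2);
/// masking with `0xf` to strip flags such as SOCK_NONBLOCK/SOCK_CLOEXEC is an assumption about
/// the caller:
/// ```ignore
/// let sock_type = PosixSocketType::try_from((type_arg & 0xf) as u8)?;
/// assert_eq!(PosixSocketType::try_from(1u8)?, PosixSocketType::Stream);
/// ```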
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
pub enum PosixSocketType {
    Stream = 1,
    Datagram = 2,
    Raw = 3,
    Rdm = 4,
    SeqPacket = 5,
    Dccp = 6,
    Packet = 10,
}

impl TryFrom<u8> for PosixSocketType {
    type Error = SystemError;
    fn try_from(x: u8) -> Result<Self, Self::Error> {
        use num_traits::FromPrimitive;
        return <Self as FromPrimitive>::from_u8(x).ok_or(SystemError::EINVAL);
    }
}

/// ### Lock-free poll methods for sockets
///
/// The NIC interrupt handler needs to poll socket state. Under the current design, polling
/// through the socket file or its inode would inevitably deadlock, so this type exists to
/// provide a lock-free poll instead.
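///
/// ### Example
///
/// A minimal sketch (not from the original source); `socket` is assumed to be a reference to the
/// smoltcp socket and `shutdown` its recorded ShutdownType:
/// ```ignore
/// let events = SocketPollMethod::poll(socket, shutdown);
/// if events.contains(EPollEventType::EPOLLIN) {
///     // Wake any readers waiting on this socket's wait queue.
/// }
/// ```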
pub struct SocketPollMethod;

impl SocketPollMethod {
    pub fn poll(socket: &socket::Socket, shutdown: ShutdownType) -> EPollEventType {
        match socket {
            socket::Socket::Udp(udp) => Self::udp_poll(udp, shutdown),
            socket::Socket::Tcp(tcp) => Self::tcp_poll(tcp, shutdown),
            socket::Socket::Raw(raw) => Self::raw_poll(raw, shutdown),
            _ => todo!(),
        }
    }

    pub fn tcp_poll(socket: &tcp::Socket, shutdown: ShutdownType) -> EPollEventType {
        let mut events = EPollEventType::empty();
        if socket.is_listening() && socket.is_active() {
            events.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
            return events;
        }

        // The socket has been closed.
        if !socket.is_open() {
            events.insert(EPollEventType::EPOLLHUP)
        }
        if shutdown.contains(ShutdownType::RCV_SHUTDOWN) {
            events.insert(
                EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM | EPollEventType::EPOLLRDHUP,
            );
        }

        let state = socket.state();
        if state != tcp::State::SynSent && state != tcp::State::SynReceived {
            // The socket has readable data.
            if socket.can_recv() {
                events.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
            }

            if !(shutdown.contains(ShutdownType::SEND_SHUTDOWN)) {
                // The send buffer still has room.
                if socket.send_queue() < socket.send_capacity() {
                    events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM);
                } else {
                    // TODO: raise the buffer-full signal.
                    todo!("A signal that the buffer is full needs to be sent");
                }
            } else {
                // If SEND_SHUTDOWN has been applied to our socket, the epoll event is EPOLLOUT.
                events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM);
            }
        } else if state == tcp::State::SynSent {
            events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM);
        }

        // The socket encountered an error.
        if !socket.is_active() {
            events.insert(EPollEventType::EPOLLERR);
        }

        events
    }

    pub fn udp_poll(socket: &udp::Socket, shutdown: ShutdownType) -> EPollEventType {
        let mut event = EPollEventType::empty();

        if shutdown.contains(ShutdownType::RCV_SHUTDOWN) {
            event.insert(
                EPollEventType::EPOLLRDHUP | EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM,
            );
        }
        if shutdown.contains(ShutdownType::SHUTDOWN_MASK) {
            event.insert(EPollEventType::EPOLLHUP);
        }

        if socket.can_recv() {
            event.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
        }

        if socket.can_send() {
            event.insert(
                EPollEventType::EPOLLOUT
                    | EPollEventType::EPOLLWRNORM
                    | EPollEventType::EPOLLWRBAND,
            );
        } else {
            // TODO: not enough buffer space; this should be handled with a signal.
            todo!()
        }

        return event;
    }

    pub fn raw_poll(socket: &raw::Socket, shutdown: ShutdownType) -> EPollEventType {
        //debug!("enter raw_poll!");
        let mut event = EPollEventType::empty();

        if shutdown.contains(ShutdownType::RCV_SHUTDOWN) {
            event.insert(
                EPollEventType::EPOLLRDHUP | EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM,
            );
        }
        if shutdown.contains(ShutdownType::SHUTDOWN_MASK) {
            event.insert(EPollEventType::EPOLLHUP);
        }

        if socket.can_recv() {
            //debug!("poll can recv!");
            event.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
        } else {
            //debug!("poll can not recv!");
        }

        if socket.can_send() {
            //debug!("poll can send!");
            event.insert(
                EPollEventType::EPOLLOUT
                    | EPollEventType::EPOLLWRNORM
                    | EPollEventType::EPOLLWRBAND,
            );
        } else {
            //debug!("poll can not send!");
            // TODO: not enough buffer space; this should be handled with a signal.
            todo!()
        }
        return event;
    }
}