// xref: /DragonOS/kernel/src/net/socket/mod.rs (revision 6fc066ac11d2f9a3ac629d57487a6144fda1ac63)
use core::{any::Any, fmt::Debug, sync::atomic::AtomicUsize};

use alloc::{
    boxed::Box,
    collections::LinkedList,
    string::String,
    sync::{Arc, Weak},
    vec::Vec,
};
use hashbrown::HashMap;
use smoltcp::{
    iface::{SocketHandle, SocketSet},
    socket::{self, tcp, udp},
};
use system_error::SystemError;

use crate::{
    arch::rand::rand,
    filesystem::vfs::{
        file::FileMode, syscall::ModeType, FilePrivateData, FileSystem, FileType, IndexNode,
        Metadata,
    },
    libs::{
        rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
        spinlock::{SpinLock, SpinLockGuard},
        wait_queue::EventWaitQueue,
    },
    sched::{schedule, SchedMode},
};

use self::{
    inet::{RawSocket, TcpSocket, UdpSocket},
    unix::{SeqpacketSocket, StreamSocket},
};

use super::{
    event_poll::{EPollEventType, EPollItem, EventPoll},
    net_core::poll_ifaces,
    Endpoint, Protocol, ShutdownType,
};

pub mod inet;
pub mod unix;

lazy_static! {
    /// The set of all sockets in the system.
    /// TODO: Optimize this by implementing our own SocketSet. As it stands, no matter how many
    /// NICs there are, only one process can access the sockets at any given time.
    pub static ref SOCKET_SET: SpinLock<SocketSet<'static>> = SpinLock::new(SocketSet::new(vec![]));
    /// SocketHandle table: each SocketHandle maps to one SocketHandleItem.
    /// Note: the NIC interrupt handler needs to take this table's lock, so interrupts must be
    /// disabled while holding the read lock to avoid deadlock.
    pub static ref HANDLE_MAP: RwLock<HashMap<SocketHandle, SocketHandleItem>> = RwLock::new(HashMap::new());
    /// The port manager.
    pub static ref PORT_MANAGER: PortManager = PortManager::new();
}
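
// Usage sketch (illustrative only, not part of the original file): the note above implies a
// locking discipline for these globals. Paths that may race with the NIC interrupt use the
// irqsave lock variants that appear throughout this module, e.g.:
//
//     let mut sockets = SOCKET_SET.lock_irqsave();      // smoltcp socket storage
//     let mut handles = HANDLE_MAP.write_irqsave();     // SocketHandle -> SocketHandleItem
//     /* ... touch sockets / handles while interrupts stay off ... */
//     drop(handles);
//     drop(sockets);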

/* For setsockopt(2) */
// See: linux-5.19.10/include/uapi/asm-generic/socket.h#9
pub const SOL_SOCKET: u8 = 1;

/// Create a socket from the given address family, socket type and protocol.
pub(super) fn new_socket(
    address_family: AddressFamily,
    socket_type: PosixSocketType,
    protocol: Protocol,
) -> Result<Box<dyn Socket>, SystemError> {
    let socket: Box<dyn Socket> = match address_family {
        AddressFamily::Unix => match socket_type {
            PosixSocketType::Stream => Box::new(StreamSocket::new(SocketOptions::default())),
            PosixSocketType::SeqPacket => Box::new(SeqpacketSocket::new(SocketOptions::default())),
            _ => {
                return Err(SystemError::EINVAL);
            }
        },
        AddressFamily::INet => match socket_type {
            PosixSocketType::Stream => Box::new(TcpSocket::new(SocketOptions::default())),
            PosixSocketType::Datagram => Box::new(UdpSocket::new(SocketOptions::default())),
            PosixSocketType::Raw => Box::new(RawSocket::new(protocol, SocketOptions::default())),
            _ => {
                return Err(SystemError::EINVAL);
            }
        },
        _ => {
            return Err(SystemError::EAFNOSUPPORT);
        }
    };
    Ok(socket)
}
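
// Usage sketch (illustrative only, not part of the original file): building an IPv4 TCP socket
// and wrapping it into a filesystem inode. `protocol` is a placeholder; per the match above it
// only affects raw sockets.
//
//     let socket = new_socket(AddressFamily::INet, PosixSocketType::Stream, protocol)?;
//     let inode = SocketInode::new(socket);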

pub trait Socket: Sync + Send + Debug + Any {
    /// @brief Read data from the socket. If the socket is blocking, this does not return until
    /// some data has been read.
    ///
    /// @param buf Buffer that receives the data read.
    ///
    /// @return - On success: (the number of bytes read, the endpoint the data was read from).
    ///         - On failure: an error code.
    fn read(&self, buf: &mut [u8]) -> (Result<usize, SystemError>, Endpoint);

    /// @brief Write data to the socket. If the socket is blocking, this does not return until all
    /// of the data has been written into the socket.
    ///
    /// @param buf The data to write.
    /// @param to The destination endpoint. If it is None, the written data will be discarded.
    ///
    /// @return The number of bytes written.
    fn write(&self, buf: &[u8], to: Option<Endpoint>) -> Result<usize, SystemError>;

    /// @brief Equivalent of the POSIX connect function: connect to the given remote endpoint.
    ///
    /// It is used to establish a connection to a remote server.
    /// When a socket is connected to a remote server,
    /// the operating system will establish a network connection with the server
    /// and allow data to be sent and received between the local socket and the remote server.
    ///
    /// @param endpoint The endpoint to connect to.
    ///
    /// @return Whether the connection succeeded.
    fn connect(&mut self, _endpoint: Endpoint) -> Result<(), SystemError>;

    /// @brief Equivalent of the POSIX bind function: bind to the given local endpoint.
    ///
    /// The bind() function is used to associate a socket with a particular IP address and port number on the local machine.
    ///
    /// @param endpoint The endpoint to bind to.
    ///
    /// @return Whether the bind succeeded.
    fn bind(&mut self, _endpoint: Endpoint) -> Result<(), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Equivalent of the POSIX shutdown function: shut down the socket.
    ///
    /// shutdown() initiates an orderly shutdown of a network connection.
    /// When a connection is established between two endpoints, either endpoint may start the
    /// shutdown sequence by calling shutdown() on its endpoint object.
    /// This sends a shutdown message to the remote endpoint to indicate that the local endpoint
    /// no longer accepts new data.
    ///
    /// @return Whether the shutdown succeeded.
    fn shutdown(&mut self, _type: ShutdownType) -> Result<(), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Equivalent of the POSIX listen function: listen on the bound endpoint.
    ///
    /// @param backlog The maximum number of pending connections.
    ///
    /// @return Whether listening succeeded.
    fn listen(&mut self, _backlog: usize) -> Result<(), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Equivalent of the POSIX accept function: accept an incoming connection.
    ///
    /// @param endpoint The peer endpoint.
    ///
    /// @return Whether accepting the connection succeeded.
    fn accept(&mut self) -> Result<(Box<dyn Socket>, Endpoint), SystemError> {
        Err(SystemError::ENOSYS)
    }

    /// @brief Get the local endpoint of the socket.
    ///
    /// @return The local endpoint of the socket.
    fn endpoint(&self) -> Option<Endpoint> {
        None
    }

    /// @brief Get the peer endpoint of the socket.
    ///
    /// @return The peer endpoint of the socket.
    fn peer_endpoint(&self) -> Option<Endpoint> {
        None
    }

    /// @brief
    ///     The purpose of the poll function is to provide
    ///     a non-blocking way to check if a socket is ready for reading or writing,
    ///     so that you can efficiently handle multiple sockets in a single thread or event loop.
    ///
    /// @return A bitset of epoll events, conceptually (in, out, err):
    ///
    ///     EPOLLIN indicates whether the socket is ready for reading. If it is set, there is data available to be read from the socket without blocking.
    ///     EPOLLOUT indicates whether the socket is ready for writing. If it is set, data can be written to the socket without blocking.
    ///     EPOLLERR indicates whether the socket has encountered an error condition. If it is set, the socket is in an error state and should be closed or reset.
    ///
    fn poll(&self) -> EPollEventType {
        EPollEventType::empty()
    }

    /// @brief The socket's ioctl handler.
    ///
    /// @param cmd The ioctl command.
    /// @param arg0 The first argument of the ioctl command.
    /// @param arg1 The second argument of the ioctl command.
    /// @param arg2 The third argument of the ioctl command.
    ///
    /// @return The return value of the ioctl command.
    fn ioctl(
        &self,
        _cmd: usize,
        _arg0: usize,
        _arg1: usize,
        _arg2: usize,
    ) -> Result<usize, SystemError> {
        Ok(0)
    }

    /// @brief Get the socket's metadata.
    fn metadata(&self) -> SocketMetadata;

    fn box_clone(&self) -> Box<dyn Socket>;

    /// @brief Set a socket option.
    ///
    /// @param level The level of the option.
    /// @param optname The name of the option.
    /// @param optval The value of the option.
    ///
    /// @return Whether setting the option succeeded; returns ENOSYS if the option is not supported.
    fn setsockopt(
        &self,
        _level: usize,
        _optname: usize,
        _optval: &[u8],
    ) -> Result<(), SystemError> {
        kwarn!("setsockopt is not implemented");
        Ok(())
    }

    fn socket_handle(&self) -> SocketHandle {
        todo!()
    }

    fn write_buffer(&self, _buf: &[u8]) -> Result<usize, SystemError> {
        todo!()
    }

    fn as_any_ref(&self) -> &dyn Any;

    fn as_any_mut(&mut self) -> &mut dyn Any;

    fn add_epoll(&mut self, epitem: Arc<EPollItem>) -> Result<(), SystemError> {
        HANDLE_MAP
            .write_irqsave()
            .get_mut(&self.socket_handle())
            .unwrap()
            .add_epoll(epitem);
        Ok(())
    }

    fn remove_epoll(&mut self, epoll: &Weak<SpinLock<EventPoll>>) -> Result<(), SystemError> {
        HANDLE_MAP
            .write_irqsave()
            .get_mut(&self.socket_handle())
            .unwrap()
            .remove_epoll(epoll)?;

        Ok(())
    }

    fn clear_epoll(&mut self) -> Result<(), SystemError> {
        let mut handle_map_guard = HANDLE_MAP.write_irqsave();
        let handle_item = handle_map_guard.get_mut(&self.socket_handle()).unwrap();

        for epitem in handle_item.epitems.lock_irqsave().iter() {
            let epoll = epitem.epoll();
            if epoll.upgrade().is_some() {
                EventPoll::ep_remove(
                    &mut epoll.upgrade().unwrap().lock_irqsave(),
                    epitem.fd(),
                    None,
                )?;
            }
        }

        Ok(())
    }
}

impl Clone for Box<dyn Socket> {
    fn clone(&self) -> Box<dyn Socket> {
        self.box_clone()
    }
}
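
// Usage sketch (illustrative only, not part of the original file): driving a `Box<dyn Socket>`
// through a `SocketInode` and interpreting the epoll bits returned by `poll()`.
//
//     let inode = SocketInode::new(socket);
//     let events = inode.inner().poll();
//     let readable = events.contains(EPollEventType::EPOLLIN);
//     let writable = events.contains(EPollEventType::EPOLLOUT);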

/// # Inode wrapper that exposes a socket to the filesystem
#[derive(Debug)]
pub struct SocketInode(SpinLock<Box<dyn Socket>>, AtomicUsize);

impl SocketInode {
    pub fn new(socket: Box<dyn Socket>) -> Arc<Self> {
        Arc::new(Self(SpinLock::new(socket), AtomicUsize::new(0)))
    }

    #[inline]
    pub fn inner(&self) -> SpinLockGuard<Box<dyn Socket>> {
        self.0.lock()
    }

    pub unsafe fn inner_no_preempt(&self) -> SpinLockGuard<Box<dyn Socket>> {
        self.0.lock_no_preempt()
    }
}

impl IndexNode for SocketInode {
    fn open(
        &self,
        _data: SpinLockGuard<FilePrivateData>,
        _mode: &FileMode,
    ) -> Result<(), SystemError> {
        self.1.fetch_add(1, core::sync::atomic::Ordering::SeqCst);
        Ok(())
    }

    fn close(&self, _data: SpinLockGuard<FilePrivateData>) -> Result<(), SystemError> {
        let prev_ref_count = self.1.fetch_sub(1, core::sync::atomic::Ordering::SeqCst);
        if prev_ref_count == 1 {
            // This was the last close, so release the socket's resources.
            let mut socket = self.0.lock_irqsave();

            if socket.metadata().socket_type == SocketType::Unix {
                return Ok(());
            }

            if let Some(Endpoint::Ip(Some(ip))) = socket.endpoint() {
                PORT_MANAGER.unbind_port(socket.metadata().socket_type, ip.port)?;
            }

            socket.clear_epoll()?;

            HANDLE_MAP
                .write_irqsave()
                .remove(&socket.socket_handle())
                .unwrap();
        }
        Ok(())
    }

    fn read_at(
        &self,
        _offset: usize,
        len: usize,
        buf: &mut [u8],
        data: SpinLockGuard<FilePrivateData>,
    ) -> Result<usize, SystemError> {
        drop(data);
        self.0.lock_no_preempt().read(&mut buf[0..len]).0
    }

    fn write_at(
        &self,
        _offset: usize,
        len: usize,
        buf: &[u8],
        data: SpinLockGuard<FilePrivateData>,
    ) -> Result<usize, SystemError> {
        drop(data);
        self.0.lock_no_preempt().write(&buf[0..len], None)
    }

    fn poll(&self, _private_data: &FilePrivateData) -> Result<usize, SystemError> {
        let events = self.0.lock_irqsave().poll();
        return Ok(events.bits() as usize);
    }

    fn fs(&self) -> Arc<dyn FileSystem> {
        todo!()
    }

    fn as_any_ref(&self) -> &dyn Any {
        self
    }

    fn list(&self) -> Result<Vec<String>, SystemError> {
        return Err(SystemError::ENOTDIR);
    }

    fn metadata(&self) -> Result<Metadata, SystemError> {
        let meta = Metadata {
            mode: ModeType::from_bits_truncate(0o755),
            file_type: FileType::Socket,
            ..Default::default()
        };

        return Ok(meta);
    }

    fn resize(&self, _len: usize) -> Result<(), SystemError> {
        return Ok(());
    }
}
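
// Usage sketch (illustrative only, not part of the original file): the AtomicUsize field acts as
// an open-file reference count, so the cleanup in `close()` only runs on the last close.
// `guard_a` / `guard_b` stand for the caller's FilePrivateData lock guards.
//
//     inode.open(guard_a, &FileMode::O_RDWR)?;   // count: 0 -> 1
//     inode.open(guard_b, &FileMode::O_RDWR)?;   // count: 1 -> 2
//     inode.close(guard_b)?;                     // count: 2 -> 1, no cleanup yet
//     inode.close(guard_a)?;                     // count: 1 -> 0, port unbound, epoll cleared,
//                                                // HANDLE_MAP entry removed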

#[derive(Debug)]
pub struct SocketHandleItem {
    /// Shutdown state.
    pub shutdown_type: RwLock<ShutdownType>,
    /// The socket's wait queue.
    pub wait_queue: EventWaitQueue,
    /// epitems. TODO: reconsider whether storing them here is the best design.
    pub epitems: SpinLock<LinkedList<Arc<EPollItem>>>,
}

impl SocketHandleItem {
    pub fn new() -> Self {
        Self {
            shutdown_type: RwLock::new(ShutdownType::empty()),
            wait_queue: EventWaitQueue::new(),
            epitems: SpinLock::new(LinkedList::new()),
        }
    }

    /// ## Sleep on the socket's wait queue
    pub fn sleep(
        socket_handle: SocketHandle,
        events: u64,
        handle_map_guard: RwLockReadGuard<'_, HashMap<SocketHandle, SocketHandleItem>>,
    ) {
        unsafe {
            handle_map_guard
                .get(&socket_handle)
                .unwrap()
                .wait_queue
                .sleep_without_schedule(events)
        };
        drop(handle_map_guard);
        schedule(SchedMode::SM_NONE);
    }

    pub fn shutdown_type(&self) -> ShutdownType {
        *self.shutdown_type.read()
    }

    pub fn shutdown_type_writer(&mut self) -> RwLockWriteGuard<ShutdownType> {
        self.shutdown_type.write_irqsave()
    }

    pub fn add_epoll(&mut self, epitem: Arc<EPollItem>) {
        self.epitems.lock_irqsave().push_back(epitem)
    }

    pub fn remove_epoll(&mut self, epoll: &Weak<SpinLock<EventPoll>>) -> Result<(), SystemError> {
        let is_remove = !self
            .epitems
            .lock_irqsave()
            .extract_if(|x| x.epoll().ptr_eq(epoll))
            .collect::<Vec<_>>()
            .is_empty();

        if is_remove {
            return Ok(());
        }

        Err(SystemError::ENOENT)
    }
}
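
// Usage sketch (illustrative only, not part of the original file): the intended blocking pattern.
// The caller enqueues itself on the wait queue while still holding the HANDLE_MAP read guard;
// `sleep` then drops the guard before calling schedule(), so a wakeup coming from the NIC
// interrupt path cannot slip in between the enqueue and the context switch. `handle` is a
// placeholder for the socket's SocketHandle.
//
//     let guard = HANDLE_MAP.read();
//     SocketHandleItem::sleep(handle, EPollEventType::EPOLLIN.bits() as u64, guard);
//     // execution resumes here once another path wakes up the socket's wait_queue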

/// # Port manager for TCP and UDP.
/// When a TCP/UDP socket binds a port, the binding is recorded in the corresponding table so that
/// port conflicts can be detected.
pub struct PortManager {
    // TCP port table
    tcp_port_table: SpinLock<HashMap<u16, Arc<GlobalSocketHandle>>>,
    // UDP port table
    udp_port_table: SpinLock<HashMap<u16, Arc<GlobalSocketHandle>>>,
}

impl PortManager {
    pub fn new() -> Self {
        return Self {
            tcp_port_table: SpinLock::new(HashMap::new()),
            udp_port_table: SpinLock::new(HashMap::new()),
        };
    }

    /// @brief Automatically allocate a port that is not yet in use for the given protocol.
    /// If all dynamic ports are already taken, return EADDRINUSE.
    pub fn get_ephemeral_port(&self, socket_type: SocketType) -> Result<u16, SystemError> {
        // TODO: selects non-conflict high port

        static mut EPHEMERAL_PORT: u16 = 0;
        unsafe {
            if EPHEMERAL_PORT == 0 {
                EPHEMERAL_PORT = (49152 + rand() % (65536 - 49152)) as u16;
            }
        }

        let mut remaining = 65536 - 49152; // Number of allocation attempts left
        let mut port: u16;
        while remaining > 0 {
            unsafe {
                if EPHEMERAL_PORT == 65535 {
                    EPHEMERAL_PORT = 49152;
                } else {
                    EPHEMERAL_PORT += 1;
                }
                port = EPHEMERAL_PORT;
            }

            // Check the corresponding port table to see whether the port is already in use
            let listen_table_guard = match socket_type {
                SocketType::Udp => self.udp_port_table.lock(),
                SocketType::Tcp => self.tcp_port_table.lock(),
                _ => panic!("{:?} can't get a port", socket_type),
            };
            if listen_table_guard.get(&port).is_none() {
                drop(listen_table_guard);
                return Ok(port);
            }
            remaining -= 1;
        }
        return Err(SystemError::EADDRINUSE);
    }

    /// @brief Check whether the given port is already taken; if not, record it in the TCP/UDP table.
    ///
    /// TODO: add support for port reuse
    pub fn bind_port(
        &self,
        socket_type: SocketType,
        port: u16,
        handle: Arc<GlobalSocketHandle>,
    ) -> Result<(), SystemError> {
        if port > 0 {
            let mut listen_table_guard = match socket_type {
                SocketType::Udp => self.udp_port_table.lock(),
                SocketType::Tcp => self.tcp_port_table.lock(),
                _ => panic!("{:?} can't bind a port", socket_type),
            };
            match listen_table_guard.get(&port) {
                Some(_) => return Err(SystemError::EADDRINUSE),
                None => listen_table_guard.insert(port, handle),
            };
            drop(listen_table_guard);
        }
        return Ok(());
    }

    /// @brief Unbind a port from its socket in the corresponding port table.
    pub fn unbind_port(&self, socket_type: SocketType, port: u16) -> Result<(), SystemError> {
        let mut listen_table_guard = match socket_type {
            SocketType::Udp => self.udp_port_table.lock(),
            SocketType::Tcp => self.tcp_port_table.lock(),
            _ => return Ok(()),
        };
        listen_table_guard.remove(&port);
        drop(listen_table_guard);
        return Ok(());
    }
}
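
// Usage sketch (illustrative only, not part of the original file): binding a TCP socket to an
// ephemeral port. `handle` stands for that socket's Arc<GlobalSocketHandle>.
//
//     let port = PORT_MANAGER.get_ephemeral_port(SocketType::Tcp)?;   // 49152..=65535
//     PORT_MANAGER.bind_port(SocketType::Tcp, port, handle.clone())?; // records the binding
//     /* ... later, when the socket is closed ... */
//     PORT_MANAGER.unbind_port(SocketType::Tcp, port)?;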

/// # Socket handle management component
/// It wraps smoltcp's SocketHandle and adds extra functionality, e.g. automatically releasing the
/// socket's resources and notifying other parts of the system when the socket is closed.
#[derive(Debug)]
pub struct GlobalSocketHandle(SocketHandle);

impl GlobalSocketHandle {
    pub fn new(handle: SocketHandle) -> Arc<Self> {
        return Arc::new(Self(handle));
    }
}

impl Clone for GlobalSocketHandle {
    fn clone(&self) -> Self {
        Self(self.0)
    }
}

impl Drop for GlobalSocketHandle {
    fn drop(&mut self) {
        let mut socket_set_guard = SOCKET_SET.lock_irqsave();
        socket_set_guard.remove(self.0); // Removing the socket should cause a FIN to be sent(?)
        drop(socket_set_guard);
        poll_ifaces();
    }
}
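
// Usage sketch (illustrative only, not part of the original file): the handle is shared via Arc,
// so the smoltcp socket is only removed from SOCKET_SET when the last owner drops its
// Arc<GlobalSocketHandle>. `tcp_socket` is a placeholder for a smoltcp socket being registered.
//
//     let raw_handle = SOCKET_SET.lock_irqsave().add(tcp_socket);   // smoltcp SocketHandle
//     let handle = GlobalSocketHandle::new(raw_handle);             // Arc<GlobalSocketHandle>
//     let shared = handle.clone();                                  // clones the Arc, not the socket
//     drop(handle);                                                 // socket stays in SOCKET_SET
//     drop(shared);                                                 // Drop runs: remove + poll_ifaces()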

/// @brief Socket types
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SocketType {
    /// Raw socket
    Raw,
    /// Socket used for TCP communication
    Tcp,
    /// Socket used for UDP communication
    Udp,
    /// Unix domain socket
    Unix,
}

bitflags! {
    /// @brief Socket options
    #[derive(Default)]
    pub struct SocketOptions: u32 {
        /// Whether the socket is blocking
        const BLOCK = 1 << 0;
        /// Whether broadcast is allowed
        const BROADCAST = 1 << 1;
        /// Whether multicast is allowed
        const MULTICAST = 1 << 2;
        /// Whether address reuse is allowed
        const REUSEADDR = 1 << 3;
        /// Whether port reuse is allowed
        const REUSEPORT = 1 << 4;
    }
}
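
// Usage sketch (illustrative only, not part of the original file): combining and testing option
// flags with the standard bitflags API.
//
//     let opts = SocketOptions::BLOCK | SocketOptions::REUSEADDR;
//     assert!(opts.contains(SocketOptions::BLOCK));
//     assert!(!opts.contains(SocketOptions::BROADCAST));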

#[derive(Debug, Clone)]
/// @brief Returned by the metadata function of trait Socket for external use
pub struct SocketMetadata {
    /// The socket type
    pub socket_type: SocketType,
    /// Size of the receive buffer
    pub rx_buf_size: usize,
    /// Size of the send buffer
    pub tx_buf_size: usize,
    /// Size of the metadata buffer
    pub metadata_buf_size: usize,
    /// The socket options
    pub options: SocketOptions,
}

impl SocketMetadata {
    fn new(
        socket_type: SocketType,
        rx_buf_size: usize,
        tx_buf_size: usize,
        metadata_buf_size: usize,
        options: SocketOptions,
    ) -> Self {
        Self {
            socket_type,
            rx_buf_size,
            tx_buf_size,
            metadata_buf_size,
            options,
        }
    }
}

/// @brief Address family enumeration
///
/// Reference: https://code.dragonos.org.cn/xref/linux-5.19.10/include/linux/socket.h#180
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
pub enum AddressFamily {
    /// AF_UNSPEC: unspecified address family
    Unspecified = 0,
    /// AF_UNIX: Unix domain socket (same as AF_LOCAL)
    Unix = 1,
    /// AF_INET: IPv4 socket
    INet = 2,
    /// AF_AX25: AMPR AX.25 socket
    AX25 = 3,
    /// AF_IPX: IPX socket
    IPX = 4,
    /// AF_APPLETALK: Appletalk socket
    Appletalk = 5,
    /// AF_NETROM: AMPR NET/ROM socket
    Netrom = 6,
    /// AF_BRIDGE: multiprotocol bridge socket
    Bridge = 7,
    /// AF_ATMPVC: ATM PVC socket
    Atmpvc = 8,
    /// AF_X25: X.25 socket
    X25 = 9,
    /// AF_INET6: IPv6 socket
    INet6 = 10,
    /// AF_ROSE: AMPR ROSE socket
    Rose = 11,
    /// AF_DECnet: Reserved for DECnet project
    Decnet = 12,
    /// AF_NETBEUI: Reserved for 802.2LLC project
    Netbeui = 13,
    /// AF_SECURITY: pseudo AF for security callbacks
    Security = 14,
    /// AF_KEY: Key management API
    Key = 15,
    /// AF_NETLINK: Netlink socket
    Netlink = 16,
    /// AF_PACKET: low level packet interface
    Packet = 17,
    /// AF_ASH: Ash
    Ash = 18,
    /// AF_ECONET: Acorn Econet
    Econet = 19,
    /// AF_ATMSVC: ATM SVCs
    Atmsvc = 20,
    /// AF_RDS: Reliable Datagram Sockets
    Rds = 21,
    /// AF_SNA: Linux SNA Project
    Sna = 22,
    /// AF_IRDA: IRDA sockets
    Irda = 23,
    /// AF_PPPOX: PPPoX sockets
    Pppox = 24,
    /// AF_WANPIPE: WANPIPE API sockets
    WanPipe = 25,
    /// AF_LLC: Linux LLC
    Llc = 26,
    /// AF_IB: Native InfiniBand address
    /// See: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/configuring_infiniband_and_rdma_networks/index#understanding-infiniband-and-rdma_configuring-infiniband-and-rdma-networks
    Ib = 27,
    /// AF_MPLS: MPLS
    Mpls = 28,
    /// AF_CAN: Controller Area Network
    Can = 29,
    /// AF_TIPC: TIPC sockets
    Tipc = 30,
    /// AF_BLUETOOTH: Bluetooth sockets
    Bluetooth = 31,
    /// AF_IUCV: IUCV sockets
    Iucv = 32,
    /// AF_RXRPC: RxRPC sockets
    Rxrpc = 33,
    /// AF_ISDN: mISDN sockets
    Isdn = 34,
    /// AF_PHONET: Phonet sockets
    Phonet = 35,
    /// AF_IEEE802154: IEEE 802.15.4 sockets
    Ieee802154 = 36,
    /// AF_CAIF: CAIF sockets
    Caif = 37,
    /// AF_ALG: Algorithm sockets
    Alg = 38,
    /// AF_NFC: NFC sockets
    Nfc = 39,
    /// AF_VSOCK: vSockets
    Vsock = 40,
    /// AF_KCM: Kernel Connection Multiplexor
    Kcm = 41,
    /// AF_QIPCRTR: Qualcomm IPC Router
    Qipcrtr = 42,
    /// AF_SMC: SMC-R sockets.
    /// reserve number for PF_SMC protocol family that reuses AF_INET address family
    Smc = 43,
    /// AF_XDP: XDP sockets
    Xdp = 44,
    /// AF_MCTP: Management Component Transport Protocol
    Mctp = 45,
    /// AF_MAX: the highest address family number
    Max = 46,
}

impl TryFrom<u16> for AddressFamily {
    type Error = SystemError;
    fn try_from(x: u16) -> Result<Self, Self::Error> {
        use num_traits::FromPrimitive;
        return <Self as FromPrimitive>::from_u16(x).ok_or(SystemError::EINVAL);
    }
}

/// @brief POSIX socket type enumeration (the values match those used in the Linux kernel)
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
pub enum PosixSocketType {
    Stream = 1,
    Datagram = 2,
    Raw = 3,
    Rdm = 4,
    SeqPacket = 5,
    Dccp = 6,
    Packet = 10,
}

impl TryFrom<u8> for PosixSocketType {
    type Error = SystemError;
    fn try_from(x: u8) -> Result<Self, Self::Error> {
        use num_traits::FromPrimitive;
        return <Self as FromPrimitive>::from_u8(x).ok_or(SystemError::EINVAL);
    }
}
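
// Usage sketch (illustrative only, not part of the original file): mapping the raw `domain` and
// `type` arguments of a socket(2)-style call onto these enums.
//
//     assert_eq!(AddressFamily::try_from(2u16)?, AddressFamily::INet);      // AF_INET
//     assert_eq!(PosixSocketType::try_from(1u8)?, PosixSocketType::Stream); // SOCK_STREAM
//     assert!(AddressFamily::try_from(999u16).is_err());                    // -> EINVAL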

/// ### Lock-free poll method for sockets
///
/// The NIC interrupt handler needs to poll the socket state; under the current design, polling
/// through the socket file or its inode would inevitably deadlock, so this component is
/// introduced to provide a lock-free poll.
pub struct SocketPollMethod;

impl SocketPollMethod {
    pub fn poll(socket: &socket::Socket, shutdown: ShutdownType) -> EPollEventType {
        match socket {
            socket::Socket::Udp(udp) => Self::udp_poll(udp, shutdown),
            socket::Socket::Tcp(tcp) => Self::tcp_poll(tcp, shutdown),
            _ => todo!(),
        }
    }

    pub fn tcp_poll(socket: &tcp::Socket, shutdown: ShutdownType) -> EPollEventType {
        let mut events = EPollEventType::empty();
        if socket.is_listening() && socket.is_active() {
            events.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
            return events;
        }

        // The socket has been closed
        if !socket.is_open() {
            events.insert(EPollEventType::EPOLLHUP)
        }
        if shutdown.contains(ShutdownType::RCV_SHUTDOWN) {
            events.insert(
                EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM | EPollEventType::EPOLLRDHUP,
            );
        }

        let state = socket.state();
        if state != tcp::State::SynSent && state != tcp::State::SynReceived {
            // The socket has data available for reading
            if socket.can_recv() {
                events.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
            }

            if !(shutdown.contains(ShutdownType::SEND_SHUTDOWN)) {
                // The send buffer is writable
                if socket.send_queue() < socket.send_capacity() {
                    events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM);
                } else {
                    // TODO: raise the signal indicating that the buffer is full
                    todo!("A signal that the buffer is full needs to be sent");
                }
            } else {
                // If SEND_SHUTDOWN has been set on our socket, the epoll event is EPOLLOUT
                events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM);
            }
        } else if state == tcp::State::SynSent {
            events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM);
        }

        // The socket encountered an error
        if !socket.is_active() {
            events.insert(EPollEventType::EPOLLERR);
        }

        events
    }

    pub fn udp_poll(socket: &udp::Socket, shutdown: ShutdownType) -> EPollEventType {
        let mut event = EPollEventType::empty();

        if shutdown.contains(ShutdownType::RCV_SHUTDOWN) {
            event.insert(
                EPollEventType::EPOLLRDHUP | EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM,
            );
        }
        if shutdown.contains(ShutdownType::SHUTDOWN_MASK) {
            event.insert(EPollEventType::EPOLLHUP);
        }

        if socket.can_recv() {
            event.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM);
        }

        if socket.can_send() {
            event.insert(
                EPollEventType::EPOLLOUT
                    | EPollEventType::EPOLLWRNORM
                    | EPollEventType::EPOLLWRBAND,
            );
        } else {
            // TODO: the send buffer is out of space; this should be handled with a signal
            todo!()
        }

        return event;
    }
}
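
// Usage sketch (illustrative only, not part of the original file): how an interrupt/softirq path
// might compute epoll events for a TCP socket without going through the inode layer. `handle` is
// a placeholder for the socket's SocketHandle.
//
//     let sockets = SOCKET_SET.lock_irqsave();
//     let smoltcp_socket = sockets.get::<tcp::Socket>(handle);
//     let shutdown = HANDLE_MAP.read().get(&handle).unwrap().shutdown_type();
//     let events = SocketPollMethod::tcp_poll(smoltcp_socket, shutdown);
//     /* ... wake the socket's wait_queue / notify epoll with `events` ... */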