#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H

#include <asm/poll.h>

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <asm/uaccess.h>

extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC	256
#define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
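
/*
 * Worked example of the arithmetic above: WQUEUES_STACK_ALLOC is
 * 832 - 256 = 576 bytes.  How many inline entries fit depends on
 * sizeof(struct poll_table_entry), which varies by architecture; with a
 * hypothetical 64-byte entry, N_INLINE_POLL_ENTRIES would be
 * 576 / 64 = 9 entries before the poll core has to fall back to
 * allocating whole pages for further entries.
 */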

#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)

struct poll_table_struct;

/*
 * structures and helpers for f_op->poll implementations
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

/*
 * Do not touch the structure directly, use the access functions
 * poll_does_not_wait() and poll_requested_events() instead.
 */
typedef struct poll_table_struct {
	poll_queue_proc _qproc;
	unsigned long _key;
} poll_table;

static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
	if (p && p->_qproc && wait_address)
		p->_qproc(filp, wait_address, p);
}
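
/*
 * Illustrative sketch (not part of this header): a typical f_op->poll
 * implementation calls poll_wait() on every wait queue it might later
 * wake up, then returns the mask of events that are ready right now.
 * The driver type "struct mydrv", its read_wq member and the
 * mydrv_data_ready() helper are hypothetical:
 *
 *	static unsigned int mydrv_poll(struct file *filp, poll_table *wait)
 *	{
 *		struct mydrv *dev = filp->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(filp, &dev->read_wq, wait);
 *		if (mydrv_data_ready(dev))
 *			mask |= POLLIN | POLLRDNORM;
 *		return mask;
 *	}
 *
 * Note that poll_wait() itself never blocks; it only registers the wait
 * queue so that the caller can sleep later.
 */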

/*
 * Return true if it is guaranteed that poll will not wait. This is the case
 * if the poll() of another file descriptor in the set got an event, so there
 * is no need for waiting.
 */
static inline bool poll_does_not_wait(const poll_table *p)
{
	return p == NULL || p->_qproc == NULL;
}

/*
 * Return the set of events that the application wants to poll for.
 * This is useful for drivers that need to know whether a DMA transfer has
 * to be started implicitly on poll(). You typically only want to do that
 * if the application is actually polling for POLLIN and/or POLLOUT.
 */
static inline unsigned long poll_requested_events(const poll_table *p)
{
	return p ? p->_key : ~0UL;
}
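
/*
 * Illustrative sketch (not part of this header): combining the two
 * access functions above, a driver can start a receive DMA implicitly
 * on poll() only when the caller may actually wait and has asked for
 * read events.  "struct mydrv" and mydrv_start_rx_dma() are
 * hypothetical:
 *
 *	static unsigned int mydrv_poll(struct file *filp, poll_table *wait)
 *	{
 *		struct mydrv *dev = filp->private_data;
 *
 *		if (!poll_does_not_wait(wait) &&
 *		    (poll_requested_events(wait) & (POLLIN | POLLRDNORM)))
 *			mydrv_start_rx_dma(dev);
 *		poll_wait(filp, &dev->read_wq, wait);
 *		...
 *	}
 */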

static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->_qproc = qproc;
	pt->_key   = ~0UL; /* all events enabled */
}

struct poll_table_entry {
	struct file *filp;
	unsigned long key;
	wait_queue_t wait;
	wait_queue_head_t *wait_address;
};

/*
 * Structures and helpers for select/poll syscall
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;
	struct task_struct *polling_task;
	int triggered;
	int error;
	int inline_index;
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};

extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
				 ktime_t *expires, unsigned long slack);
extern long select_estimate_accuracy(struct timespec *tv);


static inline int poll_schedule(struct poll_wqueues *pwq, int state)
{
	return poll_schedule_timeout(pwq, state, NULL, 0);
}
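
/*
 * Illustrative sketch (not part of this header) of how the select/poll
 * core in fs/select.c drives the helpers above: set up a poll_wqueues,
 * repeatedly ask each file for its ready mask (each ->poll() call
 * queues the task on the file's wait queues through &table.pt), sleep
 * until woken or timed out, then tear everything down:
 *
 *	struct poll_wqueues table;
 *	int count = 0;
 *
 *	poll_initwait(&table);
 *	for (;;) {
 *		(for each fd: mask = file->f_op->poll(file, &table.pt);
 *		 count the ready files)
 *		if (count || table.error || timed_out)
 *			break;
 *		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
 *					   to, slack))
 *			timed_out = 1;
 *	}
 *	poll_freewait(&table);
 */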

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
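
/*
 * Example of the rounding above, assuming 64-bit longs: for nr = 100
 * bits, FDS_LONGS(100) = (100 + 63) / 64 = 2 longwords, so
 * FDS_BYTES(100) = 2 * 8 = 16 bytes.
 */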

/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}
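
/*
 * Illustrative sketch (not part of this header): core_sys_select() in
 * fs/select.c uses the helpers above roughly like this, with the six
 * fd_set_bits bitmaps carved out of one kernel buffer of
 * 6 * FDS_BYTES(n) bytes:
 *
 *	if (get_fd_set(n, inp, fds.in))
 *		goto out;
 *	zero_fd_set(n, fds.res_in);
 *	ret = do_select(n, &fds, end_time);
 *	if (set_fd_set(n, inp, fds.res_in))
 *		ret = -EFAULT;
 *
 * get_fd_set() zeroes the kernel copy when the user pointer is NULL, so
 * a NULL fd_set behaves like an empty one.
 */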

#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
		       struct timespec *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time);

extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);

#endif /* KERNEL */

#endif /* _LINUX_POLL_H */