1 #ifndef __OSDEP_SERVICE_H_
2 #define __OSDEP_SERVICE_H_
3
4 #define _SUCCESS 1
5 #define _FAIL 0
6
7 #include "basic_types.h"
8 #include <linux/version.h>
9 #include <linux/spinlock.h>
10
11 #include <linux/semaphore.h>
12 #include <linux/sem.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <net/iw_handler.h>
16 #include <linux/proc_fs.h> /* Necessary because we use the proc fs */
17 #include <linux/compiler.h>
18 #include <linux/kernel.h>
19 #include <linux/errno.h>
20 #include <linux/init.h>
21 #include <linux/slab.h>
22 #include <linux/module.h>
23 #include <linux/sched.h>
24 #include <linux/kref.h>
25 #include <linux/netdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/usb.h>
28 #include <linux/usb/ch9.h>
29 #include <linux/io.h>
30 #include <linux/circ_buf.h>
31 #include <linux/uaccess.h>
32 #include <asm/byteorder.h>
33 #include <asm/atomic.h>
34 #include <linux/wireless.h>
35 #include <linux/rtnetlink.h>
36 #include "ethernet.h"
37 #include <linux/if_arp.h>
38 #include <linux/firmware.h>
/* Thin legacy-named wrappers around the USB core URB helpers. */
#define _usb_alloc_urb(x, y) usb_alloc_urb(x, y)
#define _usb_submit_urb(x, y) usb_submit_urb(x, y)
41
/* Generic FIFO container: a doubly-linked list protected by a spinlock. */
struct __queue {
	struct list_head queue;	/* list head of the queued entries */
	spinlock_t lock;	/* guards all accesses to 'queue' */
};
46
/* Legacy vendor-code aliases for common kernel types/operations. */
#define _pkt struct sk_buff
#define _buffer unsigned char
#define thread_exit() complete_and_exit(NULL, 0)
#define _workitem struct work_struct
/* Milliseconds -> jiffies; split into whole-second and remainder parts
 * to reduce the risk of intermediate overflow for large 't'. */
#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)

/* Initialize a struct __queue: empty list plus an unlocked spinlock. */
#define _init_queue(pqueue) \
	do { \
		_init_listhead(&((pqueue)->queue)); \
		spin_lock_init(&((pqueue)->lock)); \
	} while (0)
58
/* Return the driver-private area of @dev (wrapper around netdev_priv()). */
static inline void *_netdev_priv(struct net_device *dev)
{
	return netdev_priv(dev);
}
63
/* Release a net_device allocated with alloc_netdev()/alloc_etherdev(). */
static inline void os_free_netdev(struct net_device *dev)
{
	free_netdev(dev);
}
68
/* Return the successor of @list in its circular doubly-linked list. */
static inline struct list_head *get_next(struct list_head *list)
{
	return list->next;
}
73
/* Return the embedded list head of a struct __queue. */
static inline struct list_head *get_list_head(struct __queue *queue)
{
	return &(queue->queue);
}
78
/*
 * Recover the containing structure from a pointer to one of its members;
 * an open-coded container_of() using the null-pointer offsetof idiom.
 */
#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
81
/*
 * Enter the hardware-I/O critical section by taking @prwlock.
 * @pirqL is unused here; it is kept only for interface symmetry with
 * spinlock-based variants of this helper.
 */
static inline void _enter_hwio_critical(struct semaphore *prwlock,
					unsigned long *pirqL)
{
	down(prwlock);
}
87
/* Leave the hardware-I/O critical section; @pirqL is unused (see above). */
static inline void _exit_hwio_critical(struct semaphore *prwlock,
				       unsigned long *pirqL)
{
	up(prwlock);
}
93
/* Unlink @plist from its list and reinitialize it as an empty head. */
static inline void list_delete(struct list_head *plist)
{
	list_del_init(plist);
}
98
/*
 * Initialize @ptimer with callback @pfunc and callback argument @cntx.
 * @padapter is unused; kept for interface compatibility with callers.
 * NOTE(review): function/data are assigned before init_timer() — this
 * relies on init_timer() not clearing those fields; confirm against the
 * target kernel version before reordering.
 */
static inline void _init_timer(struct timer_list *ptimer,
			       struct net_device *padapter,
			       void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->data = (addr_t)cntx;
	init_timer(ptimer);
}
107
/* (Re)arm @ptimer to fire @delay_time milliseconds from now. */
static inline void _set_timer(struct timer_list *ptimer, u32 delay_time)
{
	unsigned long expire = jiffies + (delay_time * HZ) / 1000;

	mod_timer(ptimer, expire);
}
112
/*
 * Stop @ptimer and report via @bcancelled whether a pending timer was
 * actually deactivated.
 *
 * Fix: the previous code wrote 'true' unconditionally, so callers were
 * told the timer had been cancelled even when it was not pending.
 * del_timer() returns 1 only when it deactivates a pending timer, so
 * propagate that result instead.
 */
static inline void _cancel_timer(struct timer_list *ptimer, u8 *bcancelled)
{
	*bcancelled = del_timer(ptimer) == 1; /* true == 1; false == 0 */
}
118
/*
 * Initialize work item @pwork with handler @pfunc.
 * @cntx is unused: INIT_WORK() passes the work_struct itself to the
 * handler, which is expected to recover its context via container_of().
 */
static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
	INIT_WORK(pwork, pfunc);
}
123
/* Queue @pwork on the kernel-global workqueue. */
static inline void _set_workitem(_workitem *pwork)
{
	schedule_work(pwork);
}
128
129 #include "rtl871x_byteorder.h"
130
/* Define BIT() only if a kernel header has not already provided it. */
#ifndef BIT
#define BIT(x) (1 << (x))
#endif
134
/*
 * For the following list_xxx operations, the caller must guarantee an
 * atomic context; otherwise there will be a race condition.
 */
is_list_empty(struct list_head * phead)140 static inline u32 is_list_empty(struct list_head *phead)
141 {
142 if (list_empty(phead))
143 return true;
144 else
145 return false;
146 }
147
/* Append @plist at the tail of the list headed by @phead. */
static inline void list_insert_tail(struct list_head *plist, struct list_head *phead)
{
	list_add_tail(plist, phead);
}
152
_down_sema(struct semaphore * sema)153 static inline u32 _down_sema(struct semaphore *sema)
154 {
155 if (down_interruptible(sema))
156 return _FAIL;
157 else
158 return _SUCCESS;
159 }
160
/* Initialize @prwlock as a binary semaphore (mutex-like, count = 1). */
static inline void _rtl_rwlock_init(struct semaphore *prwlock)
{
	sema_init(prwlock, 1);
}
165
/* Initialize @list as an empty, self-referencing list head. */
static inline void _init_listhead(struct list_head *list)
{
	INIT_LIST_HEAD(list);
}
170
/* Return true if @pqueue holds no entries (caller must hold the lock). */
static inline u32 _queue_empty(struct __queue *pqueue)
{
	return is_list_empty(&(pqueue->queue));
}
175
end_of_queue_search(struct list_head * head,struct list_head * plist)176 static inline u32 end_of_queue_search(struct list_head *head, struct list_head *plist)
177 {
178 if (head == plist)
179 return true;
180 else
181 return false;
182 }
183
sleep_schedulable(int ms)184 static inline void sleep_schedulable(int ms)
185 {
186 u32 delta;
187
188 delta = (ms * HZ) / 1000;/*(ms)*/
189 if (delta == 0)
190 delta = 1;/* 1 ms */
191 set_current_state(TASK_INTERRUPTIBLE);
192 if (schedule_timeout(delta) != 0)
193 return ;
194 }
195
/*
 * Allocate @sz bytes with GFP_ATOMIC (safe in interrupt/atomic context,
 * but may fail more readily than GFP_KERNEL). Returns NULL on failure.
 */
static inline u8 *_malloc(u32 sz)
{
	return kmalloc(sz, GFP_ATOMIC);
}
200
/* Stop @ptimer; returns del_timer()'s result (1 if it was pending). */
static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
{
	return del_timer(ptimer);
}
205
/*
 * Per-thread setup for driver kernel threads: detach from user resources
 * under the fixed name "RTKTHREAD" and allow SIGTERM delivery so the
 * thread can be asked to stop. @context is unused.
 * NOTE(review): daemonize() is a legacy API removed from modern kernels;
 * kthread_run() makes it unnecessary — verify against the target kernel.
 */
static inline void thread_enter(void *context)
{
	daemonize("%s", "RTKTHREAD");
	allow_signal(SIGTERM);
}
211
/* Discard any signals pending on the current thread. */
static inline void flush_signals_thread(void)
{
	if (signal_pending(current))
		flush_signals(current);
}
217
/* Round @sz up to the next multiple of 8. */
static inline u32 _RND8(u32 sz)
{
	u32 blocks = sz >> 3;

	if (sz & 7)
		blocks++;
	return blocks << 3;
}
222
/* Round @sz up to the next multiple of 128. */
static inline u32 _RND128(u32 sz)
{
	u32 blocks = sz >> 7;

	if (sz & 127)
		blocks++;
	return blocks << 7;
}
227
/* Round @sz up to the next multiple of 256. */
static inline u32 _RND256(u32 sz)
{
	u32 blocks = sz >> 8;

	if (sz & 255)
		blocks++;
	return blocks << 8;
}
232
/* Round @sz up to the next multiple of 512. */
static inline u32 _RND512(u32 sz)
{
	u32 blocks = sz >> 9;

	if (sz & 511)
		blocks++;
	return blocks << 9;
}
237
/* GCC attribute: lay the structure out without padding between members. */
#define STRUCT_PACKED __attribute__ ((packed))
239
240 #endif
241
242