Lines matching refs: tid

16 mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)  in mt76_aggr_release()  argument
20 tid->head = ieee80211_sn_inc(tid->head); in mt76_aggr_release()
22 skb = tid->reorder_buf[idx]; in mt76_aggr_release()
26 tid->reorder_buf[idx] = NULL; in mt76_aggr_release()
27 tid->nframes--; in mt76_aggr_release()
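
The matches above and below appear to come from the mt76 wireless driver's RX A-MPDU reorder path (agg-rx.c in the Linux kernel). The sketches interleaved below reconstruct each routine from its matched fragments; lines not present in the matches are labeled as assumptions, and all sketches assume the driver's mt76.h plus the usual mac80211/skbuff helpers are in scope. mt76_aggr_release() advances the reorder window head by one sequence number and, if a frame sits in the given slot, moves it onto the caller's frame list:

static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	/* advance the reorder window head by one sequence number */
	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)		/* assumed: empty slot, nothing to release */
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);	/* assumed: hand the frame to the caller */
}
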
32 mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, in mt76_rx_aggr_release_frames() argument
38 while (ieee80211_sn_less(tid->head, head)) { in mt76_rx_aggr_release_frames()
39 idx = tid->head % tid->size; in mt76_rx_aggr_release_frames()
40 mt76_aggr_release(tid, frames, idx); in mt76_rx_aggr_release_frames()
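
mt76_rx_aggr_release_frames() drains buffered slots until the window head catches up with a target sequence number; the `head` parameter name is taken from the loop condition in the matches:

static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
			    struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	/* release every slot between the current head and the target SN */
	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}
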
45 mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames) in mt76_rx_aggr_release_head() argument
47 int idx = tid->head % tid->size; in mt76_rx_aggr_release_head()
49 while (tid->reorder_buf[idx]) { in mt76_rx_aggr_release_head()
50 mt76_aggr_release(tid, frames, idx); in mt76_rx_aggr_release_head()
51 idx = tid->head % tid->size; in mt76_rx_aggr_release_head()
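
mt76_rx_aggr_release_head() flushes the run of consecutively filled slots starting at the head and stops at the first hole; this follows directly from the matched lines:

static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	/* keep releasing while the slot at the head is occupied */
	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;	/* head was advanced inside the release */
	}
}
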
56 mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames) in mt76_rx_aggr_check_release() argument
62 if (!tid->nframes) in mt76_rx_aggr_check_release()
65 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_check_release()
67 start = tid->head % tid->size; in mt76_rx_aggr_check_release()
68 nframes = tid->nframes; in mt76_rx_aggr_check_release()
70 for (idx = (tid->head + 1) % tid->size; in mt76_rx_aggr_check_release()
72 idx = (idx + 1) % tid->size) { in mt76_rx_aggr_check_release()
73 skb = tid->reorder_buf[idx]; in mt76_rx_aggr_check_release()
81 mt76_aggr_tid_to_timeo(tid->num))) in mt76_rx_aggr_check_release()
84 mt76_rx_aggr_release_frames(tid, frames, status->seqno); in mt76_rx_aggr_check_release()
87 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_check_release()
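
mt76_rx_aggr_check_release() is the timeout pass: it first releases whatever is already in order, then scans the remaining slots and, for any buffered frame that has waited longer than the per-TID timeout, forces the window forward to that frame's sequence number. The loop bounds and the time_after32()/reorder_time comparison are not among the matched lines and are assumptions:

static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;	/* assumed loop bounds */
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		/* assumed: skip frames that have not yet exceeded the timeout */
		if (!time_after32(jiffies, status->reorder_time +
					   mt76_aggr_tid_to_timeo(tid->num)))
			continue;

		/* stale frame: release everything up to its sequence number */
		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}
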
93 struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid, in mt76_rx_aggr_reorder_work() local
95 struct mt76_dev *dev = tid->dev; in mt76_rx_aggr_reorder_work()
104 spin_lock(&tid->lock); in mt76_rx_aggr_reorder_work()
105 mt76_rx_aggr_check_release(tid, &frames); in mt76_rx_aggr_reorder_work()
106 nframes = tid->nframes; in mt76_rx_aggr_reorder_work()
107 spin_unlock(&tid->lock); in mt76_rx_aggr_reorder_work()
110 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, in mt76_rx_aggr_reorder_work()
111 mt76_aggr_tid_to_timeo(tid->num)); in mt76_rx_aggr_reorder_work()
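
mt76_rx_aggr_reorder_work() is the delayed work behind that timeout. It takes the TID lock, runs the check-release pass, and re-arms itself while frames remain buffered; the released frames are then handed back to the RX path. Everything outside the matched lock/release/re-arm lines is an assumption:

static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;	/* used by the omitted RX completion */
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);	/* assumed local list for released frames */

	/* assumed: the real handler runs this with BHs disabled and under RCU */
	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	/* re-arm the timeout while frames are still parked in the buffer */
	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     mt76_aggr_tid_to_timeo(tid->num));

	/* assumed: frames collected on &frames are then passed back to the
	 * driver's normal RX completion path (which is why dev is looked up)
	 */
}
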
124 struct mt76_rx_tid *tid; in mt76_rx_aggr_check_ctl() local
136 tid = rcu_dereference(wcid->aggr[tidno]); in mt76_rx_aggr_check_ctl()
137 if (!tid) in mt76_rx_aggr_check_ctl()
140 spin_lock_bh(&tid->lock); in mt76_rx_aggr_check_ctl()
141 if (!tid->stopped) { in mt76_rx_aggr_check_ctl()
142 mt76_rx_aggr_release_frames(tid, frames, seqno); in mt76_rx_aggr_check_ctl()
143 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_check_ctl()
145 spin_unlock_bh(&tid->lock); in mt76_rx_aggr_check_ctl()
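
mt76_rx_aggr_check_ctl() handles control frames, specifically BlockAck requests (BAR): on a BAR for an active TID, the window is moved forward to the BAR's starting sequence number and any now-in-order frames are released. Only the lookup/lock/release part is confirmed by the matches; the BAR parsing is an assumption:

static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; /* assumed offset */
	struct mt76_wcid *wcid = status->wcid;	/* assumed field layout */
	struct mt76_rx_tid *tid;
	u8 tidno;
	u16 seqno;

	/* assumed: only BlockAck request (BAR) control frames are of interest */
	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	tidno = le16_to_cpu(bar->control) >> 12;	/* assumed BAR TID parse */
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	if (!tid->stopped) {
		/* move the window up to the BAR start SN, then flush in-order frames */
		mt76_rx_aggr_release_frames(tid, frames, seqno);
		mt76_rx_aggr_release_head(tid, frames);
	}
	spin_unlock_bh(&tid->lock);
}
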
153 struct mt76_rx_tid *tid; in mt76_rx_aggr_reorder() local
176 tid = rcu_dereference(wcid->aggr[tidno]); in mt76_rx_aggr_reorder()
177 if (!tid) in mt76_rx_aggr_reorder()
181 spin_lock_bh(&tid->lock); in mt76_rx_aggr_reorder()
183 if (tid->stopped) in mt76_rx_aggr_reorder()
186 head = tid->head; in mt76_rx_aggr_reorder()
188 size = tid->size; in mt76_rx_aggr_reorder()
191 if (!tid->started) { in mt76_rx_aggr_reorder()
195 tid->started = true; in mt76_rx_aggr_reorder()
205 tid->head = ieee80211_sn_inc(head); in mt76_rx_aggr_reorder()
206 if (tid->nframes) in mt76_rx_aggr_reorder()
207 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_reorder()
219 mt76_rx_aggr_release_frames(tid, frames, head); in mt76_rx_aggr_reorder()
225 if (tid->reorder_buf[idx]) { in mt76_rx_aggr_reorder()
231 tid->reorder_buf[idx] = skb; in mt76_rx_aggr_reorder()
232 tid->nframes++; in mt76_rx_aggr_reorder()
233 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_reorder()
235 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, in mt76_rx_aggr_reorder()
236 mt76_aggr_tid_to_timeo(tid->num)); in mt76_rx_aggr_reorder()
239 spin_unlock_bh(&tid->lock); in mt76_rx_aggr_reorder()
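
mt76_rx_aggr_reorder() is the per-frame entry point. In outline: look up the TID under RCU, take the lock, ignore or drop frames behind the window, pass an exactly in-order frame straight through while advancing the head, slide the window forward when a frame lands beyond it, and otherwise park the frame in its slot and arm the reorder timeout. A condensed sketch of that control flow; the QoS/ack-policy checks and the skb unlink/free handling are assumptions not shown in the matches:

void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;	/* assumed field layout */
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size, idx;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;	/* assumed */

	__skb_queue_tail(frames, skb);	/* assumed: optimistic pass-through */

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;		/* no BA session on this TID */

	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;	/* assumed field, consistent with the other matches */
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		if (sn_less)
			goto out;	/* ignore pre-window frames until the session starts */
		tid->started = true;
	}

	if (sn_less) {
		/* assumed: frames behind the window are dropped */
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	if (seqno == head) {
		/* exactly in order: advance the head and flush what follows */
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);	/* assumed: out-of-order frame gets buffered */

	/* frame lands beyond the window: slide the window forward to make room */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);	/* assumed: slot already occupied, drop */
		goto out;
	}

	status->reorder_time = jiffies;	/* assumed: timestamp used by the timeout pass */
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     mt76_aggr_tid_to_timeo(tid->num));

out:
	spin_unlock_bh(&tid->lock);
}
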
245 struct mt76_rx_tid *tid; in mt76_rx_aggr_start() local
249 tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL); in mt76_rx_aggr_start()
250 if (!tid) in mt76_rx_aggr_start()
253 tid->dev = dev; in mt76_rx_aggr_start()
254 tid->head = ssn; in mt76_rx_aggr_start()
255 tid->size = size; in mt76_rx_aggr_start()
256 tid->num = tidno; in mt76_rx_aggr_start()
257 INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work); in mt76_rx_aggr_start()
258 spin_lock_init(&tid->lock); in mt76_rx_aggr_start()
260 rcu_assign_pointer(wcid->aggr[tidno], tid); in mt76_rx_aggr_start()
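
mt76_rx_aggr_start() allocates a mt76_rx_tid sized for the negotiated reorder window (struct_size() covers the trailing reorder_buf[] array), seeds the head with the BA session's starting sequence number, and publishes it via RCU. The signature and return values outside the matched lines are assumptions:

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u16 size)	/* assumed signature */
{
	struct mt76_rx_tid *tid;

	/* assumed: any previous session on this TID is torn down first */

	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;	/* assumed */

	tid->dev = dev;
	tid->head = ssn;	/* window starts at the BA session's starting SN */
	tid->size = size;
	tid->num = tidno;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	/* publish the session; readers use rcu_dereference(wcid->aggr[tidno]) */
	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
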
266 static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) in mt76_rx_aggr_shutdown() argument
268 u16 size = tid->size; in mt76_rx_aggr_shutdown()
271 spin_lock_bh(&tid->lock); in mt76_rx_aggr_shutdown()
273 tid->stopped = true; in mt76_rx_aggr_shutdown()
274 for (i = 0; tid->nframes && i < size; i++) { in mt76_rx_aggr_shutdown()
275 struct sk_buff *skb = tid->reorder_buf[i]; in mt76_rx_aggr_shutdown()
280 tid->reorder_buf[i] = NULL; in mt76_rx_aggr_shutdown()
281 tid->nframes--; in mt76_rx_aggr_shutdown()
285 spin_unlock_bh(&tid->lock); in mt76_rx_aggr_shutdown()
287 cancel_delayed_work_sync(&tid->reorder_work); in mt76_rx_aggr_shutdown()
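
mt76_rx_aggr_shutdown() marks the session stopped under the lock, frees every buffered frame (stopping early once nframes reaches zero), and then synchronously cancels the reorder work so nothing can re-arm it. The dev_kfree_skb() on each slot is an assumption consistent with the nframes accounting:

static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u16 size = tid->size;
	int i;

	spin_lock_bh(&tid->lock);

	tid->stopped = true;	/* make further reorder/release attempts bail out */
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->reorder_buf[i] = NULL;
		tid->nframes--;
		dev_kfree_skb(skb);	/* assumed: drop, not deliver, on shutdown */
	}

	spin_unlock_bh(&tid->lock);

	/* must run outside the lock: the work itself takes tid->lock */
	cancel_delayed_work_sync(&tid->reorder_work);
}
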
292 struct mt76_rx_tid *tid = NULL; in mt76_rx_aggr_stop() local
294 tid = rcu_replace_pointer(wcid->aggr[tidno], tid, in mt76_rx_aggr_stop()
296 if (tid) { in mt76_rx_aggr_stop()
297 mt76_rx_aggr_shutdown(dev, tid); in mt76_rx_aggr_stop()
298 kfree_rcu(tid, rcu_head); in mt76_rx_aggr_stop()
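
mt76_rx_aggr_stop() swaps the published pointer for NULL with rcu_replace_pointer(), then shuts the old session down and frees it after a grace period with kfree_rcu(). The exact lockdep condition passed to rcu_replace_pointer() is cut off in the matched line, so it is left as an assumption:

void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid = NULL;

	/* atomically unpublish the session; new readers now see NULL */
	tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
				  lockdep_is_held(&dev->mutex));	/* assumed condition */
	if (tid) {
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);	/* free after the RCU grace period */
	}
}
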