Lines matching refs: wl (wl1251 driver, TX path)

In wl1251_tx_double_buffer_busy() (wl: argument):
 19  static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
 23          data_in_count = wl->data_in_count;
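
The busy check above compares two free-running counters: frames the host has pushed in (data_in_count) and frames the firmware has drained (data_out_count). A minimal user-space sketch of that comparison follows, with illustrative values for the wrap mask and chunk count (the driver takes its own constants from its headers):

    #include <stdbool.h>
    #include <stdint.h>

    #define COUNT_MASK      0x000f  /* illustrative wrap mask for the counters */
    #define RING_CHUNK_NUM  2       /* double buffer: at most two chunks in flight */

    /* Busy when every chunk of the double buffer holds an unconsumed frame. */
    static bool tx_double_buffer_busy(uint32_t data_in_count,
                                      uint32_t data_out_count)
    {
        /* Undo counter wrap-around so the subtraction stays monotonic. */
        if (data_in_count < data_out_count)
            data_in_count += COUNT_MASK + 1;

        return (data_in_count - data_out_count) >= RING_CHUNK_NUM;
    }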

In wl1251_tx_path_status() (wl: argument):
 40  static int wl1251_tx_path_status(struct wl1251 *wl)
 45          addr = wl->data_path->tx_control_addr;
 46          status = wl1251_mem_read32(wl, addr);
 48          busy = wl1251_tx_double_buffer_busy(wl, data_out_count);

In wl1251_tx_id() (wl: argument):
 56  static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
 61                  if (wl->tx_frames[i] == NULL) {
 62                          wl->tx_frames[i] = skb;
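
wl1251_tx_id() hands out the index of the first free slot in wl->tx_frames[], which later lets the completion path map a firmware result id back to its skb. A sketch of the same slot-allocation pattern, with a hypothetical table size:

    #include <stddef.h>

    #define SLOTS 16  /* hypothetical; the driver sizes this from its own define */

    /* Store item in the first free slot and return its index, or -1
     * when the table is full (the driver returns -EBUSY there). */
    static int tx_id(void *table[SLOTS], void *item)
    {
        for (size_t i = 0; i < SLOTS; i++)
            if (table[i] == NULL) {
                table[i] = item;
                return (int)i;
            }
        return -1;
    }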

In wl1251_tx_fill_hdr() (wl: argument):
134  static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
145          id = wl1251_tx_id(wl, skb);
153          rate = ieee80211_get_tx_rate(wl->hw, control);

In wl1251_tx_send_packet() (wl: argument):
167  static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
216                  wl->tx_frames[tx_hdr->id] = skb = newskb;
234          if (wl->data_in_count & 0x1)
235                  addr = wl->data_path->tx_packet_ring_addr +
236                          wl->data_path->tx_packet_ring_chunk_size;
238                  addr = wl->data_path->tx_packet_ring_addr;
240          wl1251_mem_write(wl, addr, skb->data, len);
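
The address selection in wl1251_tx_send_packet() places the frame in one of the two ring chunks: the low bit of data_in_count picks the half, so successive frames alternate between them. A sketch of that addressing, assuming only that the two chunks are laid out back to back:

    #include <stdint.h>

    /* Base address of the chunk the next frame should be copied into. */
    static uint32_t tx_chunk_addr(uint32_t ring_addr, uint32_t chunk_size,
                                  uint32_t data_in_count)
    {
        if (data_in_count & 0x1)
            return ring_addr + chunk_size;  /* odd count: second chunk */
        return ring_addr;                   /* even count: first chunk */
    }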

In wl1251_tx_trigger() (wl: argument):
249  static void wl1251_tx_trigger(struct wl1251 *wl)
253          if (wl->data_in_count & 0x1) {
261          wl1251_reg_write32(wl, addr, data);
264          wl->data_in_count = (wl->data_in_count + 1) &
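
wl1251_tx_trigger() rings one of two doorbell registers (again selected by the low bit of data_in_count) and then advances the counter modulo the same mask the busy check unwraps. A sketch of the counter update, with an illustrative mask value:

    #include <stdint.h>

    #define COUNT_MASK 0x000f  /* illustrative; must match the busy check's mask */

    /* Advance the producer counter after a frame is handed to the firmware. */
    static uint32_t tx_advance_count(uint32_t data_in_count)
    {
        return (data_in_count + 1) & COUNT_MASK;
    }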

In enable_tx_for_packet_injection() (wl: argument):
268  static void enable_tx_for_packet_injection(struct wl1251 *wl)
272          ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
273                                wl->beacon_int, wl->dtim_period);
279          ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
285          wl->joined = true;

In wl1251_tx_frame() (wl: argument):
289  static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
298                  if (unlikely(wl->monitor_present))
302                  if (unlikely(wl->default_key != idx)) {
303                          ret = wl1251_acx_default_key(wl, idx);
310          if ((wl->vif == NULL) && !wl->joined)
311                  enable_tx_for_packet_injection(wl);
313          ret = wl1251_tx_path_status(wl);
317          ret = wl1251_tx_fill_hdr(wl, skb, info);
321          ret = wl1251_tx_send_packet(wl, skb, info);
325          wl1251_tx_trigger(wl);
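
Taken together, wl1251_tx_frame() runs a fixed pipeline per frame. A condensed, runnable sketch with the real steps replaced by stubs, to show the ordering and the early-exit error handling:

    #include <stdio.h>

    static int tx_path_status(void) { return 0; }       /* room in the ring?  */
    static int tx_fill_hdr(void)    { return 0; }       /* build descriptor   */
    static int tx_send_packet(void) { return 0; }       /* copy into a chunk  */
    static void tx_trigger(void)    { puts("doorbell"); }

    /* Any failing step aborts the frame and propagates the error. */
    static int tx_frame(void)
    {
        int ret;

        if ((ret = tx_path_status()) < 0)
            return ret;
        if ((ret = tx_fill_hdr()) < 0)
            return ret;
        if ((ret = tx_send_packet()) < 0)
            return ret;
        tx_trigger();
        return 0;
    }

    int main(void) { return tx_frame(); }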

In wl1251_tx_work() (wl: local):
332          struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
337          mutex_lock(&wl->mutex);
339          if (unlikely(wl->state == WL1251_STATE_OFF))
342          while ((skb = skb_dequeue(&wl->tx_queue))) {
344                  ret = wl1251_ps_elp_wakeup(wl);
350                  ret = wl1251_tx_frame(wl, skb);
352                          skb_queue_head(&wl->tx_queue, skb);
362          wl1251_ps_elp_sleep(wl);
364          mutex_unlock(&wl->mutex);
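
The work-queue handler drains wl->tx_queue under the mutex; when a frame cannot be accepted (the double buffer is busy) it is pushed back to the head so ordering is preserved and the pass ends. A self-contained sketch of that drain-and-requeue pattern, using a toy queue and a device that accepts two frames before reporting busy:

    #include <stdbool.h>
    #include <stdio.h>

    static int queue[] = { 1, 2, 3, 4 };
    static int head, tail = 4;          /* frames 1..4 pending */

    /* Toy device: accepts two frames, then reports busy. */
    static bool hw_send(int frame)
    {
        static int accepted;
        if (accepted >= 2)
            return false;
        accepted++;
        printf("sent frame %d\n", frame);
        return true;
    }

    int main(void)
    {
        while (head < tail) {
            int frame = queue[head++];  /* skb_dequeue()           */
            if (!hw_send(frame)) {
                head--;                 /* skb_queue_head(): retry */
                printf("busy, frame %d stays queued\n", frame);
                break;
            }
        }
        return 0;
    }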

In wl1251_tx_packet_cb() (wl: argument):
395  static void wl1251_tx_packet_cb(struct wl1251 *wl,
403          skb = wl->tx_frames[result->id];
417          wl->stats.retry_count += result->ack_failures;
437          ieee80211_tx_status(wl->hw, skb);
439          wl->tx_frames[result->id] = NULL;

In wl1251_tx_complete() (wl: argument):
443  void wl1251_tx_complete(struct wl1251 *wl)
449          if (unlikely(wl->state != WL1251_STATE_ON))
459          wl1251_mem_read(wl, wl->data_path->tx_complete_addr, result,
462          result_index = wl->next_tx_complete;
469                  wl1251_tx_packet_cb(wl, result_ptr);
482          queue_len = skb_queue_len(&wl->tx_queue);
487                  ieee80211_queue_work(wl->hw, &wl->tx_work);
490          if (wl->tx_queue_stopped &&
494                  spin_lock_irqsave(&wl->wl_lock, flags);
495                  ieee80211_wake_queues(wl->hw);
496                  wl->tx_queue_stopped = false;
497                  spin_unlock_irqrestore(&wl->wl_lock, flags);
506          if (result_index > wl->next_tx_complete) {
508                  wl1251_mem_write(wl,
509                                   wl->data_path->tx_complete_addr +
510                                   (wl->next_tx_complete *
512                                   &result[wl->next_tx_complete],
517          } else if (result_index < wl->next_tx_complete) {
519                  wl1251_mem_write(wl,
520                                   wl->data_path->tx_complete_addr +
521                                   (wl->next_tx_complete *
523                                   &result[wl->next_tx_complete],
525                                    wl->next_tx_complete) *
528                  wl1251_mem_write(wl,
529                                   wl->data_path->tx_complete_addr,
533                                    wl->next_tx_complete) *
538                  wl1251_mem_write(wl,
539                                   wl->data_path->tx_complete_addr,
548          wl->next_tx_complete = result_index;
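
The tail of wl1251_tx_complete() writes the consumed entries of the circular result ring back to device memory so stale results are not processed twice: one write when the consumed span [next_tx_complete, result_index) is contiguous, two writes when it wraps past the end of the ring, and a full-ring write when the indices are equal because the ring turned over completely. A sketch of the region arithmetic, with hypothetical ring geometry:

    #include <stdint.h>
    #include <stdio.h>

    enum { RING = 16, ENTRY = 8 };  /* hypothetical entry count and size */

    /* Byte region(s) to write back after consuming [next, result_index). */
    static void writeback_consumed(uint32_t base, uint32_t next,
                                   uint32_t result_index)
    {
        if (result_index > next) {
            /* Contiguous span: a single write suffices. */
            printf("write %u bytes at 0x%x\n",
                   (result_index - next) * ENTRY, base + next * ENTRY);
        } else if (result_index < next) {
            /* Wrapped span: tail of the ring, then its start. */
            printf("write %u bytes at 0x%x\n",
                   (RING - next) * ENTRY, base + next * ENTRY);
            printf("write %u bytes at 0x%x\n", result_index * ENTRY, base);
        } else {
            /* Indices equal after consuming entries: full turn of the ring. */
            printf("write %u bytes at 0x%x\n", RING * ENTRY, base);
        }
    }

    int main(void)
    {
        writeback_consumed(0x1000, 14, 3);  /* wrapped case: two writes */
        return 0;
    }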

In wl1251_tx_flush() (wl: argument):
552  void wl1251_tx_flush(struct wl1251 *wl)
561          while ((skb = skb_dequeue(&wl->tx_queue))) {
569                  ieee80211_tx_status(wl->hw, skb);
573                  if (wl->tx_frames[i] != NULL) {
574                          skb = wl->tx_frames[i];
580                          ieee80211_tx_status(wl->hw, skb);
581                          wl->tx_frames[i] = NULL;