Lines Matching refs:qi

248 static int vector_advancehead(struct vector_queue *qi, int advance) in vector_advancehead() argument
252 qi->head = in vector_advancehead()
253 (qi->head + advance) in vector_advancehead()
254 % qi->max_depth; in vector_advancehead()
257 spin_lock(&qi->tail_lock); in vector_advancehead()
258 qi->queue_depth -= advance; in vector_advancehead()
264 if (qi->queue_depth == 0) { in vector_advancehead()
265 qi->head = 0; in vector_advancehead()
266 qi->tail = 0; in vector_advancehead()
268 queue_depth = qi->queue_depth; in vector_advancehead()
269 spin_unlock(&qi->tail_lock); in vector_advancehead()
278 static int vector_advancetail(struct vector_queue *qi, int advance) in vector_advancetail() argument
282 qi->tail = in vector_advancetail()
283 (qi->tail + advance) in vector_advancetail()
284 % qi->max_depth; in vector_advancetail()
285 spin_lock(&qi->head_lock); in vector_advancetail()
286 qi->queue_depth += advance; in vector_advancetail()
287 queue_depth = qi->queue_depth; in vector_advancetail()
288 spin_unlock(&qi->head_lock); in vector_advancetail()
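
The two groups of matched lines above (vector_advancehead() and vector_advancetail()) describe a fixed-size ring: head and tail move modulo max_depth, and the shared queue_depth counter is always adjusted under the *other* side's lock, so producer and consumer can run concurrently while still agreeing on the depth. When the ring drains, both indices snap back to 0; only the head side needs that reset, since an empty ring can only result from the consumer removing the last entry. The listing appears to come from the UML vector network driver (arch/um/drivers/vector_kern.c). Below is a minimal userspace analogue of that pattern, not the driver's code: struct pktq, advance_head() and advance_tail() are hypothetical names, pthread spinlocks stand in for the kernel's spin_lock()/spin_unlock(), payloads are plain void pointers, and the caller is assumed to hold its own side's lock, as the driver's call sites do. The later sketches in this listing reuse these definitions.

    /* Hypothetical userspace analogue of the split-lock ring suggested by
     * the matched lines; not the driver's actual definitions. */
    #include <pthread.h>

    struct pktq {
        int head;                     /* consumer index, owned by the head_lock side */
        int tail;                     /* producer index, owned by the tail_lock side */
        int queue_depth;              /* entries currently in flight */
        int max_depth;                /* ring size */
        void **slot;                  /* per-entry payloads (sk_buffs in the driver) */
        pthread_spinlock_t head_lock; /* initialise with pthread_spin_init() */
        pthread_spinlock_t tail_lock;
    };

    /* Consumer side: caller holds head_lock. The depth shrinks under
     * tail_lock so the producer always reads a consistent value; when the
     * ring drains, both indices are reset to 0. */
    static int advance_head(struct pktq *q, int advance)
    {
        int depth;

        q->head = (q->head + advance) % q->max_depth;
        pthread_spin_lock(&q->tail_lock);
        q->queue_depth -= advance;
        if (q->queue_depth == 0) {
            q->head = 0;
            q->tail = 0;
        }
        depth = q->queue_depth;
        pthread_spin_unlock(&q->tail_lock);
        return depth;
    }

    /* Producer side: mirror image. Caller holds tail_lock; the depth grows
     * under head_lock. */
    static int advance_tail(struct pktq *q, int advance)
    {
        int depth;

        q->tail = (q->tail + advance) % q->max_depth;
        pthread_spin_lock(&q->head_lock);
        q->queue_depth += advance;
        depth = q->queue_depth;
        pthread_spin_unlock(&q->head_lock);
        return depth;
    }
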
333 static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb) in vector_enqueue() argument
335 struct vector_private *vp = netdev_priv(qi->dev); in vector_enqueue()
338 struct mmsghdr *mmsg_vector = qi->mmsg_vector; in vector_enqueue()
341 spin_lock(&qi->tail_lock); in vector_enqueue()
342 spin_lock(&qi->head_lock); in vector_enqueue()
343 queue_depth = qi->queue_depth; in vector_enqueue()
344 spin_unlock(&qi->head_lock); in vector_enqueue()
349 if (queue_depth < qi->max_depth) { in vector_enqueue()
351 *(qi->skbuff_vector + qi->tail) = skb; in vector_enqueue()
352 mmsg_vector += qi->tail; in vector_enqueue()
363 queue_depth = vector_advancetail(qi, 1); in vector_enqueue()
366 spin_unlock(&qi->tail_lock); in vector_enqueue()
369 qi->dev->stats.tx_dropped++; in vector_enqueue()
373 netdev_completed_queue(qi->dev, 1, packet_len); in vector_enqueue()
375 spin_unlock(&qi->tail_lock); in vector_enqueue()
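
The vector_enqueue() matches show the producer path: take tail_lock, peek at queue_depth under head_lock, and if there is room park the skb in the tail slot (and point at the matching mmsghdr entry) before advancing the tail; otherwise the packet is dropped and tx_dropped is bumped. A sketch of that flow on the pktq analogue above, with pktq_enqueue() as a hypothetical name and the netdev accounting reduced to comments:

    /* Producer path on the pktq analogue; returns the new depth, or -1 when
     * the ring is full (the driver drops the skb and counts tx_dropped). */
    static int pktq_enqueue(struct pktq *q, void *pkt)
    {
        int depth;

        pthread_spin_lock(&q->tail_lock);

        /* Snapshot the depth under head_lock, as the matched lines do. */
        pthread_spin_lock(&q->head_lock);
        depth = q->queue_depth;
        pthread_spin_unlock(&q->head_lock);

        if (depth < q->max_depth) {
            q->slot[q->tail] = pkt;    /* *(qi->skbuff_vector + qi->tail) = skb */
            /* the driver also fills mmsg_vector[tail] for the skb here */
            depth = advance_tail(q, 1);
            pthread_spin_unlock(&q->tail_lock);
            return depth;
        }

        pthread_spin_unlock(&q->tail_lock);
        return -1;                     /* full: caller drops the packet */
    }
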
379 static int consume_vector_skbs(struct vector_queue *qi, int count) in consume_vector_skbs() argument
385 for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) { in consume_vector_skbs()
386 skb = *(qi->skbuff_vector + skb_index); in consume_vector_skbs()
391 *(qi->skbuff_vector + skb_index) = NULL; in consume_vector_skbs()
394 qi->dev->stats.tx_bytes += bytes_compl; in consume_vector_skbs()
395 qi->dev->stats.tx_packets += count; in consume_vector_skbs()
396 netdev_completed_queue(qi->dev, count, bytes_compl); in consume_vector_skbs()
397 return vector_advancehead(qi, count); in consume_vector_skbs()
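
consume_vector_skbs() is the completion side: it walks the slots from head to head+count, frees each skb, clears the pointer, credits tx_bytes/tx_packets and netdev_completed_queue(), then advances the head. Note that the loop does not wrap; that only works because the send path below never lets a burst cross the end of the ring. A compact analogue, with pktq_consume() as a made-up name, free() standing in for dev_kfree_skb_any(), and the stats reduced to a comment:

    #include <stdlib.h>    /* free() */

    /* Completion path: release count payloads starting at head, then move
     * the head past them. The range is contiguous by construction (see the
     * clamp in the send sketch below). */
    static int pktq_consume(struct pktq *q, int count)
    {
        int i;

        for (i = q->head; i < q->head + count; i++) {
            free(q->slot[i]);          /* dev_kfree_skb_any() in the driver */
            q->slot[i] = NULL;
        }
        /* the driver credits tx_bytes/tx_packets and calls
         * netdev_completed_queue(dev, count, bytes) here */
        return advance_head(q, count);
    }
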
407 static int vector_send(struct vector_queue *qi) in vector_send() argument
409 struct vector_private *vp = netdev_priv(qi->dev); in vector_send()
411 int result = 0, send_len, queue_depth = qi->max_depth; in vector_send()
413 if (spin_trylock(&qi->head_lock)) { in vector_send()
414 if (spin_trylock(&qi->tail_lock)) { in vector_send()
416 queue_depth = qi->queue_depth; in vector_send()
417 spin_unlock(&qi->tail_lock); in vector_send()
421 send_from = qi->mmsg_vector; in vector_send()
422 send_from += qi->head; in vector_send()
424 if (send_len + qi->head > qi->max_depth) in vector_send()
425 send_len = qi->max_depth - qi->head; in vector_send()
451 consume_vector_skbs(qi, result); in vector_send()
461 netif_wake_queue(qi->dev); in vector_send()
471 spin_unlock(&qi->head_lock); in vector_send()
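
vector_send() drains the ring with trylocks so it never spins against the enqueue path: if head_lock is busy another sender is already at work, and tail_lock is only taken briefly to refresh queue_depth (falling back to a pessimistic max_depth when it is contended). Each burst is clamped at max_depth - head so a single sendmmsg-style call never crosses the wrap point, which is what keeps consume_vector_skbs() contiguous; successfully sent skbs are consumed and the netdev queue is woken. A simplified analogue follows: pktq_send() and send_burst() are hypothetical, and the contended-tail_lock case simply skips the pass instead of using the driver's pessimistic estimate.

    /* TX drain on the pktq analogue. send_burst() is a stand-in for the
     * driver's sendmmsg-based transmit helper and returns how many of the
     * n packets it actually sent. */
    static int pktq_send(struct pktq *q, int (*send_burst)(void **pkts, int n))
    {
        int depth, done, total = 0;

        if (pthread_spin_trylock(&q->head_lock) != 0)
            return 0;                      /* another sender is draining */

        if (pthread_spin_trylock(&q->tail_lock) != 0) {
            /* Sketch simplification: give up for now; the matched lines
             * instead continue with queue_depth assumed to be max_depth. */
            pthread_spin_unlock(&q->head_lock);
            return 0;
        }
        depth = q->queue_depth;
        pthread_spin_unlock(&q->tail_lock);

        while (depth > 0) {
            int burst = depth;

            if (q->head + burst > q->max_depth)
                burst = q->max_depth - q->head;   /* never cross the wrap point */

            done = send_burst(&q->slot[q->head], burst);
            if (done <= 0)
                break;                            /* would-block or error */

            depth = pktq_consume(q, done);
            total += done;
        }
        /* the matched lines also call netif_wake_queue(qi->dev) in this path */

        pthread_spin_unlock(&q->head_lock);
        return total;
    }
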
480 static void destroy_queue(struct vector_queue *qi) in destroy_queue() argument
484 struct vector_private *vp = netdev_priv(qi->dev); in destroy_queue()
487 if (qi == NULL) in destroy_queue()
492 if (qi->skbuff_vector != NULL) { in destroy_queue()
493 for (i = 0; i < qi->max_depth; i++) { in destroy_queue()
494 if (*(qi->skbuff_vector + i) != NULL) in destroy_queue()
495 dev_kfree_skb_any(*(qi->skbuff_vector + i)); in destroy_queue()
497 kfree(qi->skbuff_vector); in destroy_queue()
500 if (qi->mmsg_vector != NULL) { in destroy_queue()
501 mmsg_vector = qi->mmsg_vector; in destroy_queue()
502 for (i = 0; i < qi->max_depth; i++) { in destroy_queue()
512 kfree(qi->mmsg_vector); in destroy_queue()
514 kfree(qi); in destroy_queue()
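
destroy_queue() tears everything down: any skbs still parked in skbuff_vector are freed with dev_kfree_skb_any(), then the skb array, the mmsghdr array (whose per-message buffers are released in a loop the listing elides), and finally the queue itself. On the analogue that collapses to a few free() calls:

    /* Teardown on the pktq analogue; free(NULL) is a no-op, so empty slots
     * need no special casing. */
    static void pktq_destroy(struct pktq *q)
    {
        int i;

        if (q == NULL)
            return;
        if (q->slot != NULL) {
            for (i = 0; i < q->max_depth; i++)
                free(q->slot[i]);
            free(q->slot);
        }
        /* the driver also walks qi->mmsg_vector and frees its per-message
         * buffers before freeing the array itself */
        free(q);
    }
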
673 static void prep_queue_for_rx(struct vector_queue *qi) in prep_queue_for_rx() argument
675 struct vector_private *vp = netdev_priv(qi->dev); in prep_queue_for_rx()
676 struct mmsghdr *mmsg_vector = qi->mmsg_vector; in prep_queue_for_rx()
677 void **skbuff_vector = qi->skbuff_vector; in prep_queue_for_rx()
680 if (qi->queue_depth == 0) in prep_queue_for_rx()
682 for (i = 0; i < qi->queue_depth; i++) { in prep_queue_for_rx()
692 qi->queue_depth = 0; in prep_queue_for_rx()
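
On the receive queue, queue_depth carries a different meaning: it counts slots whose buffers were consumed by the previous receive pass. prep_queue_for_rx() re-arms exactly those slots with fresh skbs (via a helper the listing does not show) and then declares the queue empty again. Sketching that with a caller-supplied allocator, alloc_rx_buf(), as a hypothetical stand-in for the driver's own skb-prep helper:

    /* RX re-arm on the pktq analogue: refill the first queue_depth slots,
     * which are the ones the previous receive pass filled, then mark the
     * queue empty. */
    static void pktq_prep_rx(struct pktq *q, void *(*alloc_rx_buf)(void))
    {
        int i;

        if (q->queue_depth == 0)
            return;
        for (i = 0; i < q->queue_depth; i++)
            q->slot[i] = alloc_rx_buf();   /* skb alloc + mmsghdr setup in the driver */
        q->queue_depth = 0;
    }
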
958 struct vector_queue *qi = vp->rx_queue; in vector_mmsg_rx() local
960 struct mmsghdr *mmsg_vector = qi->mmsg_vector; in vector_mmsg_rx()
961 void **skbuff_vector = qi->skbuff_vector; in vector_mmsg_rx()
968 prep_queue_for_rx(qi); in vector_mmsg_rx()
972 if (budget > qi->max_depth) in vector_mmsg_rx()
973 budget = qi->max_depth; in vector_mmsg_rx()
976 vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0); in vector_mmsg_rx()
989 qi->queue_depth = packet_count; in vector_mmsg_rx()
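
vector_mmsg_rx() is the poll loop: re-arm the ring, clamp the budget to max_depth, pull a burst of datagrams with a recvmmsg()-style call on the transport fd, and record how many slots now hold packets in queue_depth so the next re-arm pass knows what to refill. A userspace sketch using recvmmsg() directly (compile with -D_GNU_SOURCE); pktq_poll_rx() is hypothetical, and the driver's own matched call passes qi->max_depth rather than the clamped budget to its receive helper.

    #include <sys/socket.h>    /* recvmmsg(), struct mmsghdr; needs _GNU_SOURCE */

    /* RX poll on the pktq analogue: returns the number of packets received,
     * leaving them recorded in queue_depth for the next re-arm pass.
     * Hand-off to the network stack and error handling are omitted. */
    static int pktq_poll_rx(struct pktq *q, int rx_fd, struct mmsghdr *msgs,
                            int budget, void *(*alloc_rx_buf)(void))
    {
        int got;

        pktq_prep_rx(q, alloc_rx_buf);

        if (budget > q->max_depth)
            budget = q->max_depth;

        got = recvmmsg(rx_fd, msgs, budget, 0, NULL);
        if (got <= 0)
            return 0;

        q->queue_depth = got;    /* filled slots, consumed by the next prep */
        return got;
    }
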