1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34 
35 #include "common.h"
36 
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 
41 #include <net/tcp.h>
42 
43 #include <xen/events.h>
44 #include <xen/interface/memory.h>
45 
46 #include <asm/xen/hypercall.h>
47 #include <asm/xen/page.h>
48 
49 /*
50  * This is the maximum number of slots a skb can have. If a guest sends
51  * a skb which exceeds this limit, it is considered malicious.
52  */
53 #define MAX_SKB_SLOTS_DEFAULT 20
54 static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
55 module_param(max_skb_slots, uint, 0444);
56 
57 typedef unsigned int pending_ring_idx_t;
58 #define INVALID_PENDING_RING_IDX (~0U)
59 
60 struct pending_tx_info {
61 	struct xen_netif_tx_request req; /* coalesced tx request */
62 	struct xenvif *vif;
63 	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
64 				  * if it is head of one or more tx
65 				  * reqs
66 				  */
67 };
68 
69 struct netbk_rx_meta {
70 	int id;
71 	int size;
72 	int gso_size;
73 };
74 
75 #define MAX_PENDING_REQS 256
76 
77 /* Discriminate from any valid pending_idx value. */
78 #define INVALID_PENDING_IDX 0xFFFF
79 
80 #define MAX_BUFFER_OFFSET PAGE_SIZE
81 
82 /* extra field used in struct page */
83 union page_ext {
84 	struct {
85 #if BITS_PER_LONG < 64
86 #define IDX_WIDTH   8
87 #define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
88 		unsigned int group:GROUP_WIDTH;
89 		unsigned int idx:IDX_WIDTH;
90 #else
91 		unsigned int group, idx;
92 #endif
93 	} e;
94 	void *mapping;
95 };
96 
97 struct xen_netbk {
98 	wait_queue_head_t wq;
99 	struct task_struct *task;
100 
101 	struct sk_buff_head rx_queue;
102 	struct sk_buff_head tx_queue;
103 
104 	struct timer_list net_timer;
105 
106 	struct page *mmap_pages[MAX_PENDING_REQS];
107 
108 	pending_ring_idx_t pending_prod;
109 	pending_ring_idx_t pending_cons;
110 	struct list_head net_schedule_list;
111 
112 	/* Protect the net_schedule_list in netif. */
113 	spinlock_t net_schedule_list_lock;
114 
115 	atomic_t netfront_count;
116 
117 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
118 	/* Coalescing tx requests before copying makes number of grant
119 	 * copy ops greater or equal to number of slots required. In
120 	 * worst case a tx request consumes 2 gnttab_copy.
121 	 */
122 	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
123 
124 	u16 pending_ring[MAX_PENDING_REQS];
125 
126 	/*
127 	 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
128 	 * head/fragment page uses 2 copy operations because it
129 	 * straddles two buffers in the frontend.
130 	 */
131 	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
132 	struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
133 };
134 
135 static struct xen_netbk *xen_netbk;
136 static int xen_netbk_group_nr;
137 
138 /*
139  * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
140  * one or more merged tx requests, otherwise it is the continuation of
141  * the previous tx request.
142  */
143 static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
144 {
145 	return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
146 }
147 
148 void xen_netbk_add_xenvif(struct xenvif *vif)
149 {
150 	int i;
151 	int min_netfront_count;
152 	int min_group = 0;
153 	struct xen_netbk *netbk;
154 
155 	min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
156 	for (i = 0; i < xen_netbk_group_nr; i++) {
157 		int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
158 		if (netfront_count < min_netfront_count) {
159 			min_group = i;
160 			min_netfront_count = netfront_count;
161 		}
162 	}
163 
164 	netbk = &xen_netbk[min_group];
165 
166 	vif->netbk = netbk;
167 	atomic_inc(&netbk->netfront_count);
168 }
169 
170 void xen_netbk_remove_xenvif(struct xenvif *vif)
171 {
172 	struct xen_netbk *netbk = vif->netbk;
173 	vif->netbk = NULL;
174 	atomic_dec(&netbk->netfront_count);
175 }
176 
177 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
178 				  u8 status);
179 static void make_tx_response(struct xenvif *vif,
180 			     struct xen_netif_tx_request *txp,
181 			     s8       st);
182 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
183 					     u16      id,
184 					     s8       st,
185 					     u16      offset,
186 					     u16      size,
187 					     u16      flags);
188 
189 static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
190 				       u16 idx)
191 {
192 	return page_to_pfn(netbk->mmap_pages[idx]);
193 }
194 
195 static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
196 					 u16 idx)
197 {
198 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
199 }
200 
201 /* extra field used in struct page */
202 static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
203 				unsigned int idx)
204 {
205 	unsigned int group = netbk - xen_netbk;
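	/*
	 * The group is stored biased by one so that a page whose mapping
	 * field was never set (i.e. is NULL) can never pass the checks in
	 * get_page_ext().
	 */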
206 	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
207 
208 	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
209 	pg->mapping = ext.mapping;
210 }
211 
212 static int get_page_ext(struct page *pg,
213 			unsigned int *pgroup, unsigned int *pidx)
214 {
215 	union page_ext ext = { .mapping = pg->mapping };
216 	struct xen_netbk *netbk;
217 	unsigned int group, idx;
218 
219 	group = ext.e.group - 1;
220 
221 	if (group < 0 || group >= xen_netbk_group_nr)
222 		return 0;
223 
224 	netbk = &xen_netbk[group];
225 
226 	idx = ext.e.idx;
227 
228 	if ((idx < 0) || (idx >= MAX_PENDING_REQS))
229 		return 0;
230 
231 	if (netbk->mmap_pages[idx] != pg)
232 		return 0;
233 
234 	*pgroup = group;
235 	*pidx = idx;
236 
237 	return 1;
238 }
239 
240 /*
241  * This is the amount of packet we copy rather than map, so that the
242  * guest can't fiddle with the contents of the headers while we do
243  * packet processing on them (netfilter, routing, etc).
244  */
245 #define PKT_PROT_LEN    (ETH_HLEN + \
246 			 VLAN_HLEN + \
247 			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
248 			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
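/*
 * With a 14-byte Ethernet header, 4-byte VLAN tag, 20-byte IP header
 * plus up to 40 bytes of IP options, and 20-byte TCP header plus up to
 * 40 bytes of TCP options, this works out to 138 bytes.
 */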
249 
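/*
 * While a guest packet is being assembled, each frag's page_offset
 * field temporarily holds the pending_idx of the corresponding tx
 * request; xen_netbk_fill_frags() later replaces it with the real
 * page, offset and size.
 */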
250 static u16 frag_get_pending_idx(skb_frag_t *frag)
251 {
252 	return (u16)frag->page_offset;
253 }
254 
255 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
256 {
257 	frag->page_offset = pending_idx;
258 }
259 
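/*
 * MAX_PENDING_REQS is a power of two, so masking with
 * (MAX_PENDING_REQS - 1) is equivalent to taking the index modulo the
 * ring size.
 */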
260 static inline pending_ring_idx_t pending_index(unsigned i)
261 {
262 	return i & (MAX_PENDING_REQS-1);
263 }
264 
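/*
 * pending_prod - pending_cons is the number of free entries in the
 * pending ring, so the number of requests currently in flight is
 * MAX_PENDING_REQS minus that difference.
 */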
265 static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
266 {
267 	return MAX_PENDING_REQS -
268 		netbk->pending_prod + netbk->pending_cons;
269 }
270 
271 static void xen_netbk_kick_thread(struct xen_netbk *netbk)
272 {
273 	wake_up(&netbk->wq);
274 }
275 
276 static int max_required_rx_slots(struct xenvif *vif)
277 {
278 	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
279 
280 	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
281 	if (vif->can_sg || vif->gso || vif->gso_prefix)
282 		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
283 
284 	return max;
285 }
286 
287 int xen_netbk_rx_ring_full(struct xenvif *vif)
288 {
289 	RING_IDX peek   = vif->rx_req_cons_peek;
290 	RING_IDX needed = max_required_rx_slots(vif);
291 
292 	return ((vif->rx.sring->req_prod - peek) < needed) ||
293 	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
294 }
295 
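/*
 * If the ring is full, arm the frontend's event index so that we are
 * notified once enough requests have been posted, then re-check the
 * ring to close the race with requests that arrived in between.
 */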
296 int xen_netbk_must_stop_queue(struct xenvif *vif)
297 {
298 	if (!xen_netbk_rx_ring_full(vif))
299 		return 0;
300 
301 	vif->rx.sring->req_event = vif->rx_req_cons_peek +
302 		max_required_rx_slots(vif);
303 	mb(); /* request notification /then/ check the queue */
304 
305 	return xen_netbk_rx_ring_full(vif);
306 }
307 
308 /*
309  * Returns true if we should start a new receive buffer instead of
310  * adding 'size' bytes to a buffer which currently contains 'offset'
311  * bytes.
312  */
313 static bool start_new_rx_buffer(int offset, unsigned long size, int head)
314 {
315 	/* simple case: we have completely filled the current buffer. */
316 	if (offset == MAX_BUFFER_OFFSET)
317 		return true;
318 
319 	/*
320 	 * complex case: start a fresh buffer if the current frag
321 	 * would overflow the current buffer but only if:
322 	 *     (i)   this frag would fit completely in the next buffer
323 	 * and (ii)  there is already some data in the current buffer
324 	 * and (iii) this is not the head buffer.
325 	 *
326 	 * Where:
327 	 * - (i) stops us splitting a frag into two copies
328 	 *   unless the frag is too large for a single buffer.
329 	 * - (ii) stops us from leaving a buffer pointlessly empty.
330 	 * - (iii) stops us leaving the first buffer
331 	 *   empty. Strictly speaking this is already covered
332 	 *   by (ii) but is explicitly checked because
333 	 *   netfront relies on the first buffer being
334 	 *   non-empty and can crash otherwise.
335 	 *
336 	 * This means we will effectively linearise small
337 	 * frags but do not needlessly split large buffers
338 	 * into multiple copies; large frags tend to get their
339 	 * own buffers as before.
340 	 */
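	/*
	 * Example: with 4096-byte buffers, a 3000-byte frag arriving when
	 * offset is already 2000 (and this is not the head) starts a fresh
	 * buffer rather than being split 2096/904 across two copies.
	 */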
341 	BUG_ON(size > MAX_BUFFER_OFFSET);
342 	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
343 		return true;
344 
345 	return false;
346 }
347 
348 /*
349  * Figure out how many ring slots we're going to need to send @skb to
350  * the guest. This function is essentially a dry run of
351  * netbk_gop_frag_copy.
352  */
353 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
354 {
355 	unsigned int count;
356 	int i, copy_off;
357 
358 	count = DIV_ROUND_UP(
359 			offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
360 
361 	copy_off = skb_headlen(skb) % PAGE_SIZE;
362 
363 	if (skb_shinfo(skb)->gso_size)
364 		count++;
365 
366 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
367 		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
368 		unsigned long bytes;
369 		while (size > 0) {
370 			BUG_ON(copy_off > MAX_BUFFER_OFFSET);
371 
372 			if (start_new_rx_buffer(copy_off, size, 0)) {
373 				count++;
374 				copy_off = 0;
375 			}
376 
377 			bytes = size;
378 			if (copy_off + bytes > MAX_BUFFER_OFFSET)
379 				bytes = MAX_BUFFER_OFFSET - copy_off;
380 
381 			copy_off += bytes;
382 			size -= bytes;
383 		}
384 	}
385 	return count;
386 }
387 
388 struct netrx_pending_operations {
389 	unsigned copy_prod, copy_cons;
390 	unsigned meta_prod, meta_cons;
391 	struct gnttab_copy *copy;
392 	struct netbk_rx_meta *meta;
393 	int copy_off;
394 	grant_ref_t copy_gref;
395 };
396 
397 static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
398 						struct netrx_pending_operations *npo)
399 {
400 	struct netbk_rx_meta *meta;
401 	struct xen_netif_rx_request *req;
402 
403 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
404 
405 	meta = npo->meta + npo->meta_prod++;
406 	meta->gso_size = 0;
407 	meta->size = 0;
408 	meta->id = req->id;
409 
410 	npo->copy_off = 0;
411 	npo->copy_gref = req->gref;
412 
413 	return meta;
414 }
415 
416 /*
417  * Set up the grant copy operations for this fragment. (This backend
418  * copies data rather than mapping or flipping guest pages.)
419  */
420 static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
421 				struct netrx_pending_operations *npo,
422 				struct page *page, unsigned long size,
423 				unsigned long offset, int *head)
424 {
425 	struct gnttab_copy *copy_gop;
426 	struct netbk_rx_meta *meta;
427 	/*
428 	 * These variables are used iff get_page_ext returns true,
429 	 * in which case they are guaranteed to be initialized.
430 	 */
431 	unsigned int uninitialized_var(group), uninitialized_var(idx);
432 	int foreign = get_page_ext(page, &group, &idx);
433 	unsigned long bytes;
434 
435 	/* Data must not cross a page boundary. */
436 	BUG_ON(size + offset > PAGE_SIZE);
437 
438 	meta = npo->meta + npo->meta_prod - 1;
439 
440 	while (size > 0) {
441 		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
442 
443 		if (start_new_rx_buffer(npo->copy_off, size, *head)) {
444 			/*
445 			 * Netfront requires there to be some data in the head
446 			 * buffer.
447 			 */
448 			BUG_ON(*head);
449 
450 			meta = get_next_rx_buffer(vif, npo);
451 		}
452 
453 		bytes = size;
454 		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
455 			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
456 
457 		copy_gop = npo->copy + npo->copy_prod++;
458 		copy_gop->flags = GNTCOPY_dest_gref;
459 		if (foreign) {
460 			struct xen_netbk *netbk = &xen_netbk[group];
461 			struct pending_tx_info *src_pend;
462 
463 			src_pend = &netbk->pending_tx_info[idx];
464 
465 			copy_gop->source.domid = src_pend->vif->domid;
466 			copy_gop->source.u.ref = src_pend->req.gref;
467 			copy_gop->flags |= GNTCOPY_source_gref;
468 		} else {
469 			void *vaddr = page_address(page);
470 			copy_gop->source.domid = DOMID_SELF;
471 			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
472 		}
473 		copy_gop->source.offset = offset;
474 		copy_gop->dest.domid = vif->domid;
475 
476 		copy_gop->dest.offset = npo->copy_off;
477 		copy_gop->dest.u.ref = npo->copy_gref;
478 		copy_gop->len = bytes;
479 
480 		npo->copy_off += bytes;
481 		meta->size += bytes;
482 
483 		offset += bytes;
484 		size -= bytes;
485 
486 		/* Leave a gap for the GSO descriptor. */
487 		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
488 			vif->rx.req_cons++;
489 
490 		*head = 0; /* There must be something in this buffer now. */
491 
492 	}
493 }
494 
495 /*
496  * Prepare an SKB to be transmitted to the frontend.
497  *
498  * This function is responsible for allocating grant operations, meta
499  * structures, etc.
500  *
501  * It returns the number of meta structures consumed. The number of
502  * ring slots used is always equal to the number of meta slots used
503  * plus the number of GSO descriptors used. Currently, we use either
504  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
505  * frontend-side LRO).
506  */
507 static int netbk_gop_skb(struct sk_buff *skb,
508 			 struct netrx_pending_operations *npo)
509 {
510 	struct xenvif *vif = netdev_priv(skb->dev);
511 	int nr_frags = skb_shinfo(skb)->nr_frags;
512 	int i;
513 	struct xen_netif_rx_request *req;
514 	struct netbk_rx_meta *meta;
515 	unsigned char *data;
516 	int head = 1;
517 	int old_meta_prod;
518 
519 	old_meta_prod = npo->meta_prod;
520 
521 	/* Set up a GSO prefix descriptor, if necessary */
522 	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
523 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
524 		meta = npo->meta + npo->meta_prod++;
525 		meta->gso_size = skb_shinfo(skb)->gso_size;
526 		meta->size = 0;
527 		meta->id = req->id;
528 	}
529 
530 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
531 	meta = npo->meta + npo->meta_prod++;
532 
533 	if (!vif->gso_prefix)
534 		meta->gso_size = skb_shinfo(skb)->gso_size;
535 	else
536 		meta->gso_size = 0;
537 
538 	meta->size = 0;
539 	meta->id = req->id;
540 	npo->copy_off = 0;
541 	npo->copy_gref = req->gref;
542 
543 	data = skb->data;
544 	while (data < skb_tail_pointer(skb)) {
545 		unsigned int offset = offset_in_page(data);
546 		unsigned int len = PAGE_SIZE - offset;
547 
548 		if (data + len > skb_tail_pointer(skb))
549 			len = skb_tail_pointer(skb) - data;
550 
551 		netbk_gop_frag_copy(vif, skb, npo,
552 				    virt_to_page(data), len, offset, &head);
553 		data += len;
554 	}
555 
556 	for (i = 0; i < nr_frags; i++) {
557 		netbk_gop_frag_copy(vif, skb, npo,
558 				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
559 				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
560 				    skb_shinfo(skb)->frags[i].page_offset,
561 				    &head);
562 	}
563 
564 	return npo->meta_prod - old_meta_prod;
565 }
566 
567 /*
568  * This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
569  * used to set up the operations on the top of
570  * netrx_pending_operations, which have since been done.  Check that
571  * they didn't give any errors and advance over them.
572  */
573 static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
574 			   struct netrx_pending_operations *npo)
575 {
576 	struct gnttab_copy     *copy_op;
577 	int status = XEN_NETIF_RSP_OKAY;
578 	int i;
579 
580 	for (i = 0; i < nr_meta_slots; i++) {
581 		copy_op = npo->copy + npo->copy_cons++;
582 		if (copy_op->status != GNTST_okay) {
583 			netdev_dbg(vif->dev,
584 				   "Bad status %d from copy to DOM%d.\n",
585 				   copy_op->status, vif->domid);
586 			status = XEN_NETIF_RSP_ERROR;
587 		}
588 	}
589 
590 	return status;
591 }
592 
593 static void netbk_add_frag_responses(struct xenvif *vif, int status,
594 				     struct netbk_rx_meta *meta,
595 				     int nr_meta_slots)
596 {
597 	int i;
598 	unsigned long offset;
599 
600 	/* No fragments used */
601 	if (nr_meta_slots <= 1)
602 		return;
603 
604 	nr_meta_slots--;
605 
606 	for (i = 0; i < nr_meta_slots; i++) {
607 		int flags;
608 		if (i == nr_meta_slots - 1)
609 			flags = 0;
610 		else
611 			flags = XEN_NETRXF_more_data;
612 
613 		offset = 0;
614 		make_rx_response(vif, meta[i].id, status, offset,
615 				 meta[i].size, flags);
616 	}
617 }
618 
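/*
 * Per-skb scratch data kept in skb->cb while the skb sits on the
 * backend's rx queue: the number of meta slots the packet consumed.
 */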
619 struct skb_cb_overlay {
620 	int meta_slots_used;
621 };
622 
623 static void xen_netbk_rx_action(struct xen_netbk *netbk)
624 {
625 	struct xenvif *vif = NULL, *tmp;
626 	s8 status;
627 	u16 irq, flags;
628 	struct xen_netif_rx_response *resp;
629 	struct sk_buff_head rxq;
630 	struct sk_buff *skb;
631 	LIST_HEAD(notify);
632 	int ret;
633 	int nr_frags;
634 	int count;
635 	unsigned long offset;
636 	struct skb_cb_overlay *sco;
637 
638 	struct netrx_pending_operations npo = {
639 		.copy  = netbk->grant_copy_op,
640 		.meta  = netbk->meta,
641 	};
642 
643 	skb_queue_head_init(&rxq);
644 
645 	count = 0;
646 
647 	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
648 		vif = netdev_priv(skb->dev);
649 		nr_frags = skb_shinfo(skb)->nr_frags;
650 
651 		sco = (struct skb_cb_overlay *)skb->cb;
652 		sco->meta_slots_used = netbk_gop_skb(skb, &npo);
653 
654 		count += nr_frags + 1;
655 
656 		__skb_queue_tail(&rxq, skb);
657 
658 		/* Filled the batch queue? */
659 		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
660 		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
661 			break;
662 	}
663 
664 	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
665 
666 	if (!npo.copy_prod)
667 		return;
668 
669 	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
670 	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
671 					npo.copy_prod);
672 	BUG_ON(ret != 0);
673 
674 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
675 		sco = (struct skb_cb_overlay *)skb->cb;
676 
677 		vif = netdev_priv(skb->dev);
678 
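		/* In GSO-prefix mode the first response slot for this packet
		 * carries the GSO size in its offset field instead of data. */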
679 		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
680 			resp = RING_GET_RESPONSE(&vif->rx,
681 						vif->rx.rsp_prod_pvt++);
682 
683 			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
684 
685 			resp->offset = netbk->meta[npo.meta_cons].gso_size;
686 			resp->id = netbk->meta[npo.meta_cons].id;
687 			resp->status = sco->meta_slots_used;
688 
689 			npo.meta_cons++;
690 			sco->meta_slots_used--;
691 		}
692 
693 
694 		vif->dev->stats.tx_bytes += skb->len;
695 		vif->dev->stats.tx_packets++;
696 
697 		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
698 
699 		if (sco->meta_slots_used == 1)
700 			flags = 0;
701 		else
702 			flags = XEN_NETRXF_more_data;
703 
704 		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
705 			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
706 		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
707 			/* remote but checksummed. */
708 			flags |= XEN_NETRXF_data_validated;
709 
710 		offset = 0;
711 		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
712 					status, offset,
713 					netbk->meta[npo.meta_cons].size,
714 					flags);
715 
716 		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
717 			struct xen_netif_extra_info *gso =
718 				(struct xen_netif_extra_info *)
719 				RING_GET_RESPONSE(&vif->rx,
720 						  vif->rx.rsp_prod_pvt++);
721 
722 			resp->flags |= XEN_NETRXF_extra_info;
723 
724 			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
725 			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
726 			gso->u.gso.pad = 0;
727 			gso->u.gso.features = 0;
728 
729 			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
730 			gso->flags = 0;
731 		}
732 
733 		netbk_add_frag_responses(vif, status,
734 					 netbk->meta + npo.meta_cons + 1,
735 					 sco->meta_slots_used);
736 
737 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
738 		irq = vif->irq;
739 		if (ret && list_empty(&vif->notify_list))
740 			list_add_tail(&vif->notify_list, &notify);
741 
742 		xenvif_notify_tx_completion(vif);
743 
744 		xenvif_put(vif);
745 		npo.meta_cons += sco->meta_slots_used;
746 		dev_kfree_skb(skb);
747 	}
748 
749 	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
750 		notify_remote_via_irq(vif->irq);
751 		list_del_init(&vif->notify_list);
752 	}
753 
754 	/* More work to do? */
755 	if (!skb_queue_empty(&netbk->rx_queue) &&
756 			!timer_pending(&netbk->net_timer))
757 		xen_netbk_kick_thread(netbk);
758 }
759 
760 void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
761 {
762 	struct xen_netbk *netbk = vif->netbk;
763 
764 	skb_queue_tail(&netbk->rx_queue, skb);
765 
766 	xen_netbk_kick_thread(netbk);
767 }
768 
769 static void xen_netbk_alarm(unsigned long data)
770 {
771 	struct xen_netbk *netbk = (struct xen_netbk *)data;
772 	xen_netbk_kick_thread(netbk);
773 }
774 
775 static int __on_net_schedule_list(struct xenvif *vif)
776 {
777 	return !list_empty(&vif->schedule_list);
778 }
779 
780 /* Must be called with net_schedule_list_lock held */
781 static void remove_from_net_schedule_list(struct xenvif *vif)
782 {
783 	if (likely(__on_net_schedule_list(vif))) {
784 		list_del_init(&vif->schedule_list);
785 		xenvif_put(vif);
786 	}
787 }
788 
789 static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
790 {
791 	struct xenvif *vif = NULL;
792 
793 	spin_lock_irq(&netbk->net_schedule_list_lock);
794 	if (list_empty(&netbk->net_schedule_list))
795 		goto out;
796 
797 	vif = list_first_entry(&netbk->net_schedule_list,
798 			       struct xenvif, schedule_list);
799 	if (!vif)
800 		goto out;
801 
802 	xenvif_get(vif);
803 
804 	remove_from_net_schedule_list(vif);
805 out:
806 	spin_unlock_irq(&netbk->net_schedule_list_lock);
807 	return vif;
808 }
809 
810 void xen_netbk_schedule_xenvif(struct xenvif *vif)
811 {
812 	unsigned long flags;
813 	struct xen_netbk *netbk = vif->netbk;
814 
815 	if (__on_net_schedule_list(vif))
816 		goto kick;
817 
818 	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
819 	if (!__on_net_schedule_list(vif) &&
820 	    likely(xenvif_schedulable(vif))) {
821 		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
822 		xenvif_get(vif);
823 	}
824 	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
825 
826 kick:
827 	smp_mb();
828 	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
829 	    !list_empty(&netbk->net_schedule_list))
830 		xen_netbk_kick_thread(netbk);
831 }
832 
833 void xen_netbk_deschedule_xenvif(struct xenvif *vif)
834 {
835 	struct xen_netbk *netbk = vif->netbk;
836 	spin_lock_irq(&netbk->net_schedule_list_lock);
837 	remove_from_net_schedule_list(vif);
838 	spin_unlock_irq(&netbk->net_schedule_list_lock);
839 }
840 
841 void xen_netbk_check_rx_xenvif(struct xenvif *vif)
842 {
843 	int more_to_do;
844 
845 	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
846 
847 	if (more_to_do)
848 		xen_netbk_schedule_xenvif(vif);
849 }
850 
851 static void tx_add_credit(struct xenvif *vif)
852 {
853 	unsigned long max_burst, max_credit;
854 
855 	/*
856 	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
857 	 * Otherwise the interface can seize up due to insufficient credit.
858 	 */
859 	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
860 	max_burst = min(max_burst, 131072UL);
861 	max_burst = max(max_burst, vif->credit_bytes);
862 
863 	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
864 	max_credit = vif->remaining_credit + vif->credit_bytes;
865 	if (max_credit < vif->remaining_credit)
866 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
867 
868 	vif->remaining_credit = min(max_credit, max_burst);
869 }
870 
871 static void tx_credit_callback(unsigned long data)
872 {
873 	struct xenvif *vif = (struct xenvif *)data;
874 	tx_add_credit(vif);
875 	xen_netbk_check_rx_xenvif(vif);
876 }
877 
878 static void netbk_tx_err(struct xenvif *vif,
879 			 struct xen_netif_tx_request *txp, RING_IDX end)
880 {
881 	RING_IDX cons = vif->tx.req_cons;
882 
883 	do {
884 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
885 		if (cons == end)
886 			break;
887 		txp = RING_GET_REQUEST(&vif->tx, cons++);
888 	} while (1);
889 	vif->tx.req_cons = cons;
890 	xen_netbk_check_rx_xenvif(vif);
891 	xenvif_put(vif);
892 }
893 
894 static void netbk_fatal_tx_err(struct xenvif *vif)
895 {
896 	netdev_err(vif->dev, "fatal error; disabling device\n");
897 	xenvif_carrier_off(vif);
898 	xenvif_put(vif);
899 }
900 
901 static int netbk_count_requests(struct xenvif *vif,
902 				struct xen_netif_tx_request *first,
903 				RING_IDX first_idx,
904 				struct xen_netif_tx_request *txp,
905 				int work_to_do)
906 {
907 	RING_IDX cons = vif->tx.req_cons;
908 	int slots = 0;
909 	int drop_err = 0;
910 
911 	if (!(first->flags & XEN_NETTXF_more_data))
912 		return 0;
913 
914 	do {
915 		if (slots >= work_to_do) {
916 			netdev_err(vif->dev,
917 				   "Asked for %d slots but exceeds this limit\n",
918 				   work_to_do);
919 			netbk_fatal_tx_err(vif);
920 			return -ENODATA;
921 		}
922 
923 		/* This guest is really using too many slots and is
924 		 * considered malicious.
925 		 */
926 		if (unlikely(slots >= max_skb_slots)) {
927 			netdev_err(vif->dev,
928 				   "Malicious frontend using %d slots, threshold %u\n",
929 				   slots, max_skb_slots);
930 			netbk_fatal_tx_err(vif);
931 			return -E2BIG;
932 		}
933 
934 		/* The Xen network protocol had an implicit dependency on
935 		 * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
936 		 * historical MAX_SKB_FRAGS value 18 to honor the same
937 		 * behavior as before. Any packet using more than 18
938 		 * slots but less than max_skb_slots slots is dropped.
939 		 */
940 		if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
941 			if (net_ratelimit())
942 				netdev_dbg(vif->dev,
943 					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
944 					   slots, XEN_NETIF_NR_SLOTS_MIN);
945 			drop_err = -E2BIG;
946 		}
947 
948 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
949 		       sizeof(*txp));
950 
951 		/* If the guest submitted a frame >= 64 KiB then
952 		 * first->size overflowed and following slots will
953 		 * appear to be larger than the frame.
954 		 *
955 		 * This cannot be fatal error as there are buggy
956 		 * frontends that do this.
957 		 *
958 		 * Consume all slots and drop the packet.
959 		 */
960 		if (!drop_err && txp->size > first->size) {
961 			if (net_ratelimit())
962 				netdev_dbg(vif->dev,
963 					   "Invalid tx request, slot size %u > remaining size %u\n",
964 					   txp->size, first->size);
965 			drop_err = -EIO;
966 		}
967 
968 		first->size -= txp->size;
969 		slots++;
970 
971 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
972 			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
973 				 txp->offset, txp->size);
974 			netbk_fatal_tx_err(vif);
975 			return -EINVAL;
976 		}
977 	} while ((txp++)->flags & XEN_NETTXF_more_data);
978 
979 	if (drop_err) {
980 		netbk_tx_err(vif, first, first_idx + slots);
981 		return drop_err;
982 	}
983 
984 	return slots;
985 }
986 
987 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
988 					 u16 pending_idx)
989 {
990 	struct page *page;
991 	page = alloc_page(GFP_KERNEL|__GFP_COLD);
992 	if (!page)
993 		return NULL;
994 	set_page_ext(page, netbk, pending_idx);
995 	netbk->mmap_pages[pending_idx] = page;
996 	return page;
997 }
998 
999 static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1000 						  struct xenvif *vif,
1001 						  struct sk_buff *skb,
1002 						  struct xen_netif_tx_request *txp,
1003 						  struct gnttab_copy *gop)
1004 {
1005 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1006 	skb_frag_t *frags = shinfo->frags;
1007 	u16 pending_idx = *((u16 *)skb->data);
1008 	u16 head_idx = 0;
1009 	int slot, start;
1010 	struct page *page;
1011 	pending_ring_idx_t index, start_idx = 0;
1012 	uint16_t dst_offset;
1013 	unsigned int nr_slots;
1014 	struct pending_tx_info *first = NULL;
1015 
1016 	/* At this point shinfo->nr_frags is in fact the number of
1017 	 * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
1018 	 */
1019 	nr_slots = shinfo->nr_frags;
1020 
1021 	/* Skip first skb fragment if it is on same page as header fragment. */
1022 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1023 
1024 	/* Coalesce tx requests, at this point the packet passed in
1025 	 * should be <= 64K. Any packets larger than 64K have been
1026 	 * handled in netbk_count_requests().
1027 	 */
1028 	for (shinfo->nr_frags = slot = start; slot < nr_slots;
1029 	     shinfo->nr_frags++) {
1030 		struct pending_tx_info *pending_tx_info =
1031 			netbk->pending_tx_info;
1032 
1033 		page = alloc_page(GFP_KERNEL|__GFP_COLD);
1034 		if (!page)
1035 			goto err;
1036 
1037 		dst_offset = 0;
1038 		first = NULL;
1039 		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
1040 			gop->flags = GNTCOPY_source_gref;
1041 
1042 			gop->source.u.ref = txp->gref;
1043 			gop->source.domid = vif->domid;
1044 			gop->source.offset = txp->offset;
1045 
1046 			gop->dest.domid = DOMID_SELF;
1047 
1048 			gop->dest.offset = dst_offset;
1049 			gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1050 
1051 			if (dst_offset + txp->size > PAGE_SIZE) {
1052 				/* This page can only merge a portion
1053 				 * of tx request. Do not increment any
1054 				 * pointer / counter here. The txp
1055 				 * will be dealt with in future
1056 				 * rounds, eventually hitting the
1057 				 * `else` branch.
1058 				 */
1059 				gop->len = PAGE_SIZE - dst_offset;
1060 				txp->offset += gop->len;
1061 				txp->size -= gop->len;
1062 				dst_offset += gop->len; /* quit loop */
1063 			} else {
1064 				/* This tx request can be merged in the page */
1065 				gop->len = txp->size;
1066 				dst_offset += gop->len;
1067 
1068 				index = pending_index(netbk->pending_cons++);
1069 
1070 				pending_idx = netbk->pending_ring[index];
1071 
1072 				memcpy(&pending_tx_info[pending_idx].req, txp,
1073 				       sizeof(*txp));
1074 				xenvif_get(vif);
1075 
1076 				pending_tx_info[pending_idx].vif = vif;
1077 
1078 				/* Poison these fields, corresponding
1079 				 * fields for head tx req will be set
1080 				 * to correct values after the loop.
1081 				 */
1082 				netbk->mmap_pages[pending_idx] = (void *)(~0UL);
1083 				pending_tx_info[pending_idx].head =
1084 					INVALID_PENDING_RING_IDX;
1085 
1086 				if (!first) {
1087 					first = &pending_tx_info[pending_idx];
1088 					start_idx = index;
1089 					head_idx = pending_idx;
1090 				}
1091 
1092 				txp++;
1093 				slot++;
1094 			}
1095 
1096 			gop++;
1097 		}
1098 
1099 		first->req.offset = 0;
1100 		first->req.size = dst_offset;
1101 		first->head = start_idx;
1102 		set_page_ext(page, netbk, head_idx);
1103 		netbk->mmap_pages[head_idx] = page;
1104 		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
1105 	}
1106 
1107 	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
1108 
1109 	return gop;
1110 err:
1111 	/* Unwind, freeing all pages and sending error responses. */
1112 	while (shinfo->nr_frags-- > start) {
1113 		xen_netbk_idx_release(netbk,
1114 				frag_get_pending_idx(&frags[shinfo->nr_frags]),
1115 				XEN_NETIF_RSP_ERROR);
1116 	}
1117 	/* The head too, if necessary. */
1118 	if (start)
1119 		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1120 
1121 	return NULL;
1122 }
1123 
1124 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1125 				  struct sk_buff *skb,
1126 				  struct gnttab_copy **gopp)
1127 {
1128 	struct gnttab_copy *gop = *gopp;
1129 	u16 pending_idx = *((u16 *)skb->data);
1130 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1131 	struct pending_tx_info *tx_info;
1132 	int nr_frags = shinfo->nr_frags;
1133 	int i, err, start;
1134 	u16 peek; /* peek into next tx request */
1135 
1136 	/* Check status of header. */
1137 	err = gop->status;
1138 	if (unlikely(err))
1139 		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1140 
1141 	/* Skip first skb fragment if it is on same page as header fragment. */
1142 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1143 
1144 	for (i = start; i < nr_frags; i++) {
1145 		int j, newerr;
1146 		pending_ring_idx_t head;
1147 
1148 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1149 		tx_info = &netbk->pending_tx_info[pending_idx];
1150 		head = tx_info->head;
1151 
1152 		/* Check error status: if okay then remember grant handle. */
1153 		do {
1154 			newerr = (++gop)->status;
1155 			if (newerr)
1156 				break;
1157 			peek = netbk->pending_ring[pending_index(++head)];
1158 		} while (!pending_tx_is_head(netbk, peek));
1159 
1160 		if (likely(!newerr)) {
1161 			/* Had a previous error? Invalidate this fragment. */
1162 			if (unlikely(err))
1163 				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1164 			continue;
1165 		}
1166 
1167 		/* Error on this fragment: respond to client with an error. */
1168 		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1169 
1170 		/* Not the first error? Preceding frags already invalidated. */
1171 		if (err)
1172 			continue;
1173 
1174 		/* First error: invalidate header and preceding fragments. */
1175 		pending_idx = *((u16 *)skb->data);
1176 		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1177 		for (j = start; j < i; j++) {
1178 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1179 			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1180 		}
1181 
1182 		/* Remember the error: invalidate all subsequent fragments. */
1183 		err = newerr;
1184 	}
1185 
1186 	*gopp = gop + 1;
1187 	return err;
1188 }
1189 
1190 static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1191 {
1192 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1193 	int nr_frags = shinfo->nr_frags;
1194 	int i;
1195 
1196 	for (i = 0; i < nr_frags; i++) {
1197 		skb_frag_t *frag = shinfo->frags + i;
1198 		struct xen_netif_tx_request *txp;
1199 		struct page *page;
1200 		u16 pending_idx;
1201 
1202 		pending_idx = frag_get_pending_idx(frag);
1203 
1204 		txp = &netbk->pending_tx_info[pending_idx].req;
1205 		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
1206 		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1207 		skb->len += txp->size;
1208 		skb->data_len += txp->size;
1209 		skb->truesize += txp->size;
1210 
1211 		/* Take an extra reference to offset xen_netbk_idx_release */
1212 		get_page(netbk->mmap_pages[pending_idx]);
1213 		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1214 	}
1215 }
1216 
1217 static int xen_netbk_get_extras(struct xenvif *vif,
1218 				struct xen_netif_extra_info *extras,
1219 				int work_to_do)
1220 {
1221 	struct xen_netif_extra_info extra;
1222 	RING_IDX cons = vif->tx.req_cons;
1223 
1224 	do {
1225 		if (unlikely(work_to_do-- <= 0)) {
1226 			netdev_err(vif->dev, "Missing extra info\n");
1227 			netbk_fatal_tx_err(vif);
1228 			return -EBADR;
1229 		}
1230 
1231 		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1232 		       sizeof(extra));
1233 		if (unlikely(!extra.type ||
1234 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1235 			vif->tx.req_cons = ++cons;
1236 			netdev_err(vif->dev,
1237 				   "Invalid extra type: %d\n", extra.type);
1238 			netbk_fatal_tx_err(vif);
1239 			return -EINVAL;
1240 		}
1241 
1242 		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1243 		vif->tx.req_cons = ++cons;
1244 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1245 
1246 	return work_to_do;
1247 }
1248 
1249 static int netbk_set_skb_gso(struct xenvif *vif,
1250 			     struct sk_buff *skb,
1251 			     struct xen_netif_extra_info *gso)
1252 {
1253 	if (!gso->u.gso.size) {
1254 		netdev_err(vif->dev, "GSO size must not be zero.\n");
1255 		netbk_fatal_tx_err(vif);
1256 		return -EINVAL;
1257 	}
1258 
1259 	/* Currently only TCPv4 S.O. is supported. */
1260 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1261 		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1262 		netbk_fatal_tx_err(vif);
1263 		return -EINVAL;
1264 	}
1265 
1266 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1267 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1268 
1269 	/* Header must be checked, and gso_segs computed. */
1270 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1271 	skb_shinfo(skb)->gso_segs = 0;
1272 
1273 	return 0;
1274 }
1275 
1276 static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1277 {
1278 	struct iphdr *iph;
1279 	unsigned char *th;
1280 	int err = -EPROTO;
1281 	int recalculate_partial_csum = 0;
1282 
1283 	/*
1284 	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1285 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
1286 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1287 	 * recalculate the partial checksum.
1288 	 */
1289 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1290 		vif->rx_gso_checksum_fixup++;
1291 		skb->ip_summed = CHECKSUM_PARTIAL;
1292 		recalculate_partial_csum = 1;
1293 	}
1294 
1295 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1296 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1297 		return 0;
1298 
1299 	if (skb->protocol != htons(ETH_P_IP))
1300 		goto out;
1301 
1302 	iph = (void *)skb->data;
1303 	th = skb->data + 4 * iph->ihl;
1304 	if (th >= skb_tail_pointer(skb))
1305 		goto out;
1306 
1307 	skb->csum_start = th - skb->head;
1308 	switch (iph->protocol) {
1309 	case IPPROTO_TCP:
1310 		skb->csum_offset = offsetof(struct tcphdr, check);
1311 
1312 		if (recalculate_partial_csum) {
1313 			struct tcphdr *tcph = (struct tcphdr *)th;
1314 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1315 							 skb->len - iph->ihl*4,
1316 							 IPPROTO_TCP, 0);
1317 		}
1318 		break;
1319 	case IPPROTO_UDP:
1320 		skb->csum_offset = offsetof(struct udphdr, check);
1321 
1322 		if (recalculate_partial_csum) {
1323 			struct udphdr *udph = (struct udphdr *)th;
1324 			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1325 							 skb->len - iph->ihl*4,
1326 							 IPPROTO_UDP, 0);
1327 		}
1328 		break;
1329 	default:
1330 		if (net_ratelimit())
1331 			netdev_err(vif->dev,
1332 				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
1333 				   iph->protocol);
1334 		goto out;
1335 	}
1336 
1337 	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
1338 		goto out;
1339 
1340 	err = 0;
1341 
1342 out:
1343 	return err;
1344 }
1345 
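/*
 * Credit-based rate limiting: each vif is topped up with credit_bytes
 * every credit_usec.  A request larger than the remaining credit is
 * deferred by arming credit_timeout, which adds credit and re-schedules
 * the vif when the window expires.
 */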
1346 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1347 {
1348 	u64 now = get_jiffies_64();
1349 	u64 next_credit = vif->credit_window_start +
1350 		msecs_to_jiffies(vif->credit_usec / 1000);
1351 
1352 	/* Timer could already be pending in rare cases. */
1353 	if (timer_pending(&vif->credit_timeout))
1354 		return true;
1355 
1356 	/* Passed the point where we can replenish credit? */
1357 	if (time_after_eq64(now, next_credit)) {
1358 		vif->credit_window_start = now;
1359 		tx_add_credit(vif);
1360 	}
1361 
1362 	/* Still too big to send right now? Set a callback. */
1363 	if (size > vif->remaining_credit) {
1364 		vif->credit_timeout.data     =
1365 			(unsigned long)vif;
1366 		vif->credit_timeout.function =
1367 			tx_credit_callback;
1368 		mod_timer(&vif->credit_timeout,
1369 			  next_credit);
1370 		vif->credit_window_start = next_credit;
1371 
1372 		return true;
1373 	}
1374 
1375 	return false;
1376 }
1377 
1378 static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1379 {
1380 	struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
1381 	struct sk_buff *skb;
1382 	int ret;
1383 
1384 	while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
1385 		< MAX_PENDING_REQS) &&
1386 		!list_empty(&netbk->net_schedule_list)) {
1387 		struct xenvif *vif;
1388 		struct xen_netif_tx_request txreq;
1389 		struct xen_netif_tx_request txfrags[max_skb_slots];
1390 		struct page *page;
1391 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1392 		u16 pending_idx;
1393 		RING_IDX idx;
1394 		int work_to_do;
1395 		unsigned int data_len;
1396 		pending_ring_idx_t index;
1397 
1398 		/* Get a netif from the list with work to do. */
1399 		vif = poll_net_schedule_list(netbk);
1400 		/* This can sometimes happen because the test of
1401 		 * list_empty(net_schedule_list) at the top of the
1402 		 * loop is unlocked.  Just go back and have another
1403 		 * look.
1404 		 */
1405 		if (!vif)
1406 			continue;
1407 
1408 		if (vif->tx.sring->req_prod - vif->tx.req_cons >
1409 		    XEN_NETIF_TX_RING_SIZE) {
1410 			netdev_err(vif->dev,
1411 				   "Impossible number of requests. "
1412 				   "req_prod %d, req_cons %d, size %ld\n",
1413 				   vif->tx.sring->req_prod, vif->tx.req_cons,
1414 				   XEN_NETIF_TX_RING_SIZE);
1415 			netbk_fatal_tx_err(vif);
1416 			continue;
1417 		}
1418 
1419 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1420 		if (!work_to_do) {
1421 			xenvif_put(vif);
1422 			continue;
1423 		}
1424 
1425 		idx = vif->tx.req_cons;
1426 		rmb(); /* Ensure that we see the request before we copy it. */
1427 		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1428 
1429 		/* Credit-based scheduling. */
1430 		if (txreq.size > vif->remaining_credit &&
1431 		    tx_credit_exceeded(vif, txreq.size)) {
1432 			xenvif_put(vif);
1433 			continue;
1434 		}
1435 
1436 		vif->remaining_credit -= txreq.size;
1437 
1438 		work_to_do--;
1439 		vif->tx.req_cons = ++idx;
1440 
1441 		memset(extras, 0, sizeof(extras));
1442 		if (txreq.flags & XEN_NETTXF_extra_info) {
1443 			work_to_do = xen_netbk_get_extras(vif, extras,
1444 							  work_to_do);
1445 			idx = vif->tx.req_cons;
1446 			if (unlikely(work_to_do < 0))
1447 				continue;
1448 		}
1449 
1450 		ret = netbk_count_requests(vif, &txreq, idx,
1451 					   txfrags, work_to_do);
1452 		if (unlikely(ret < 0))
1453 			continue;
1454 
1455 		idx += ret;
1456 
1457 		if (unlikely(txreq.size < ETH_HLEN)) {
1458 			netdev_dbg(vif->dev,
1459 				   "Bad packet size: %d\n", txreq.size);
1460 			netbk_tx_err(vif, &txreq, idx);
1461 			continue;
1462 		}
1463 
1464 		/* No crossing a page as the payload mustn't fragment. */
1465 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1466 			netdev_err(vif->dev,
1467 				   "txreq.offset: %x, size: %u, end: %lu\n",
1468 				   txreq.offset, txreq.size,
1469 				   (txreq.offset&~PAGE_MASK) + txreq.size);
1470 			netbk_fatal_tx_err(vif);
1471 			continue;
1472 		}
1473 
1474 		index = pending_index(netbk->pending_cons);
1475 		pending_idx = netbk->pending_ring[index];
1476 
1477 		data_len = (txreq.size > PKT_PROT_LEN &&
1478 			    ret < XEN_NETIF_NR_SLOTS_MIN) ?
1479 			PKT_PROT_LEN : txreq.size;
1480 
1481 		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
1482 				GFP_ATOMIC | __GFP_NOWARN);
1483 		if (unlikely(skb == NULL)) {
1484 			netdev_dbg(vif->dev,
1485 				   "Can't allocate a skb in start_xmit.\n");
1486 			netbk_tx_err(vif, &txreq, idx);
1487 			break;
1488 		}
1489 
1490 		/* Packets passed to netif_rx() must have some headroom. */
1491 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1492 
1493 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1494 			struct xen_netif_extra_info *gso;
1495 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1496 
1497 			if (netbk_set_skb_gso(vif, skb, gso)) {
1498 				/* Failure in netbk_set_skb_gso is fatal. */
1499 				kfree_skb(skb);
1500 				continue;
1501 			}
1502 		}
1503 
1504 		/* XXX could copy straight to head */
1505 		page = xen_netbk_alloc_page(netbk, pending_idx);
1506 		if (!page) {
1507 			kfree_skb(skb);
1508 			netbk_tx_err(vif, &txreq, idx);
1509 			continue;
1510 		}
1511 
1512 		gop->source.u.ref = txreq.gref;
1513 		gop->source.domid = vif->domid;
1514 		gop->source.offset = txreq.offset;
1515 
1516 		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1517 		gop->dest.domid = DOMID_SELF;
1518 		gop->dest.offset = txreq.offset;
1519 
1520 		gop->len = txreq.size;
1521 		gop->flags = GNTCOPY_source_gref;
1522 
1523 		gop++;
1524 
1525 		memcpy(&netbk->pending_tx_info[pending_idx].req,
1526 		       &txreq, sizeof(txreq));
1527 		netbk->pending_tx_info[pending_idx].vif = vif;
1528 		netbk->pending_tx_info[pending_idx].head = index;
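		/* Stash the header slot's pending_idx in the first two bytes
		 * of the skb's linear area; xen_netbk_tx_check_gop() and
		 * xen_netbk_tx_submit() read it back before the real payload
		 * is copied over it. */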
1529 		*((u16 *)skb->data) = pending_idx;
1530 
1531 		__skb_put(skb, data_len);
1532 
1533 		skb_shinfo(skb)->nr_frags = ret;
1534 		if (data_len < txreq.size) {
1535 			skb_shinfo(skb)->nr_frags++;
1536 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1537 					     pending_idx);
1538 		} else {
1539 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1540 					     INVALID_PENDING_IDX);
1541 		}
1542 
1543 		__skb_queue_tail(&netbk->tx_queue, skb);
1544 
1545 		netbk->pending_cons++;
1546 
1547 		request_gop = xen_netbk_get_requests(netbk, vif,
1548 						     skb, txfrags, gop);
1549 		if (request_gop == NULL) {
1550 			kfree_skb(skb);
1551 			netbk_tx_err(vif, &txreq, idx);
1552 			continue;
1553 		}
1554 		gop = request_gop;
1555 
1556 		vif->tx.req_cons = idx;
1557 		xen_netbk_check_rx_xenvif(vif);
1558 
1559 		if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
1560 			break;
1561 	}
1562 
1563 	return gop - netbk->tx_copy_ops;
1564 }
1565 
1566 static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1567 {
1568 	struct gnttab_copy *gop = netbk->tx_copy_ops;
1569 	struct sk_buff *skb;
1570 
1571 	while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
1572 		struct xen_netif_tx_request *txp;
1573 		struct xenvif *vif;
1574 		u16 pending_idx;
1575 		unsigned data_len;
1576 
1577 		pending_idx = *((u16 *)skb->data);
1578 		vif = netbk->pending_tx_info[pending_idx].vif;
1579 		txp = &netbk->pending_tx_info[pending_idx].req;
1580 
1581 		/* Check the remap error code. */
1582 		if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
1583 			netdev_dbg(vif->dev, "netback grant failed.\n");
1584 			skb_shinfo(skb)->nr_frags = 0;
1585 			kfree_skb(skb);
1586 			continue;
1587 		}
1588 
1589 		data_len = skb->len;
1590 		memcpy(skb->data,
1591 		       (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
1592 		       data_len);
1593 		if (data_len < txp->size) {
1594 			/* Append the packet payload as a fragment. */
1595 			txp->offset += data_len;
1596 			txp->size -= data_len;
1597 		} else {
1598 			/* Schedule a response immediately. */
1599 			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1600 		}
1601 
1602 		if (txp->flags & XEN_NETTXF_csum_blank)
1603 			skb->ip_summed = CHECKSUM_PARTIAL;
1604 		else if (txp->flags & XEN_NETTXF_data_validated)
1605 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1606 
1607 		xen_netbk_fill_frags(netbk, skb);
1608 
1609 		/*
1610 		 * If the initial fragment was < PKT_PROT_LEN then
1611 		 * pull through some bytes from the other fragments to
1612 		 * increase the linear region to PKT_PROT_LEN bytes.
1613 		 */
1614 		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
1615 			int target = min_t(int, skb->len, PKT_PROT_LEN);
1616 			__pskb_pull_tail(skb, target - skb_headlen(skb));
1617 		}
1618 
1619 		skb->dev      = vif->dev;
1620 		skb->protocol = eth_type_trans(skb, skb->dev);
1621 
1622 		if (checksum_setup(vif, skb)) {
1623 			netdev_dbg(vif->dev,
1624 				   "Can't setup checksum in net_tx_action\n");
1625 			kfree_skb(skb);
1626 			continue;
1627 		}
1628 
1629 		vif->dev->stats.rx_bytes += skb->len;
1630 		vif->dev->stats.rx_packets++;
1631 
1632 		xenvif_receive_skb(vif, skb);
1633 	}
1634 }
1635 
1636 /* Called after netfront has transmitted */
1637 static void xen_netbk_tx_action(struct xen_netbk *netbk)
1638 {
1639 	unsigned nr_gops;
1640 	int ret;
1641 
1642 	nr_gops = xen_netbk_tx_build_gops(netbk);
1643 
1644 	if (nr_gops == 0)
1645 		return;
1646 	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
1647 					netbk->tx_copy_ops, nr_gops);
1648 	BUG_ON(ret);
1649 
1650 	xen_netbk_tx_submit(netbk);
1651 
1652 }
1653 
1654 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1655 				  u8 status)
1656 {
1657 	struct xenvif *vif;
1658 	struct pending_tx_info *pending_tx_info;
1659 	pending_ring_idx_t head;
1660 	u16 peek; /* peek into next tx request */
1661 
1662 	BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
1663 
1664 	/* Already complete? */
1665 	if (netbk->mmap_pages[pending_idx] == NULL)
1666 		return;
1667 
1668 	pending_tx_info = &netbk->pending_tx_info[pending_idx];
1669 
1670 	vif = pending_tx_info->vif;
1671 	head = pending_tx_info->head;
1672 
1673 	BUG_ON(!pending_tx_is_head(netbk, head));
1674 	BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
1675 
1676 	do {
1677 		pending_ring_idx_t index;
1678 		pending_ring_idx_t idx = pending_index(head);
1679 		u16 info_idx = netbk->pending_ring[idx];
1680 
1681 		pending_tx_info = &netbk->pending_tx_info[info_idx];
1682 		make_tx_response(vif, &pending_tx_info->req, status);
1683 
1684 		/* Setting any number other than
1685 		 * INVALID_PENDING_RING_IDX indicates this slot is
1686 		 * starting a new packet / ending a previous packet.
1687 		 */
1688 		pending_tx_info->head = 0;
1689 
1690 		index = pending_index(netbk->pending_prod++);
1691 		netbk->pending_ring[index] = netbk->pending_ring[info_idx];
1692 
1693 		xenvif_put(vif);
1694 
1695 		peek = netbk->pending_ring[pending_index(++head)];
1696 
1697 	} while (!pending_tx_is_head(netbk, peek));
1698 
1699 	netbk->mmap_pages[pending_idx]->mapping = 0;
1700 	put_page(netbk->mmap_pages[pending_idx]);
1701 	netbk->mmap_pages[pending_idx] = NULL;
1702 }
1703 
1704 
1705 static void make_tx_response(struct xenvif *vif,
1706 			     struct xen_netif_tx_request *txp,
1707 			     s8       st)
1708 {
1709 	RING_IDX i = vif->tx.rsp_prod_pvt;
1710 	struct xen_netif_tx_response *resp;
1711 	int notify;
1712 
1713 	resp = RING_GET_RESPONSE(&vif->tx, i);
1714 	resp->id     = txp->id;
1715 	resp->status = st;
1716 
1717 	if (txp->flags & XEN_NETTXF_extra_info)
1718 		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1719 
1720 	vif->tx.rsp_prod_pvt = ++i;
1721 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1722 	if (notify)
1723 		notify_remote_via_irq(vif->irq);
1724 }
1725 
1726 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1727 					     u16      id,
1728 					     s8       st,
1729 					     u16      offset,
1730 					     u16      size,
1731 					     u16      flags)
1732 {
1733 	RING_IDX i = vif->rx.rsp_prod_pvt;
1734 	struct xen_netif_rx_response *resp;
1735 
1736 	resp = RING_GET_RESPONSE(&vif->rx, i);
1737 	resp->offset     = offset;
1738 	resp->flags      = flags;
1739 	resp->id         = id;
1740 	resp->status     = (s16)size;
1741 	if (st < 0)
1742 		resp->status = (s16)st;
1743 
1744 	vif->rx.rsp_prod_pvt = ++i;
1745 
1746 	return resp;
1747 }
1748 
1749 static inline int rx_work_todo(struct xen_netbk *netbk)
1750 {
1751 	return !skb_queue_empty(&netbk->rx_queue);
1752 }
1753 
1754 static inline int tx_work_todo(struct xen_netbk *netbk)
1755 {
1756 
1757 	if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
1758 	     < MAX_PENDING_REQS) &&
1759 	     !list_empty(&netbk->net_schedule_list))
1760 		return 1;
1761 
1762 	return 0;
1763 }
1764 
1765 static int xen_netbk_kthread(void *data)
1766 {
1767 	struct xen_netbk *netbk = data;
1768 	while (!kthread_should_stop()) {
1769 		wait_event_interruptible(netbk->wq,
1770 				rx_work_todo(netbk) ||
1771 				tx_work_todo(netbk) ||
1772 				kthread_should_stop());
1773 		cond_resched();
1774 
1775 		if (kthread_should_stop())
1776 			break;
1777 
1778 		if (rx_work_todo(netbk))
1779 			xen_netbk_rx_action(netbk);
1780 
1781 		if (tx_work_todo(netbk))
1782 			xen_netbk_tx_action(netbk);
1783 	}
1784 
1785 	return 0;
1786 }
1787 
1788 void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1789 {
1790 	if (vif->tx.sring)
1791 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1792 					vif->tx.sring);
1793 	if (vif->rx.sring)
1794 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1795 					vif->rx.sring);
1796 }
1797 
1798 int xen_netbk_map_frontend_rings(struct xenvif *vif,
1799 				 grant_ref_t tx_ring_ref,
1800 				 grant_ref_t rx_ring_ref)
1801 {
1802 	void *addr;
1803 	struct xen_netif_tx_sring *txs;
1804 	struct xen_netif_rx_sring *rxs;
1805 
1806 	int err = -ENOMEM;
1807 
1808 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1809 				     tx_ring_ref, &addr);
1810 	if (err)
1811 		goto err;
1812 
1813 	txs = (struct xen_netif_tx_sring *)addr;
1814 	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1815 
1816 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1817 				     rx_ring_ref, &addr);
1818 	if (err)
1819 		goto err;
1820 
1821 	rxs = (struct xen_netif_rx_sring *)addr;
1822 	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1823 
1824 	vif->rx_req_cons_peek = 0;
1825 
1826 	return 0;
1827 
1828 err:
1829 	xen_netbk_unmap_frontend_rings(vif);
1830 	return err;
1831 }
1832 
1833 static int __init netback_init(void)
1834 {
1835 	int i;
1836 	int rc = 0;
1837 	int group;
1838 
1839 	if (!xen_domain())
1840 		return -ENODEV;
1841 
1842 	if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
1843 		printk(KERN_INFO
1844 		       "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
1845 		       max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
1846 		max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
1847 	}
1848 
1849 	xen_netbk_group_nr = num_online_cpus();
1850 	xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1851 	if (!xen_netbk)
1852 		return -ENOMEM;
1853 
1854 	for (group = 0; group < xen_netbk_group_nr; group++) {
1855 		struct xen_netbk *netbk = &xen_netbk[group];
1856 		skb_queue_head_init(&netbk->rx_queue);
1857 		skb_queue_head_init(&netbk->tx_queue);
1858 
1859 		init_timer(&netbk->net_timer);
1860 		netbk->net_timer.data = (unsigned long)netbk;
1861 		netbk->net_timer.function = xen_netbk_alarm;
1862 
1863 		netbk->pending_cons = 0;
1864 		netbk->pending_prod = MAX_PENDING_REQS;
1865 		for (i = 0; i < MAX_PENDING_REQS; i++)
1866 			netbk->pending_ring[i] = i;
1867 
1868 		init_waitqueue_head(&netbk->wq);
1869 		netbk->task = kthread_create(xen_netbk_kthread,
1870 					     (void *)netbk,
1871 					     "netback/%u", group);
1872 
1873 		if (IS_ERR(netbk->task)) {
1874 			printk(KERN_ALERT "kthread_create() fails at netback\n");
1875 			del_timer(&netbk->net_timer);
1876 			rc = PTR_ERR(netbk->task);
1877 			goto failed_init;
1878 		}
1879 
1880 		kthread_bind(netbk->task, group);
1881 
1882 		INIT_LIST_HEAD(&netbk->net_schedule_list);
1883 
1884 		spin_lock_init(&netbk->net_schedule_list_lock);
1885 
1886 		atomic_set(&netbk->netfront_count, 0);
1887 
1888 		wake_up_process(netbk->task);
1889 	}
1890 
1891 	rc = xenvif_xenbus_init();
1892 	if (rc)
1893 		goto failed_init;
1894 
1895 	return 0;
1896 
1897 failed_init:
1898 	while (--group >= 0) {
1899 		struct xen_netbk *netbk = &xen_netbk[group];
1900 		for (i = 0; i < MAX_PENDING_REQS; i++) {
1901 			if (netbk->mmap_pages[i])
1902 				__free_page(netbk->mmap_pages[i]);
1903 		}
1904 		del_timer(&netbk->net_timer);
1905 		kthread_stop(netbk->task);
1906 	}
1907 	vfree(xen_netbk);
1908 	return rc;
1909 
1910 }
1911 
1912 module_init(netback_init);
1913 
1914 MODULE_LICENSE("Dual BSD/GPL");
1915 MODULE_ALIAS("xen-backend:vif");
1916