1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2009, Microsoft Corporation.
4  *
5  * Authors:
6  *   Haiyang Zhang <haiyangz@microsoft.com>
7  *   Hank Janssen  <hjanssen@microsoft.com>
8  */
9 #include <linux/ethtool.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/wait.h>
13 #include <linux/highmem.h>
14 #include <linux/slab.h>
15 #include <linux/io.h>
16 #include <linux/if_ether.h>
17 #include <linux/netdevice.h>
18 #include <linux/if_vlan.h>
19 #include <linux/nls.h>
20 #include <linux/vmalloc.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/ucs2_string.h>
23 
24 #include "hyperv_net.h"
25 #include "netvsc_trace.h"
26 
27 static void rndis_set_multicast(struct work_struct *w);
28 
29 #define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
30 struct rndis_request {
31 	struct list_head list_ent;
32 	struct completion  wait_event;
33 
34 	struct rndis_message response_msg;
35 	/*
36 	 * The buffer for extended info after the RNDIS response message. It's
37 	 * referenced based on the data offset in the RNDIS message. Its size
38 	 * is enough for current needs, and should be sufficient for the near
39 	 * future.
40 	 */
41 	u8 response_ext[RNDIS_EXT_LEN];
42 
43 	/* Simplify allocation by having a netvsc packet inline */
44 	struct hv_netvsc_packet	pkt;
45 
46 	struct rndis_message request_msg;
47 	/*
48 	 * The buffer for the extended info after the RNDIS request message.
49 	 * It is referenced and sized in a similar way as response_ext.
50 	 */
51 	u8 request_ext[RNDIS_EXT_LEN];
52 };
53 
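/*
 * Default 40-byte Toeplitz hash key used for receive side scaling (RSS)
 * when no key has been configured from above.
 */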
54 static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
55 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
56 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
57 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
58 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
59 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
60 };
61 
62 static struct rndis_device *get_rndis_device(void)
63 {
64 	struct rndis_device *device;
65 
66 	device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
67 	if (!device)
68 		return NULL;
69 
70 	spin_lock_init(&device->request_lock);
71 
72 	INIT_LIST_HEAD(&device->req_list);
73 	INIT_WORK(&device->mcast_work, rndis_set_multicast);
74 
75 	device->state = RNDIS_DEV_UNINITIALIZED;
76 
77 	return device;
78 }
79 
80 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
81 					     u32 msg_type,
82 					     u32 msg_len)
83 {
84 	struct rndis_request *request;
85 	struct rndis_message *rndis_msg;
86 	struct rndis_set_request *set;
87 	unsigned long flags;
88 
89 	request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
90 	if (!request)
91 		return NULL;
92 
93 	init_completion(&request->wait_event);
94 
95 	rndis_msg = &request->request_msg;
96 	rndis_msg->ndis_msg_type = msg_type;
97 	rndis_msg->msg_len = msg_len;
98 
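	/* RNDIS control messages always go over the primary channel (queue 0) */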
99 	request->pkt.q_idx = 0;
100 
101 	/*
102 	 * Set the request id. This field is always after the rndis header for
103 	 * request/response packet types, so we just use the SetRequest as a
104 	 * template.
105 	 */
106 	set = &rndis_msg->msg.set_req;
107 	set->req_id = atomic_inc_return(&dev->new_req_id);
108 
109 	/* Add to the request list */
110 	spin_lock_irqsave(&dev->request_lock, flags);
111 	list_add_tail(&request->list_ent, &dev->req_list);
112 	spin_unlock_irqrestore(&dev->request_lock, flags);
113 
114 	return request;
115 }
116 
117 static void put_rndis_request(struct rndis_device *dev,
118 			    struct rndis_request *req)
119 {
120 	unsigned long flags;
121 
122 	spin_lock_irqsave(&dev->request_lock, flags);
123 	list_del(&req->list_ent);
124 	spin_unlock_irqrestore(&dev->request_lock, flags);
125 
126 	kfree(req);
127 }
128 
129 static void dump_rndis_message(struct net_device *netdev,
130 			       const struct rndis_message *rndis_msg,
131 			       const void *data)
132 {
133 	switch (rndis_msg->ndis_msg_type) {
134 	case RNDIS_MSG_PACKET:
135 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
136 			const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;
137 			netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
138 				   "data offset %u data len %u, # oob %u, "
139 				   "oob offset %u, oob len %u, pkt offset %u, "
140 				   "pkt len %u\n",
141 				   rndis_msg->msg_len,
142 				   pkt->data_offset,
143 				   pkt->data_len,
144 				   pkt->num_oob_data_elements,
145 				   pkt->oob_data_offset,
146 				   pkt->oob_data_len,
147 				   pkt->per_pkt_info_offset,
148 				   pkt->per_pkt_info_len);
149 		}
150 		break;
151 
152 	case RNDIS_MSG_INIT_C:
153 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
154 				sizeof(struct rndis_initialize_complete)) {
155 			const struct rndis_initialize_complete *init_complete =
156 				data + RNDIS_HEADER_SIZE;
157 			netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
158 				"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
159 				"device flags %d, max xfer size 0x%x, max pkts %u, "
160 				"pkt aligned %u)\n",
161 				rndis_msg->msg_len,
162 				init_complete->req_id,
163 				init_complete->status,
164 				init_complete->major_ver,
165 				init_complete->minor_ver,
166 				init_complete->dev_flags,
167 				init_complete->max_xfer_size,
168 				init_complete->max_pkt_per_msg,
169 				init_complete->pkt_alignment_factor);
170 		}
171 		break;
172 
173 	case RNDIS_MSG_QUERY_C:
174 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
175 				sizeof(struct rndis_query_complete)) {
176 			const struct rndis_query_complete *query_complete =
177 				data + RNDIS_HEADER_SIZE;
178 			netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
179 				"(len %u, id 0x%x, status 0x%x, buf len %u, "
180 				"buf offset %u)\n",
181 				rndis_msg->msg_len,
182 				query_complete->req_id,
183 				query_complete->status,
184 				query_complete->info_buflen,
185 				query_complete->info_buf_offset);
186 		}
187 		break;
188 
189 	case RNDIS_MSG_SET_C:
190 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_set_complete)) {
191 			const struct rndis_set_complete *set_complete =
192 				data + RNDIS_HEADER_SIZE;
193 			netdev_dbg(netdev,
194 				"RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
195 				rndis_msg->msg_len,
196 				set_complete->req_id,
197 				set_complete->status);
198 		}
199 		break;
200 
201 	case RNDIS_MSG_INDICATE:
202 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
203 				sizeof(struct rndis_indicate_status)) {
204 			const struct rndis_indicate_status *indicate_status =
205 				data + RNDIS_HEADER_SIZE;
206 			netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
207 				"(len %u, status 0x%x, buf len %u, buf offset %u)\n",
208 				rndis_msg->msg_len,
209 				indicate_status->status,
210 				indicate_status->status_buflen,
211 				indicate_status->status_buf_offset);
212 		}
213 		break;
214 
215 	default:
216 		netdev_dbg(netdev, "0x%x (len %u)\n",
217 			rndis_msg->ndis_msg_type,
218 			rndis_msg->msg_len);
219 		break;
220 	}
221 }
222 
223 static int rndis_filter_send_request(struct rndis_device *dev,
224 				  struct rndis_request *req)
225 {
226 	struct hv_netvsc_packet *packet;
227 	struct hv_page_buffer page_buf[2];
228 	struct hv_page_buffer *pb = page_buf;
229 	int ret;
230 
231 	/* Setup the packet to send it */
232 	packet = &req->pkt;
233 
234 	packet->total_data_buflen = req->request_msg.msg_len;
235 	packet->page_buf_cnt = 1;
236 
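	/*
	 * The request is passed to the host as guest physical page ranges.
	 * Requests built here are well under a page, so at most two page
	 * buffers are needed: a second one only if the buffer happens to
	 * straddle a hypervisor page boundary.
	 */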
237 	pb[0].pfn = virt_to_phys(&req->request_msg) >>
238 					HV_HYP_PAGE_SHIFT;
239 	pb[0].len = req->request_msg.msg_len;
240 	pb[0].offset = offset_in_hvpage(&req->request_msg);
241 
242 	/* Add one page_buf when request_msg crossing page boundary */
243 	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
244 		packet->page_buf_cnt++;
245 		pb[0].len = HV_HYP_PAGE_SIZE -
246 			pb[0].offset;
247 		pb[1].pfn = virt_to_phys((void *)&req->request_msg
248 			+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
249 		pb[1].offset = 0;
250 		pb[1].len = req->request_msg.msg_len -
251 			pb[0].len;
252 	}
253 
254 	trace_rndis_send(dev->ndev, 0, &req->request_msg);
255 
256 	rcu_read_lock_bh();
257 	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
258 	rcu_read_unlock_bh();
259 
260 	return ret;
261 }
262 
263 static void rndis_set_link_state(struct rndis_device *rdev,
264 				 struct rndis_request *request)
265 {
266 	u32 link_status;
267 	struct rndis_query_complete *query_complete;
268 	u32 msg_len = request->response_msg.msg_len;
269 
270 	/* Ensure the packet is big enough to access its fields */
271 	if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
272 		return;
273 
274 	query_complete = &request->response_msg.msg.query_complete;
275 
276 	if (query_complete->status == RNDIS_STATUS_SUCCESS &&
277 	    query_complete->info_buflen >= sizeof(u32) &&
278 	    query_complete->info_buf_offset >= sizeof(*query_complete) &&
279 	    msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
280 	    msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
281 			>= query_complete->info_buflen) {
282 		memcpy(&link_status, (void *)((unsigned long)query_complete +
283 		       query_complete->info_buf_offset), sizeof(u32));
284 		rdev->link_state = link_status != 0;
285 	}
286 }
287 
288 static void rndis_filter_receive_response(struct net_device *ndev,
289 					  struct netvsc_device *nvdev,
290 					  struct rndis_message *resp,
291 					  void *data)
292 {
293 	u32 *req_id = &resp->msg.init_complete.req_id;
294 	struct rndis_device *dev = nvdev->extension;
295 	struct rndis_request *request = NULL;
296 	bool found = false;
297 	unsigned long flags;
298 
299 	/* This should never happen; it means a control message
300 	 * response was received after the device was removed.
301 	 */
302 	if (dev->state == RNDIS_DEV_UNINITIALIZED) {
303 		netdev_err(ndev,
304 			   "got rndis message uninitialized\n");
305 		return;
306 	}
307 
308 	/* Ensure the packet is big enough to read req_id. Req_id is the 1st
309 	 * field in any request/response message, so the payload should have at
310 	 * least sizeof(u32) bytes
311 	 */
312 	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
313 		netdev_err(ndev, "rndis msg_len too small: %u\n",
314 			   resp->msg_len);
315 		return;
316 	}
317 
318 	/* Copy the request ID into nvchan->recv_buf */
319 	*req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);
320 
321 	spin_lock_irqsave(&dev->request_lock, flags);
322 	list_for_each_entry(request, &dev->req_list, list_ent) {
323 		/*
324 		 * All request/response message contains RequestId as the 1st
325 		 * field
326 		 */
327 		if (request->request_msg.msg.init_req.req_id == *req_id) {
328 			found = true;
329 			break;
330 		}
331 	}
332 	spin_unlock_irqrestore(&dev->request_lock, flags);
333 
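	/*
	 * If a matching request was found, copy the response header and
	 * payload into the request's response buffer and wake the waiter;
	 * otherwise just log the stray response.
	 */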
334 	if (found) {
335 		if (resp->msg_len <=
336 		    sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
337 			memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
338 			memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
339 			       data + RNDIS_HEADER_SIZE + sizeof(*req_id),
340 			       resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id));
341 			if (request->request_msg.ndis_msg_type ==
342 			    RNDIS_MSG_QUERY && request->request_msg.msg.
343 			    query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
344 				rndis_set_link_state(dev, request);
345 		} else {
346 			netdev_err(ndev,
347 				"rndis response buffer overflow "
348 				"detected (size %u max %zu)\n",
349 				resp->msg_len,
350 				sizeof(struct rndis_message));
351 
352 			if (resp->ndis_msg_type ==
353 			    RNDIS_MSG_RESET_C) {
354 				/* does not have a request id field */
355 				request->response_msg.msg.reset_complete.
356 					status = RNDIS_STATUS_BUFFER_OVERFLOW;
357 			} else {
358 				request->response_msg.msg.
359 				init_complete.status =
360 					RNDIS_STATUS_BUFFER_OVERFLOW;
361 			}
362 		}
363 
364 		netvsc_dma_unmap(((struct net_device_context *)
365 			netdev_priv(ndev))->device_ctx, &request->pkt);
366 		complete(&request->wait_event);
367 	} else {
368 		netdev_err(ndev,
369 			"no rndis request found for this response "
370 			"(id 0x%x res type 0x%x)\n",
371 			*req_id,
372 			resp->ndis_msg_type);
373 	}
374 }
375 
376 /*
377  * Get the Per-Packet-Info with the specified type
378  * return NULL if not found.
379  */
380 static inline void *rndis_get_ppi(struct net_device *ndev,
381 				  struct rndis_packet *rpkt,
382 				  u32 rpkt_len, u32 type, u8 internal,
383 				  u32 ppi_size, void *data)
384 {
385 	struct rndis_per_packet_info *ppi;
386 	int len;
387 
388 	if (rpkt->per_pkt_info_offset == 0)
389 		return NULL;
390 
391 	/* Validate info_offset and info_len */
392 	if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
393 	    rpkt->per_pkt_info_offset > rpkt_len) {
394 		netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
395 			   rpkt->per_pkt_info_offset);
396 		return NULL;
397 	}
398 
399 	if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
400 	    rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
401 		netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
402 			   rpkt->per_pkt_info_len);
403 		return NULL;
404 	}
405 
406 	ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
407 		rpkt->per_pkt_info_offset);
408 	/* Copy the PPIs into nvchan->recv_buf */
409 	memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
410 	len = rpkt->per_pkt_info_len;
411 
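	/*
	 * Walk the PPI list; each element records its own total size and
	 * the offset of its payload relative to the start of the element.
	 */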
412 	while (len > 0) {
413 		/* Validate ppi_offset and ppi_size */
414 		if (ppi->size > len) {
415 			netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
416 			continue;
417 		}
418 
419 		if (ppi->ppi_offset >= ppi->size) {
420 			netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
421 			continue;
422 		}
423 
424 		if (ppi->type == type && ppi->internal == internal) {
425 			/* ppi->size should be big enough to hold the returned object. */
426 			if (ppi->size - ppi->ppi_offset < ppi_size) {
427 				netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
428 					   ppi->size, ppi->ppi_offset);
429 				continue;
430 			}
431 			return (void *)((ulong)ppi + ppi->ppi_offset);
432 		}
433 		len -= ppi->size;
434 		ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
435 	}
436 
437 	return NULL;
438 }
439 
440 static inline
441 void rsc_add_data(struct netvsc_channel *nvchan,
442 		  const struct ndis_pkt_8021q_info *vlan,
443 		  const struct ndis_tcp_ip_checksum_info *csum_info,
444 		  const u32 *hash_info,
445 		  void *data, u32 len)
446 {
447 	u32 cnt = nvchan->rsc.cnt;
448 
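	/* Only the first fragment of an RSC super-packet carries the
	 * per-packet metadata; later fragments only grow the total length.
	 */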
449 	if (cnt) {
450 		nvchan->rsc.pktlen += len;
451 	} else {
452 		/* The data/values pointed by vlan, csum_info and hash_info are shared
453 		 * across the different 'fragments' of the RSC packet; store them into
454 		 * the packet itself.
455 		 */
456 		if (vlan != NULL) {
457 			memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
458 			nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
459 		} else {
460 			nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
461 		}
462 		if (csum_info != NULL) {
463 			memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
464 			nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
465 		} else {
466 			nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
467 		}
468 		nvchan->rsc.pktlen = len;
469 		if (hash_info != NULL) {
470 			nvchan->rsc.hash_info = *hash_info;
471 			nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
472 		} else {
473 			nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
474 		}
475 	}
476 
477 	nvchan->rsc.data[cnt] = data;
478 	nvchan->rsc.len[cnt] = len;
479 	nvchan->rsc.cnt++;
480 }
481 
482 static int rndis_filter_receive_data(struct net_device *ndev,
483 				     struct netvsc_device *nvdev,
484 				     struct netvsc_channel *nvchan,
485 				     struct rndis_message *msg,
486 				     void *data, u32 data_buflen)
487 {
488 	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
489 	const struct ndis_tcp_ip_checksum_info *csum_info;
490 	const struct ndis_pkt_8021q_info *vlan;
491 	const struct rndis_pktinfo_id *pktinfo_id;
492 	const u32 *hash_info;
493 	u32 data_offset, rpkt_len;
494 	bool rsc_more = false;
495 	int ret;
496 
497 	/* Ensure data_buflen is big enough to read header fields */
498 	if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
499 		netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
500 			   data_buflen);
501 		return NVSP_STAT_FAIL;
502 	}
503 
504 	/* Copy the RNDIS packet into nvchan->recv_buf */
505 	memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));
506 
507 	/* Validate rndis_pkt offset */
508 	if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
509 		netdev_err(ndev, "invalid rndis packet offset: %u\n",
510 			   rndis_pkt->data_offset);
511 		return NVSP_STAT_FAIL;
512 	}
513 
514 	/* Remove the rndis header and pass it back up the stack */
515 	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
516 
517 	rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
518 	data_buflen -= data_offset;
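	/* rpkt_len bounds the rndis_packet header and its per-packet info;
	 * data_buflen now covers only the payload (plus any trailer padding).
	 */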
519 
520 	/*
521 	 * Make sure we got a valid RNDIS message, now total_data_buflen
522 	 * should be the data packet size plus the trailer padding size
523 	 */
524 	if (unlikely(data_buflen < rndis_pkt->data_len)) {
525 		netdev_err(ndev, "rndis message buffer "
526 			   "overflow detected (got %u, min %u)"
527 			   "...dropping this message!\n",
528 			   data_buflen, rndis_pkt->data_len);
529 		return NVSP_STAT_FAIL;
530 	}
531 
532 	vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
533 			     data);
534 
535 	csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
536 				  sizeof(*csum_info), data);
537 
538 	hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
539 				  sizeof(*hash_info), data);
540 
541 	pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
542 				   sizeof(*pktinfo_id), data);
543 
544 	/* Identify RSC frags, drop erroneous packets */
545 	if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
546 		if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
547 			nvchan->rsc.cnt = 0;
548 		else if (nvchan->rsc.cnt == 0)
549 			goto drop;
550 
551 		rsc_more = true;
552 
553 		if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
554 			rsc_more = false;
555 
556 		if (rsc_more && nvchan->rsc.is_last)
557 			goto drop;
558 	} else {
559 		nvchan->rsc.cnt = 0;
560 	}
561 
562 	if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
563 		goto drop;
564 
565 	/* Put data into per channel structure.
566 	 * Also, remove the rndis trailer padding from the rndis packet message;
567 	 * rndis_pkt->data_len tells us the real data length, so we only copy
568 	 * the data packet to the stack, without the rndis trailer padding.
569 	 */
570 	rsc_add_data(nvchan, vlan, csum_info, hash_info,
571 		     data + data_offset, rndis_pkt->data_len);
572 
573 	if (rsc_more)
574 		return NVSP_STAT_SUCCESS;
575 
576 	ret = netvsc_recv_callback(ndev, nvdev, nvchan);
577 	nvchan->rsc.cnt = 0;
578 
579 	return ret;
580 
581 drop:
582 	return NVSP_STAT_FAIL;
583 }
584 
585 int rndis_filter_receive(struct net_device *ndev,
586 			 struct netvsc_device *net_dev,
587 			 struct netvsc_channel *nvchan,
588 			 void *data, u32 buflen)
589 {
590 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
591 	struct rndis_message *rndis_msg = nvchan->recv_buf;
592 
593 	if (buflen < RNDIS_HEADER_SIZE) {
594 		netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
595 		return NVSP_STAT_FAIL;
596 	}
597 
598 	/* Copy the RNDIS msg header into nvchan->recv_buf */
599 	memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);
600 
601 	/* Validate incoming rndis_message packet */
602 	if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
603 	    buflen < rndis_msg->msg_len) {
604 		netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
605 			   buflen, rndis_msg->msg_len);
606 		return NVSP_STAT_FAIL;
607 	}
608 
609 	if (netif_msg_rx_status(net_device_ctx))
610 		dump_rndis_message(ndev, rndis_msg, data);
611 
612 	switch (rndis_msg->ndis_msg_type) {
613 	case RNDIS_MSG_PACKET:
614 		return rndis_filter_receive_data(ndev, net_dev, nvchan,
615 						 rndis_msg, data, buflen);
616 	case RNDIS_MSG_INIT_C:
617 	case RNDIS_MSG_QUERY_C:
618 	case RNDIS_MSG_SET_C:
619 		/* completion msgs */
620 		rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
621 		break;
622 
623 	case RNDIS_MSG_INDICATE:
624 		/* notification msgs */
625 		netvsc_linkstatus_callback(ndev, rndis_msg, data, buflen);
626 		break;
627 	default:
628 		netdev_err(ndev,
629 			"unhandled rndis message (type %u len %u)\n",
630 			   rndis_msg->ndis_msg_type,
631 			   rndis_msg->msg_len);
632 		return NVSP_STAT_FAIL;
633 	}
634 
635 	return NVSP_STAT_SUCCESS;
636 }
637 
638 static int rndis_filter_query_device(struct rndis_device *dev,
639 				     struct netvsc_device *nvdev,
640 				     u32 oid, void *result, u32 *result_size)
641 {
642 	struct rndis_request *request;
643 	u32 inresult_size = *result_size;
644 	struct rndis_query_request *query;
645 	struct rndis_query_complete *query_complete;
646 	u32 msg_len;
647 	int ret = 0;
648 
649 	if (!result)
650 		return -EINVAL;
651 
652 	*result_size = 0;
653 	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
654 			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
655 	if (!request) {
656 		ret = -ENOMEM;
657 		goto cleanup;
658 	}
659 
660 	/* Setup the rndis query */
661 	query = &request->request_msg.msg.query_req;
662 	query->oid = oid;
663 	query->info_buf_offset = sizeof(struct rndis_query_request);
664 	query->info_buflen = 0;
665 	query->dev_vc_handle = 0;
666 
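	/*
	 * Some OIDs take an input buffer: fill in an NDIS object header
	 * describing the revision and size this guest can accept for the
	 * offload and RSS capability queries below.
	 */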
667 	if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
668 		struct ndis_offload *hwcaps;
669 		u32 nvsp_version = nvdev->nvsp_version;
670 		u8 ndis_rev;
671 		size_t size;
672 
673 		if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
674 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
675 			size = NDIS_OFFLOAD_SIZE;
676 		} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
677 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
678 			size = NDIS_OFFLOAD_SIZE_6_1;
679 		} else {
680 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
681 			size = NDIS_OFFLOAD_SIZE_6_0;
682 		}
683 
684 		request->request_msg.msg_len += size;
685 		query->info_buflen = size;
686 		hwcaps = (struct ndis_offload *)
687 			((unsigned long)query + query->info_buf_offset);
688 
689 		hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
690 		hwcaps->header.revision = ndis_rev;
691 		hwcaps->header.size = size;
692 
693 	} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
694 		struct ndis_recv_scale_cap *cap;
695 
696 		request->request_msg.msg_len +=
697 			sizeof(struct ndis_recv_scale_cap);
698 		query->info_buflen = sizeof(struct ndis_recv_scale_cap);
699 		cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
700 						     query->info_buf_offset);
701 		cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
702 		cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
703 		cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
704 	}
705 
706 	ret = rndis_filter_send_request(dev, request);
707 	if (ret != 0)
708 		goto cleanup;
709 
710 	wait_for_completion(&request->wait_event);
711 
712 	/* Copy the response back */
713 	query_complete = &request->response_msg.msg.query_complete;
714 	msg_len = request->response_msg.msg_len;
715 
716 	/* Ensure the packet is big enough to access its fields */
717 	if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
718 		ret = -1;
719 		goto cleanup;
720 	}
721 
722 	if (query_complete->info_buflen > inresult_size ||
723 	    query_complete->info_buf_offset < sizeof(*query_complete) ||
724 	    msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
725 	    msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
726 			< query_complete->info_buflen) {
727 		ret = -1;
728 		goto cleanup;
729 	}
730 
731 	memcpy(result,
732 	       (void *)((unsigned long)query_complete +
733 			 query_complete->info_buf_offset),
734 	       query_complete->info_buflen);
735 
736 	*result_size = query_complete->info_buflen;
737 
738 cleanup:
739 	if (request)
740 		put_rndis_request(dev, request);
741 
742 	return ret;
743 }
744 
745 /* Get the hardware offload capabilities */
746 static int
747 rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
748 		   struct ndis_offload *caps)
749 {
750 	u32 caps_len = sizeof(*caps);
751 	int ret;
752 
753 	memset(caps, 0, sizeof(*caps));
754 
755 	ret = rndis_filter_query_device(dev, net_device,
756 					OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
757 					caps, &caps_len);
758 	if (ret)
759 		return ret;
760 
761 	if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
762 		netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
763 			    caps->header.type);
764 		return -EINVAL;
765 	}
766 
767 	if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
768 		netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
769 			    caps->header.revision);
770 		return -EINVAL;
771 	}
772 
773 	if (caps->header.size > caps_len ||
774 	    caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
775 		netdev_warn(dev->ndev,
776 			    "invalid NDIS objsize %u, data size %u\n",
777 			    caps->header.size, caps_len);
778 		return -EINVAL;
779 	}
780 
781 	return 0;
782 }
783 
784 static int rndis_filter_query_device_mac(struct rndis_device *dev,
785 					 struct netvsc_device *net_device)
786 {
787 	u32 size = ETH_ALEN;
788 
789 	return rndis_filter_query_device(dev, net_device,
790 				      RNDIS_OID_802_3_PERMANENT_ADDRESS,
791 				      dev->hw_mac_adr, &size);
792 }
793 
794 #define NWADR_STR "NetworkAddress"
795 #define NWADR_STRLEN 14
796 
797 int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
798 				const char *mac)
799 {
800 	struct rndis_device *rdev = nvdev->extension;
801 	struct rndis_request *request;
802 	struct rndis_set_request *set;
803 	struct rndis_config_parameter_info *cpi;
804 	wchar_t *cfg_nwadr, *cfg_mac;
805 	struct rndis_set_complete *set_complete;
806 	char macstr[2*ETH_ALEN+1];
807 	u32 extlen = sizeof(struct rndis_config_parameter_info) +
808 		2*NWADR_STRLEN + 4*ETH_ALEN;
809 	int ret;
810 
811 	request = get_rndis_request(rdev, RNDIS_MSG_SET,
812 		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
813 	if (!request)
814 		return -ENOMEM;
815 
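	/*
	 * The MAC is changed through RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER:
	 * the info buffer holds a config-parameter header followed by the
	 * UTF-16 parameter name ("NetworkAddress") and the UTF-16 value.
	 */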
816 	set = &request->request_msg.msg.set_req;
817 	set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
818 	set->info_buflen = extlen;
819 	set->info_buf_offset = sizeof(struct rndis_set_request);
820 	set->dev_vc_handle = 0;
821 
822 	cpi = (struct rndis_config_parameter_info *)((ulong)set +
823 		set->info_buf_offset);
824 	cpi->parameter_name_offset =
825 		sizeof(struct rndis_config_parameter_info);
826 	/* Multiply by 2 because host needs 2 bytes (utf16) for each char */
827 	cpi->parameter_name_length = 2*NWADR_STRLEN;
828 	cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
829 	cpi->parameter_value_offset =
830 		cpi->parameter_name_offset + cpi->parameter_name_length;
831 	/* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
832 	cpi->parameter_value_length = 4*ETH_ALEN;
833 
834 	cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
835 	cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
836 	ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
837 			      cfg_nwadr, NWADR_STRLEN);
838 	if (ret < 0)
839 		goto cleanup;
840 	snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
841 	ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
842 			      cfg_mac, 2*ETH_ALEN);
843 	if (ret < 0)
844 		goto cleanup;
845 
846 	ret = rndis_filter_send_request(rdev, request);
847 	if (ret != 0)
848 		goto cleanup;
849 
850 	wait_for_completion(&request->wait_event);
851 
852 	set_complete = &request->response_msg.msg.set_complete;
853 	if (set_complete->status != RNDIS_STATUS_SUCCESS)
854 		ret = -EIO;
855 
856 cleanup:
857 	put_rndis_request(rdev, request);
858 	return ret;
859 }
860 
861 int
862 rndis_filter_set_offload_params(struct net_device *ndev,
863 				struct netvsc_device *nvdev,
864 				struct ndis_offload_params *req_offloads)
865 {
866 	struct rndis_device *rdev = nvdev->extension;
867 	struct rndis_request *request;
868 	struct rndis_set_request *set;
869 	struct ndis_offload_params *offload_params;
870 	struct rndis_set_complete *set_complete;
871 	u32 extlen = sizeof(struct ndis_offload_params);
872 	int ret;
873 	u32 vsp_version = nvdev->nvsp_version;
874 
875 	if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
876 		extlen = VERSION_4_OFFLOAD_SIZE;
877 		/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
878 		 * UDP checksum offload.
879 		 */
880 		req_offloads->udp_ip_v4_csum = 0;
881 		req_offloads->udp_ip_v6_csum = 0;
882 	}
883 
884 	request = get_rndis_request(rdev, RNDIS_MSG_SET,
885 		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
886 	if (!request)
887 		return -ENOMEM;
888 
889 	set = &request->request_msg.msg.set_req;
890 	set->oid = OID_TCP_OFFLOAD_PARAMETERS;
891 	set->info_buflen = extlen;
892 	set->info_buf_offset = sizeof(struct rndis_set_request);
893 	set->dev_vc_handle = 0;
894 
895 	offload_params = (struct ndis_offload_params *)((ulong)set +
896 				set->info_buf_offset);
897 	*offload_params = *req_offloads;
898 	offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
899 	offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
900 	offload_params->header.size = extlen;
901 
902 	ret = rndis_filter_send_request(rdev, request);
903 	if (ret != 0)
904 		goto cleanup;
905 
906 	wait_for_completion(&request->wait_event);
907 	set_complete = &request->response_msg.msg.set_complete;
908 	if (set_complete->status != RNDIS_STATUS_SUCCESS) {
909 		netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
910 			   set_complete->status);
911 		ret = -EINVAL;
912 	}
913 
914 cleanup:
915 	put_rndis_request(rdev, request);
916 	return ret;
917 }
918 
919 static int rndis_set_rss_param_msg(struct rndis_device *rdev,
920 				   const u8 *rss_key, u16 flag)
921 {
922 	struct net_device *ndev = rdev->ndev;
923 	struct net_device_context *ndc = netdev_priv(ndev);
924 	struct rndis_request *request;
925 	struct rndis_set_request *set;
926 	struct rndis_set_complete *set_complete;
927 	u32 extlen = sizeof(struct ndis_recv_scale_param) +
928 		     4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
929 	struct ndis_recv_scale_param *rssp;
930 	u32 *itab;
931 	u8 *keyp;
932 	int i, ret;
933 
934 	request = get_rndis_request(
935 			rdev, RNDIS_MSG_SET,
936 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
937 	if (!request)
938 		return -ENOMEM;
939 
940 	set = &request->request_msg.msg.set_req;
941 	set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
942 	set->info_buflen = extlen;
943 	set->info_buf_offset = sizeof(struct rndis_set_request);
944 	set->dev_vc_handle = 0;
945 
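	/* Info buffer layout: scale-parameters header, then the indirection
	 * table (ITAB_NUM u32 entries), then the hash key.
	 */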
946 	rssp = (struct ndis_recv_scale_param *)(set + 1);
947 	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
948 	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
949 	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
950 	rssp->flag = flag;
951 	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
952 			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
953 			 NDIS_HASH_TCP_IPV6;
954 	rssp->indirect_tabsize = 4*ITAB_NUM;
955 	rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
956 	rssp->hashkey_size = NETVSC_HASH_KEYLEN;
957 	rssp->hashkey_offset = rssp->indirect_taboffset +
958 			       rssp->indirect_tabsize;
959 
960 	/* Set indirection table entries */
961 	itab = (u32 *)(rssp + 1);
962 	for (i = 0; i < ITAB_NUM; i++)
963 		itab[i] = ndc->rx_table[i];
964 
965 	/* Set hash key values */
966 	keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
967 	memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
968 
969 	ret = rndis_filter_send_request(rdev, request);
970 	if (ret != 0)
971 		goto cleanup;
972 
973 	wait_for_completion(&request->wait_event);
974 	set_complete = &request->response_msg.msg.set_complete;
975 	if (set_complete->status == RNDIS_STATUS_SUCCESS) {
976 		if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
977 		    !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
978 			memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
979 
980 	} else {
981 		netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
982 			   set_complete->status);
983 		ret = -EINVAL;
984 	}
985 
986 cleanup:
987 	put_rndis_request(rdev, request);
988 	return ret;
989 }
990 
991 int rndis_filter_set_rss_param(struct rndis_device *rdev,
992 			       const u8 *rss_key)
993 {
994 	/* Disable RSS before change */
995 	rndis_set_rss_param_msg(rdev, rss_key,
996 				NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
997 
998 	return rndis_set_rss_param_msg(rdev, rss_key, 0);
999 }
1000 
1001 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
1002 						 struct netvsc_device *net_device)
1003 {
1004 	u32 size = sizeof(u32);
1005 	u32 link_status;
1006 
1007 	return rndis_filter_query_device(dev, net_device,
1008 					 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
1009 					 &link_status, &size);
1010 }
1011 
1012 static int rndis_filter_query_link_speed(struct rndis_device *dev,
1013 					 struct netvsc_device *net_device)
1014 {
1015 	u32 size = sizeof(u32);
1016 	u32 link_speed;
1017 	struct net_device_context *ndc;
1018 	int ret;
1019 
1020 	ret = rndis_filter_query_device(dev, net_device,
1021 					RNDIS_OID_GEN_LINK_SPEED,
1022 					&link_speed, &size);
1023 
1024 	if (!ret) {
1025 		ndc = netdev_priv(dev->ndev);
1026 
1027 		/* The link speed reported from host is in 100bps unit, so
1028 		 * we convert it to Mbps here.
1029 		 */
1030 		ndc->speed = link_speed / 10000;
1031 	}
1032 
1033 	return ret;
1034 }
1035 
1036 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
1037 					  u32 new_filter)
1038 {
1039 	struct rndis_request *request;
1040 	struct rndis_set_request *set;
1041 	int ret;
1042 
1043 	if (dev->filter == new_filter)
1044 		return 0;
1045 
1046 	request = get_rndis_request(dev, RNDIS_MSG_SET,
1047 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
1048 			sizeof(u32));
1049 	if (!request)
1050 		return -ENOMEM;
1051 
1052 	/* Setup the rndis set */
1053 	set = &request->request_msg.msg.set_req;
1054 	set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
1055 	set->info_buflen = sizeof(u32);
1056 	set->info_buf_offset = offsetof(typeof(*set), info_buf);
1057 	memcpy(set->info_buf, &new_filter, sizeof(u32));
1058 
1059 	ret = rndis_filter_send_request(dev, request);
1060 	if (ret == 0) {
1061 		wait_for_completion(&request->wait_event);
1062 		dev->filter = new_filter;
1063 	}
1064 
1065 	put_rndis_request(dev, request);
1066 
1067 	return ret;
1068 }
1069 
1070 static void rndis_set_multicast(struct work_struct *w)
1071 {
1072 	struct rndis_device *rdev
1073 		= container_of(w, struct rndis_device, mcast_work);
1074 	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
1075 	unsigned int flags = rdev->ndev->flags;
1076 
1077 	if (flags & IFF_PROMISC) {
1078 		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
1079 	} else {
1080 		if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
1081 			filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
1082 		if (flags & IFF_BROADCAST)
1083 			filter |= NDIS_PACKET_TYPE_BROADCAST;
1084 	}
1085 
1086 	rndis_filter_set_packet_filter(rdev, filter);
1087 }
1088 
1089 void rndis_filter_update(struct netvsc_device *nvdev)
1090 {
1091 	struct rndis_device *rdev = nvdev->extension;
1092 
1093 	schedule_work(&rdev->mcast_work);
1094 }
1095 
1096 static int rndis_filter_init_device(struct rndis_device *dev,
1097 				    struct netvsc_device *nvdev)
1098 {
1099 	struct rndis_request *request;
1100 	struct rndis_initialize_request *init;
1101 	struct rndis_initialize_complete *init_complete;
1102 	u32 status;
1103 	int ret;
1104 
1105 	request = get_rndis_request(dev, RNDIS_MSG_INIT,
1106 			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
1107 	if (!request) {
1108 		ret = -ENOMEM;
1109 		goto cleanup;
1110 	}
1111 
1112 	/* Setup the rndis set */
1113 	init = &request->request_msg.msg.init_req;
1114 	init->major_ver = RNDIS_MAJOR_VERSION;
1115 	init->minor_ver = RNDIS_MINOR_VERSION;
1116 	init->max_xfer_size = 0x4000;
1117 
1118 	dev->state = RNDIS_DEV_INITIALIZING;
1119 
1120 	ret = rndis_filter_send_request(dev, request);
1121 	if (ret != 0) {
1122 		dev->state = RNDIS_DEV_UNINITIALIZED;
1123 		goto cleanup;
1124 	}
1125 
1126 	wait_for_completion(&request->wait_event);
1127 
1128 	init_complete = &request->response_msg.msg.init_complete;
1129 	status = init_complete->status;
1130 	if (status == RNDIS_STATUS_SUCCESS) {
1131 		dev->state = RNDIS_DEV_INITIALIZED;
1132 		nvdev->max_pkt = init_complete->max_pkt_per_msg;
1133 		nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
1134 		ret = 0;
1135 	} else {
1136 		dev->state = RNDIS_DEV_UNINITIALIZED;
1137 		ret = -EINVAL;
1138 	}
1139 
1140 cleanup:
1141 	if (request)
1142 		put_rndis_request(dev, request);
1143 
1144 	return ret;
1145 }
1146 
1147 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
1148 {
1149 	int i;
1150 
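	/* Idle means every channel has drained its receive-completion ring
	 * and has no sends still outstanding.
	 */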
1151 	for (i = 0; i < nvdev->num_chn; i++) {
1152 		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1153 
1154 		if (nvchan->mrc.first != nvchan->mrc.next)
1155 			return false;
1156 
1157 		if (atomic_read(&nvchan->queue_sends) > 0)
1158 			return false;
1159 	}
1160 
1161 	return true;
1162 }
1163 
1164 static void rndis_filter_halt_device(struct netvsc_device *nvdev,
1165 				     struct rndis_device *dev)
1166 {
1167 	struct rndis_request *request;
1168 	struct rndis_halt_request *halt;
1169 
1170 	/* Attempt to do a rndis device halt */
1171 	request = get_rndis_request(dev, RNDIS_MSG_HALT,
1172 				RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
1173 	if (!request)
1174 		goto cleanup;
1175 
1176 	/* Setup the rndis set */
1177 	halt = &request->request_msg.msg.halt_req;
1178 	halt->req_id = atomic_inc_return(&dev->new_req_id);
1179 
1180 	/* Ignore return since this msg is optional. */
1181 	rndis_filter_send_request(dev, request);
1182 
1183 	dev->state = RNDIS_DEV_UNINITIALIZED;
1184 
1185 cleanup:
1186 	nvdev->destroy = true;
1187 
1188 	/* Force flag to be ordered before waiting */
1189 	wmb();
1190 
1191 	/* Wait for all send completions */
1192 	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
1193 
1194 	if (request)
1195 		put_rndis_request(dev, request);
1196 }
1197 
1198 static int rndis_filter_open_device(struct rndis_device *dev)
1199 {
1200 	int ret;
1201 
1202 	if (dev->state != RNDIS_DEV_INITIALIZED)
1203 		return 0;
1204 
1205 	ret = rndis_filter_set_packet_filter(dev,
1206 					 NDIS_PACKET_TYPE_BROADCAST |
1207 					 NDIS_PACKET_TYPE_ALL_MULTICAST |
1208 					 NDIS_PACKET_TYPE_DIRECTED);
1209 	if (ret == 0)
1210 		dev->state = RNDIS_DEV_DATAINITIALIZED;
1211 
1212 	return ret;
1213 }
1214 
1215 static int rndis_filter_close_device(struct rndis_device *dev)
1216 {
1217 	int ret;
1218 
1219 	if (dev->state != RNDIS_DEV_DATAINITIALIZED)
1220 		return 0;
1221 
1222 	/* Make sure rndis_set_multicast doesn't re-enable filter! */
1223 	cancel_work_sync(&dev->mcast_work);
1224 
1225 	ret = rndis_filter_set_packet_filter(dev, 0);
1226 	if (ret == -ENODEV)
1227 		ret = 0;
1228 
1229 	if (ret == 0)
1230 		dev->state = RNDIS_DEV_INITIALIZED;
1231 
1232 	return ret;
1233 }
1234 
1235 static void netvsc_sc_open(struct vmbus_channel *new_sc)
1236 {
1237 	struct net_device *ndev =
1238 		hv_get_drvdata(new_sc->primary_channel->device_obj);
1239 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1240 	struct netvsc_device *nvscdev;
1241 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
1242 	struct netvsc_channel *nvchan;
1243 	int ret;
1244 
1245 	/* This is safe because this callback only happens when
1246 	 * new device is being setup and waiting on the channel_init_wait.
1247 	 */
1248 	nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
1249 	if (!nvscdev || chn_index >= nvscdev->num_chn)
1250 		return;
1251 
1252 	nvchan = nvscdev->chan_table + chn_index;
1253 
1254 	/* Because the device uses NAPI, all the interrupt batching and
1255 	 * control is done via Net softirq, not the channel handling
1256 	 */
1257 	set_channel_read_mode(new_sc, HV_CALL_ISR);
1258 
1259 	/* Set the channel before opening.*/
1260 	nvchan->channel = new_sc;
1261 
1262 	new_sc->next_request_id_callback = vmbus_next_request_id;
1263 	new_sc->request_addr_callback = vmbus_request_addr;
1264 	new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1265 	new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1266 
1267 	ret = vmbus_open(new_sc, netvsc_ring_bytes,
1268 			 netvsc_ring_bytes, NULL, 0,
1269 			 netvsc_channel_cb, nvchan);
1270 	if (ret == 0)
1271 		napi_enable(&nvchan->napi);
1272 	else
1273 		netdev_notice(ndev, "sub channel open failed: %d\n", ret);
1274 
1275 	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
1276 		wake_up(&nvscdev->subchan_open);
1277 }
1278 
1279 /* Open sub-channels after completing the handling of the device probe.
1280  * This breaks overlap of processing the host message for the
1281  * new primary channel with the initialization of sub-channels.
1282  */
1283 int rndis_set_subchannel(struct net_device *ndev,
1284 			 struct netvsc_device *nvdev,
1285 			 struct netvsc_device_info *dev_info)
1286 {
1287 	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1288 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1289 	struct hv_device *hv_dev = ndev_ctx->device_ctx;
1290 	struct rndis_device *rdev = nvdev->extension;
1291 	int i, ret;
1292 
1293 	ASSERT_RTNL();
1294 
1295 	memset(init_packet, 0, sizeof(struct nvsp_message));
1296 	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1297 	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1298 	init_packet->msg.v5_msg.subchn_req.num_subchannels =
1299 						nvdev->num_chn - 1;
1300 	trace_nvsp_send(ndev, init_packet);
1301 
1302 	ret = vmbus_sendpacket(hv_dev->channel, init_packet,
1303 			       sizeof(struct nvsp_message),
1304 			       (unsigned long)init_packet,
1305 			       VM_PKT_DATA_INBAND,
1306 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1307 	if (ret) {
1308 		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
1309 		return ret;
1310 	}
1311 
1312 	wait_for_completion(&nvdev->channel_init_wait);
1313 	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1314 		netdev_err(ndev, "sub channel request failed\n");
1315 		return -EIO;
1316 	}
1317 
1318 	/* Check that number of allocated sub channel is within the expected range */
1319 	if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
1320 		netdev_err(ndev, "invalid number of allocated sub channel\n");
1321 		return -EINVAL;
1322 	}
1323 	nvdev->num_chn = 1 +
1324 		init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1325 
1326 	/* wait for all sub channels to open */
1327 	wait_event(nvdev->subchan_open,
1328 		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
1329 
1330 	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1331 		ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1332 
1333 	/* ignore failures from setting rss parameters, still have channels */
1334 	if (dev_info)
1335 		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
1336 	else
1337 		rndis_filter_set_rss_param(rdev, netvsc_hash_key);
1338 
1339 	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
1340 	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
1341 
1342 	return 0;
1343 }
1344 
1345 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
1346 				   struct netvsc_device *nvdev)
1347 {
1348 	struct net_device *net = rndis_device->ndev;
1349 	struct net_device_context *net_device_ctx = netdev_priv(net);
1350 	struct ndis_offload hwcaps;
1351 	struct ndis_offload_params offloads;
1352 	unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
1353 	int ret;
1354 
1355 	/* Find HW offload capabilities */
1356 	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
1357 	if (ret != 0)
1358 		return ret;
1359 
1360 	/* A value of zero means "no change"; now turn on what we want. */
1361 	memset(&offloads, 0, sizeof(struct ndis_offload_params));
1362 
1363 	/* The kernel always checksums IP headers itself, so IP checksum offload stays disabled */
1364 	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
1365 
1366 	/* Reset previously set hw_features flags */
1367 	net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
1368 	net_device_ctx->tx_checksum_mask = 0;
1369 
1370 	/* Compute tx offload settings based on hw capabilities */
1371 	net->hw_features |= NETIF_F_RXCSUM;
1372 	net->hw_features |= NETIF_F_SG;
1373 	net->hw_features |= NETIF_F_RXHASH;
1374 
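	/* Only advertise checksum, TSO and LRO features that the host
	 * actually reports in its offload capabilities.
	 */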
1375 	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
1376 		/* Can checksum TCP */
1377 		net->hw_features |= NETIF_F_IP_CSUM;
1378 		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
1379 
1380 		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1381 
1382 		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
1383 			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1384 			net->hw_features |= NETIF_F_TSO;
1385 
1386 			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
1387 				gso_max_size = hwcaps.lsov2.ip4_maxsz;
1388 		}
1389 
1390 		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
1391 			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1392 			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
1393 		}
1394 	}
1395 
1396 	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
1397 		net->hw_features |= NETIF_F_IPV6_CSUM;
1398 
1399 		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1400 		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
1401 
1402 		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
1403 		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
1404 			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1405 			net->hw_features |= NETIF_F_TSO6;
1406 
1407 			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
1408 				gso_max_size = hwcaps.lsov2.ip6_maxsz;
1409 		}
1410 
1411 		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
1412 			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1413 			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
1414 		}
1415 	}
1416 
1417 	if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
1418 		net->hw_features |= NETIF_F_LRO;
1419 
1420 		if (net->features & NETIF_F_LRO) {
1421 			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1422 			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1423 		} else {
1424 			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1425 			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1426 		}
1427 	}
1428 
1429 	/* In case some hw_features disappeared we need to remove them from
1430 	 * net->features list as they're no longer supported.
1431 	 */
1432 	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
1433 
1434 	netif_set_tso_max_size(net, gso_max_size);
1435 
1436 	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
1437 
1438 	return ret;
1439 }
1440 
1441 static void rndis_get_friendly_name(struct net_device *net,
1442 				    struct rndis_device *rndis_device,
1443 				    struct netvsc_device *net_device)
1444 {
1445 	ucs2_char_t wname[256];
1446 	unsigned long len;
1447 	u8 ifalias[256];
1448 	u32 size;
1449 
1450 	size = sizeof(wname);
1451 	if (rndis_filter_query_device(rndis_device, net_device,
1452 				      RNDIS_OID_GEN_FRIENDLY_NAME,
1453 				      wname, &size) != 0)
1454 		return;	/* ignore if host does not support */
1455 
1456 	if (size == 0)
1457 		return;	/* name not set */
1458 
1459 	/* Convert Windows Unicode string to UTF-8 */
1460 	len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
1461 
1462 	/* ignore the default value from host */
1463 	if (strcmp(ifalias, "Network Adapter") != 0)
1464 		dev_set_alias(net, ifalias, len);
1465 }
1466 
1467 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1468 				      struct netvsc_device_info *device_info)
1469 {
1470 	struct net_device *net = hv_get_drvdata(dev);
1471 	struct net_device_context *ndc = netdev_priv(net);
1472 	struct netvsc_device *net_device;
1473 	struct rndis_device *rndis_device;
1474 	struct ndis_recv_scale_cap rsscap;
1475 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1476 	u32 mtu, size;
1477 	u32 num_possible_rss_qs;
1478 	int i, ret;
1479 
1480 	rndis_device = get_rndis_device();
1481 	if (!rndis_device)
1482 		return ERR_PTR(-ENODEV);
1483 
1484 	/* Let the inner driver handle this first to create the netvsc channel
1485 	 * NOTE! Once the channel is created, we may get a receive callback
1486 	 * (RndisFilterOnReceive()) before this call is completed
1487 	 */
1488 	net_device = netvsc_device_add(dev, device_info);
1489 	if (IS_ERR(net_device)) {
1490 		kfree(rndis_device);
1491 		return net_device;
1492 	}
1493 
1494 	/* Initialize the rndis device */
1495 	net_device->max_chn = 1;
1496 	net_device->num_chn = 1;
1497 
1498 	net_device->extension = rndis_device;
1499 	rndis_device->ndev = net;
1500 
1501 	/* Send the rndis initialization message */
1502 	ret = rndis_filter_init_device(rndis_device, net_device);
1503 	if (ret != 0)
1504 		goto err_dev_remv;
1505 
1506 	/* Get the MTU from the host */
1507 	size = sizeof(u32);
1508 	ret = rndis_filter_query_device(rndis_device, net_device,
1509 					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
1510 					&mtu, &size);
1511 	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
1512 		net->mtu = mtu;
1513 
1514 	/* Get the mac address */
1515 	ret = rndis_filter_query_device_mac(rndis_device, net_device);
1516 	if (ret != 0)
1517 		goto err_dev_remv;
1518 
1519 	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
1520 
1521 	/* Get friendly name as ifalias */
1522 	if (!net->ifalias)
1523 		rndis_get_friendly_name(net, rndis_device, net_device);
1524 
1525 	/* Query and set hardware capabilities */
1526 	ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
1527 	if (ret != 0)
1528 		goto err_dev_remv;
1529 
1530 	rndis_filter_query_device_link_status(rndis_device, net_device);
1531 
1532 	netdev_dbg(net, "Device MAC %pM link state %s\n",
1533 		   rndis_device->hw_mac_adr,
1534 		   rndis_device->link_state ? "down" : "up");
1535 
1536 	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1537 		goto out;
1538 
1539 	rndis_filter_query_link_speed(rndis_device, net_device);
1540 
1541 	/* vRSS setup */
1542 	memset(&rsscap, 0, rsscap_size);
1543 	ret = rndis_filter_query_device(rndis_device, net_device,
1544 					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1545 					&rsscap, &rsscap_size);
1546 	if (ret || rsscap.num_recv_que < 2)
1547 		goto out;
1548 
1549 	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
1550 	num_possible_rss_qs = min_t(u32, num_online_cpus(),
1551 				    rsscap.num_recv_que);
1552 
1553 	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
1554 
1555 	/* We will use the given number of channels if available. */
1556 	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
1557 
1558 	if (!netif_is_rxfh_configured(net)) {
1559 		for (i = 0; i < ITAB_NUM; i++)
1560 			ndc->rx_table[i] = ethtool_rxfh_indir_default(
1561 						i, net_device->num_chn);
1562 	}
1563 
1564 	atomic_set(&net_device->open_chn, 1);
1565 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1566 
1567 	for (i = 1; i < net_device->num_chn; i++) {
1568 		ret = netvsc_alloc_recv_comp_ring(net_device, i);
1569 		if (ret) {
1570 			while (--i != 0)
1571 				vfree(net_device->chan_table[i].mrc.slots);
1572 			goto out;
1573 		}
1574 	}
1575 
1576 	for (i = 1; i < net_device->num_chn; i++)
1577 		netif_napi_add(net, &net_device->chan_table[i].napi,
1578 			       netvsc_poll, NAPI_POLL_WEIGHT);
1579 
1580 	return net_device;
1581 
1582 out:
1583 	/* setting up multiple channels failed */
1584 	net_device->max_chn = 1;
1585 	net_device->num_chn = 1;
1586 	return net_device;
1587 
1588 err_dev_remv:
1589 	rndis_filter_device_remove(dev, net_device);
1590 	return ERR_PTR(ret);
1591 }
1592 
1593 void rndis_filter_device_remove(struct hv_device *dev,
1594 				struct netvsc_device *net_dev)
1595 {
1596 	struct rndis_device *rndis_dev = net_dev->extension;
1597 
1598 	/* Halt and release the rndis device */
1599 	rndis_filter_halt_device(net_dev, rndis_dev);
1600 
1601 	netvsc_device_remove(dev);
1602 }
1603 
1604 int rndis_filter_open(struct netvsc_device *nvdev)
1605 {
1606 	if (!nvdev)
1607 		return -EINVAL;
1608 
1609 	return rndis_filter_open_device(nvdev->extension);
1610 }
1611 
1612 int rndis_filter_close(struct netvsc_device *nvdev)
1613 {
1614 	if (!nvdev)
1615 		return -EINVAL;
1616 
1617 	return rndis_filter_close_device(nvdev->extension);
1618 }
1619