/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "hv_api.h"
#include "logging.h"
#include "netvsc.h"
#include "rndis_filter.h"
#include "channel.h"


/* Globals */
static const char *driver_name = "netvsc";

/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const struct hv_guid netvsc_device_type = {
	.data = {
		0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
	}
};

static int netvsc_device_add(struct hv_device *device, void *additional_info);

static int netvsc_device_remove(struct hv_device *device);

static void netvsc_cleanup(struct hv_driver *driver);

static void netvsc_channel_cb(void *context);

static int netvsc_init_send_buf(struct hv_device *device);

static int netvsc_init_recv_buf(struct hv_device *device);

static int netvsc_destroy_send_buf(struct netvsc_device *net_device);

static int netvsc_destroy_recv_buf(struct netvsc_device *net_device);

static int netvsc_connect_vsp(struct hv_device *device);

static void netvsc_send_completion(struct hv_device *device,
				   struct vmpacket_descriptor *packet);

static int netvsc_send(struct hv_device *device,
		       struct hv_netvsc_packet *packet);

static void netvsc_receive(struct hv_device *device,
			   struct vmpacket_descriptor *packet);

static void netvsc_receive_completion(void *context);

static void netvsc_send_recv_completion(struct hv_device *device,
					u64 transaction_id);


static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	/* Set to 2 to allow both inbound and outbound traffic */
	atomic_cmpxchg(&net_device->refcnt, 0, 2);

	net_device->dev = device;
	device->ext = net_device;

	return net_device;
}

static void free_net_device(struct netvsc_device *device)
{
	WARN_ON(atomic_read(&device->refcnt) != 0);
	device->dev->ext = NULL;
	kfree(device);
}

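/*
 * The refcnt field doubles as a traffic gate: alloc_net_device() sets it
 * to 2 (outbound and inbound both enabled), release_outbound_net_device()
 * drops it to 1 (inbound only) and release_inbound_net_device() drops it
 * to 0 (device going away). The get_*()/put_net_device() helpers below
 * take and drop an extra reference only while the corresponding direction
 * is still enabled.
 */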
/* Get the net device object iff exists and its refcount > 1 */
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device && atomic_read(&net_device->refcnt) > 1)
		atomic_inc(&net_device->refcnt);
	else
		net_device = NULL;

	return net_device;
}

/* Get the net device object iff exists and its refcount > 0 */
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device && atomic_read(&net_device->refcnt))
		atomic_inc(&net_device->refcnt);
	else
		net_device = NULL;

	return net_device;
}

static void put_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;

	atomic_dec(&net_device->refcnt);
}

static struct netvsc_device *release_outbound_net_device(
		struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device == NULL)
		return NULL;

	/* Busy wait until the ref drops to 2, then set it to 1 */
	while (atomic_cmpxchg(&net_device->refcnt, 2, 1) != 2)
		udelay(100);

	return net_device;
}

static struct netvsc_device *release_inbound_net_device(
		struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device == NULL)
		return NULL;

	/* Busy wait until the ref drops to 1, then set it to 0 */
	while (atomic_cmpxchg(&net_device->refcnt, 1, 0) != 1)
		udelay(100);

	device->ext = NULL;
	return net_device;
}

/*
 * netvsc_initialize - Main entry point
 */
int netvsc_initialize(struct hv_driver *drv)
{
	struct netvsc_driver *driver = (struct netvsc_driver *)drv;

	DPRINT_DBG(NETVSC, "sizeof(struct hv_netvsc_packet)=%zd, "
		   "sizeof(struct nvsp_message)=%zd, "
		   "sizeof(struct vmtransfer_page_packet_header)=%zd",
		   sizeof(struct hv_netvsc_packet),
		   sizeof(struct nvsp_message),
		   sizeof(struct vmtransfer_page_packet_header));

	drv->name = driver_name;
	memcpy(&drv->dev_type, &netvsc_device_type, sizeof(struct hv_guid));

	/* Setup the dispatch table */
	driver->base.dev_add = netvsc_device_add;
	driver->base.dev_rm = netvsc_device_remove;
	driver->base.cleanup = netvsc_cleanup;

	driver->send = netvsc_send;

	rndis_filter_init(driver);
	return 0;
}

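/*
 * Allocate the guest receive buffer, make it visible to the host by
 * establishing a GPADL for it, and announce the GPADL to the NetVSP with
 * a SEND_RECV_BUF message. The completion (delivered through
 * netvsc_send_completion()) describes how the host carved the buffer into
 * sections; this version only accepts a single section covering the whole
 * buffer.
 */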
static int netvsc_init_recv_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		DPRINT_ERR(NETVSC, "unable to get net device..."
			   "device being destroyed?");
		return -1;
	}

	net_device->recv_buf =
		(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				get_order(net_device->recv_buf_size));
	if (!net_device->recv_buf) {
		DPRINT_ERR(NETVSC,
			   "unable to allocate receive buffer of size %d",
			   net_device->recv_buf_size);
		ret = -1;
		goto cleanup;
	}

	DPRINT_INFO(NETVSC, "Establishing receive buffer's GPADL...");

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		DPRINT_ERR(NETVSC,
			   "unable to establish receive buffer's gpadl");
		goto cleanup;
	}


	/* Notify the NetVsp of the gpadl handle */
	DPRINT_INFO(NETVSC, "Sending NvspMessage1TypeSendReceiveBuffer...");

	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	net_device->wait_condition = 0;
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		DPRINT_ERR(NETVSC,
			   "unable to send receive buffer's gpadl to netvsp");
		goto cleanup;
	}

	wait_event_timeout(net_device->channel_init_wait,
			   net_device->wait_condition,
			   msecs_to_jiffies(1000));
	BUG_ON(net_device->wait_condition == 0);


	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		DPRINT_ERR(NETVSC, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -1;
		goto cleanup;
	}

	/* Parse the response */

	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmalloc(net_device->recv_section_cnt
		* sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -1;
		goto cleanup;
	}

	memcpy(net_device->recv_section,
	       init_packet->msg.v1_msg.
	       send_recv_buf_complete.sections,
	       net_device->recv_section_cnt *
	       sizeof(struct nvsp_1_receive_buffer_section));

	DPRINT_INFO(NETVSC, "Receive sections info (count %d, offset %d, "
		    "endoffset %d, suballoc size %d, num suballocs %d)",
		    net_device->recv_section_cnt,
		    net_device->recv_section[0].offset,
		    net_device->recv_section[0].end_offset,
		    net_device->recv_section[0].sub_alloc_size,
		    net_device->recv_section[0].num_sub_allocs);

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -1;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_recv_buf(net_device);

exit:
	put_net_device(device);
	return ret;
}

static int netvsc_init_send_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		DPRINT_ERR(NETVSC, "unable to get net device..."
			   "device being destroyed?");
		return -1;
	}
	if (net_device->send_buf_size <= 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->send_buf =
		(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				get_order(net_device->send_buf_size));
	if (!net_device->send_buf) {
		DPRINT_ERR(NETVSC, "unable to allocate send buffer of size %d",
			   net_device->send_buf_size);
		ret = -1;
		goto cleanup;
	}

	DPRINT_INFO(NETVSC, "Establishing send buffer's GPADL...");

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		DPRINT_ERR(NETVSC, "unable to establish send buffer's gpadl");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	DPRINT_INFO(NETVSC, "Sending NvspMessage1TypeSendSendBuffer...");

	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id =
		NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	net_device->wait_condition = 0;
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		DPRINT_ERR(NETVSC,
			   "unable to send send buffer's gpadl to netvsp");
		goto cleanup;
	}

	wait_event_timeout(net_device->channel_init_wait,
			   net_device->wait_condition,
			   msecs_to_jiffies(1000));
	BUG_ON(net_device->wait_condition == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		DPRINT_ERR(NETVSC, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -1;
		goto cleanup;
	}

	net_device->send_section_size = init_packet->
		msg.v1_msg.send_send_buf_complete.section_size;

	goto exit;

cleanup:
	netvsc_destroy_send_buf(net_device);

exit:
	put_net_device(device);
	return ret;
}

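/*
 * Undo netvsc_init_recv_buf(): ask the NetVSP to revoke the receive buffer
 * (only needed if the handshake got far enough for the host to know about
 * it), tear down the GPADL and free the pages and the section table. On
 * failure we deliberately leak rather than free memory the host may still
 * be using.
 */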
static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		DPRINT_INFO(NETVSC,
			    "Sending NvspMessage1TypeRevokeReceiveBuffer...");

		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
			revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			DPRINT_ERR(NETVSC, "unable to send revoke receive "
				   "buffer to netvsp");
			return -1;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		DPRINT_INFO(NETVSC, "Tearing down receive buffer's GPADL...");

		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);

		/*
		 * If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			DPRINT_ERR(NETVSC,
				   "unable to teardown receive buffer's gpadl");
			return -1;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		DPRINT_INFO(NETVSC, "Freeing up receive buffer...");

		/* Free up the receive buffer */
		free_pages((unsigned long)net_device->recv_buf,
			   get_order(net_device->recv_buf_size));
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	return ret;
}

static int netvsc_destroy_send_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;

	/*
	 * If we got a section size, it means we received a
	 * SendSendBufferComplete msg (ie sent
	 * NvspMessage1TypeSendSendBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		DPRINT_INFO(NETVSC,
			    "Sending NvspMessage1TypeRevokeSendBuffer...");

		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.
			revoke_send_buf.id = NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			DPRINT_ERR(NETVSC, "unable to send revoke send buffer "
				   "to netvsp");
			return -1;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		DPRINT_INFO(NETVSC, "Tearing down send buffer's GPADL...");
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/*
		 * If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			DPRINT_ERR(NETVSC, "unable to teardown send buffer's "
				   "gpadl");
			return -1;
		}
		net_device->send_buf_gpadl_handle = 0;
	}

	if (net_device->send_buf) {
		DPRINT_INFO(NETVSC, "Freeing up send buffer...");

		/* Free up the send buffer */
		free_pages((unsigned long)net_device->send_buf,
			   get_order(net_device->send_buf_size));
		net_device->send_buf = NULL;
	}

	return ret;
}


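/*
 * Run the NVSP handshake on the freshly opened channel: negotiate a
 * protocol version (only version 1 is accepted here), report the guest's
 * NDIS version, then post the receive and send buffers via
 * netvsc_init_recv_buf() and netvsc_init_send_buf().
 */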
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		DPRINT_ERR(NETVSC, "unable to get net device..."
			   "device being destroyed?");
		return -1;
	}

	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver =
		NVSP_MIN_PROTOCOL_VERSION;
	init_packet->msg.init_msg.init.max_protocol_ver =
		NVSP_MAX_PROTOCOL_VERSION;

	DPRINT_INFO(NETVSC, "Sending NvspMessageTypeInit...");

	/* Send the init request */
	net_device->wait_condition = 0;
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0) {
		DPRINT_ERR(NETVSC, "unable to send NvspMessageTypeInit");
		goto cleanup;
	}

	wait_event_timeout(net_device->channel_init_wait,
			   net_device->wait_condition,
			   msecs_to_jiffies(1000));
	if (net_device->wait_condition == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	DPRINT_INFO(NETVSC, "NvspMessageTypeInit status(%d) max mdl chain (%d)",
		    init_packet->msg.init_msg.init_complete.status,
		    init_packet->msg.init_msg.
		    init_complete.max_mdl_chain_len);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS) {
		DPRINT_ERR(NETVSC,
			   "unable to initialize with netvsp (status 0x%x)",
			   init_packet->msg.init_msg.init_complete.status);
		ret = -1;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.
	    negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
		DPRINT_ERR(NETVSC, "unable to initialize with netvsp "
			   "(version expected 1 got %d)",
			   init_packet->msg.init_msg.
			   init_complete.negotiated_protocol_ver);
		ret = -1;
		goto cleanup;
	}
	DPRINT_INFO(NETVSC, "Sending NvspMessage1TypeSendNdisVersion...");

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

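	/*
	 * The NDIS version is encoded with the major number in the upper
	 * 16 bits and the minor number in the lower 16 bits, so 0x00050000
	 * advertises NDIS 5.0.
	 */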
	ndis_version = 0x00050000;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the NDIS version request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0) {
		DPRINT_ERR(NETVSC,
			   "unable to send NvspMessage1TypeSendNdisVersion");
		ret = -1;
		goto cleanup;
	}

	/* Post the big receive buffer to NetVSP */
	ret = netvsc_init_recv_buf(device);
	if (ret == 0)
		ret = netvsc_init_send_buf(device);

cleanup:
	put_net_device(device);
	return ret;
}

static void NetVscDisconnectFromVsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
	netvsc_destroy_send_buf(net_device);
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
static int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int i;
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *packet, *pos;
	struct netvsc_driver *net_driver =
		(struct netvsc_driver *)device->drv;

	net_device = alloc_net_device(device);
	if (!net_device) {
		ret = -1;
		goto cleanup;
	}

	DPRINT_DBG(NETVSC, "netvsc channel object allocated - %p", net_device);

	/* Initialize the NetVSC channel extension */
	net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	spin_lock_init(&net_device->recv_pkt_list_lock);

	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	INIT_LIST_HEAD(&net_device->recv_pkt_list);

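	/*
	 * Pre-allocate a pool of receive packets, each able to describe up
	 * to NETVSC_RECEIVE_SG_COUNT page buffers, so that the receive path
	 * can take packets off this list under a spinlock instead of
	 * allocating while handling channel interrupts.
	 */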
	for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
		packet = kzalloc(sizeof(struct hv_netvsc_packet) +
				 (NETVSC_RECEIVE_SG_COUNT *
				  sizeof(struct hv_page_buffer)), GFP_KERNEL);
		if (!packet) {
			DPRINT_DBG(NETVSC, "unable to allocate netvsc pkts "
				   "for receive pool (wanted %d got %d)",
				   NETVSC_RECEIVE_PACKETLIST_COUNT, i);
			break;
		}
		list_add_tail(&packet->list_ent,
			      &net_device->recv_pkt_list);
	}
	init_waitqueue_head(&net_device->channel_init_wait);

	/* Open the channel */
	ret = vmbus_open(device->channel, net_driver->ring_buf_size,
			 net_driver->ring_buf_size, NULL, 0,
			 netvsc_channel_cb, device);

	if (ret != 0) {
		DPRINT_ERR(NETVSC, "unable to open channel: %d", ret);
		ret = -1;
		goto cleanup;
	}

	/* Channel is opened */
	DPRINT_INFO(NETVSC, "*** NetVSC channel opened successfully! ***");

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		DPRINT_ERR(NETVSC, "unable to connect to NetVSP - %d", ret);
		ret = -1;
		goto close;
	}

	DPRINT_INFO(NETVSC, "*** NetVSC channel handshake result - %d ***",
		    ret);

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:

	if (net_device) {
		list_for_each_entry_safe(packet, pos,
					 &net_device->recv_pkt_list,
					 list_ent) {
			list_del(&packet->list_ent);
			kfree(packet);
		}

		release_outbound_net_device(device);
		release_inbound_net_device(device);

		free_net_device(net_device);
	}

	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
static int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *netvsc_packet, *pos;

	DPRINT_INFO(NETVSC, "Disabling outbound traffic on net device (%p)...",
		    device->ext);

	/* Stop outbound traffic, i.e. sends and receive completions */
	net_device = release_outbound_net_device(device);
	if (!net_device) {
		DPRINT_ERR(NETVSC, "No net device present!!");
		return -1;
	}

	/* Wait for all send completions */
	while (atomic_read(&net_device->num_outstanding_sends)) {
		DPRINT_INFO(NETVSC, "waiting for %d requests to complete...",
			    atomic_read(&net_device->num_outstanding_sends));
		udelay(100);
	}

	DPRINT_INFO(NETVSC, "Disconnecting from netvsp...");

	NetVscDisconnectFromVsp(net_device);

	DPRINT_INFO(NETVSC, "Disabling inbound traffic on net device (%p)...",
		    device->ext);

	/* Stop inbound traffic, i.e. receives and send completions */
	net_device = release_inbound_net_device(device);

	/* At this point, no one should be accessing net_device except in here */
	DPRINT_INFO(NETVSC, "net device (%p) safe to remove", net_device);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	list_for_each_entry_safe(netvsc_packet, pos,
				 &net_device->recv_pkt_list, list_ent) {
		list_del(&netvsc_packet->list_ent);
		kfree(netvsc_packet);
	}

	free_net_device(net_device);
	return 0;
}

/*
 * netvsc_cleanup - Perform any cleanup when the driver is removed
 */
static void netvsc_cleanup(struct hv_driver *drv)
{
}

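/*
 * Completion handler for packets this driver sent to the host. Handshake
 * completions (INIT, SEND_RECV_BUF, SEND_SEND_BUF) are copied into
 * channel_init_pkt and wake up whoever is waiting in the init path; RNDIS
 * packet completions are forwarded to the sender's completion callback.
 */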
static void netvsc_send_completion(struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		DPRINT_ERR(NETVSC, "unable to get net device..."
			   "device being destroyed?");
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	DPRINT_DBG(NETVSC, "send completion packet - type %d",
		   nvsp_packet->hdr.msg_type);

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		net_device->wait_condition = 1;
		wake_up(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		/* Get the send context */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		nvsc_packet->completion.send.send_completion(
			nvsc_packet->completion.send.send_completion_ctx);

		atomic_dec(&net_device->num_outstanding_sends);
	} else {
		DPRINT_ERR(NETVSC, "Unknown send completion packet type - "
			   "%d received!!", nvsp_packet->hdr.msg_type);
	}

	put_net_device(device);
}

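/*
 * Wrap an outbound RNDIS packet in an NVSP SEND_RNDIS_PKT message. Data
 * packets are marked as channel type 0 (RMC_DATA), control packets as
 * type 1 (RMC_CONTROL). The pre-negotiated send buffer is not used, so
 * the section index is set to the sentinel 0xFFFFFFFF and the payload is
 * passed as page buffers (or inband when there are none).
 */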
static int netvsc_send(struct hv_device *device,
		       struct hv_netvsc_packet *packet)
{
	struct netvsc_device *net_device;
	int ret = 0;

	struct nvsp_message sendMessage;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		DPRINT_ERR(NETVSC, "net device (%p) shutting down..."
			   "ignoring outbound packets", net_device);
		return -2;
	}

	sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	/* Not using send buffer section */
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		0xFFFFFFFF;
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;

	if (packet->page_buf_cnt) {
		ret = vmbus_sendpacket_pagebuffer(device->channel,
						  packet->page_buf,
						  packet->page_buf_cnt,
						  &sendMessage,
						  sizeof(struct nvsp_message),
						  (unsigned long)packet);
	} else {
		ret = vmbus_sendpacket(device->channel, &sendMessage,
				       sizeof(struct nvsp_message),
				       (unsigned long)packet,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	}

	if (ret != 0)
		DPRINT_ERR(NETVSC, "Unable to send packet %p ret %d",
			   packet, ret);

	atomic_inc(&net_device->num_outstanding_sends);
	put_net_device(device);
	return ret;
}

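/*
 * Inbound data path. The host places RNDIS messages in the receive buffer
 * and describes them with a transfer-page packet; each range in that
 * packet is one RNDIS message (one ethernet frame). For every range we
 * take a pre-allocated hv_netvsc_packet from the free list, point its
 * page buffers at the corresponding region of the receive buffer and pass
 * it to the layer above (the RNDIS filter) through recv_cb().
 */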
static void netvsc_receive(struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *netvsc_packet = NULL;
	unsigned long start;
	unsigned long end, end_virtual;
	/* struct netvsc_driver *netvscDriver; */
	struct xferpage_packet *xferpage_packet = NULL;
	int i, j;
	int count = 0, bytes_remain = 0;
	unsigned long flags;
	LIST_HEAD(listHead);

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		DPRINT_ERR(NETVSC, "unable to get net device..."
			   "device being destroyed?");
		return;
	}

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		DPRINT_ERR(NETVSC, "Unknown packet type received - %d",
			   packet->type);
		put_net_device(device);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		DPRINT_ERR(NETVSC, "Unknown nvsp packet type received - %d",
			   nvsp_packet->hdr.msg_type);
		put_net_device(device);
		return;
	}

	DPRINT_DBG(NETVSC, "NVSP packet received - type %d",
		   nvsp_packet->hdr.msg_type);

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		DPRINT_ERR(NETVSC, "Invalid xfer page set id - "
			   "expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		put_net_device(device);
		return;
	}

	DPRINT_DBG(NETVSC, "xfer page - range count %d",
		   vmxferpage_packet->range_cnt);

	/*
	 * Grab free packets (range count + 1) to represent this xfer
	 * page packet. +1 to represent the xfer page packet itself.
	 * We grab it here so that we know exactly how many we can
	 * fulfil
	 */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
	while (!list_empty(&net_device->recv_pkt_list)) {
		list_move_tail(net_device->recv_pkt_list.next, &listHead);
		if (++count == vmxferpage_packet->range_cnt + 1)
			break;
	}
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/*
	 * We need at least 2 netvsc pkts (1 to represent the xfer
	 * page and at least 1 for the range), i.e. enough to handle
	 * some of the xfer page packet ranges...
	 */
	if (count < 2) {
		DPRINT_ERR(NETVSC, "Got only %d netvsc pkt...needed %d pkts. "
			   "Dropping this xfer page packet completely!",
			   count, vmxferpage_packet->range_cnt + 1);

		/* Return it to the freelist */
		spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
		for (i = count; i != 0; i--) {
			list_move_tail(listHead.next,
				       &net_device->recv_pkt_list);
		}
		spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
				       flags);

		netvsc_send_recv_completion(device,
					    vmxferpage_packet->d.trans_id);

		put_net_device(device);
		return;
	}

	/* Remove the 1st packet to represent the xfer page packet itself */
	xferpage_packet = (struct xferpage_packet *)listHead.next;
	list_del(&xferpage_packet->list_ent);

	/* This is how much we can satisfy */
	xferpage_packet->count = count - 1;

	if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
		DPRINT_INFO(NETVSC, "Needed %d netvsc pkts to satisfy this xfer "
			    "page...got %d", vmxferpage_packet->range_cnt,
			    xferpage_packet->count);
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < (count - 1); i++) {
		netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
		list_del(&netvsc_packet->list_ent);

		/* Initialize the netvsc packet */
		netvsc_packet->xfer_page_pkt = xferpage_packet;
		netvsc_packet->completion.recv.recv_completion =
			netvsc_receive_completion;
		netvsc_packet->completion.recv.recv_completion_ctx =
			netvsc_packet;
		netvsc_packet->device = device;
		/* Save this so that we can send it back */
		netvsc_packet->completion.recv.recv_completion_tid =
			vmxferpage_packet->d.trans_id;

		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;
		netvsc_packet->page_buf_cnt = 1;

		netvsc_packet->page_buf[0].len =
			vmxferpage_packet->ranges[i].byte_count;

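		/*
		 * Translate the range's byte offset within the receive
		 * buffer into a (pfn, offset, len) page buffer entry. A
		 * frame that crosses a page boundary is split across
		 * additional page buffer entries in the loop below.
		 */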
		start = virt_to_phys((void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset));

		netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
		end_virtual = (unsigned long)net_device->recv_buf
			      + vmxferpage_packet->ranges[i].byte_offset
			      + vmxferpage_packet->ranges[i].byte_count - 1;
		end = virt_to_phys((void *)end_virtual);

		/* Calculate the page relative offset */
		netvsc_packet->page_buf[0].offset =
			vmxferpage_packet->ranges[i].byte_offset &
			(PAGE_SIZE - 1);
		if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
			/* Handle frame across multiple pages: */
			netvsc_packet->page_buf[0].len =
				(netvsc_packet->page_buf[0].pfn <<
				 PAGE_SHIFT)
				+ PAGE_SIZE - start;
			bytes_remain = netvsc_packet->total_data_buflen -
				       netvsc_packet->page_buf[0].len;
			for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
				netvsc_packet->page_buf[j].offset = 0;
				if (bytes_remain <= PAGE_SIZE) {
					netvsc_packet->page_buf[j].len =
						bytes_remain;
					bytes_remain = 0;
				} else {
					netvsc_packet->page_buf[j].len =
						PAGE_SIZE;
					bytes_remain -= PAGE_SIZE;
				}
				netvsc_packet->page_buf[j].pfn =
					virt_to_phys((void *)(end_virtual -
						bytes_remain)) >> PAGE_SHIFT;
				netvsc_packet->page_buf_cnt++;
				if (bytes_remain == 0)
					break;
			}
		}
		DPRINT_DBG(NETVSC, "[%d] - (abs offset %u len %u) => "
			   "(pfn %llx, offset %u, len %u)", i,
			   vmxferpage_packet->ranges[i].byte_offset,
			   vmxferpage_packet->ranges[i].byte_count,
			   netvsc_packet->page_buf[0].pfn,
			   netvsc_packet->page_buf[0].offset,
			   netvsc_packet->page_buf[0].len);

		/* Pass it to the upper layer */
		((struct netvsc_driver *)device->drv)->
			recv_cb(device, netvsc_packet);

		netvsc_receive_completion(netvsc_packet->
				completion.recv.recv_completion_ctx);
	}

	put_net_device(device);
}

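/*
 * Acknowledge a transfer-page packet back to the host so it can reclaim
 * that part of the receive buffer. If there is no room in the ring buffer
 * the send is retried a few times with a short delay before giving up.
 */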
static void netvsc_send_recv_completion(struct hv_device *device,
					u64 transaction_id)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;

	DPRINT_DBG(NETVSC, "Sending receive completion pkt - %llx",
		   transaction_id);

	recvcompMessage.hdr.msg_type =
		NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	/* FIXME: Pass in the status */
	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
		NVSP_STAT_SUCCESS;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(device->channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
		/* no-op */
	} else if (ret == -1) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		DPRINT_ERR(NETVSC, "unable to send receive completion pkt "
			   "(tid %llx)...retrying %d", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			DPRINT_ERR(NETVSC, "unable to send receive completion "
				   "pkt (tid %llx)...give up retrying",
				   transaction_id);
		}
	} else {
		DPRINT_ERR(NETVSC, "unable to send receive completion pkt - "
			   "%llx", transaction_id);
	}
}

/* Send a receive completion packet to RNDIS device (ie NetVsp) */
static void netvsc_receive_completion(void *context)
{
	struct hv_netvsc_packet *packet = context;
	struct hv_device *device = (struct hv_device *)packet->device;
	struct netvsc_device *net_device;
	u64 transaction_id = 0;
	bool fsend_receive_comp = false;
	unsigned long flags;

	/*
	 * Even though it seems logical to do a get_outbound_net_device()
	 * here to send out the receive completion, we are using
	 * get_inbound_net_device() since we may have already disabled
	 * outbound traffic.
	 */
	net_device = get_inbound_net_device(device);
	if (!net_device) {
		DPRINT_ERR(NETVSC, "unable to get net device..."
			   "device being destroyed?");
		return;
	}

	/* Overloading use of the lock. */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);

	packet->xfer_page_pkt->count--;

	/*
	 * Last one in the line that represents 1 xfer page packet.
	 * Return the xfer page packet itself to the freelist
	 */
	if (packet->xfer_page_pkt->count == 0) {
		fsend_receive_comp = true;
		transaction_id = packet->completion.recv.recv_completion_tid;
		list_add_tail(&packet->xfer_page_pkt->list_ent,
			      &net_device->recv_pkt_list);

	}

	/* Put the packet back */
	list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/* Send a receive completion for the xfer page packet */
	if (fsend_receive_comp)
		netvsc_send_recv_completion(device, transaction_id);

	put_net_device(device);
}

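/*
 * Channel callback: drain the VMBus ring buffer, dispatching completion
 * packets to netvsc_send_completion() and transfer-page data packets to
 * netvsc_receive(). Packets are read into a NETVSC_PACKET_SIZE scratch
 * buffer; when one does not fit (the -2 return from vmbus_recvpacket_raw()
 * appears to signal exactly that), a larger buffer is allocated on the fly
 * and released again once that packet has been handled.
 */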
static void netvsc_channel_cb(void *context)
{
	int ret;
	struct hv_device *device = context;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	unsigned char *packet;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;

	packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
			 GFP_ATOMIC);
	if (!packet)
		return;
	buffer = packet;

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		DPRINT_ERR(NETVSC, "net device (%p) shutting down..."
			   "ignoring inbound packets", net_device);
		goto out;
	}

	do {
		ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				DPRINT_DBG(NETVSC, "receive %d bytes, tid %llx",
					   bytes_recvd, request_id);

				desc = (struct vmpacket_descriptor *)buffer;
				switch (desc->type) {
				case VM_PKT_COMP:
					netvsc_send_completion(device, desc);
					break;

				case VM_PKT_DATA_USING_XFER_PAGES:
					netvsc_receive(device, desc);
					break;

				default:
					DPRINT_ERR(NETVSC,
						   "unhandled packet type %d, "
						   "tid %llx len %d\n",
						   desc->type, request_id,
						   bytes_recvd);
					break;
				}

				/* reset */
				if (bufferlen > NETVSC_PACKET_SIZE) {
					kfree(buffer);
					buffer = packet;
					bufferlen = NETVSC_PACKET_SIZE;
				}
			} else {
				/* reset */
				if (bufferlen > NETVSC_PACKET_SIZE) {
					kfree(buffer);
					buffer = packet;
					bufferlen = NETVSC_PACKET_SIZE;
				}

				break;
			}
		} else if (ret == -2) {
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				DPRINT_ERR(NETVSC,
					   "unable to allocate buffer of size "
					   "(%d)!!", bytes_recvd);
				break;
			}

			bufferlen = bytes_recvd;
		}
	} while (1);

	put_net_device(device);
out:
	kfree(buffer);
	return;
}