1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * FUJITSU Extended Socket Network Device driver
4 * Copyright (c) 2015 FUJITSU LIMITED
5 */
6
7 #include <linux/module.h>
8 #include <linux/types.h>
9 #include <linux/nls.h>
10 #include <linux/platform_device.h>
11 #include <linux/netdevice.h>
12 #include <linux/interrupt.h>
13
14 #include "fjes.h"
15 #include "fjes_trace.h"
16
17 #define MAJ 1
18 #define MIN 2
19 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
20 #define DRV_NAME "fjes"
21 char fjes_driver_name[] = DRV_NAME;
22 char fjes_driver_version[] = DRV_VERSION;
23 static const char fjes_driver_string[] =
24 "FUJITSU Extended Socket Network Device Driver";
25 static const char fjes_copyright[] =
26 "Copyright (c) 2015 FUJITSU LIMITED";
27
28 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
29 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_VERSION);
32
33 #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
34
35 static const struct acpi_device_id fjes_acpi_ids[] = {
36 {ACPI_MOTHERBOARD_RESOURCE_HID, 0},
37 {"", 0},
38 };
39 MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
40
is_extended_socket_device(struct acpi_device * device)41 static bool is_extended_socket_device(struct acpi_device *device)
42 {
43 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
44 char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
45 union acpi_object *str;
46 acpi_status status;
47 int result;
48
49 status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
50 if (ACPI_FAILURE(status))
51 return false;
52
53 str = buffer.pointer;
54 result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
55 str->string.length, UTF16_LITTLE_ENDIAN,
56 str_buf, sizeof(str_buf) - 1);
57 str_buf[result] = 0;
58
59 if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
60 kfree(buffer.pointer);
61 return false;
62 }
63 kfree(buffer.pointer);
64
65 return true;
66 }
67
acpi_check_extended_socket_status(struct acpi_device * device)68 static int acpi_check_extended_socket_status(struct acpi_device *device)
69 {
70 unsigned long long sta;
71 acpi_status status;
72
73 status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
74 if (ACPI_FAILURE(status))
75 return -ENODEV;
76
77 if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
78 (sta & ACPI_STA_DEVICE_ENABLED) &&
79 (sta & ACPI_STA_DEVICE_UI) &&
80 (sta & ACPI_STA_DEVICE_FUNCTIONING)))
81 return -ENODEV;
82
83 return 0;
84 }
85
86 static acpi_status
fjes_get_acpi_resource(struct acpi_resource * acpi_res,void * data)87 fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
88 {
89 struct acpi_resource_address32 *addr;
90 struct acpi_resource_irq *irq;
91 struct resource *res = data;
92
93 switch (acpi_res->type) {
94 case ACPI_RESOURCE_TYPE_ADDRESS32:
95 addr = &acpi_res->data.address32;
96 res[0].start = addr->address.minimum;
97 res[0].end = addr->address.minimum +
98 addr->address.address_length - 1;
99 break;
100
101 case ACPI_RESOURCE_TYPE_IRQ:
102 irq = &acpi_res->data.irq;
103 if (irq->interrupt_count != 1)
104 return AE_ERROR;
105 res[1].start = irq->interrupts[0];
106 res[1].end = irq->interrupts[0];
107 break;
108
109 default:
110 break;
111 }
112
113 return AE_OK;
114 }
115
/* Placeholder MMIO region ([0]) and IRQ ([1]); the real values are
 * filled in by fjes_get_acpi_resource() during the _CRS walk.
 */
static struct resource fjes_resource[] = {
	DEFINE_RES_MEM(0, 1),
	DEFINE_RES_IRQ(0)
};
120
fjes_acpi_add(struct acpi_device * device)121 static int fjes_acpi_add(struct acpi_device *device)
122 {
123 struct platform_device *plat_dev;
124 acpi_status status;
125
126 if (!is_extended_socket_device(device))
127 return -ENODEV;
128
129 if (acpi_check_extended_socket_status(device))
130 return -ENODEV;
131
132 status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
133 fjes_get_acpi_resource, fjes_resource);
134 if (ACPI_FAILURE(status))
135 return -ENODEV;
136
137 /* create platform_device */
138 plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
139 ARRAY_SIZE(fjes_resource));
140 if (IS_ERR(plat_dev))
141 return PTR_ERR(plat_dev);
142
143 device->driver_data = plat_dev;
144
145 return 0;
146 }
147
/* fjes_acpi_remove - ACPI remove: tear down the platform device
 * registered by fjes_acpi_add().
 */
static int fjes_acpi_remove(struct acpi_device *device)
{
	struct platform_device *plat_dev = acpi_driver_data(device);

	platform_device_unregister(plat_dev);

	return 0;
}
157
/* ACPI driver binding for the PNP0C02 motherboard-resource HID */
static struct acpi_driver fjes_acpi_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.owner = THIS_MODULE,
	.ids = fjes_acpi_ids,
	.ops = {
		.add = fjes_acpi_add,
		.remove = fjes_acpi_remove,
	},
};
168
/* fjes_setup_resources - bring up shared-memory links with peer EPs
 *
 * Fetches zoning info from the hardware, notifies peers of the zone
 * update, then registers the TX buffer pair with every same-zone peer.
 * Returns 0 on success; on failure sets adapter->force_reset and
 * returns the hardware error code.
 * Called with RTNL held from ndo_open; takes hw_info.lock around each
 * hardware command and rx_status_lock around epbuf setup.
 */
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	/* pull per-EP status/zone info from the device */
	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		/* command failed: force a reset on the next teardown */
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	/* tell every enabled peer that our zoning info changed */
	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
				.send_intr_zoneupdate += 1;
		}
	}

	/* give peers time to process the update before registering */
	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		/* register buffers only with peers in our zone */
		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}

			hw->ep_shm_info[epidx].ep_stats
				.com_regist_buf_exec += 1;
		}
	}

	return 0;
}
246
/* fjes_rx_irq - RX-data interrupt: mask further RX interrupts and
 * hand reception over to NAPI polling.
 */
static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	/* mask RX interrupts; fjes_poll() re-enables them when idle */
	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);

	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
}
256
/* fjes_stop_req_irq - handle a device-stop request from peer @src_epid
 *
 * Reserves the peer for buffer unsharing and, depending on the current
 * partner state, either acknowledges a pending TX/RX stop and schedules
 * the unshare watcher, or (if still fully shared) schedules the epstop
 * task.  Runs in hard-irq context.
 */
static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_WAITING:
		/* ack the stop request, then fall through to unshare */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		fallthrough;
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
	trace_fjes_stop_req_irq_post(hw, src_epid);
}
292
/* fjes_txrx_stop_req_irq - handle a TX/RX stop request from a peer
 *
 * WAITING: if the peer has the lower EPID (it wins the tie-break),
 * acknowledge the stop and schedule the unshare watcher.
 * SHARED: if the peer really requested a stop, schedule the epstop
 * task.  Other states need no action.  Runs in hard-irq context.
 */
static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
					FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}
334
fjes_update_zone_irq(struct fjes_adapter * adapter,int src_epid)335 static void fjes_update_zone_irq(struct fjes_adapter *adapter,
336 int src_epid)
337 {
338 struct fjes_hw *hw = &adapter->hw;
339
340 if (!work_pending(&hw->update_zone_task))
341 queue_work(adapter->control_wq, &hw->update_zone_task);
342 }
343
/* fjes_intr - shared interrupt handler
 *
 * Reads the interrupt cause register and dispatches each asserted
 * cause (RX data, stop request, TX/RX stop request/done, zone update)
 * to its handler, bumping the matching per-EP statistic.  The source
 * EPID is encoded in the low bits of the cause register
 * (REG_IS_MASK_EPID).  Returns IRQ_HANDLED if the assert bit was set,
 * IRQ_NONE otherwise (the line is shared).
 */
static irqreturn_t fjes_intr(int irq, void *data)
{
	struct fjes_adapter *adapter = data;
	struct fjes_hw *hw = &adapter->hw;
	irqreturn_t ret;
	u32 icr;

	icr = fjes_hw_capture_interrupt_status(hw);

	if (icr & REG_IS_MASK_IS_ASSERT) {
		if (icr & REG_ICTL_MASK_RX_DATA) {
			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_rx += 1;
		}

		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_stop += 1;
		}

		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_unshare += 1;
		}

		/* stop-done needs no handler; just mask it again */
		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
			fjes_hw_set_irqmask(hw,
					    REG_ICTL_MASK_TXRX_STOP_DONE, true);

		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_zoneupdate += 1;
		}

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	return ret;
}
389
fjes_request_irq(struct fjes_adapter * adapter)390 static int fjes_request_irq(struct fjes_adapter *adapter)
391 {
392 struct net_device *netdev = adapter->netdev;
393 int result = -1;
394
395 adapter->interrupt_watch_enable = true;
396 if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
397 queue_delayed_work(adapter->control_wq,
398 &adapter->interrupt_watch_task,
399 FJES_IRQ_WATCH_DELAY);
400 }
401
402 if (!adapter->irq_registered) {
403 result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
404 IRQF_SHARED, netdev->name, adapter);
405 if (result)
406 adapter->irq_registered = false;
407 else
408 adapter->irq_registered = true;
409 }
410
411 return result;
412 }
413
fjes_free_irq(struct fjes_adapter * adapter)414 static void fjes_free_irq(struct fjes_adapter *adapter)
415 {
416 struct fjes_hw *hw = &adapter->hw;
417
418 adapter->interrupt_watch_enable = false;
419 cancel_delayed_work_sync(&adapter->interrupt_watch_task);
420
421 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
422
423 if (adapter->irq_registered) {
424 free_irq(adapter->hw.hw_res.irq, adapter);
425 adapter->irq_registered = false;
426 }
427 }
428
fjes_free_resources(struct fjes_adapter * adapter)429 static void fjes_free_resources(struct fjes_adapter *adapter)
430 {
431 struct net_device *netdev = adapter->netdev;
432 struct fjes_device_command_param param;
433 struct ep_share_mem_info *buf_pair;
434 struct fjes_hw *hw = &adapter->hw;
435 bool reset_flag = false;
436 unsigned long flags;
437 int result;
438 int epidx;
439
440 for (epidx = 0; epidx < hw->max_epid; epidx++) {
441 if (epidx == hw->my_epid)
442 continue;
443
444 mutex_lock(&hw->hw_info.lock);
445 result = fjes_hw_unregister_buff_addr(hw, epidx);
446 mutex_unlock(&hw->hw_info.lock);
447
448 hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;
449
450 if (result)
451 reset_flag = true;
452
453 buf_pair = &hw->ep_shm_info[epidx];
454
455 spin_lock_irqsave(&hw->rx_status_lock, flags);
456 fjes_hw_setup_epbuf(&buf_pair->tx,
457 netdev->dev_addr, netdev->mtu);
458 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
459
460 clear_bit(epidx, &hw->txrx_stop_req_bit);
461 }
462
463 if (reset_flag || adapter->force_reset) {
464 result = fjes_hw_reset(hw);
465
466 adapter->force_reset = false;
467
468 if (result)
469 adapter->open_guard = true;
470
471 hw->hw_info.buffer_share_bit = 0;
472
473 memset((void *)¶m, 0, sizeof(param));
474
475 param.req_len = hw->hw_info.req_buf_size;
476 param.req_start = __pa(hw->hw_info.req_buf);
477 param.res_len = hw->hw_info.res_buf_size;
478 param.res_start = __pa(hw->hw_info.res_buf);
479 param.share_start = __pa(hw->hw_info.share->ep_status);
480
481 fjes_hw_init_command_registers(hw, ¶m);
482 }
483 }
484
485 /* fjes_open - Called when a network interface is made active */
/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	/* a prior failed hardware reset blocks further opens */
	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	/* clear any stale interrupt causes before requesting the IRQ */
	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}
525
526 /* fjes_close - Disables a network interface */
/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	/* signal every peer that this EP is stopping */
	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	/* tell shared peers we are no longer polling for RX */
	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	/* flush all deferred work before releasing the buffers */
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}
571
/* fjes_tx_send - copy one frame into the TX epbuf of @dest and mark a
 * delayed RX-data interrupt as pending.  Returns 0 on success or the
 * error from the epbuf send (e.g. ring full).
 */
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
			void *data, size_t len)
{
	int err;

	err = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
					data, len);
	if (err)
		return err;

	/* let the deferred task raise the peer's RX interrupt */
	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
		FJES_TX_DELAY_SEND_PENDING;
	if (!work_pending(&adapter->raise_intr_rxdata_task))
		queue_work(adapter->txrx_wq,
			   &adapter->raise_intr_rxdata_task);

	return 0;
}
591
592 static netdev_tx_t
fjes_xmit_frame(struct sk_buff * skb,struct net_device * netdev)593 fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
594 {
595 struct fjes_adapter *adapter = netdev_priv(netdev);
596 struct fjes_hw *hw = &adapter->hw;
597
598 int max_epid, my_epid, dest_epid;
599 enum ep_partner_status pstatus;
600 struct netdev_queue *cur_queue;
601 char shortpkt[VLAN_ETH_HLEN];
602 bool is_multi, vlan;
603 struct ethhdr *eth;
604 u16 queue_no = 0;
605 u16 vlan_id = 0;
606 netdev_tx_t ret;
607 char *data;
608 int len;
609
610 ret = NETDEV_TX_OK;
611 is_multi = false;
612 cur_queue = netdev_get_tx_queue(netdev, queue_no);
613
614 eth = (struct ethhdr *)skb->data;
615 my_epid = hw->my_epid;
616
617 vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
618
619 data = skb->data;
620 len = skb->len;
621
622 if (is_multicast_ether_addr(eth->h_dest)) {
623 dest_epid = 0;
624 max_epid = hw->max_epid;
625 is_multi = true;
626 } else if (is_local_ether_addr(eth->h_dest)) {
627 dest_epid = eth->h_dest[ETH_ALEN - 1];
628 max_epid = dest_epid + 1;
629
630 if ((eth->h_dest[0] == 0x02) &&
631 (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
632 eth->h_dest[3] | eth->h_dest[4])) &&
633 (dest_epid < hw->max_epid)) {
634 ;
635 } else {
636 dest_epid = 0;
637 max_epid = 0;
638 ret = NETDEV_TX_OK;
639
640 adapter->stats64.tx_packets += 1;
641 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
642 adapter->stats64.tx_bytes += len;
643 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
644 }
645 } else {
646 dest_epid = 0;
647 max_epid = 0;
648 ret = NETDEV_TX_OK;
649
650 adapter->stats64.tx_packets += 1;
651 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
652 adapter->stats64.tx_bytes += len;
653 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
654 }
655
656 for (; dest_epid < max_epid; dest_epid++) {
657 if (my_epid == dest_epid)
658 continue;
659
660 pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
661 if (pstatus != EP_PARTNER_SHARED) {
662 if (!is_multi)
663 hw->ep_shm_info[dest_epid].ep_stats
664 .tx_dropped_not_shared += 1;
665 ret = NETDEV_TX_OK;
666 } else if (!fjes_hw_check_epbuf_version(
667 &adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
668 /* version is NOT 0 */
669 adapter->stats64.tx_carrier_errors += 1;
670 hw->ep_shm_info[dest_epid].net_stats
671 .tx_carrier_errors += 1;
672 hw->ep_shm_info[dest_epid].ep_stats
673 .tx_dropped_ver_mismatch += 1;
674
675 ret = NETDEV_TX_OK;
676 } else if (!fjes_hw_check_mtu(
677 &adapter->hw.ep_shm_info[dest_epid].rx,
678 netdev->mtu)) {
679 adapter->stats64.tx_dropped += 1;
680 hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
681 adapter->stats64.tx_errors += 1;
682 hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
683 hw->ep_shm_info[dest_epid].ep_stats
684 .tx_dropped_buf_size_mismatch += 1;
685
686 ret = NETDEV_TX_OK;
687 } else if (vlan &&
688 !fjes_hw_check_vlan_id(
689 &adapter->hw.ep_shm_info[dest_epid].rx,
690 vlan_id)) {
691 hw->ep_shm_info[dest_epid].ep_stats
692 .tx_dropped_vlanid_mismatch += 1;
693 ret = NETDEV_TX_OK;
694 } else {
695 if (len < VLAN_ETH_HLEN) {
696 memset(shortpkt, 0, VLAN_ETH_HLEN);
697 memcpy(shortpkt, skb->data, skb->len);
698 len = VLAN_ETH_HLEN;
699 data = shortpkt;
700 }
701
702 if (adapter->tx_retry_count == 0) {
703 adapter->tx_start_jiffies = jiffies;
704 adapter->tx_retry_count = 1;
705 } else {
706 adapter->tx_retry_count++;
707 }
708
709 if (fjes_tx_send(adapter, dest_epid, data, len)) {
710 if (is_multi) {
711 ret = NETDEV_TX_OK;
712 } else if (
713 ((long)jiffies -
714 (long)adapter->tx_start_jiffies) >=
715 FJES_TX_RETRY_TIMEOUT) {
716 adapter->stats64.tx_fifo_errors += 1;
717 hw->ep_shm_info[dest_epid].net_stats
718 .tx_fifo_errors += 1;
719 adapter->stats64.tx_errors += 1;
720 hw->ep_shm_info[dest_epid].net_stats
721 .tx_errors += 1;
722
723 ret = NETDEV_TX_OK;
724 } else {
725 netif_trans_update(netdev);
726 hw->ep_shm_info[dest_epid].ep_stats
727 .tx_buffer_full += 1;
728 netif_tx_stop_queue(cur_queue);
729
730 if (!work_pending(&adapter->tx_stall_task))
731 queue_work(adapter->txrx_wq,
732 &adapter->tx_stall_task);
733
734 ret = NETDEV_TX_BUSY;
735 }
736 } else {
737 if (!is_multi) {
738 adapter->stats64.tx_packets += 1;
739 hw->ep_shm_info[dest_epid].net_stats
740 .tx_packets += 1;
741 adapter->stats64.tx_bytes += len;
742 hw->ep_shm_info[dest_epid].net_stats
743 .tx_bytes += len;
744 }
745
746 adapter->tx_retry_count = 0;
747 ret = NETDEV_TX_OK;
748 }
749 }
750 }
751
752 if (ret == NETDEV_TX_OK) {
753 dev_kfree_skb(skb);
754 if (is_multi) {
755 adapter->stats64.tx_packets += 1;
756 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
757 adapter->stats64.tx_bytes += 1;
758 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
759 }
760 }
761
762 return ret;
763 }
764
765 static void
fjes_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * stats)766 fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
767 {
768 struct fjes_adapter *adapter = netdev_priv(netdev);
769
770 memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
771 }
772
/* fjes_change_mtu - ndo_change_mtu handler
 *
 * Rounds the requested MTU up to the next supported value; rejects
 * values above the largest supported one (-EINVAL).  If the interface
 * is running, quiesces TX/NAPI, clears the MTU-CHANGING-DONE flag
 * toward every peer, applies the new MTU, rebuilds each TX epbuf and
 * restarts the datapath.
 */
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	/* snap new_mtu to the smallest supported size that fits it */
	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		/* tell peers an MTU change is in progress */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		/* allow in-flight traffic to drain */
		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		/* rebuild TX buffers for the new MTU and signal done */
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}
841
/* fjes_tx_retry - ndo_tx_timeout handler: this is a single-queue
 * device, so simply restart queue 0.
 */
static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
	netif_tx_wake_queue(netdev_get_tx_queue(netdev, 0));
}
848
/* fjes_vlan_rx_add_vid - register @vid in the TX filter of every peer
 * EP that does not already carry it.  Returns -ENOSPC if any filter
 * table is full, 0 otherwise.
 */
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool ok = true;
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		if (fjes_hw_check_vlan_id(
			&adapter->hw.ep_shm_info[epid].tx, vid))
			continue;

		ok = fjes_hw_set_vlan_id(
			&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return ok ? 0 : -ENOSPC;
}
868
/* fjes_vlan_rx_kill_vid - remove @vid from the TX filter of every
 * peer EP.  Always succeeds.
 */
static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;
		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return 0;
}
884
/* net_device operations implemented by this driver */
static const struct net_device_ops fjes_netdev_ops = {
	.ndo_open		= fjes_open,
	.ndo_stop		= fjes_close,
	.ndo_start_xmit		= fjes_xmit_frame,
	.ndo_get_stats64	= fjes_get_stats64,
	.ndo_change_mtu		= fjes_change_mtu,
	.ndo_tx_timeout		= fjes_tx_retry,
	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fjes_vlan_rx_kill_vid,
};
895
896 /* fjes_netdev_setup - netdevice initialization routine */
/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
	netdev->netdev_ops = &fjes_netdev_ops;
	fjes_set_ethtool_ops(netdev);
	/* default to the largest supported MTU */
	netdev->mtu = fjes_support_mtu[3];
	netdev->min_mtu = fjes_support_mtu[0];
	netdev->max_mtu = fjes_support_mtu[3];
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
909
fjes_rxframe_search_exist(struct fjes_adapter * adapter,int start_epid)910 static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
911 int start_epid)
912 {
913 struct fjes_hw *hw = &adapter->hw;
914 enum ep_partner_status pstatus;
915 int max_epid, cur_epid;
916 int i;
917
918 max_epid = hw->max_epid;
919 start_epid = (start_epid + 1 + max_epid) % max_epid;
920
921 for (i = 0; i < max_epid; i++) {
922 cur_epid = (start_epid + i) % max_epid;
923 if (cur_epid == hw->my_epid)
924 continue;
925
926 pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
927 if (pstatus == EP_PARTNER_SHARED) {
928 if (!fjes_hw_epbuf_rx_is_empty(
929 &hw->ep_shm_info[cur_epid].rx))
930 return cur_epid;
931 }
932 }
933 return -1;
934 }
935
/* fjes_rxframe_get - fetch the next received frame, round-robin across
 * peer EPs.  Updates *cur_epid to the source EP and *psize to the
 * frame length; returns NULL when nothing is pending.
 */
static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
			      int *cur_epid)
{
	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
	if (*cur_epid < 0)
		return NULL;

	return fjes_hw_epbuf_rx_curpkt_get_addr(
			&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
}
951
/* fjes_rxframe_release - consume (drop) the current packet of the RX
 * epbuf for @cur_epid, advancing the ring.
 */
static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
956
/* fjes_poll - NAPI poll handler
 *
 * Advertises FJES_RX_POLL_WORK to all shared peers, then drains up to
 * @budget frames round-robin from the peers' RX epbufs into the stack.
 * When under budget: completes NAPI, and either reschedules (if frames
 * arrived within the last few jiffies) or clears FJES_RX_POLL_WORK and
 * unmasks the RX interrupt.  Returns the number of frames processed.
 */
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	/* tell shared peers we are actively polling */
	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				/* allocation failure: account and drop */
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				skb_put_data(skb, frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		/* keep polling briefly after the last frame to cut
		 * interrupt churn on bursty traffic
		 */
		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}
1055
/* fjes_sw_init - software-side init: attach the NAPI poll handler.
 * Always returns 0.
 */
static int fjes_sw_init(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_napi_add(netdev, &adapter->napi, fjes_poll);

	return 0;
}
1064
fjes_force_close_task(struct work_struct * work)1065 static void fjes_force_close_task(struct work_struct *work)
1066 {
1067 struct fjes_adapter *adapter = container_of(work,
1068 struct fjes_adapter, force_close_task);
1069 struct net_device *netdev = adapter->netdev;
1070
1071 rtnl_lock();
1072 dev_close(netdev);
1073 rtnl_unlock();
1074 }
1075
/* fjes_tx_stall_task - deferred recovery after netif_tx_stop_queue()
 *
 * If the stall has lasted longer than FJES_TX_TX_STALL_TIMEOUT, wakes
 * the queue unconditionally.  Otherwise polls (up to 5 times) whether
 * every shared peer's TX ring has room; wakes the queue when all do,
 * bails out early if any peer is mid-MTU-change, and requeues itself
 * if rings are still full.
 */
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	/* stalled too long: give up waiting and restart the queue */
	if (((long)jiffies -
	     dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			/* peer is changing MTU: do not wake yet */
			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	/* rings still full: try again later */
	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
1131
/* fjes_raise_intr_rxdata_task - deferred RX-data interrupt delivery
 *
 * First pass: snapshot each shared peer's tx_status and clear pending
 * flags.  Second pass: raise an RX-data interrupt toward every peer
 * that had a send pending and is not already polling
 * (FJES_RX_POLL_WORK clear).  The trailing sleep rate-limits how often
 * interrupts are raised.
 */
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	/* snapshot and clear each shared peer's pending-send flag */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
			    FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	/* notify peers that are not already polling for RX */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
		}
	}

	usleep_range(500, 1000);
}
1181
fjes_watch_unshare_task(struct work_struct * work)1182 static void fjes_watch_unshare_task(struct work_struct *work)
1183 {
1184 struct fjes_adapter *adapter =
1185 container_of(work, struct fjes_adapter, unshare_watch_task);
1186
1187 struct net_device *netdev = adapter->netdev;
1188 struct fjes_hw *hw = &adapter->hw;
1189
1190 int unshare_watch, unshare_reserve;
1191 int max_epid, my_epid, epidx;
1192 int stop_req, stop_req_done;
1193 ulong unshare_watch_bitmask;
1194 unsigned long flags;
1195 int wait_time = 0;
1196 int is_shared;
1197 int ret;
1198
1199 my_epid = hw->my_epid;
1200 max_epid = hw->max_epid;
1201
1202 unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1203 adapter->unshare_watch_bitmask = 0;
1204
1205 while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1206 (wait_time < 3000)) {
1207 for (epidx = 0; epidx < max_epid; epidx++) {
1208 if (epidx == my_epid)
1209 continue;
1210
1211 is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1212 epidx);
1213
1214 stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1215
1216 stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1217 FJES_RX_STOP_REQ_DONE;
1218
1219 unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1220
1221 unshare_reserve = test_bit(epidx,
1222 &hw->hw_info.buffer_unshare_reserve_bit);
1223
1224 if ((!stop_req ||
1225 (is_shared && (!is_shared || !stop_req_done))) &&
1226 (is_shared || !unshare_watch || !unshare_reserve))
1227 continue;
1228
1229 mutex_lock(&hw->hw_info.lock);
1230 ret = fjes_hw_unregister_buff_addr(hw, epidx);
1231 switch (ret) {
1232 case 0:
1233 break;
1234 case -ENOMSG:
1235 case -EBUSY:
1236 default:
1237 if (!work_pending(
1238 &adapter->force_close_task)) {
1239 adapter->force_reset = true;
1240 schedule_work(
1241 &adapter->force_close_task);
1242 }
1243 break;
1244 }
1245 mutex_unlock(&hw->hw_info.lock);
1246 hw->ep_shm_info[epidx].ep_stats
1247 .com_unregist_buf_exec += 1;
1248
1249 spin_lock_irqsave(&hw->rx_status_lock, flags);
1250 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1251 netdev->dev_addr, netdev->mtu);
1252 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1253
1254 clear_bit(epidx, &hw->txrx_stop_req_bit);
1255 clear_bit(epidx, &unshare_watch_bitmask);
1256 clear_bit(epidx,
1257 &hw->hw_info.buffer_unshare_reserve_bit);
1258 }
1259
1260 msleep(100);
1261 wait_time += 100;
1262 }
1263
1264 if (hw->hw_info.buffer_unshare_reserve_bit) {
1265 for (epidx = 0; epidx < max_epid; epidx++) {
1266 if (epidx == my_epid)
1267 continue;
1268
1269 if (test_bit(epidx,
1270 &hw->hw_info.buffer_unshare_reserve_bit)) {
1271 mutex_lock(&hw->hw_info.lock);
1272
1273 ret = fjes_hw_unregister_buff_addr(hw, epidx);
1274 switch (ret) {
1275 case 0:
1276 break;
1277 case -ENOMSG:
1278 case -EBUSY:
1279 default:
1280 if (!work_pending(
1281 &adapter->force_close_task)) {
1282 adapter->force_reset = true;
1283 schedule_work(
1284 &adapter->force_close_task);
1285 }
1286 break;
1287 }
1288 mutex_unlock(&hw->hw_info.lock);
1289
1290 hw->ep_shm_info[epidx].ep_stats
1291 .com_unregist_buf_exec += 1;
1292
1293 spin_lock_irqsave(&hw->rx_status_lock, flags);
1294 fjes_hw_setup_epbuf(
1295 &hw->ep_shm_info[epidx].tx,
1296 netdev->dev_addr, netdev->mtu);
1297 spin_unlock_irqrestore(&hw->rx_status_lock,
1298 flags);
1299
1300 clear_bit(epidx, &hw->txrx_stop_req_bit);
1301 clear_bit(epidx, &unshare_watch_bitmask);
1302 clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1303 }
1304
1305 if (test_bit(epidx, &unshare_watch_bitmask)) {
1306 spin_lock_irqsave(&hw->rx_status_lock, flags);
1307 hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1308 ~FJES_RX_STOP_REQ_DONE;
1309 spin_unlock_irqrestore(&hw->rx_status_lock,
1310 flags);
1311 }
1312 }
1313 }
1314 }
1315
fjes_irq_watch_task(struct work_struct * work)1316 static void fjes_irq_watch_task(struct work_struct *work)
1317 {
1318 struct fjes_adapter *adapter = container_of(to_delayed_work(work),
1319 struct fjes_adapter, interrupt_watch_task);
1320
1321 local_irq_disable();
1322 fjes_intr(adapter->hw.hw_res.irq, adapter);
1323 local_irq_enable();
1324
1325 if (fjes_rxframe_search_exist(adapter, 0) >= 0)
1326 napi_schedule(&adapter->napi);
1327
1328 if (adapter->interrupt_watch_enable) {
1329 if (!delayed_work_pending(&adapter->interrupt_watch_task))
1330 queue_delayed_work(adapter->control_wq,
1331 &adapter->interrupt_watch_task,
1332 FJES_IRQ_WATCH_DELAY);
1333 }
1334 }
1335
/* fjes_probe - Device Initialization Routine
 *
 * Allocates the net_device and adapter, creates the TX/RX and control
 * work queues, reads the platform MEM/IRQ resources, initializes the
 * hardware and registers the netdev.  Failure paths unwind in reverse
 * order via the goto labels at the bottom.
 */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	u8 addr[ETH_ALEN];
	int err;

	err = -ENOMEM;
	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);

	if (!netdev)
		goto err_out;

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;
	hw->back = adapter;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	/* WQ_MEM_RECLAIM: this queue services the packet TX path. */
	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->txrx_wq)) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
					      WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->control_wq)) {
		err = -ENOMEM;
		goto err_free_txrx_wq;
	}

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -EINVAL;
		goto err_free_control_wq;
	}
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	if (hw->hw_res.irq < 0) {
		err = hw->hw_res.irq;
		goto err_free_control_wq;
	}

	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_control_wq;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	addr[0] = 2;
	addr[1] = 0;
	addr[2] = 0;
	addr[3] = 0;
	addr[4] = 0;
	addr[5] = hw->my_epid; /* EPID */
	eth_hw_addr_set(netdev, addr);

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	/* Link state is driven by partner status, start down. */
	netif_carrier_off(netdev);

	fjes_dbg_adapter_init(adapter);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_control_wq:
	destroy_workqueue(adapter->control_wq);
err_free_txrx_wq:
	destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
	free_netdev(netdev);
err_out:
	return err;
}
1441
/* fjes_remove - Device Removal Routine
 *
 * Reverses fjes_probe(): cancels all deferred work, destroys the work
 * queues, unregisters the netdev and shuts down the hardware.
 */
static int fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	/* Quiesce every worker before destroying its work queue. */
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);

	return 0;
}
1470
/* Platform driver glue; the backing platform device is created after the
 * extended-socket device is located via the ACPI namespace walk below.
 */
static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = fjes_probe,
	.remove = fjes_remove,
};
1478
1479 static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle,u32 level,void * context,void ** return_value)1480 acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
1481 void *context, void **return_value)
1482 {
1483 struct acpi_device *device;
1484 bool *found = context;
1485
1486 device = acpi_fetch_acpi_dev(obj_handle);
1487 if (!device)
1488 return AE_OK;
1489
1490 if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
1491 return AE_OK;
1492
1493 if (!is_extended_socket_device(device))
1494 return AE_OK;
1495
1496 if (acpi_check_extended_socket_status(device))
1497 return AE_OK;
1498
1499 *found = true;
1500 return AE_CTRL_TERMINATE;
1501 }
1502
1503 /* fjes_init_module - Driver Registration Routine */
fjes_init_module(void)1504 static int __init fjes_init_module(void)
1505 {
1506 bool found = false;
1507 int result;
1508
1509 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
1510 acpi_find_extended_socket_device, NULL, &found,
1511 NULL);
1512
1513 if (!found)
1514 return -ENODEV;
1515
1516 pr_info("%s - version %s - %s\n",
1517 fjes_driver_string, fjes_driver_version, fjes_copyright);
1518
1519 fjes_dbg_init();
1520
1521 result = platform_driver_register(&fjes_driver);
1522 if (result < 0) {
1523 fjes_dbg_exit();
1524 return result;
1525 }
1526
1527 result = acpi_bus_register_driver(&fjes_acpi_driver);
1528 if (result < 0)
1529 goto fail_acpi_driver;
1530
1531 return 0;
1532
1533 fail_acpi_driver:
1534 platform_driver_unregister(&fjes_driver);
1535 fjes_dbg_exit();
1536 return result;
1537 }
1538
1539 module_init(fjes_init_module);
1540
/* fjes_exit_module - Driver Exit Cleanup Routine
 *
 * Unregisters in the reverse order of fjes_init_module().
 */
static void __exit fjes_exit_module(void)
{
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
}
1548
1549 module_exit(fjes_exit_module);
1550