1 /*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18 #include <linux/errno.h>
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/skbuff.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/if_ether.h>
25 #include <linux/if_vlan.h>
26 #include <linux/workqueue.h>
27 #include <scsi/fc/fc_fip.h>
28 #include <scsi/fc/fc_els.h>
29 #include <scsi/fc/fc_fcoe.h>
30 #include <scsi/fc_frame.h>
31 #include <scsi/libfc.h>
32 #include "fnic_io.h"
33 #include "fnic.h"
34 #include "fnic_fip.h"
35 #include "cq_enet_desc.h"
36 #include "cq_exch_desc.h"
37
38 static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
39 struct workqueue_struct *fnic_fip_queue;
40 struct workqueue_struct *fnic_event_queue;
41
42 static void fnic_set_eth_mode(struct fnic *);
43 static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
44 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
45 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
46 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
47 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
48
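/*
 * fnic_handle_link() - link event work function.
 *
 * Reads the current link state, link-down count and port speed from the
 * vNIC, updates the fc_host speed attributes, and then acts on the
 * transition from the previously recorded state:
 *
 *   DOWN -> DOWN        traced only
 *   UP   -> UP          traced only
 *   UP -> DOWN -> UP    counted as a link failure, link bounced
 *   DOWN -> UP          link brought up
 *   UP   -> DOWN        link brought down, FIP timer stopped
 *
 * When the adapter is FIP capable (VFCF_FIP_CAPABLE), a link-up transition
 * starts FCoE VLAN discovery instead of calling fcoe_ctlr_link_up()
 * directly.
 */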
49 void fnic_handle_link(struct work_struct *work)
50 {
51 struct fnic *fnic = container_of(work, struct fnic, link_work);
52 unsigned long flags;
53 int old_link_status;
54 u32 old_link_down_cnt;
55 u64 old_port_speed, new_port_speed;
56
57 spin_lock_irqsave(&fnic->fnic_lock, flags);
58
59 fnic->link_events = 1; /* less work to just set every time */
60
61 if (fnic->stop_rx_link_events) {
62 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
63 return;
64 }
65
66 old_link_down_cnt = fnic->link_down_cnt;
67 old_link_status = fnic->link_status;
68 old_port_speed = atomic64_read(
69 &fnic->fnic_stats.misc_stats.current_port_speed);
70
71 fnic->link_status = vnic_dev_link_status(fnic->vdev);
72 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
73
74 new_port_speed = vnic_dev_port_speed(fnic->vdev);
75 atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
76 new_port_speed);
77 if (old_port_speed != new_port_speed)
78 FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
79 "Current vnic speed set to : %llu\n",
80 new_port_speed);
81
82 switch (vnic_dev_port_speed(fnic->vdev)) {
83 case DCEM_PORTSPEED_10G:
84 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
85 fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
86 break;
87 case DCEM_PORTSPEED_20G:
88 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
89 fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
90 break;
91 case DCEM_PORTSPEED_25G:
92 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
93 fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
94 break;
95 case DCEM_PORTSPEED_40G:
96 case DCEM_PORTSPEED_4x10G:
97 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
98 fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
99 break;
100 case DCEM_PORTSPEED_100G:
101 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
102 fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
103 break;
104 default:
105 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
106 fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
107 break;
108 }
109
110 if (old_link_status == fnic->link_status) {
111 if (!fnic->link_status) {
112 /* DOWN -> DOWN */
113 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
114 fnic_fc_trace_set_data(fnic->lport->host->host_no,
115 FNIC_FC_LE, "Link Status: DOWN->DOWN",
116 strlen("Link Status: DOWN->DOWN"));
117 } else {
118 if (old_link_down_cnt != fnic->link_down_cnt) {
119 /* UP -> DOWN -> UP */
120 fnic->lport->host_stats.link_failure_count++;
121 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
122 fnic_fc_trace_set_data(
123 fnic->lport->host->host_no,
124 FNIC_FC_LE,
125 "Link Status:UP_DOWN_UP",
126 strlen("Link_Status:UP_DOWN_UP")
127 );
128 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
129 "link down\n");
130 fcoe_ctlr_link_down(&fnic->ctlr);
131 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
132 /* start FCoE VLAN discovery */
133 fnic_fc_trace_set_data(
134 fnic->lport->host->host_no,
135 FNIC_FC_LE,
136 "Link Status: UP_DOWN_UP_VLAN",
137 strlen(
138 "Link Status: UP_DOWN_UP_VLAN")
139 );
140 fnic_fcoe_send_vlan_req(fnic);
141 return;
142 }
143 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
144 "link up\n");
145 fcoe_ctlr_link_up(&fnic->ctlr);
146 } else {
147 /* UP -> UP */
148 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
149 fnic_fc_trace_set_data(
150 fnic->lport->host->host_no, FNIC_FC_LE,
151 "Link Status: UP_UP",
152 strlen("Link Status: UP_UP"));
153 }
154 }
155 } else if (fnic->link_status) {
156 /* DOWN -> UP */
157 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
158 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
159 /* start FCoE VLAN discovery */
160 fnic_fc_trace_set_data(
161 fnic->lport->host->host_no,
162 FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
163 strlen("Link Status: DOWN_UP_VLAN"));
164 fnic_fcoe_send_vlan_req(fnic);
165 return;
166 }
167 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
168 fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
169 "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
170 fcoe_ctlr_link_up(&fnic->ctlr);
171 } else {
172 /* UP -> DOWN */
173 fnic->lport->host_stats.link_failure_count++;
174 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
175 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
176 fnic_fc_trace_set_data(
177 fnic->lport->host->host_no, FNIC_FC_LE,
178 "Link Status: UP_DOWN",
179 strlen("Link Status: UP_DOWN"));
180 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
181 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
182 "deleting fip-timer during link-down\n");
183 del_timer_sync(&fnic->fip_timer);
184 }
185 fcoe_ctlr_link_down(&fnic->ctlr);
186 }
187
188 }
189
190 /*
191 * This function passes incoming fabric frames to libFC
192 */
193 void fnic_handle_frame(struct work_struct *work)
194 {
195 struct fnic *fnic = container_of(work, struct fnic, frame_work);
196 struct fc_lport *lp = fnic->lport;
197 unsigned long flags;
198 struct sk_buff *skb;
199 struct fc_frame *fp;
200
201 while ((skb = skb_dequeue(&fnic->frame_queue))) {
202
203 spin_lock_irqsave(&fnic->fnic_lock, flags);
204 if (fnic->stop_rx_link_events) {
205 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
206 dev_kfree_skb(skb);
207 return;
208 }
209 fp = (struct fc_frame *)skb;
210
211 /*
212 * If we're in a transitional state, just re-queue and return.
213 * The queue will be serviced when we get to a stable state.
214 */
215 if (fnic->state != FNIC_IN_FC_MODE &&
216 fnic->state != FNIC_IN_ETH_MODE) {
217 skb_queue_head(&fnic->frame_queue, skb);
218 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
219 return;
220 }
221 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
222
223 fc_exch_recv(lp, fp);
224 }
225 }
226
227 void fnic_fcoe_evlist_free(struct fnic *fnic)
228 {
229 struct fnic_event *fevt = NULL;
230 struct fnic_event *next = NULL;
231 unsigned long flags;
232
233 spin_lock_irqsave(&fnic->fnic_lock, flags);
234 if (list_empty(&fnic->evlist)) {
235 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
236 return;
237 }
238
239 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
240 list_del(&fevt->list);
241 kfree(fevt);
242 }
243 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
244 }
245
246 void fnic_handle_event(struct work_struct *work)
247 {
248 struct fnic *fnic = container_of(work, struct fnic, event_work);
249 struct fnic_event *fevt = NULL;
250 struct fnic_event *next = NULL;
251 unsigned long flags;
252
253 spin_lock_irqsave(&fnic->fnic_lock, flags);
254 if (list_empty(&fnic->evlist)) {
255 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
256 return;
257 }
258
259 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
260 if (fnic->stop_rx_link_events) {
261 list_del(&fevt->list);
262 kfree(fevt);
263 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
264 return;
265 }
266 /*
267 * If we're in a transitional state, just re-queue and return.
268 * The queue will be serviced when we get to a stable state.
269 */
270 if (fnic->state != FNIC_IN_FC_MODE &&
271 fnic->state != FNIC_IN_ETH_MODE) {
272 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
273 return;
274 }
275
276 list_del(&fevt->list);
277 switch (fevt->event) {
278 case FNIC_EVT_START_VLAN_DISC:
279 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
280 fnic_fcoe_send_vlan_req(fnic);
281 spin_lock_irqsave(&fnic->fnic_lock, flags);
282 break;
283 case FNIC_EVT_START_FCF_DISC:
284 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
285 "Start FCF Discovery\n");
286 fnic_fcoe_start_fcf_disc(fnic);
287 break;
288 default:
289 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
290 "Unknown event 0x%x\n", fevt->event);
291 break;
292 }
293 kfree(fevt);
294 }
295 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
296 }
297
298 /**
299 * is_fnic_fip_flogi_reject() - Check if the received FIP FLOGI frame is rejected
300 * @fip: The FCoE controller that received the frame
301 * @skb: The received FIP frame
302 *
303 * Returns non-zero if the frame is rejected with an unsupported command
304 * and an insufficient resources ELS explanation.
305 */
306 static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
307 struct sk_buff *skb)
308 {
309 struct fc_lport *lport = fip->lp;
310 struct fip_header *fiph;
311 struct fc_frame_header *fh = NULL;
312 struct fip_desc *desc;
313 struct fip_encaps *els;
314 u16 op;
315 u8 els_op;
316 u8 sub;
317
318 size_t rlen;
319 size_t dlen = 0;
320
321 if (skb_linearize(skb))
322 return 0;
323
324 if (skb->len < sizeof(*fiph))
325 return 0;
326
327 fiph = (struct fip_header *)skb->data;
328 op = ntohs(fiph->fip_op);
329 sub = fiph->fip_subcode;
330
331 if (op != FIP_OP_LS)
332 return 0;
333
334 if (sub != FIP_SC_REP)
335 return 0;
336
337 rlen = ntohs(fiph->fip_dl_len) * 4;
338 if (rlen + sizeof(*fiph) > skb->len)
339 return 0;
340
341 desc = (struct fip_desc *)(fiph + 1);
342 dlen = desc->fip_dlen * FIP_BPW;
343
344 if (desc->fip_dtype == FIP_DT_FLOGI) {
345
346 if (dlen < sizeof(*els) + sizeof(*fh) + 1)
347 return 0;
348
349 els = (struct fip_encaps *)desc;
350 fh = (struct fc_frame_header *)(els + 1);
351
352 if (!fh)
353 return 0;
354
355 /*
356 * The ELS command code, reason and explanation should be: reject,
357 * unsupported command and insufficient resources.
358 */
359 els_op = *(u8 *)(fh + 1);
360 if (els_op == ELS_LS_RJT) {
361 shost_printk(KERN_INFO, lport->host,
362 "Flogi Request Rejected by Switch\n");
363 return 1;
364 }
365 shost_printk(KERN_INFO, lport->host,
366 "Flogi Request Accepted by Switch\n");
367 }
368 return 0;
369 }
370
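/*
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 *
 * Builds a FIP VLAN request (FIP_OP_VLAN / FIP_SC_VL_REQ) addressed to the
 * ALL-FCF-MACs group address, carrying MAC-address and node-name
 * descriptors, hands it to the FIP controller's send routine, and arms
 * fip_timer so the request is retried if no response arrives within
 * FCOE_CTLR_FIPVLAN_TOV.
 */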
371 static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
372 {
373 struct fcoe_ctlr *fip = &fnic->ctlr;
374 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
375 struct sk_buff *skb;
376 char *eth_fr;
377 struct fip_vlan *vlan;
378 u64 vlan_tov;
379
380 fnic_fcoe_reset_vlans(fnic);
381 fnic->set_vlan(fnic, 0);
382
383 if (printk_ratelimit())
384 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
385 "Sending VLAN request...\n");
386
387 skb = dev_alloc_skb(sizeof(struct fip_vlan));
388 if (!skb)
389 return;
390
391 eth_fr = (char *)skb->data;
392 vlan = (struct fip_vlan *)eth_fr;
393
394 memset(vlan, 0, sizeof(*vlan));
395 memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
396 memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
397 vlan->eth.h_proto = htons(ETH_P_FIP);
398
399 vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
400 vlan->fip.fip_op = htons(FIP_OP_VLAN);
401 vlan->fip.fip_subcode = FIP_SC_VL_REQ;
402 vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
403
404 vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
405 vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
406 memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
407
408 vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
409 vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
410 put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
411 atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
412
413 skb_put(skb, sizeof(*vlan));
414 skb->protocol = htons(ETH_P_FIP);
415 skb_reset_mac_header(skb);
416 skb_reset_network_header(skb);
417 fip->send(fip, skb);
418
419 /* set a timer so that we can retry if there is no response */
420 vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
421 mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
422 }
423
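/*
 * fnic_fcoe_process_vlan_resp() - parse a FIP VLAN notification.
 *
 * Walks the FIP descriptor list, queues every FIP_DT_VLAN descriptor on
 * fnic->vlans, programs the first discovered VLAN into the hardware and
 * marks it FIP_VLAN_SENT, then starts FCF solicitation via
 * fcoe_ctlr_link_up().  If the response carries no VLAN descriptors (or
 * allocation fails), the fip_timer retry path takes over.
 */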
424 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
425 {
426 struct fcoe_ctlr *fip = &fnic->ctlr;
427 struct fip_header *fiph;
428 struct fip_desc *desc;
429 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
430 u16 vid;
431 size_t rlen;
432 size_t dlen;
433 struct fcoe_vlan *vlan;
434 u64 sol_time;
435 unsigned long flags;
436
437 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
438 "Received VLAN response...\n");
439
440 fiph = (struct fip_header *) skb->data;
441
442 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
443 "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
444 ntohs(fiph->fip_op), fiph->fip_subcode);
445
446 rlen = ntohs(fiph->fip_dl_len) * 4;
447 fnic_fcoe_reset_vlans(fnic);
448 spin_lock_irqsave(&fnic->vlans_lock, flags);
449 desc = (struct fip_desc *)(fiph + 1);
450 while (rlen > 0) {
451 dlen = desc->fip_dlen * FIP_BPW;
452 switch (desc->fip_dtype) {
453 case FIP_DT_VLAN:
454 vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
455 shost_printk(KERN_INFO, fnic->lport->host,
456 "process_vlan_resp: FIP VLAN %d\n", vid);
457 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
458 if (!vlan) {
459 /* retry from timer */
460 spin_unlock_irqrestore(&fnic->vlans_lock,
461 flags);
462 goto out;
463 }
464 vlan->vid = vid & 0x0fff;
465 vlan->state = FIP_VLAN_AVAIL;
466 list_add_tail(&vlan->list, &fnic->vlans);
467 break;
468 }
469 desc = (struct fip_desc *)((char *)desc + dlen);
470 rlen -= dlen;
471 }
472
473 /* any VLAN descriptors present? */
474 if (list_empty(&fnic->vlans)) {
475 /* retry from timer */
476 atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
477 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
478 "No VLAN descriptors in FIP VLAN response\n");
479 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
480 goto out;
481 }
482
483 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
484 fnic->set_vlan(fnic, vlan->vid);
485 vlan->state = FIP_VLAN_SENT; /* sent now */
486 vlan->sol_count++;
487 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
488
489 /* start the solicitation */
490 fcoe_ctlr_link_up(fip);
491
492 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
493 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
494 out:
495 return;
496 }
497
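/*
 * fnic_fcoe_start_fcf_disc() - (re)start FCF discovery on the current VLAN.
 *
 * Re-programs the first VLAN on the list, resets its solicitation count,
 * kicks fcoe_ctlr_link_up() and re-arms fip_timer.
 */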
498 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
499 {
500 unsigned long flags;
501 struct fcoe_vlan *vlan;
502 u64 sol_time;
503
504 spin_lock_irqsave(&fnic->vlans_lock, flags);
505 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
506 fnic->set_vlan(fnic, vlan->vid);
507 vlan->state = FIP_VLAN_SENT; /* sent now */
508 vlan->sol_count = 1;
509 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
510
511 /* start the solicitation */
512 fcoe_ctlr_link_up(&fnic->ctlr);
513
514 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
515 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
516 }
517
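/*
 * fnic_fcoe_vlan_check() - check that an FCF advertisement arrived on a
 * VLAN we solicited.
 *
 * Returns 0 if the head of the VLAN list is already in use, or was the one
 * we solicited on (in which case it is promoted from FIP_VLAN_SENT to
 * FIP_VLAN_USED); returns -EINVAL otherwise so that the caller drops the
 * frame.
 */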
518 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
519 {
520 unsigned long flags;
521 struct fcoe_vlan *fvlan;
522
523 spin_lock_irqsave(&fnic->vlans_lock, flags);
524 if (list_empty(&fnic->vlans)) {
525 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
526 return -EINVAL;
527 }
528
529 fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
530 if (fvlan->state == FIP_VLAN_USED) {
531 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
532 return 0;
533 }
534
535 if (fvlan->state == FIP_VLAN_SENT) {
536 fvlan->state = FIP_VLAN_USED;
537 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
538 return 0;
539 }
540 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
541 return -EINVAL;
542 }
543
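/*
 * fnic_event_enq() - queue an fnic event and schedule the event worker.
 *
 * Used, for example, to restart VLAN discovery from atomic context:
 *
 *	fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
 *
 * The queued event is processed later by fnic_handle_event().
 */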
544 static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
545 {
546 struct fnic_event *fevt;
547 unsigned long flags;
548
549 fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
550 if (!fevt)
551 return;
552
553 fevt->fnic = fnic;
554 fevt->event = ev;
555
556 spin_lock_irqsave(&fnic->fnic_lock, flags);
557 list_add_tail(&fevt->list, &fnic->evlist);
558 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
559
560 schedule_work(&fnic->event_work);
561 }
562
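/*
 * fnic_fcoe_handle_fip_frame() - pre-process a received FIP frame.
 *
 * Return values (as interpreted by fnic_handle_fip_frame()):
 *	 1  - pass the frame on to libfcoe as well
 *	 0  - frame was consumed here (VLAN notification)
 *	-1  - invalid skb
 */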
563 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
564 {
565 struct fip_header *fiph;
566 int ret = 1;
567 u16 op;
568 u8 sub;
569
570 if (!skb || !(skb->data))
571 return -1;
572
573 if (skb_linearize(skb))
574 goto drop;
575
576 fiph = (struct fip_header *)skb->data;
577 op = ntohs(fiph->fip_op);
578 sub = fiph->fip_subcode;
579
580 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
581 goto drop;
582
583 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
584 goto drop;
585
586 if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
587 if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
588 goto drop;
589 /* pass it on to fcoe */
590 ret = 1;
591 } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
592 /* set the vlan as used */
593 fnic_fcoe_process_vlan_resp(fnic, skb);
594 ret = 0;
595 } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
596 /* received CVL request, restart vlan disc */
597 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
598 /* pass it on to fcoe */
599 ret = 1;
600 }
601 drop:
602 return ret;
603 }
604
605 void fnic_handle_fip_frame(struct work_struct *work)
606 {
607 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
608 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
609 unsigned long flags;
610 struct sk_buff *skb;
611 struct ethhdr *eh;
612
613 while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
614 spin_lock_irqsave(&fnic->fnic_lock, flags);
615 if (fnic->stop_rx_link_events) {
616 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
617 dev_kfree_skb(skb);
618 return;
619 }
620 /*
621 * If we're in a transitional state, just re-queue and return.
622 * The queue will be serviced when we get to a stable state.
623 */
624 if (fnic->state != FNIC_IN_FC_MODE &&
625 fnic->state != FNIC_IN_ETH_MODE) {
626 skb_queue_head(&fnic->fip_frame_queue, skb);
627 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
628 return;
629 }
630 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
631 eh = (struct ethhdr *)skb->data;
632 if (eh->h_proto == htons(ETH_P_FIP)) {
633 skb_pull(skb, sizeof(*eh));
634 if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
635 dev_kfree_skb(skb);
636 continue;
637 }
638 /*
639 * If there are FLOGI rejects, clear all
640 * FCFs and restart from scratch
641 */
642 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
643 atomic64_inc(
644 &fnic_stats->vlan_stats.flogi_rejects);
645 shost_printk(KERN_INFO, fnic->lport->host,
646 "Trigger a Link down - VLAN Disc\n");
647 fcoe_ctlr_link_down(&fnic->ctlr);
648 /* start FCoE VLAN discovery */
649 fnic_fcoe_send_vlan_req(fnic);
650 dev_kfree_skb(skb);
651 continue;
652 }
653 fcoe_ctlr_recv(&fnic->ctlr, skb);
654 continue;
655 }
656 }
657 }
658
659 /**
660 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
661 * @fnic: fnic instance.
662 * @skb: Ethernet Frame.
663 */
664 static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
665 {
666 struct fc_frame *fp;
667 struct ethhdr *eh;
668 struct fcoe_hdr *fcoe_hdr;
669 struct fcoe_crc_eof *ft;
670
671 /*
672 * Undo VLAN encapsulation if present.
673 */
674 eh = (struct ethhdr *)skb->data;
675 if (eh->h_proto == htons(ETH_P_8021Q)) {
676 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
677 eh = skb_pull(skb, VLAN_HLEN);
678 skb_reset_mac_header(skb);
679 }
680 if (eh->h_proto == htons(ETH_P_FIP)) {
681 if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
682 printk(KERN_ERR "Dropped FIP frame, as firmware "
683 "uses non-FIP mode. Enable FIP "
684 "using UCSM\n");
685 goto drop;
686 }
687 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
688 FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
689 printk(KERN_ERR "fnic ctlr frame trace error!!!");
690 }
691 skb_queue_tail(&fnic->fip_frame_queue, skb);
692 queue_work(fnic_fip_queue, &fnic->fip_frame_work);
693 return 1; /* let caller know packet was used */
694 }
695 if (eh->h_proto != htons(ETH_P_FCOE))
696 goto drop;
697 skb_set_network_header(skb, sizeof(*eh));
698 skb_pull(skb, sizeof(*eh));
699
700 fcoe_hdr = (struct fcoe_hdr *)skb->data;
701 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
702 goto drop;
703
704 fp = (struct fc_frame *)skb;
705 fc_frame_init(fp);
706 fr_sof(fp) = fcoe_hdr->fcoe_sof;
707 skb_pull(skb, sizeof(struct fcoe_hdr));
708 skb_reset_transport_header(skb);
709
710 ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
711 fr_eof(fp) = ft->fcoe_eof;
712 skb_trim(skb, skb->len - sizeof(*ft));
713 return 0;
714 drop:
715 dev_kfree_skb_irq(skb);
716 return -1;
717 }
718
719 /**
720 * fnic_update_mac_locked() - set data MAC address and filters.
721 * @fnic: fnic instance.
722 * @new: newly-assigned FCoE MAC address.
723 *
724 * Called with the fnic lock held.
725 */
726 void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
727 {
728 u8 *ctl = fnic->ctlr.ctl_src_addr;
729 u8 *data = fnic->data_src_addr;
730
731 if (is_zero_ether_addr(new))
732 new = ctl;
733 if (ether_addr_equal(data, new))
734 return;
735 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
736 if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
737 vnic_dev_del_addr(fnic->vdev, data);
738 memcpy(data, new, ETH_ALEN);
739 if (!ether_addr_equal(new, ctl))
740 vnic_dev_add_addr(fnic->vdev, new);
741 }
742
743 /**
744 * fnic_update_mac() - set data MAC address and filters.
745 * @lport: local port.
746 * @new: newly-assigned FCoE MAC address.
747 */
748 void fnic_update_mac(struct fc_lport *lport, u8 *new)
749 {
750 struct fnic *fnic = lport_priv(lport);
751
752 spin_lock_irq(&fnic->fnic_lock);
753 fnic_update_mac_locked(fnic, new);
754 spin_unlock_irq(&fnic->fnic_lock);
755 }
756
757 /**
758 * fnic_set_port_id() - set the port_ID after successful FLOGI.
759 * @lport: local port.
760 * @port_id: assigned FC_ID.
761 * @fp: received frame containing the FLOGI accept or NULL.
762 *
763 * This is called from libfc when a new FC_ID has been assigned.
764 * This causes us to reset the firmware to FC_MODE and set up the new MAC
765 * address and FC_ID.
766 *
767 * It is also called with FC_ID 0 when we're logged off.
768 *
769 * If the FC_ID is due to point-to-point, fp may be NULL.
770 */
771 void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
772 {
773 struct fnic *fnic = lport_priv(lport);
774 u8 *mac;
775 int ret;
776
777 FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
778 port_id, fp);
779
780 /*
781 * If we're clearing the FC_ID, change to use the ctl_src_addr.
782 * Set ethernet mode to send FLOGI.
783 */
784 if (!port_id) {
785 fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
786 fnic_set_eth_mode(fnic);
787 return;
788 }
789
790 if (fp) {
791 mac = fr_cb(fp)->granted_mac;
792 if (is_zero_ether_addr(mac)) {
793 /* non-FIP - FLOGI already accepted - ignore return */
794 fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
795 }
796 fnic_update_mac(lport, mac);
797 }
798
799 /* Change state to reflect transition to FC mode */
800 spin_lock_irq(&fnic->fnic_lock);
801 if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
802 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
803 else {
804 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
805 "Unexpected fnic state %s while"
806 " processing flogi resp\n",
807 fnic_state_to_str(fnic->state));
808 spin_unlock_irq(&fnic->fnic_lock);
809 return;
810 }
811 spin_unlock_irq(&fnic->fnic_lock);
812
813 /*
814 * Send FLOGI registration to firmware to set up FC mode.
815 * The new address will be set up when registration completes.
816 */
817 ret = fnic_flogi_reg_handler(fnic, port_id);
818
819 if (ret < 0) {
820 spin_lock_irq(&fnic->fnic_lock);
821 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
822 fnic->state = FNIC_IN_ETH_MODE;
823 spin_unlock_irq(&fnic->fnic_lock);
824 }
825 }
826
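/*
 * fnic_rq_cmpl_frame_recv() - per-descriptor receive completion handler.
 *
 * Unmaps the receive buffer and decodes the completion descriptor, which is
 * either CQ_DESC_TYPE_RQ_FCP (a firmware-decapsulated FC frame) or
 * CQ_DESC_TYPE_RQ_ENET (a raw Ethernet frame that may carry FCoE or FIP and
 * is parsed by fnic_import_rq_eth_pkt()).  Good frames are queued on
 * frame_queue and handed to the fnic_handle_frame() worker; frames with
 * CRC or encapsulation errors are counted and dropped.
 */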
827 static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
828 *cq_desc, struct vnic_rq_buf *buf,
829 int skipped __attribute__((unused)),
830 void *opaque)
831 {
832 struct fnic *fnic = vnic_dev_priv(rq->vdev);
833 struct sk_buff *skb;
834 struct fc_frame *fp;
835 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
836 u8 type, color, eop, sop, ingress_port, vlan_stripped;
837 u8 fcoe = 0, fcoe_sof, fcoe_eof;
838 u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
839 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
840 u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
841 u8 fcs_ok = 1, packet_error = 0;
842 u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
843 u32 rss_hash;
844 u16 exchange_id, tmpl;
845 u8 sof = 0;
846 u8 eof = 0;
847 u32 fcp_bytes_written = 0;
848 unsigned long flags;
849
850 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
851 DMA_FROM_DEVICE);
852 skb = buf->os_buf;
853 fp = (struct fc_frame *)skb;
854 buf->os_buf = NULL;
855
856 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
857 if (type == CQ_DESC_TYPE_RQ_FCP) {
858 cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
859 &type, &color, &q_number, &completed_index,
860 &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
861 &tmpl, &fcp_bytes_written, &sof, &eof,
862 &ingress_port, &packet_error,
863 &fcoe_enc_error, &fcs_ok, &vlan_stripped,
864 &vlan);
865 skb_trim(skb, fcp_bytes_written);
866 fr_sof(fp) = sof;
867 fr_eof(fp) = eof;
868
869 } else if (type == CQ_DESC_TYPE_RQ_ENET) {
870 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
871 &type, &color, &q_number, &completed_index,
872 &ingress_port, &fcoe, &eop, &sop,
873 &rss_type, &csum_not_calc, &rss_hash,
874 &bytes_written, &packet_error,
875 &vlan_stripped, &vlan, &checksum,
876 &fcoe_sof, &fcoe_fc_crc_ok,
877 &fcoe_enc_error, &fcoe_eof,
878 &tcp_udp_csum_ok, &udp, &tcp,
879 &ipv4_csum_ok, &ipv6, &ipv4,
880 &ipv4_fragment, &fcs_ok);
881 skb_trim(skb, bytes_written);
882 if (!fcs_ok) {
883 atomic64_inc(&fnic_stats->misc_stats.frame_errors);
884 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
885 "fcs error. dropping packet.\n");
886 goto drop;
887 }
888 if (fnic_import_rq_eth_pkt(fnic, skb))
889 return;
890
891 } else {
892 /* wrong CQ type */
893 shost_printk(KERN_ERR, fnic->lport->host,
894 "fnic rq_cmpl wrong cq type x%x\n", type);
895 goto drop;
896 }
897
898 if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
899 atomic64_inc(&fnic_stats->misc_stats.frame_errors);
900 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
901 "fnic rq_cmpl fcoe x%x fcsok x%x"
902 " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
903 " x%x\n",
904 fcoe, fcs_ok, packet_error,
905 fcoe_fc_crc_ok, fcoe_enc_error);
906 goto drop;
907 }
908
909 spin_lock_irqsave(&fnic->fnic_lock, flags);
910 if (fnic->stop_rx_link_events) {
911 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
912 goto drop;
913 }
914 fr_dev(fp) = fnic->lport;
915 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
916 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
917 (char *)skb->data, skb->len)) != 0) {
918 printk(KERN_ERR "fnic ctlr frame trace error!!!");
919 }
920
921 skb_queue_tail(&fnic->frame_queue, skb);
922 queue_work(fnic_event_queue, &fnic->frame_work);
923
924 return;
925 drop:
926 dev_kfree_skb_irq(skb);
927 }
928
929 static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
930 struct cq_desc *cq_desc, u8 type,
931 u16 q_number, u16 completed_index,
932 void *opaque)
933 {
934 struct fnic *fnic = vnic_dev_priv(vdev);
935
936 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
937 VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
938 NULL);
939 return 0;
940 }
941
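/*
 * fnic_rq_cmpl_handler() - service the receive completion queues.
 *
 * Polls each receive CQ for up to rq_work_to_do completions, letting
 * fnic_rq_cmpl_frame_recv() consume the buffers, and then refills the RQ
 * with fresh frames via fnic_alloc_rq_frame().  Returns the total number
 * of completions processed.
 */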
942 int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
943 {
944 unsigned int tot_rq_work_done = 0, cur_work_done;
945 unsigned int i;
946 int err;
947
948 for (i = 0; i < fnic->rq_count; i++) {
949 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
950 fnic_rq_cmpl_handler_cont,
951 NULL);
952 if (cur_work_done) {
953 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
954 if (err)
955 shost_printk(KERN_ERR, fnic->lport->host,
956 "fnic_alloc_rq_frame can't alloc"
957 " frame\n");
958 }
959 tot_rq_work_done += cur_work_done;
960 }
961
962 return tot_rq_work_done;
963 }
964
965 /*
966 * This function is called once at init time to allocate and fill RQ
967 * buffers. Subsequently, it is called in the interrupt context after RQ
968 * buffer processing to replenish the buffers in the RQ
969 */
970 int fnic_alloc_rq_frame(struct vnic_rq *rq)
971 {
972 struct fnic *fnic = vnic_dev_priv(rq->vdev);
973 struct sk_buff *skb;
974 u16 len;
975 dma_addr_t pa;
976 int r;
977
978 len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
979 skb = dev_alloc_skb(len);
980 if (!skb) {
981 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
982 "Unable to allocate RQ sk_buff\n");
983 return -ENOMEM;
984 }
985 skb_reset_mac_header(skb);
986 skb_reset_transport_header(skb);
987 skb_reset_network_header(skb);
988 skb_put(skb, len);
989 pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
990 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
991 r = -ENOMEM;
992 printk(KERN_ERR "PCI mapping failed with error %d\n", r);
993 goto free_skb;
994 }
995
996 fnic_queue_rq_desc(rq, skb, pa, len);
997 return 0;
998
999 free_skb:
1000 kfree_skb(skb);
1001 return r;
1002 }
1003
1004 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
1005 {
1006 struct fc_frame *fp = buf->os_buf;
1007 struct fnic *fnic = vnic_dev_priv(rq->vdev);
1008
1009 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
1010 DMA_FROM_DEVICE);
1011
1012 dev_kfree_skb(fp_skb(fp));
1013 buf->os_buf = NULL;
1014 }
1015
1016 /**
1017 * fnic_eth_send() - Send Ethernet frame.
1018 * @fip: fcoe_ctlr instance.
1019 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
1020 */
1021 void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
1022 {
1023 struct fnic *fnic = fnic_from_ctlr(fip);
1024 struct vnic_wq *wq = &fnic->wq[0];
1025 dma_addr_t pa;
1026 struct ethhdr *eth_hdr;
1027 struct vlan_ethhdr *vlan_hdr;
1028 unsigned long flags;
1029
1030 if (!fnic->vlan_hw_insert) {
1031 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
1032 vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
1033 memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
1034 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
1035 vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
1036 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
1037 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
1038 FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
1039 printk(KERN_ERR "fnic ctlr frame trace error!!!");
1040 }
1041 } else {
1042 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
1043 FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
1044 printk(KERN_ERR "fnic ctlr frame trace error!!!");
1045 }
1046 }
1047
1048 pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
1049 DMA_TO_DEVICE);
1050 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
1051 printk(KERN_ERR "DMA mapping failed\n");
1052 goto free_skb;
1053 }
1054
1055 spin_lock_irqsave(&fnic->wq_lock[0], flags);
1056 if (!vnic_wq_desc_avail(wq))
1057 goto irq_restore;
1058
1059 fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
1060 0 /* hw inserts cos value */,
1061 fnic->vlan_id, 1);
1062 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1063 return;
1064
1065 irq_restore:
1066 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1067 dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
1068 free_skb:
1069 kfree_skb(skb);
1070 }
1071
1072 /*
1073 * Send FC frame.
1074 */
1075 static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
1076 {
1077 struct vnic_wq *wq = &fnic->wq[0];
1078 struct sk_buff *skb;
1079 dma_addr_t pa;
1080 struct ethhdr *eth_hdr;
1081 struct vlan_ethhdr *vlan_hdr;
1082 struct fcoe_hdr *fcoe_hdr;
1083 struct fc_frame_header *fh;
1084 u32 tot_len, eth_hdr_len;
1085 int ret = 0;
1086 unsigned long flags;
1087
1088 fh = fc_frame_header_get(fp);
1089 skb = fp_skb(fp);
1090
1091 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1092 fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
1093 return 0;
1094
1095 if (!fnic->vlan_hw_insert) {
1096 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
1097 vlan_hdr = skb_push(skb, eth_hdr_len);
1098 eth_hdr = (struct ethhdr *)vlan_hdr;
1099 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
1100 vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
1101 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
1102 fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
1103 } else {
1104 eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
1105 eth_hdr = skb_push(skb, eth_hdr_len);
1106 eth_hdr->h_proto = htons(ETH_P_FCOE);
1107 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
1108 }
1109
1110 if (fnic->ctlr.map_dest)
1111 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
1112 else
1113 memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
1114 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
1115
1116 tot_len = skb->len;
1117 BUG_ON(tot_len % 4);
1118
1119 memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
1120 fcoe_hdr->fcoe_sof = fr_sof(fp);
1121 if (FC_FCOE_VER)
1122 FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
1123
1124 pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
1125 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
1126 ret = -ENOMEM;
1127 printk(KERN_ERR "DMA map failed with error %d\n", ret);
1128 goto free_skb_on_err;
1129 }
1130
1131 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
1132 (char *)eth_hdr, tot_len)) != 0) {
1133 printk(KERN_ERR "fnic ctlr frame trace error!!!");
1134 }
1135
1136 spin_lock_irqsave(&fnic->wq_lock[0], flags);
1137
1138 if (!vnic_wq_desc_avail(wq)) {
1139 dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
1140 ret = -1;
1141 goto irq_restore;
1142 }
1143
1144 fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
1145 0 /* hw inserts cos value */,
1146 fnic->vlan_id, 1, 1, 1);
1147
1148 irq_restore:
1149 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1150
1151 free_skb_on_err:
1152 if (ret)
1153 dev_kfree_skb_any(fp_skb(fp));
1154
1155 return ret;
1156 }
1157
1158 /*
1159 * fnic_send
1160 * Routine to send a raw frame
1161 */
1162 int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
1163 {
1164 struct fnic *fnic = lport_priv(lp);
1165 unsigned long flags;
1166
1167 if (fnic->in_remove) {
1168 dev_kfree_skb(fp_skb(fp));
1169 return -1;
1170 }
1171
1172 /*
1173 * Queue frame if in a transitional state.
1174 * This occurs while registering the Port_ID / MAC address after FLOGI.
1175 */
1176 spin_lock_irqsave(&fnic->fnic_lock, flags);
1177 if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
1178 skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
1179 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1180 return 0;
1181 }
1182 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1183
1184 return fnic_send_frame(fnic, fp);
1185 }
1186
1187 /**
1188 * fnic_flush_tx() - send queued frames.
1189 * @fnic: fnic device
1190 *
1191 * Send frames that were waiting to go out in FC or Ethernet mode.
1192 * Whenever changing modes we purge queued frames, so these frames should
1193 * be queued for the stable mode that we're in, either FC or Ethernet.
1194 *
1195 * Called without fnic_lock held.
1196 */
1197 void fnic_flush_tx(struct fnic *fnic)
1198 {
1199 struct sk_buff *skb;
1200 struct fc_frame *fp;
1201
1202 while ((skb = skb_dequeue(&fnic->tx_queue))) {
1203 fp = (struct fc_frame *)skb;
1204 fnic_send_frame(fnic, fp);
1205 }
1206 }
1207
1208 /**
1209 * fnic_set_eth_mode() - put fnic into ethernet mode.
1210 * @fnic: fnic device
1211 *
1212 * Called without fnic lock held.
1213 */
1214 static void fnic_set_eth_mode(struct fnic *fnic)
1215 {
1216 unsigned long flags;
1217 enum fnic_state old_state;
1218 int ret;
1219
1220 spin_lock_irqsave(&fnic->fnic_lock, flags);
1221 again:
1222 old_state = fnic->state;
1223 switch (old_state) {
1224 case FNIC_IN_FC_MODE:
1225 case FNIC_IN_ETH_TRANS_FC_MODE:
1226 default:
1227 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1228 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1229
1230 ret = fnic_fw_reset_handler(fnic);
1231
1232 spin_lock_irqsave(&fnic->fnic_lock, flags);
1233 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
1234 goto again;
1235 if (ret)
1236 fnic->state = old_state;
1237 break;
1238
1239 case FNIC_IN_FC_TRANS_ETH_MODE:
1240 case FNIC_IN_ETH_MODE:
1241 break;
1242 }
1243 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1244 }
1245
1246 static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
1247 struct cq_desc *cq_desc,
1248 struct vnic_wq_buf *buf, void *opaque)
1249 {
1250 struct sk_buff *skb = buf->os_buf;
1251 struct fc_frame *fp = (struct fc_frame *)skb;
1252 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1253
1254 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
1255 DMA_TO_DEVICE);
1256 dev_kfree_skb_irq(fp_skb(fp));
1257 buf->os_buf = NULL;
1258 }
1259
1260 static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
1261 struct cq_desc *cq_desc, u8 type,
1262 u16 q_number, u16 completed_index,
1263 void *opaque)
1264 {
1265 struct fnic *fnic = vnic_dev_priv(vdev);
1266 unsigned long flags;
1267
1268 spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
1269 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
1270 fnic_wq_complete_frame_send, NULL);
1271 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
1272
1273 return 0;
1274 }
1275
1276 int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
1277 {
1278 unsigned int wq_work_done = 0;
1279 unsigned int i;
1280
1281 for (i = 0; i < fnic->raw_wq_count; i++) {
1282 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
1283 work_to_do,
1284 fnic_wq_cmpl_handler_cont,
1285 NULL);
1286 }
1287
1288 return wq_work_done;
1289 }
1290
1291
1292 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
1293 {
1294 struct fc_frame *fp = buf->os_buf;
1295 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1296
1297 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
1298 DMA_TO_DEVICE);
1299
1300 dev_kfree_skb(fp_skb(fp));
1301 buf->os_buf = NULL;
1302 }
1303
1304 void fnic_fcoe_reset_vlans(struct fnic *fnic)
1305 {
1306 unsigned long flags;
1307 struct fcoe_vlan *vlan;
1308 struct fcoe_vlan *next;
1309
1310 /*
1311 * Indicate a link down to fcoe so that all FCFs are freed.
1312 * This might not be required since we did this before sending
1313 * the VLAN discovery request.
1314 */
1315 spin_lock_irqsave(&fnic->vlans_lock, flags);
1316 if (!list_empty(&fnic->vlans)) {
1317 list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
1318 list_del(&vlan->list);
1319 kfree(vlan);
1320 }
1321 }
1322 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1323 }
1324
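/*
 * fnic_handle_fip_timer() - FIP VLAN discovery timer callback.
 *
 * Looks at the state of the first VLAN on the list: a VLAN already in use
 * needs nothing, a failed list triggers a fresh VLAN discovery, and a
 * solicited VLAN that exceeded FCOE_CTLR_MAX_SOL attempts is dropped in
 * favour of the next candidate (or a fresh discovery if the list is
 * exhausted) before the timer is re-armed.
 */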
1325 void fnic_handle_fip_timer(struct fnic *fnic)
1326 {
1327 unsigned long flags;
1328 struct fcoe_vlan *vlan;
1329 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1330 u64 sol_time;
1331
1332 spin_lock_irqsave(&fnic->fnic_lock, flags);
1333 if (fnic->stop_rx_link_events) {
1334 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1335 return;
1336 }
1337 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1338
1339 if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
1340 return;
1341
1342 spin_lock_irqsave(&fnic->vlans_lock, flags);
1343 if (list_empty(&fnic->vlans)) {
1344 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1345 /* no vlans available, try again */
1346 if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
1347 if (printk_ratelimit())
1348 shost_printk(KERN_DEBUG, fnic->lport->host,
1349 "Start VLAN Discovery\n");
1350 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1351 return;
1352 }
1353
1354 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
1355 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1356 "fip_timer: vlan %d state %d sol_count %d\n",
1357 vlan->vid, vlan->state, vlan->sol_count);
1358 switch (vlan->state) {
1359 case FIP_VLAN_USED:
1360 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1361 "FIP VLAN is selected for FC transaction\n");
1362 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1363 break;
1364 case FIP_VLAN_FAILED:
1365 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1366 /* if all vlans are in failed state, restart vlan disc */
1367 if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
1368 if (printk_ratelimit())
1369 shost_printk(KERN_DEBUG, fnic->lport->host,
1370 "Start VLAN Discovery\n");
1371 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1372 break;
1373 case FIP_VLAN_SENT:
1374 if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
1375 /*
1376 * No response on this vlan; remove it from the list.
1377 * Try the next vlan.
1378 */
1379 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
1380 "Dequeue this VLAN ID %d from list\n",
1381 vlan->vid);
1382 list_del(&vlan->list);
1383 kfree(vlan);
1384 vlan = NULL;
1385 if (list_empty(&fnic->vlans)) {
1386 /* we exhausted all vlans, restart vlan disc */
1387 spin_unlock_irqrestore(&fnic->vlans_lock,
1388 flags);
1389 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
1390 "fip_timer: vlan list empty, "
1391 "trigger vlan disc\n");
1392 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1393 return;
1394 }
1395 /* check the next vlan */
1396 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
1397 list);
1398 fnic->set_vlan(fnic, vlan->vid);
1399 vlan->state = FIP_VLAN_SENT; /* sent now */
1400 }
1401 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1402 atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
1403 vlan->sol_count++;
1404 sol_time = jiffies + msecs_to_jiffies
1405 (FCOE_CTLR_START_DELAY);
1406 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
1407 break;
1408 }
1409 }
1410