1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright IBM Corp. 2007, 2009
4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5 * Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/kernel.h>
18 #include <linux/slab.h>
19 #include <linux/etherdevice.h>
20 #include <linux/if_bridge.h>
21 #include <linux/list.h>
22 #include <linux/hash.h>
23 #include <linux/hashtable.h>
24 #include <net/switchdev.h>
25 #include <asm/chsc.h>
26 #include <asm/css_chars.h>
27 #include <asm/setup.h>
28 #include "qeth_core.h"
29 #include "qeth_l2.h"
30
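/* Map the IPA return code of a SET/DEL (group) MAC command to an errno value. */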
static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
32 {
33 int rc;
34
35 if (retcode)
36 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
37 switch (retcode) {
38 case IPA_RC_SUCCESS:
39 rc = 0;
40 break;
41 case IPA_RC_L2_UNSUPPORTED_CMD:
42 rc = -EOPNOTSUPP;
43 break;
44 case IPA_RC_L2_ADDR_TABLE_FULL:
45 rc = -ENOSPC;
46 break;
47 case IPA_RC_L2_DUP_MAC:
48 case IPA_RC_L2_DUP_LAYER3_MAC:
49 rc = -EADDRINUSE;
50 break;
51 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
52 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
53 rc = -EADDRNOTAVAIL;
54 break;
55 case IPA_RC_L2_MAC_NOT_FOUND:
56 rc = -ENOENT;
57 break;
58 default:
59 rc = -EIO;
60 break;
61 }
62 return rc;
63 }
64
static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
66 struct qeth_reply *reply,
67 unsigned long data)
68 {
69 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
70
71 return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
72 }
73
static int qeth_l2_send_setdelmac(struct qeth_card *card, const __u8 *mac,
75 enum qeth_ipa_cmds ipacmd)
76 {
77 struct qeth_ipa_cmd *cmd;
78 struct qeth_cmd_buffer *iob;
79
80 QETH_CARD_TEXT(card, 2, "L2sdmac");
81 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
82 IPA_DATA_SIZEOF(setdelmac));
83 if (!iob)
84 return -ENOMEM;
85 cmd = __ipa_cmd(iob);
86 cmd->data.setdelmac.mac_length = ETH_ALEN;
87 ether_addr_copy(cmd->data.setdelmac.mac, mac);
88 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL);
89 }
90
static int qeth_l2_send_setmac(struct qeth_card *card, const __u8 *mac)
92 {
93 int rc;
94
95 QETH_CARD_TEXT(card, 2, "L2Setmac");
96 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
97 if (rc == 0) {
98 dev_info(&card->gdev->dev,
99 "MAC address %pM successfully registered\n", mac);
100 } else {
101 switch (rc) {
102 case -EADDRINUSE:
103 dev_warn(&card->gdev->dev,
104 "MAC address %pM already exists\n", mac);
105 break;
106 case -EADDRNOTAVAIL:
107 dev_warn(&card->gdev->dev,
108 "MAC address %pM is not authorized\n", mac);
109 break;
110 }
111 }
112 return rc;
113 }
114
static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
116 {
117 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
118 IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
119 int rc;
120
121 QETH_CARD_TEXT(card, 2, "L2Wmac");
122 rc = qeth_l2_send_setdelmac(card, mac, cmd);
123 if (rc == -EADDRINUSE)
124 QETH_DBF_MESSAGE(2, "MAC address %012llx is already registered on device %x\n",
125 ether_addr_to_u64(mac), CARD_DEVID(card));
126 else if (rc)
127 QETH_DBF_MESSAGE(2, "Failed to register MAC address %012llx on device %x: %d\n",
128 ether_addr_to_u64(mac), CARD_DEVID(card), rc);
129 return rc;
130 }
131
static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
133 {
134 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
135 IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
136 int rc;
137
138 QETH_CARD_TEXT(card, 2, "L2Rmac");
139 rc = qeth_l2_send_setdelmac(card, mac, cmd);
140 if (rc)
141 QETH_DBF_MESSAGE(2, "Failed to delete MAC address %012llx on device %x: %d\n",
142 ether_addr_to_u64(mac), CARD_DEVID(card), rc);
143 return rc;
144 }
145
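/* Discard all cached RX-mode addresses; no commands are sent to the card. */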
static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
147 {
148 struct qeth_mac *mac;
149 struct hlist_node *tmp;
150 int i;
151
152 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
153 hash_del(&mac->hnode);
154 kfree(mac);
155 }
156 }
157
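/* Build the L2 QDIO transmit header: packet length, header type (TSO vs
 * plain L2), TX checksum offload flags, cast-type flags and VLAN tag.
 */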
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
159 struct qeth_hdr *hdr, struct sk_buff *skb,
160 __be16 proto, unsigned int data_len)
161 {
162 int cast_type = qeth_get_ether_cast_type(skb);
163 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
164
165 hdr->hdr.l2.pkt_length = data_len;
166
167 if (skb_is_gso(skb)) {
168 hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
169 } else {
170 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
171 if (skb->ip_summed == CHECKSUM_PARTIAL)
172 qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], proto);
173 }
174
/* set byte 3 to casting flags */
176 if (cast_type == RTN_MULTICAST)
177 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
178 else if (cast_type == RTN_BROADCAST)
179 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
180 else
181 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
182
/* VSWITCH relies on the VLAN information to be present in
 * the QDIO header
 */
186 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
187 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
188 hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
189 }
190 }
191
static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode)
193 {
194 if (retcode)
195 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
196
197 switch (retcode) {
198 case IPA_RC_SUCCESS:
199 return 0;
200 case IPA_RC_L2_INVALID_VLAN_ID:
201 return -EINVAL;
202 case IPA_RC_L2_DUP_VLAN_ID:
203 return -EEXIST;
204 case IPA_RC_L2_VLAN_ID_NOT_FOUND:
205 return -ENOENT;
206 case IPA_RC_L2_VLAN_ID_NOT_ALLOWED:
207 return -EPERM;
208 default:
209 return -EIO;
210 }
211 }
212
static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
214 struct qeth_reply *reply,
215 unsigned long data)
216 {
217 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
218
219 QETH_CARD_TEXT(card, 2, "L2sdvcb");
220 if (cmd->hdr.return_code) {
221 QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
222 cmd->data.setdelvlan.vlan_id,
223 CARD_DEVID(card), cmd->hdr.return_code);
224 QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
225 }
226 return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code);
227 }
228
static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
230 enum qeth_ipa_cmds ipacmd)
231 {
232 struct qeth_ipa_cmd *cmd;
233 struct qeth_cmd_buffer *iob;
234
235 QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
236 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
237 IPA_DATA_SIZEOF(setdelvlan));
238 if (!iob)
239 return -ENOMEM;
240 cmd = __ipa_cmd(iob);
241 cmd->data.setdelvlan.vlan_id = i;
242 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL);
243 }
244
static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
246 __be16 proto, u16 vid)
247 {
248 struct qeth_card *card = dev->ml_priv;
249
250 QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
251 if (!vid)
252 return 0;
253
254 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
255 }
256
static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
258 __be16 proto, u16 vid)
259 {
260 struct qeth_card *card = dev->ml_priv;
261
262 QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
263 if (!vid)
264 return 0;
265
266 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
267 }
268
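/* Switch the Perform-Network-Subchannel-Operation mode under the ccw device
 * lock; when notifications are turned off, drain the event workqueue so no
 * stale address-change work remains queued.
 */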
static void qeth_l2_set_pnso_mode(struct qeth_card *card,
270 enum qeth_pnso_mode mode)
271 {
272 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
273 WRITE_ONCE(card->info.pnso_mode, mode);
274 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
275
276 if (mode == QETH_PNSO_NONE)
277 drain_workqueue(card->event_wq);
278 }
279
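/* Ask the bridge to flush all FDB entries that were offloaded for this
 * device (all VLANs).
 */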
static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
281 {
282 struct switchdev_notifier_fdb_info info = {};
283
284 QETH_CARD_TEXT(card, 2, "fdbflush");
285
286 info.addr = NULL;
287 /* flush all VLANs: */
288 info.vid = 0;
289 info.added_by_user = false;
290 info.offloaded = true;
291
292 call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
293 card->dev, &info.info, NULL);
294 }
295
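/* Obtain an initial MAC address: try the z/VM MAC service first (on z/VM),
 * then the READ_MAC adapter assist, and finally fall back to a random
 * address on devices that allow a custom MAC.
 */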
static int qeth_l2_request_initial_mac(struct qeth_card *card)
297 {
298 int rc = 0;
299
300 QETH_CARD_TEXT(card, 2, "l2reqmac");
301
302 if (MACHINE_IS_VM) {
303 rc = qeth_vm_request_mac(card);
304 if (!rc)
305 goto out;
306 QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
307 CARD_DEVID(card), rc);
308 QETH_CARD_TEXT_(card, 2, "err%04x", rc);
309 /* fall back to alternative mechanism: */
310 }
311
312 rc = qeth_setadpparms_change_macaddr(card);
313 if (!rc)
314 goto out;
315 QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
316 CARD_DEVID(card), rc);
317 QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
318
319 /* Fall back once more, but some devices don't support a custom MAC
320 * address:
321 */
322 if (IS_OSM(card) || IS_OSX(card))
323 return (rc) ? rc : -EADDRNOTAVAIL;
324 eth_hw_addr_random(card->dev);
325
326 out:
327 QETH_CARD_HEX(card, 2, card->dev->dev_addr, card->dev->addr_len);
328 return 0;
329 }
330
static void qeth_l2_register_dev_addr(struct qeth_card *card)
332 {
333 if (!is_valid_ether_addr(card->dev->dev_addr))
334 qeth_l2_request_initial_mac(card);
335
336 if (!qeth_l2_send_setmac(card, card->dev->dev_addr))
337 card->info.dev_addr_is_registered = 1;
338 else
339 card->info.dev_addr_is_registered = 0;
340 }
341
static int qeth_l2_validate_addr(struct net_device *dev)
343 {
344 struct qeth_card *card = dev->ml_priv;
345
346 if (card->info.dev_addr_is_registered)
347 return eth_validate_addr(dev);
348
349 QETH_CARD_TEXT(card, 4, "nomacadr");
350 return -EPERM;
351 }
352
static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
354 {
355 struct sockaddr *addr = p;
356 struct qeth_card *card = dev->ml_priv;
357 u8 old_addr[ETH_ALEN];
358 int rc = 0;
359
360 QETH_CARD_TEXT(card, 3, "setmac");
361
362 if (IS_OSM(card) || IS_OSX(card)) {
363 QETH_CARD_TEXT(card, 3, "setmcTYP");
364 return -EOPNOTSUPP;
365 }
366 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
367 if (!is_valid_ether_addr(addr->sa_data))
368 return -EADDRNOTAVAIL;
369
370 /* don't register the same address twice */
371 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
372 card->info.dev_addr_is_registered)
373 return 0;
374
375 /* add the new address, switch over, drop the old */
376 rc = qeth_l2_send_setmac(card, addr->sa_data);
377 if (rc)
378 return rc;
379 ether_addr_copy(old_addr, dev->dev_addr);
380 eth_hw_addr_set(dev, addr->sa_data);
381
382 if (card->info.dev_addr_is_registered)
383 qeth_l2_remove_mac(card, old_addr);
384 card->info.dev_addr_is_registered = 1;
385 return 0;
386 }
387
static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable)
389 {
390 int role;
391 int rc;
392
393 QETH_CARD_TEXT(card, 3, "pmisc2br");
394
395 if (enable) {
396 if (card->options.sbp.reflect_promisc_primary)
397 role = QETH_SBP_ROLE_PRIMARY;
398 else
399 role = QETH_SBP_ROLE_SECONDARY;
400 } else
401 role = QETH_SBP_ROLE_NONE;
402
403 rc = qeth_bridgeport_setrole(card, role);
404 QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? '+' : '-', rc);
405 if (!rc) {
406 card->options.sbp.role = role;
407 card->info.promisc_mode = enable;
408 }
409 }
410
static void qeth_l2_set_promisc_mode(struct qeth_card *card)
412 {
413 bool enable = card->dev->flags & IFF_PROMISC;
414
415 if (card->info.promisc_mode == enable)
416 return;
417
418 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
419 qeth_setadp_promisc_mode(card, enable);
420 } else {
421 mutex_lock(&card->sbp_lock);
422 if (card->options.sbp.reflect_promisc)
423 qeth_l2_promisc_to_bridge(card, enable);
424 mutex_unlock(&card->sbp_lock);
425 }
426 }
427
/* A new MAC address is added to the hash table and marked to be written to
 * the card only if it is not already present in the hash table.
 */
static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
433 {
434 u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
435 struct qeth_mac *mac;
436
437 hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) {
438 if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
439 mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
440 return;
441 }
442 }
443
444 mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
445 if (!mac)
446 return;
447
448 ether_addr_copy(mac->mac_addr, ha->addr);
449 mac->disp_flag = QETH_DISP_ADDR_ADD;
450
451 hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash);
452 }
453
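/* Worker for ndo_set_rx_mode(): sync the netdev's UC/MC address lists into
 * the rx_mode_addrs hash, register new addresses with the card, remove
 * addresses that have disappeared, then update promiscuous mode.
 */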
static void qeth_l2_rx_mode_work(struct work_struct *work)
455 {
456 struct qeth_card *card = container_of(work, struct qeth_card,
457 rx_mode_work);
458 struct net_device *dev = card->dev;
459 struct netdev_hw_addr *ha;
460 struct qeth_mac *mac;
461 struct hlist_node *tmp;
462 int i;
463 int rc;
464
465 QETH_CARD_TEXT(card, 3, "setmulti");
466
467 netif_addr_lock_bh(dev);
468 netdev_for_each_mc_addr(ha, dev)
469 qeth_l2_add_mac(card, ha);
470 netdev_for_each_uc_addr(ha, dev)
471 qeth_l2_add_mac(card, ha);
472 netif_addr_unlock_bh(dev);
473
474 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
475 switch (mac->disp_flag) {
476 case QETH_DISP_ADDR_DELETE:
477 qeth_l2_remove_mac(card, mac->mac_addr);
478 hash_del(&mac->hnode);
479 kfree(mac);
480 break;
481 case QETH_DISP_ADDR_ADD:
482 rc = qeth_l2_write_mac(card, mac->mac_addr);
483 if (rc) {
484 hash_del(&mac->hnode);
485 kfree(mac);
486 break;
487 }
488 fallthrough;
489 default:
490 /* for next call to set_rx_mode(): */
491 mac->disp_flag = QETH_DISP_ADDR_DELETE;
492 }
493 }
494
495 qeth_l2_set_promisc_mode(card);
496 }
497
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
499 struct net_device *dev)
500 {
501 struct qeth_card *card = dev->ml_priv;
502 u16 txq = skb_get_queue_mapping(skb);
503 struct qeth_qdio_out_q *queue;
504 int rc;
505
506 if (!skb_is_gso(skb))
507 qdisc_skb_cb(skb)->pkt_len = skb->len;
508 if (IS_IQD(card))
509 txq = qeth_iqd_translate_txq(dev, txq);
510 queue = card->qdio.out_qs[txq];
511
512 rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
513 qeth_l2_fill_header);
514 if (!rc)
515 return NETDEV_TX_OK;
516
517 QETH_TXQ_STAT_INC(queue, tx_dropped);
518 kfree_skb(skb);
519 return NETDEV_TX_OK;
520 }
521
static u16 qeth_l2_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
523 struct net_device *sb_dev)
524 {
525 return qeth_iqd_select_queue(dev, skb, qeth_get_ether_cast_type(skb),
526 sb_dev);
527 }
528
static void qeth_l2_set_rx_mode(struct net_device *dev)
530 {
531 struct qeth_card *card = dev->ml_priv;
532
533 schedule_work(&card->rx_mode_work);
534 }
535
536 /**
537 * qeth_l2_pnso() - perform network subchannel operation
538 * @card: qeth_card structure pointer
539 * @oc: Operation Code
540 * @cnc: Boolean Change-Notification Control
541 * @cb: Callback function will be executed for each element
542 * of the address list
543 * @priv: Pointer to pass to the callback function.
544 *
545 * Collects network information in a network address list and calls the
546 * callback function for every entry in the list. If "change-notification-
547 * control" is set, further changes in the address list will be reported
548 * via the IPA command.
549 */
static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc,
551 void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
552 void *priv)
553 {
554 struct ccw_device *ddev = CARD_DDEV(card);
555 struct chsc_pnso_area *rr;
556 u32 prev_instance = 0;
557 int isfirstblock = 1;
558 int i, size, elems;
559 int rc;
560
561 rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
562 if (rr == NULL)
563 return -ENOMEM;
564 do {
565 QETH_CARD_TEXT(card, 2, "PNSO");
566 /* on the first iteration, naihdr.resume_token will be zero */
567 rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token,
568 cnc);
569 if (rc)
570 continue;
571 if (cb == NULL)
572 continue;
573
574 size = rr->naihdr.naids;
575 if (size != sizeof(struct chsc_pnso_naid_l2)) {
576 WARN_ON_ONCE(1);
577 continue;
578 }
579
580 elems = (rr->response.length - sizeof(struct chsc_header) -
581 sizeof(struct chsc_pnso_naihdr)) / size;
582
583 if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
584 /* Inform the caller that they need to scrap */
585 /* the data that was already reported via cb */
586 rc = -EAGAIN;
587 break;
588 }
589 isfirstblock = 0;
590 prev_instance = rr->naihdr.instance;
591 for (i = 0; i < elems; i++)
592 (*cb)(priv, &rr->entries[i]);
593 } while ((rc == -EBUSY) || (!rc && /* list stored */
594 /* resume token is non-zero => list incomplete */
595 (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
596
597 if (rc)
598 QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code);
599
600 free_page((unsigned long)rr);
601 return rc;
602 }
603
static bool qeth_is_my_net_if_token(struct qeth_card *card,
605 struct net_if_token *token)
606 {
607 return ((card->info.ddev_devno == token->devnum) &&
608 (card->info.cssid == token->cssid) &&
609 (card->info.iid == token->iid) &&
610 (card->info.ssid == token->ssid) &&
611 (card->info.chpid == token->chpid) &&
612 (card->info.chid == token->chid));
613 }
614
615 /**
616 * qeth_l2_dev2br_fdb_notify() - update fdb of master bridge
617 * @card: qeth_card structure pointer
618 * @code: event bitmask: high order bit 0x80 set to
619 * 1 - removal of an object
620 * 0 - addition of an object
621 * Object type(s):
622 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC
623 * @token: "network token" structure identifying 'physical' location
624 * of the target
625 * @addr_lnid: structure with MAC address and VLAN ID of the target
626 */
static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
628 struct net_if_token *token,
629 struct mac_addr_lnid *addr_lnid)
630 {
631 struct switchdev_notifier_fdb_info info = {};
632 u8 ntfy_mac[ETH_ALEN];
633
634 ether_addr_copy(ntfy_mac, addr_lnid->mac);
635 /* Ignore VLAN only changes */
636 if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR))
637 return;
638 /* Ignore mcast entries */
639 if (is_multicast_ether_addr(ntfy_mac))
640 return;
641 /* Ignore my own addresses */
642 if (qeth_is_my_net_if_token(card, token))
643 return;
644
645 info.addr = ntfy_mac;
646 /* don't report VLAN IDs */
647 info.vid = 0;
648 info.added_by_user = false;
649 info.offloaded = true;
650
651 if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) {
652 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
653 card->dev, &info.info, NULL);
654 QETH_CARD_TEXT(card, 4, "andelmac");
655 QETH_CARD_TEXT_(card, 4,
656 "mc%012llx", ether_addr_to_u64(ntfy_mac));
657 } else {
658 call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
659 card->dev, &info.info, NULL);
660 QETH_CARD_TEXT(card, 4, "anaddmac");
661 QETH_CARD_TEXT_(card, 4,
662 "mc%012llx", ether_addr_to_u64(ntfy_mac));
663 }
664 }
665
static void qeth_l2_dev2br_an_set_cb(void *priv,
667 struct chsc_pnso_naid_l2 *entry)
668 {
669 u8 code = IPA_ADDR_CHANGE_CODE_MACADDR;
670 struct qeth_card *card = priv;
671
672 if (entry->addr_lnid.lnid < VLAN_N_VID)
673 code |= IPA_ADDR_CHANGE_CODE_VLANID;
674 qeth_l2_dev2br_fdb_notify(card, code,
675 (struct net_if_token *)&entry->nit,
676 (struct mac_addr_lnid *)&entry->addr_lnid);
677 }
678
679 /**
680 * qeth_l2_dev2br_an_set() -
681 * Enable or disable 'dev to bridge network address notification'
682 * @card: qeth_card structure pointer
683 * @enable: Enable or disable 'dev to bridge network address notification'
684 *
685 * Returns negative errno-compatible error indication or 0 on success.
686 *
687 * On enable, emits a series of address notifications for all
688 * currently registered hosts.
689 */
static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
691 {
692 int rc;
693
694 if (enable) {
695 QETH_CARD_TEXT(card, 2, "anseton");
696 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1,
697 qeth_l2_dev2br_an_set_cb, card);
698 if (rc == -EAGAIN)
699 /* address notification enabled, but inconsistent
700 * addresses reported -> disable address notification
701 */
702 qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0,
703 NULL, NULL);
704 } else {
705 QETH_CARD_TEXT(card, 2, "ansetoff");
706 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
707 }
708
709 return rc;
710 }
711
712 struct qeth_l2_br2dev_event_work {
713 struct work_struct work;
714 struct net_device *br_dev;
715 struct net_device *lsync_dev;
716 struct net_device *dst_dev;
717 unsigned long event;
718 unsigned char addr[ETH_ALEN];
719 };
720
721 static const struct net_device_ops qeth_l2_iqd_netdev_ops;
722 static const struct net_device_ops qeth_l2_osa_netdev_ops;
723
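/* A qeth L2 device must learn an address if it is a learning_sync bridge
 * port other than the destination port, unless both ports are isolated.
 */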
static bool qeth_l2_must_learn(struct net_device *netdev,
725 struct net_device *dstdev)
726 {
727 struct qeth_priv *priv;
728
729 priv = netdev_priv(netdev);
730 return (netdev != dstdev &&
731 (priv->brport_features & BR_LEARNING_SYNC) &&
732 !(br_port_flag_is_set(netdev, BR_ISOLATED) &&
733 br_port_flag_is_set(dstdev, BR_ISOLATED)) &&
734 (netdev->netdev_ops == &qeth_l2_iqd_netdev_ops ||
735 netdev->netdev_ops == &qeth_l2_osa_netdev_ops));
736 }
737
738 /**
739 * qeth_l2_br2dev_worker() - update local MACs
740 * @work: bridge to device FDB update
741 *
742 * Update local MACs of a learning_sync bridgeport so it can receive
743 * messages for a destination port.
744 * In case of an isolated learning_sync port, also update its isolated
745 * siblings.
746 */
static void qeth_l2_br2dev_worker(struct work_struct *work)
748 {
749 struct qeth_l2_br2dev_event_work *br2dev_event_work =
750 container_of(work, struct qeth_l2_br2dev_event_work, work);
751 struct net_device *lsyncdev = br2dev_event_work->lsync_dev;
752 struct net_device *dstdev = br2dev_event_work->dst_dev;
753 struct net_device *brdev = br2dev_event_work->br_dev;
754 unsigned long event = br2dev_event_work->event;
755 unsigned char *addr = br2dev_event_work->addr;
756 struct qeth_card *card = lsyncdev->ml_priv;
757 struct net_device *lowerdev;
758 struct list_head *iter;
759 int err = 0;
760
761 QETH_CARD_TEXT_(card, 4, "b2dw%04lx", event);
762 QETH_CARD_TEXT_(card, 4, "ma%012llx", ether_addr_to_u64(addr));
763
764 rcu_read_lock();
765 /* Verify preconditions are still valid: */
766 if (!netif_is_bridge_port(lsyncdev) ||
767 brdev != netdev_master_upper_dev_get_rcu(lsyncdev))
768 goto unlock;
769 if (!qeth_l2_must_learn(lsyncdev, dstdev))
770 goto unlock;
771
772 if (br_port_flag_is_set(lsyncdev, BR_ISOLATED)) {
773 /* Update lsyncdev and its isolated sibling(s): */
774 iter = &brdev->adj_list.lower;
775 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
776 while (lowerdev) {
777 if (br_port_flag_is_set(lowerdev, BR_ISOLATED)) {
778 switch (event) {
779 case SWITCHDEV_FDB_ADD_TO_DEVICE:
780 err = dev_uc_add(lowerdev, addr);
781 break;
782 case SWITCHDEV_FDB_DEL_TO_DEVICE:
783 err = dev_uc_del(lowerdev, addr);
784 break;
785 default:
786 break;
787 }
788 if (err) {
789 QETH_CARD_TEXT(card, 2, "b2derris");
790 QETH_CARD_TEXT_(card, 2,
791 "err%02lx%03d", event,
792 lowerdev->ifindex);
793 }
794 }
795 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
796 }
797 } else {
798 switch (event) {
799 case SWITCHDEV_FDB_ADD_TO_DEVICE:
800 err = dev_uc_add(lsyncdev, addr);
801 break;
802 case SWITCHDEV_FDB_DEL_TO_DEVICE:
803 err = dev_uc_del(lsyncdev, addr);
804 break;
805 default:
806 break;
807 }
808 if (err)
809 QETH_CARD_TEXT_(card, 2, "b2derr%02lx", event);
810 }
811
812 unlock:
813 rcu_read_unlock();
814 dev_put(brdev);
815 dev_put(lsyncdev);
816 dev_put(dstdev);
817 kfree(br2dev_event_work);
818 }
819
static int qeth_l2_br2dev_queue_work(struct net_device *brdev,
821 struct net_device *lsyncdev,
822 struct net_device *dstdev,
823 unsigned long event,
824 const unsigned char *addr)
825 {
826 struct qeth_l2_br2dev_event_work *worker_data;
827 struct qeth_card *card;
828
829 worker_data = kzalloc(sizeof(*worker_data), GFP_ATOMIC);
830 if (!worker_data)
831 return -ENOMEM;
832 INIT_WORK(&worker_data->work, qeth_l2_br2dev_worker);
833 worker_data->br_dev = brdev;
834 worker_data->lsync_dev = lsyncdev;
835 worker_data->dst_dev = dstdev;
836 worker_data->event = event;
837 ether_addr_copy(worker_data->addr, addr);
838
839 card = lsyncdev->ml_priv;
840 /* Take a reference on the sw port devices and the bridge */
841 dev_hold(brdev);
842 dev_hold(lsyncdev);
843 dev_hold(dstdev);
844 queue_work(card->event_wq, &worker_data->work);
845 return 0;
846 }
847
848 /* Called under rtnl_lock */
static int qeth_l2_switchdev_event(struct notifier_block *unused,
850 unsigned long event, void *ptr)
851 {
852 struct net_device *dstdev, *brdev, *lowerdev;
853 struct switchdev_notifier_fdb_info *fdb_info;
854 struct switchdev_notifier_info *info = ptr;
855 struct list_head *iter;
856 struct qeth_card *card;
857 int rc;
858
859 if (!(event == SWITCHDEV_FDB_ADD_TO_DEVICE ||
860 event == SWITCHDEV_FDB_DEL_TO_DEVICE))
861 return NOTIFY_DONE;
862
863 dstdev = switchdev_notifier_info_to_dev(info);
864 brdev = netdev_master_upper_dev_get_rcu(dstdev);
865 if (!brdev || !netif_is_bridge_master(brdev))
866 return NOTIFY_DONE;
867 fdb_info = container_of(info,
868 struct switchdev_notifier_fdb_info,
869 info);
870 iter = &brdev->adj_list.lower;
871 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
872 while (lowerdev) {
873 if (qeth_l2_must_learn(lowerdev, dstdev)) {
874 card = lowerdev->ml_priv;
875 QETH_CARD_TEXT_(card, 4, "b2dqw%03lx", event);
876 rc = qeth_l2_br2dev_queue_work(brdev, lowerdev,
877 dstdev, event,
878 fdb_info->addr);
879 if (rc) {
880 QETH_CARD_TEXT(card, 2, "b2dqwerr");
881 return NOTIFY_BAD;
882 }
883 }
884 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
885 }
886 return NOTIFY_DONE;
887 }
888
889 static struct notifier_block qeth_l2_sw_notifier = {
890 .notifier_call = qeth_l2_switchdev_event,
891 };
892
893 static refcount_t qeth_l2_switchdev_notify_refcnt;
894
895 /* Called under rtnl_lock */
static void qeth_l2_br2dev_get(void)
897 {
898 int rc;
899
900 if (!refcount_inc_not_zero(&qeth_l2_switchdev_notify_refcnt)) {
901 rc = register_switchdev_notifier(&qeth_l2_sw_notifier);
902 if (rc) {
903 QETH_DBF_MESSAGE(2,
904 "failed to register qeth_l2_sw_notifier: %d\n",
905 rc);
906 } else {
907 refcount_set(&qeth_l2_switchdev_notify_refcnt, 1);
908 QETH_DBF_MESSAGE(2, "qeth_l2_sw_notifier registered\n");
909 }
910 }
911 QETH_DBF_TEXT_(SETUP, 2, "b2d+%04d",
912 qeth_l2_switchdev_notify_refcnt.refs.counter);
913 }
914
915 /* Called under rtnl_lock */
static void qeth_l2_br2dev_put(void)
917 {
918 int rc;
919
920 if (refcount_dec_and_test(&qeth_l2_switchdev_notify_refcnt)) {
921 rc = unregister_switchdev_notifier(&qeth_l2_sw_notifier);
922 if (rc) {
923 QETH_DBF_MESSAGE(2,
924 "failed to unregister qeth_l2_sw_notifier: %d\n",
925 rc);
926 } else {
927 QETH_DBF_MESSAGE(2,
928 "qeth_l2_sw_notifier unregistered\n");
929 }
930 }
931 QETH_DBF_TEXT_(SETUP, 2, "b2d-%04d",
932 qeth_l2_switchdev_notify_refcnt.refs.counter);
933 }
934
static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
936 struct net_device *dev, u32 filter_mask,
937 int nlflags)
938 {
939 struct qeth_priv *priv = netdev_priv(dev);
940 struct qeth_card *card = dev->ml_priv;
941 u16 mode = BRIDGE_MODE_UNDEF;
942
943 /* Do not even show qeth devs that cannot do bridge_setlink */
944 if (!priv->brport_hw_features || !netif_device_present(dev) ||
945 qeth_bridgeport_is_in_use(card))
946 return -EOPNOTSUPP;
947
948 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
949 mode, priv->brport_features,
950 priv->brport_hw_features,
951 nlflags, filter_mask, NULL);
952 }
953
954 static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = {
955 [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
956 };
957
958 /**
959 * qeth_l2_bridge_setlink() - set bridgeport attributes
960 * @dev: netdevice
961 * @nlh: netlink message header
962 * @flags: bridge flags (here: BRIDGE_FLAGS_SELF)
963 * @extack: extended ACK report struct
964 *
965 * Called under rtnl_lock
966 */
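/* Illustrative usage (assuming iproute2's bridge tool; not part of this
 * file): "bridge link set dev <qeth-netdev> learning_sync on self" toggles
 * the BR_LEARNING_SYNC bridgeport flag handled below.
 */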
static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
968 u16 flags, struct netlink_ext_ack *extack)
969 {
970 struct qeth_priv *priv = netdev_priv(dev);
971 struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1];
972 struct qeth_card *card = dev->ml_priv;
973 struct nlattr *attr, *nested_attr;
974 bool enable, has_protinfo = false;
975 int rem1, rem2;
976 int rc;
977
978 if (!netif_device_present(dev))
979 return -ENODEV;
980
981 nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
982 if (nla_type(attr) == IFLA_PROTINFO) {
983 rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr,
984 qeth_brport_policy, extack);
985 if (rc)
986 return rc;
987 has_protinfo = true;
988 } else if (nla_type(attr) == IFLA_AF_SPEC) {
989 nla_for_each_nested(nested_attr, attr, rem2) {
990 if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS)
991 continue;
992 NL_SET_ERR_MSG_ATTR(extack, nested_attr,
993 "Unsupported attribute");
994 return -EINVAL;
995 }
996 } else {
997 NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute");
998 return -EINVAL;
999 }
1000 }
1001 if (!has_protinfo)
1002 return 0;
1003 if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
1004 return -EINVAL;
1005 if (!(priv->brport_hw_features & BR_LEARNING_SYNC)) {
1006 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
1007 "Operation not supported by HW");
1008 return -EOPNOTSUPP;
1009 }
1010 if (!IS_ENABLED(CONFIG_NET_SWITCHDEV)) {
1011 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
1012 "Requires NET_SWITCHDEV");
1013 return -EOPNOTSUPP;
1014 }
1015 enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
1016
1017 if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
1018 return 0;
1019
1020 mutex_lock(&card->sbp_lock);
1021 /* do not change anything if BridgePort is enabled */
1022 if (qeth_bridgeport_is_in_use(card)) {
1023 NL_SET_ERR_MSG(extack, "n/a (BridgePort)");
1024 rc = -EBUSY;
1025 } else if (enable) {
1026 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1027 rc = qeth_l2_dev2br_an_set(card, true);
1028 if (rc) {
1029 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1030 } else {
1031 priv->brport_features |= BR_LEARNING_SYNC;
1032 qeth_l2_br2dev_get();
1033 }
1034 } else {
1035 rc = qeth_l2_dev2br_an_set(card, false);
1036 if (!rc) {
1037 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1038 priv->brport_features ^= BR_LEARNING_SYNC;
1039 qeth_l2_dev2br_fdb_flush(card);
1040 qeth_l2_br2dev_put();
1041 }
1042 }
1043 mutex_unlock(&card->sbp_lock);
1044
1045 return rc;
1046 }
1047
1048 static const struct net_device_ops qeth_l2_iqd_netdev_ops = {
1049 .ndo_open = qeth_open,
1050 .ndo_stop = qeth_stop,
1051 .ndo_get_stats64 = qeth_get_stats64,
1052 .ndo_start_xmit = qeth_l2_hard_start_xmit,
1053 .ndo_features_check = qeth_features_check,
1054 .ndo_select_queue = qeth_l2_iqd_select_queue,
1055 .ndo_validate_addr = qeth_l2_validate_addr,
1056 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
1057 .ndo_eth_ioctl = qeth_do_ioctl,
1058 .ndo_siocdevprivate = qeth_siocdevprivate,
1059 .ndo_set_mac_address = qeth_l2_set_mac_address,
1060 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
1061 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
1062 .ndo_tx_timeout = qeth_tx_timeout,
1063 .ndo_fix_features = qeth_fix_features,
1064 .ndo_set_features = qeth_set_features,
1065 .ndo_bridge_getlink = qeth_l2_bridge_getlink,
1066 .ndo_bridge_setlink = qeth_l2_bridge_setlink,
1067 };
1068
1069 static const struct net_device_ops qeth_l2_osa_netdev_ops = {
1070 .ndo_open = qeth_open,
1071 .ndo_stop = qeth_stop,
1072 .ndo_get_stats64 = qeth_get_stats64,
1073 .ndo_start_xmit = qeth_l2_hard_start_xmit,
1074 .ndo_features_check = qeth_features_check,
1075 .ndo_select_queue = qeth_osa_select_queue,
1076 .ndo_validate_addr = qeth_l2_validate_addr,
1077 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
1078 .ndo_eth_ioctl = qeth_do_ioctl,
1079 .ndo_siocdevprivate = qeth_siocdevprivate,
1080 .ndo_set_mac_address = qeth_l2_set_mac_address,
1081 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
1082 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
1083 .ndo_tx_timeout = qeth_tx_timeout,
1084 .ndo_fix_features = qeth_fix_features,
1085 .ndo_set_features = qeth_set_features,
1086 };
1087
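/* Initialize the net_device for an L2 interface: pick the IQD or OSA netdev
 * ops, advertise the offload features the card supports and register the
 * netdev.
 */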
static int qeth_l2_setup_netdev(struct qeth_card *card)
1089 {
1090 card->dev->netdev_ops = IS_IQD(card) ? &qeth_l2_iqd_netdev_ops :
1091 &qeth_l2_osa_netdev_ops;
1092 card->dev->needed_headroom = sizeof(struct qeth_hdr);
1093 card->dev->priv_flags |= IFF_UNICAST_FLT;
1094
1095 if (IS_OSM(card)) {
1096 card->dev->features |= NETIF_F_VLAN_CHALLENGED;
1097 } else {
1098 if (!IS_VM_NIC(card))
1099 card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1100 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1101 }
1102
1103 if (IS_OSD(card) && !IS_VM_NIC(card)) {
1104 card->dev->features |= NETIF_F_SG;
/* OSA 3S and earlier have no RX/TX checksum offload support */
1106 if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
1107 card->dev->hw_features |= NETIF_F_IP_CSUM;
1108 card->dev->vlan_features |= NETIF_F_IP_CSUM;
1109 }
1110 }
1111 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
1112 card->dev->hw_features |= NETIF_F_IPV6_CSUM;
1113 card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
1114 }
1115 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) ||
1116 qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) {
1117 card->dev->hw_features |= NETIF_F_RXCSUM;
1118 card->dev->vlan_features |= NETIF_F_RXCSUM;
1119 }
1120 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
1121 card->dev->hw_features |= NETIF_F_TSO;
1122 card->dev->vlan_features |= NETIF_F_TSO;
1123 }
1124 if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
1125 card->dev->hw_features |= NETIF_F_TSO6;
1126 card->dev->vlan_features |= NETIF_F_TSO6;
1127 }
1128
1129 if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1130 card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
1131 netif_keep_dst(card->dev);
1132 netif_set_tso_max_size(card->dev,
1133 PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
1134 }
1135
1136 netif_napi_add(card->dev, &card->napi, qeth_poll);
1137 return register_netdev(card->dev);
1138 }
1139
static void qeth_l2_trace_features(struct qeth_card *card)
1141 {
1142 /* Set BridgePort features */
1143 QETH_CARD_TEXT(card, 2, "featuSBP");
1144 QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs,
1145 sizeof(card->options.sbp.supported_funcs));
1146 /* VNIC Characteristics features */
1147 QETH_CARD_TEXT(card, 2, "feaVNICC");
1148 QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars,
1149 sizeof(card->options.vnicc.sup_chars));
1150 }
1151
static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
1153 {
1154 if (!card->options.sbp.reflect_promisc &&
1155 card->options.sbp.role != QETH_SBP_ROLE_NONE) {
1156 /* Conditional to avoid spurious error messages */
1157 qeth_bridgeport_setrole(card, card->options.sbp.role);
1158 /* Let the callback function refresh the stored role value. */
1159 qeth_bridgeport_query_ports(card, &card->options.sbp.role,
1160 NULL);
1161 }
1162 if (card->options.sbp.hostnotification) {
1163 if (qeth_bridgeport_an_set(card, 1))
1164 card->options.sbp.hostnotification = 0;
1165 }
1166 }
1167
1168 /**
1169 * qeth_l2_detect_dev2br_support() -
1170 * Detect whether this card supports 'dev to bridge fdb network address
1171 * change notification' and thus can support the learning_sync bridgeport
1172 * attribute
1173 * @card: qeth_card structure pointer
1174 */
static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
1176 {
1177 struct qeth_priv *priv = netdev_priv(card->dev);
1178 bool dev2br_supported;
1179
1180 QETH_CARD_TEXT(card, 2, "d2brsup");
1181 if (!IS_IQD(card))
1182 return;
1183
/* dev2br requires valid cssid, iid and chid */
1185 dev2br_supported = card->info.ids_valid &&
1186 css_general_characteristics.enarf;
1187 QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
1188
1189 if (dev2br_supported)
1190 priv->brport_hw_features |= BR_LEARNING_SYNC;
1191 else
1192 priv->brport_hw_features &= ~BR_LEARNING_SYNC;
1193 }
1194
static void qeth_l2_enable_brport_features(struct qeth_card *card)
1196 {
1197 struct qeth_priv *priv = netdev_priv(card->dev);
1198 int rc;
1199
1200 if (priv->brport_features & BR_LEARNING_SYNC) {
1201 if (priv->brport_hw_features & BR_LEARNING_SYNC) {
1202 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1203 rc = qeth_l2_dev2br_an_set(card, true);
1204 if (rc == -EAGAIN) {
1205 /* Recoverable error, retry once */
1206 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1207 qeth_l2_dev2br_fdb_flush(card);
1208 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1209 rc = qeth_l2_dev2br_an_set(card, true);
1210 }
1211 if (rc) {
1212 netdev_err(card->dev,
1213 "failed to enable bridge learning_sync: %d\n",
1214 rc);
1215 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1216 qeth_l2_dev2br_fdb_flush(card);
1217 priv->brport_features ^= BR_LEARNING_SYNC;
1218 }
1219 } else {
1220 dev_warn(&card->gdev->dev,
1221 "bridge learning_sync not supported\n");
1222 priv->brport_features ^= BR_LEARNING_SYNC;
1223 }
1224 }
1225 }
1226
1227 /* SETBRIDGEPORT support, async notifications */
1228
1229 enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
1230
1231 /**
1232 * qeth_bridge_emit_host_event() - bridgeport address change notification
1233 * @card: qeth_card structure pointer, for udev events.
1234 * @evtype: "normal" register/unregister, or abort, or reset. For abort
* and reset, token and addr_lnid are unused and may be NULL.
1236 * @code: event bitmask: high order bit 0x80 value 1 means removal of an
1237 * object, 0 - addition of an object.
1238 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC.
1239 * @token: "network token" structure identifying physical address of the port.
1240 * @addr_lnid: pointer to structure with MAC address and VLAN ID.
1241 *
1242 * This function is called when registrations and deregistrations are
1243 * reported by the hardware, and also when notifications are enabled -
1244 * for all currently registered addresses.
1245 */
static void qeth_bridge_emit_host_event(struct qeth_card *card,
1247 enum qeth_an_event_type evtype,
1248 u8 code,
1249 struct net_if_token *token,
1250 struct mac_addr_lnid *addr_lnid)
1251 {
1252 char str[7][32];
1253 char *env[8];
1254 int i = 0;
1255
1256 switch (evtype) {
1257 case anev_reg_unreg:
1258 snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
1259 (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
1260 ? "deregister" : "register");
1261 env[i] = str[i]; i++;
1262 if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
1263 snprintf(str[i], sizeof(str[i]), "VLAN=%d",
1264 addr_lnid->lnid);
1265 env[i] = str[i]; i++;
1266 }
1267 if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
1268 snprintf(str[i], sizeof(str[i]), "MAC=%pM",
1269 addr_lnid->mac);
1270 env[i] = str[i]; i++;
1271 }
1272 snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
1273 token->cssid, token->ssid, token->devnum);
1274 env[i] = str[i]; i++;
1275 snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
1276 env[i] = str[i]; i++;
1277 snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
1278 token->chpid);
1279 env[i] = str[i]; i++;
1280 snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid);
1281 env[i] = str[i]; i++;
1282 break;
1283 case anev_abort:
1284 snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
1285 env[i] = str[i]; i++;
1286 break;
1287 case anev_reset:
1288 snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
1289 env[i] = str[i]; i++;
1290 break;
1291 }
1292 env[i] = NULL;
1293 kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env);
1294 }
1295
1296 struct qeth_bridge_state_data {
1297 struct work_struct worker;
1298 struct qeth_card *card;
1299 u8 role;
1300 u8 state;
1301 };
1302
static void qeth_bridge_state_change_worker(struct work_struct *work)
1304 {
1305 struct qeth_bridge_state_data *data =
1306 container_of(work, struct qeth_bridge_state_data, worker);
1307 char env_locrem[32];
1308 char env_role[32];
1309 char env_state[32];
1310 char *env[] = {
1311 env_locrem,
1312 env_role,
1313 env_state,
1314 NULL
1315 };
1316
1317 snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
1318 snprintf(env_role, sizeof(env_role), "ROLE=%s",
1319 (data->role == QETH_SBP_ROLE_NONE) ? "none" :
1320 (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
1321 (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
1322 "<INVALID>");
1323 snprintf(env_state, sizeof(env_state), "STATE=%s",
1324 (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
1325 (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
1326 (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
1327 "<INVALID>");
1328 kobject_uevent_env(&data->card->gdev->dev.kobj,
1329 KOBJ_CHANGE, env);
1330 kfree(data);
1331 }
1332
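/* Handle a Bridge Port state-change notification: queue a worker that emits
 * a corresponding udev event for the local port.
 */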
static void qeth_bridge_state_change(struct qeth_card *card,
1334 struct qeth_ipa_cmd *cmd)
1335 {
1336 struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data;
1337 struct qeth_bridge_state_data *data;
1338
1339 QETH_CARD_TEXT(card, 2, "brstchng");
1340 if (qports->num_entries == 0) {
1341 QETH_CARD_TEXT(card, 2, "BPempty");
1342 return;
1343 }
1344 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
1345 QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
1346 return;
1347 }
1348
1349 data = kzalloc(sizeof(*data), GFP_ATOMIC);
1350 if (!data) {
1351 QETH_CARD_TEXT(card, 2, "BPSalloc");
1352 return;
1353 }
1354 INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
1355 data->card = card;
1356 /* Information for the local port: */
1357 data->role = qports->entry[0].role;
1358 data->state = qports->entry[0].state;
1359
1360 queue_work(card->event_wq, &data->worker);
1361 }
1362
1363 struct qeth_addr_change_data {
1364 struct delayed_work dwork;
1365 struct qeth_card *card;
1366 struct qeth_ipacmd_addr_change ac_event;
1367 };
1368
static void qeth_l2_dev2br_worker(struct work_struct *work)
1370 {
1371 struct delayed_work *dwork = to_delayed_work(work);
1372 struct qeth_addr_change_data *data;
1373 struct qeth_card *card;
1374 struct qeth_priv *priv;
1375 unsigned int i;
1376 int rc;
1377
1378 data = container_of(dwork, struct qeth_addr_change_data, dwork);
1379 card = data->card;
1380 priv = netdev_priv(card->dev);
1381
1382 QETH_CARD_TEXT(card, 4, "dev2brew");
1383
1384 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
1385 goto free;
1386
1387 if (data->ac_event.lost_event_mask) {
1388 /* Potential re-config in progress, try again later: */
1389 if (!rtnl_trylock()) {
1390 queue_delayed_work(card->event_wq, dwork,
1391 msecs_to_jiffies(100));
1392 return;
1393 }
1394
1395 if (!netif_device_present(card->dev)) {
1396 rtnl_unlock();
1397 goto free;
1398 }
1399
1400 QETH_DBF_MESSAGE(3,
1401 "Address change notification overflow on device %x\n",
1402 CARD_DEVID(card));
1403 /* Card fdb and bridge fdb are out of sync, card has stopped
1404 * notifications (no need to drain_workqueue). Purge all
1405 * 'extern_learn' entries from the parent bridge and restart
1406 * the notifications.
1407 */
1408 qeth_l2_dev2br_fdb_flush(card);
1409 rc = qeth_l2_dev2br_an_set(card, true);
1410 if (rc) {
1411 /* TODO: if we want to retry after -EAGAIN, be
1412 * aware there could be stale entries in the
1413 * workqueue now, that need to be drained.
1414 * For now we give up:
1415 */
1416 netdev_err(card->dev,
1417 "bridge learning_sync failed to recover: %d\n",
1418 rc);
1419 WRITE_ONCE(card->info.pnso_mode,
1420 QETH_PNSO_NONE);
1421 /* To remove fdb entries reported by an_set: */
1422 qeth_l2_dev2br_fdb_flush(card);
1423 priv->brport_features ^= BR_LEARNING_SYNC;
1424 } else {
1425 QETH_DBF_MESSAGE(3,
1426 "Address Notification resynced on device %x\n",
1427 CARD_DEVID(card));
1428 }
1429
1430 rtnl_unlock();
1431 } else {
1432 for (i = 0; i < data->ac_event.num_entries; i++) {
1433 struct qeth_ipacmd_addr_change_entry *entry =
1434 &data->ac_event.entry[i];
1435 qeth_l2_dev2br_fdb_notify(card,
1436 entry->change_code,
1437 &entry->token,
1438 &entry->addr_lnid);
1439 }
1440 }
1441
1442 free:
1443 kfree(data);
1444 }
1445
static void qeth_addr_change_event_worker(struct work_struct *work)
1447 {
1448 struct delayed_work *dwork = to_delayed_work(work);
1449 struct qeth_addr_change_data *data;
1450 struct qeth_card *card;
1451 int i;
1452
1453 data = container_of(dwork, struct qeth_addr_change_data, dwork);
1454 card = data->card;
1455
1456 QETH_CARD_TEXT(data->card, 4, "adrchgew");
1457
1458 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
1459 goto free;
1460
1461 if (data->ac_event.lost_event_mask) {
1462 /* Potential re-config in progress, try again later: */
1463 if (!mutex_trylock(&card->sbp_lock)) {
1464 queue_delayed_work(card->event_wq, dwork,
1465 msecs_to_jiffies(100));
1466 return;
1467 }
1468
1469 dev_info(&data->card->gdev->dev,
1470 "Address change notification stopped on %s (%s)\n",
1471 netdev_name(card->dev),
1472 (data->ac_event.lost_event_mask == 0x01)
1473 ? "Overflow"
1474 : (data->ac_event.lost_event_mask == 0x02)
1475 ? "Bridge port state change"
1476 : "Unknown reason");
1477
1478 data->card->options.sbp.hostnotification = 0;
1479 card->info.pnso_mode = QETH_PNSO_NONE;
1480 mutex_unlock(&data->card->sbp_lock);
1481 qeth_bridge_emit_host_event(data->card, anev_abort,
1482 0, NULL, NULL);
1483 } else
1484 for (i = 0; i < data->ac_event.num_entries; i++) {
1485 struct qeth_ipacmd_addr_change_entry *entry =
1486 &data->ac_event.entry[i];
1487 qeth_bridge_emit_host_event(data->card,
1488 anev_reg_unreg,
1489 entry->change_code,
1490 &entry->token,
1491 &entry->addr_lnid);
1492 }
1493
1494 free:
1495 kfree(data);
1496 }
1497
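/* Handle an address-change notification from the card: copy the reported
 * entries and defer processing to the bridgeport or dev2br worker.
 */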
static void qeth_addr_change_event(struct qeth_card *card,
1499 struct qeth_ipa_cmd *cmd)
1500 {
1501 struct qeth_ipacmd_addr_change *hostevs =
1502 &cmd->data.addrchange;
1503 struct qeth_addr_change_data *data;
1504 int extrasize;
1505
1506 if (card->info.pnso_mode == QETH_PNSO_NONE)
1507 return;
1508
1509 QETH_CARD_TEXT(card, 4, "adrchgev");
1510 if (cmd->hdr.return_code != 0x0000) {
1511 if (cmd->hdr.return_code == 0x0010) {
1512 if (hostevs->lost_event_mask == 0x00)
1513 hostevs->lost_event_mask = 0xff;
1514 } else {
1515 QETH_CARD_TEXT_(card, 2, "ACHN%04x",
1516 cmd->hdr.return_code);
1517 return;
1518 }
1519 }
1520 extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
1521 hostevs->num_entries;
1522 data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize,
1523 GFP_ATOMIC);
1524 if (!data) {
1525 QETH_CARD_TEXT(card, 2, "ACNalloc");
1526 return;
1527 }
1528 if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT)
1529 INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
1530 else
1531 INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
1532 data->card = card;
1533 data->ac_event = *hostevs;
1534 memcpy(data->ac_event.entry, hostevs->entry, extrasize);
1535 queue_delayed_work(card->event_wq, &data->dwork, 0);
1536 }
1537
1538 /* SETBRIDGEPORT support; sending commands */
1539
1540 struct _qeth_sbp_cbctl {
1541 union {
1542 u32 supported;
1543 struct {
1544 enum qeth_sbp_roles *role;
1545 enum qeth_sbp_states *state;
1546 } qports;
1547 } data;
1548 };
1549
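/* Translate the IPA and SETBRIDGEPORT return codes of an SBP command into an
 * errno value, logging a message for the well-known error cases.
 */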
static int qeth_bridgeport_makerc(struct qeth_card *card,
1551 struct qeth_ipa_cmd *cmd)
1552 {
1553 struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp;
1554 enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code;
1555 u16 ipa_rc = cmd->hdr.return_code;
1556 u16 sbp_rc = sbp->hdr.return_code;
1557 int rc;
1558
1559 if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS)
1560 return 0;
1561
1562 if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) ||
1563 (!IS_IQD(card) && ipa_rc == sbp_rc)) {
1564 switch (sbp_rc) {
1565 case IPA_RC_SUCCESS:
1566 rc = 0;
1567 break;
1568 case IPA_RC_L2_UNSUPPORTED_CMD:
1569 case IPA_RC_UNSUPPORTED_COMMAND:
1570 rc = -EOPNOTSUPP;
1571 break;
1572 case IPA_RC_SBP_OSA_NOT_CONFIGURED:
1573 case IPA_RC_SBP_IQD_NOT_CONFIGURED:
1574 rc = -ENODEV; /* maybe not the best code here? */
1575 dev_err(&card->gdev->dev,
1576 "The device is not configured as a Bridge Port\n");
1577 break;
1578 case IPA_RC_SBP_OSA_OS_MISMATCH:
1579 case IPA_RC_SBP_IQD_OS_MISMATCH:
1580 rc = -EPERM;
1581 dev_err(&card->gdev->dev,
1582 "A Bridge Port is already configured by a different operating system\n");
1583 break;
1584 case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY:
1585 case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY:
1586 switch (setcmd) {
1587 case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
1588 rc = -EEXIST;
1589 dev_err(&card->gdev->dev,
1590 "The LAN already has a primary Bridge Port\n");
1591 break;
1592 case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
1593 rc = -EBUSY;
1594 dev_err(&card->gdev->dev,
1595 "The device is already a primary Bridge Port\n");
1596 break;
1597 default:
1598 rc = -EIO;
1599 }
1600 break;
1601 case IPA_RC_SBP_OSA_CURRENT_SECOND:
1602 case IPA_RC_SBP_IQD_CURRENT_SECOND:
1603 rc = -EBUSY;
1604 dev_err(&card->gdev->dev,
1605 "The device is already a secondary Bridge Port\n");
1606 break;
1607 case IPA_RC_SBP_OSA_LIMIT_SECOND:
1608 case IPA_RC_SBP_IQD_LIMIT_SECOND:
1609 rc = -EEXIST;
1610 dev_err(&card->gdev->dev,
1611 "The LAN cannot have more secondary Bridge Ports\n");
1612 break;
1613 case IPA_RC_SBP_OSA_CURRENT_PRIMARY:
1614 case IPA_RC_SBP_IQD_CURRENT_PRIMARY:
1615 rc = -EBUSY;
1616 dev_err(&card->gdev->dev,
1617 "The device is already a primary Bridge Port\n");
1618 break;
1619 case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN:
1620 case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN:
1621 rc = -EACCES;
1622 dev_err(&card->gdev->dev,
1623 "The device is not authorized to be a Bridge Port\n");
1624 break;
1625 default:
1626 rc = -EIO;
1627 }
1628 } else {
1629 switch (ipa_rc) {
1630 case IPA_RC_NOTSUPP:
1631 rc = -EOPNOTSUPP;
1632 break;
1633 case IPA_RC_UNSUPPORTED_COMMAND:
1634 rc = -EOPNOTSUPP;
1635 break;
1636 default:
1637 rc = -EIO;
1638 }
1639 }
1640
1641 if (rc) {
1642 QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc);
1643 QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc);
1644 }
1645 return rc;
1646 }
1647
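/* Allocate and pre-fill a SETBRIDGEPORT command buffer for the given
 * subcommand; returns NULL if no buffer could be allocated.
 */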
static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
1649 enum qeth_ipa_sbp_cmd sbp_cmd,
1650 unsigned int data_length)
1651 {
1652 enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD :
1653 IPA_CMD_SETBRIDGEPORT_OSA;
1654 struct qeth_ipacmd_sbp_hdr *hdr;
1655 struct qeth_cmd_buffer *iob;
1656
1657 iob = qeth_ipa_alloc_cmd(card, ipa_cmd, QETH_PROT_NONE,
1658 data_length +
1659 offsetof(struct qeth_ipacmd_setbridgeport,
1660 data));
1661 if (!iob)
1662 return iob;
1663
1664 hdr = &__ipa_cmd(iob)->data.sbp.hdr;
1665 hdr->cmdlength = sizeof(*hdr) + data_length;
1666 hdr->command_code = sbp_cmd;
1667 hdr->used_total = 1;
1668 hdr->seq_no = 1;
1669 return iob;
1670 }
1671
static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
1673 struct qeth_reply *reply, unsigned long data)
1674 {
1675 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1676 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
1677 int rc;
1678
1679 QETH_CARD_TEXT(card, 2, "brqsupcb");
1680 rc = qeth_bridgeport_makerc(card, cmd);
1681 if (rc)
1682 return rc;
1683
1684 cbctl->data.supported =
1685 cmd->data.sbp.data.query_cmds_supp.supported_cmds;
1686 return 0;
1687 }
1688
1689 /**
1690 * qeth_bridgeport_query_support() - store bitmask of supported subfunctions.
1691 * @card: qeth_card structure pointer.
1692 *
1693 * Sets bitmask of supported setbridgeport subfunctions in the qeth_card
* structure: card->options.sbp.supported_funcs.
1695 */
static void qeth_bridgeport_query_support(struct qeth_card *card)
1697 {
1698 struct qeth_cmd_buffer *iob;
1699 struct _qeth_sbp_cbctl cbctl;
1700
1701 QETH_CARD_TEXT(card, 2, "brqsuppo");
1702 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
1703 SBP_DATA_SIZEOF(query_cmds_supp));
1704 if (!iob)
1705 return;
1706
1707 if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
1708 &cbctl)) {
1709 card->options.sbp.role = QETH_SBP_ROLE_NONE;
1710 card->options.sbp.supported_funcs = 0;
1711 return;
1712 }
1713 card->options.sbp.supported_funcs = cbctl.data.supported;
1714 }
1715
static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
1717 struct qeth_reply *reply, unsigned long data)
1718 {
1719 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1720 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
1721 struct qeth_sbp_port_data *qports;
1722 int rc;
1723
1724 QETH_CARD_TEXT(card, 2, "brqprtcb");
1725 rc = qeth_bridgeport_makerc(card, cmd);
1726 if (rc)
1727 return rc;
1728
1729 qports = &cmd->data.sbp.data.port_data;
1730 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
1731 QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
1732 return -EINVAL;
1733 }
1734 /* first entry contains the state of the local port */
1735 if (qports->num_entries > 0) {
1736 if (cbctl->data.qports.role)
1737 *cbctl->data.qports.role = qports->entry[0].role;
1738 if (cbctl->data.qports.state)
1739 *cbctl->data.qports.state = qports->entry[0].state;
1740 }
1741 return 0;
1742 }
1743
1744 /**
1745 * qeth_bridgeport_query_ports() - query local bridgeport status.
1746 * @card: qeth_card structure pointer.
1747 * @role: Role of the port: 0-none, 1-primary, 2-secondary.
1748 * @state: State of the port: 0-inactive, 1-standby, 2-active.
1749 *
1750 * Returns negative errno-compatible error indication or 0 on success.
1751 *
1752 * 'role' and 'state' are not updated in case of hardware operation failure.
1753 */
1754 int qeth_bridgeport_query_ports(struct qeth_card *card,
1755 enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
1756 {
1757 struct qeth_cmd_buffer *iob;
1758 struct _qeth_sbp_cbctl cbctl = {
1759 .data = {
1760 .qports = {
1761 .role = role,
1762 .state = state,
1763 },
1764 },
1765 };
1766
1767 QETH_CARD_TEXT(card, 2, "brqports");
1768 if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
1769 return -EOPNOTSUPP;
1770 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
1771 if (!iob)
1772 return -ENOMEM;
1773
1774 return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
1775 &cbctl);
1776 }
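
/* Usage sketch (illustrative only, not part of this driver): a sysfs show
 * handler for the bridgeport state is assumed to do something like
 *	enum qeth_sbp_roles role;
 *	enum qeth_sbp_states state;
 *	rc = qeth_bridgeport_query_ports(card, &role, &state);
 * and format @role/@state for userspace when rc is 0.
 */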
1777
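/* Completion callback for the SETBRIDGEPORT role commands: just converts the
 * IPA return code into an errno value.
 */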
1778 static int qeth_bridgeport_set_cb(struct qeth_card *card,
1779 struct qeth_reply *reply, unsigned long data)
1780 {
1781 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
1782
1783 QETH_CARD_TEXT(card, 2, "brsetrcb");
1784 return qeth_bridgeport_makerc(card, cmd);
1785 }
1786
1787 /**
1788 * qeth_bridgeport_setrole() - Assign primary role to the port.
1789 * @card: qeth_card structure pointer.
1790 * @role: Role to assign.
1791 *
1792 * Returns negative errno-compatible error indication or 0 on success.
1793 */
1794 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
1795 {
1796 struct qeth_cmd_buffer *iob;
1797 enum qeth_ipa_sbp_cmd setcmd;
1798 unsigned int cmdlength = 0;
1799
1800 QETH_CARD_TEXT(card, 2, "brsetrol");
1801 switch (role) {
1802 case QETH_SBP_ROLE_NONE:
1803 setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
1804 break;
1805 case QETH_SBP_ROLE_PRIMARY:
1806 setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
1807 cmdlength = SBP_DATA_SIZEOF(set_primary);
1808 break;
1809 case QETH_SBP_ROLE_SECONDARY:
1810 setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
1811 break;
1812 default:
1813 return -EINVAL;
1814 }
1815 if (!(card->options.sbp.supported_funcs & setcmd))
1816 return -EOPNOTSUPP;
1817 iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
1818 if (!iob)
1819 return -ENOMEM;
1820
1821 return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
1822 }
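
/* Usage sketch (illustrative only; the bridge_role sysfs attribute is
 * assumed to be the typical caller):
 *	rc = qeth_bridgeport_setrole(card, QETH_SBP_ROLE_PRIMARY);
 * The resulting port role/state can then be read back with
 * qeth_bridgeport_query_ports().
 */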
1823
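/* Per-entry callback for the PNSO walk in qeth_bridgeport_an_set(): emits an
 * address-change event for each reported MAC, flagging the VLAN ID when the
 * entry's lnid is a valid VLAN.
 */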
1824 static void qeth_bridgeport_an_set_cb(void *priv,
1825 struct chsc_pnso_naid_l2 *entry)
1826 {
1827 struct qeth_card *card = (struct qeth_card *)priv;
1828 u8 code;
1829
1830 code = IPA_ADDR_CHANGE_CODE_MACADDR;
1831 if (entry->addr_lnid.lnid < VLAN_N_VID)
1832 code |= IPA_ADDR_CHANGE_CODE_VLANID;
1833 qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
1834 (struct net_if_token *)&entry->nit,
1835 (struct mac_addr_lnid *)&entry->addr_lnid);
1836 }
1837
1838 /**
1839 * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification
1840 * @card: qeth_card structure pointer.
1841 * @enable: 0 - disable, non-zero - enable notifications
1842 *
1843 * Returns negative errno-compatible error indication or 0 on success.
1844 *
1845 * On enable, emits a series of address notification udev events for all
1846 * currently registered hosts.
1847 */
1848 int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
1849 {
1850 int rc;
1851
1852 if (!card->options.sbp.supported_funcs)
1853 return -EOPNOTSUPP;
1854
1855 if (enable) {
1856 qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
1857 qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
1858 rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1,
1859 qeth_bridgeport_an_set_cb, card);
1860 if (rc)
1861 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1862 } else {
1863 rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL);
1864 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1865 }
1866 return rc;
1867 }
1868
1869 /* VNIC Characteristics support */
1870
1871 /* handle VNICC IPA command return codes; convert to error codes */
1872 static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
1873 {
1874 int rc;
1875
1876 switch (ipa_rc) {
1877 case IPA_RC_SUCCESS:
1878 return ipa_rc;
1879 case IPA_RC_L2_UNSUPPORTED_CMD:
1880 case IPA_RC_NOTSUPP:
1881 rc = -EOPNOTSUPP;
1882 break;
1883 case IPA_RC_VNICC_OOSEQ:
1884 rc = -EALREADY;
1885 break;
1886 case IPA_RC_VNICC_VNICBP:
1887 rc = -EBUSY;
1888 break;
1889 case IPA_RC_L2_ADDR_TABLE_FULL:
1890 rc = -ENOSPC;
1891 break;
1892 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
1893 rc = -EACCES;
1894 break;
1895 default:
1896 rc = -EIO;
1897 }
1898
1899 QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc);
1900 return rc;
1901 }
1902
1903 /* generic VNICC request call back */
1904 static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
1905 struct qeth_reply *reply,
1906 unsigned long data)
1907 {
1908 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1909 struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
1910 u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;
1911
1912 QETH_CARD_TEXT(card, 2, "vniccrcb");
1913 if (cmd->hdr.return_code)
1914 return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code);
1915 /* return results to caller */
1916 card->options.vnicc.sup_chars = rep->vnicc_cmds.supported;
1917 card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
1918
1919 if (sub_cmd == IPA_VNICC_QUERY_CMDS)
1920 *(u32 *)reply->param = rep->data.query_cmds.sup_cmds;
1921 else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
1922 *(u32 *)reply->param = rep->data.getset_timeout.timeout;
1923
1924 return 0;
1925 }
1926
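/* Allocate an IPA_CMD_VNICC command buffer and prefill the VNICC sub-command
 * header for @vnicc_cmd with @data_length bytes of payload.
 */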
1927 static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card,
1928 u32 vnicc_cmd,
1929 unsigned int data_length)
1930 {
1931 struct qeth_ipacmd_vnicc_hdr *hdr;
1932 struct qeth_cmd_buffer *iob;
1933
1934 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_VNICC, QETH_PROT_NONE,
1935 data_length +
1936 offsetof(struct qeth_ipacmd_vnicc, data));
1937 if (!iob)
1938 return NULL;
1939
1940 hdr = &__ipa_cmd(iob)->data.vnicc.hdr;
1941 hdr->data_length = sizeof(*hdr) + data_length;
1942 hdr->sub_command = vnicc_cmd;
1943 return iob;
1944 }
1945
1946 /* VNICC query VNIC characteristics request */
1947 static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
1948 {
1949 struct qeth_cmd_buffer *iob;
1950
1951 QETH_CARD_TEXT(card, 2, "vniccqch");
1952 iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CHARS, 0);
1953 if (!iob)
1954 return -ENOMEM;
1955
1956 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
1957 }
1958
1959 /* VNICC query sub commands request */
1960 static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
1961 u32 *sup_cmds)
1962 {
1963 struct qeth_cmd_buffer *iob;
1964
1965 QETH_CARD_TEXT(card, 2, "vniccqcm");
1966 iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CMDS,
1967 VNICC_DATA_SIZEOF(query_cmds));
1968 if (!iob)
1969 return -ENOMEM;
1970
1971 __ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
1972
1973 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
1974 }
1975
1976 /* VNICC enable/disable characteristic request */
1977 static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
1978 u32 cmd)
1979 {
1980 struct qeth_cmd_buffer *iob;
1981
1982 QETH_CARD_TEXT(card, 2, "vniccedc");
1983 iob = qeth_l2_vnicc_build_cmd(card, cmd, VNICC_DATA_SIZEOF(set_char));
1984 if (!iob)
1985 return -ENOMEM;
1986
1987 __ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char;
1988
1989 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
1990 }
1991
1992 /* VNICC get/set timeout for characteristic request */
1993 static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
1994 u32 cmd, u32 *timeout)
1995 {
1996 struct qeth_vnicc_getset_timeout *getset_timeout;
1997 struct qeth_cmd_buffer *iob;
1998
1999 QETH_CARD_TEXT(card, 2, "vniccgst");
2000 iob = qeth_l2_vnicc_build_cmd(card, cmd,
2001 VNICC_DATA_SIZEOF(getset_timeout));
2002 if (!iob)
2003 return -ENOMEM;
2004
2005 getset_timeout = &__ipa_cmd(iob)->data.vnicc.data.getset_timeout;
2006 getset_timeout->vnic_char = vnicc;
2007
2008 if (cmd == IPA_VNICC_SET_TIMEOUT)
2009 getset_timeout->timeout = *timeout;
2010
2011 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
2012 }
2013
2014 /* recover user timeout setting */
2015 static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
2016 u32 *timeout)
2017 {
2018 if (card->options.vnicc.sup_chars & vnicc &&
2019 card->options.vnicc.getset_timeout_sup & vnicc &&
2020 !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
2021 timeout))
2022 return false;
2023 *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
2024 return true;
2025 }
2026
2027 /* set current VNICC flag state; called from sysfs store function */
2028 int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
2029 {
2030 int rc = 0;
2031 u32 cmd;
2032
2033 QETH_CARD_TEXT(card, 2, "vniccsch");
2034
2035 /* check if characteristic and enable/disable are supported */
2036 if (!(card->options.vnicc.sup_chars & vnicc) ||
2037 !(card->options.vnicc.set_char_sup & vnicc))
2038 return -EOPNOTSUPP;
2039
2040 if (qeth_bridgeport_is_in_use(card))
2041 return -EBUSY;
2042
2043 /* set enable/disable command and store wanted characteristic */
2044 if (state) {
2045 cmd = IPA_VNICC_ENABLE;
2046 card->options.vnicc.wanted_chars |= vnicc;
2047 } else {
2048 cmd = IPA_VNICC_DISABLE;
2049 card->options.vnicc.wanted_chars &= ~vnicc;
2050 }
2051
2052 /* do we need to do anything? */
2053 if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars)
2054 return rc;
2055
2056 /* if card is not ready, just record the new state locally and stop here */
2057 if (!qeth_card_hw_is_reachable(card)) {
2058 if (state)
2059 card->options.vnicc.cur_chars |= vnicc;
2060 else
2061 card->options.vnicc.cur_chars &= ~vnicc;
2062 return rc;
2063 }
2064
2065 rc = qeth_l2_vnicc_set_char(card, vnicc, cmd);
2066 if (rc)
2067 card->options.vnicc.wanted_chars =
2068 card->options.vnicc.cur_chars;
2069 else {
2070 /* successful online VNICC change; handle special cases */
2071 if (state && vnicc == QETH_VNICC_RX_BCAST)
2072 card->options.vnicc.rx_bcast_enabled = true;
2073 if (!state && vnicc == QETH_VNICC_LEARNING)
2074 qeth_l2_vnicc_recover_timeout(card, vnicc,
2075 &card->options.vnicc.learning_timeout);
2076 }
2077
2078 return rc;
2079 }
2080
2081 /* get current VNICC flag state; called from sysfs show function */
2082 int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
2083 {
2084 int rc = 0;
2085
2086 QETH_CARD_TEXT(card, 2, "vniccgch");
2087
2088 /* check if characteristic is supported */
2089 if (!(card->options.vnicc.sup_chars & vnicc))
2090 return -EOPNOTSUPP;
2091
2092 if (qeth_bridgeport_is_in_use(card))
2093 return -EBUSY;
2094
2095 /* if card is ready, query current VNICC state */
2096 if (qeth_card_hw_is_reachable(card))
2097 rc = qeth_l2_vnicc_query_chars(card);
2098
2099 *state = (card->options.vnicc.cur_chars & vnicc) ? true : false;
2100 return rc;
2101 }
2102
2103 /* set VNICC timeout; called from sysfs store function. Currently, only learning
2104 * supports timeout
2105 */
2106 int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
2107 {
2108 int rc = 0;
2109
2110 QETH_CARD_TEXT(card, 2, "vniccsto");
2111
2112 /* check if characteristic and set_timeout are supported */
2113 if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
2114 !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
2115 return -EOPNOTSUPP;
2116
2117 if (qeth_bridgeport_is_in_use(card))
2118 return -EBUSY;
2119
2120 /* do we need to do anything? */
2121 if (card->options.vnicc.learning_timeout == timeout)
2122 return rc;
2123
2124 /* if card is not ready, simply store the value internally and return */
2125 if (!qeth_card_hw_is_reachable(card)) {
2126 card->options.vnicc.learning_timeout = timeout;
2127 return rc;
2128 }
2129
2130 /* send timeout value to card; if successful, store value internally */
2131 rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
2132 IPA_VNICC_SET_TIMEOUT, &timeout);
2133 if (!rc)
2134 card->options.vnicc.learning_timeout = timeout;
2135
2136 return rc;
2137 }
2138
2139 /* get current VNICC timeout; called from sysfs show function. Currently, only
2140 * learning supports timeout
2141 */
2142 int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
2143 {
2144 int rc = 0;
2145
2146 QETH_CARD_TEXT(card, 2, "vniccgto");
2147
2148 /* check if characteristic and get_timeout are supported */
2149 if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
2150 !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
2151 return -EOPNOTSUPP;
2152
2153 if (qeth_bridgeport_is_in_use(card))
2154 return -EBUSY;
2155
2156 /* if card is ready, get timeout. Otherwise, just return stored value */
2157 *timeout = card->options.vnicc.learning_timeout;
2158 if (qeth_card_hw_is_reachable(card))
2159 rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
2160 IPA_VNICC_GET_TIMEOUT,
2161 timeout);
2162
2163 return rc;
2164 }
2165
2166 /* check if VNICC is currently enabled */
2167 static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
2168 {
2169 if (!card->options.vnicc.sup_chars)
2170 return false;
2171 /* default values are only OK if rx_bcast was not enabled by user
2172 * or the card is offline.
2173 */
2174 if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) {
2175 if (!card->options.vnicc.rx_bcast_enabled ||
2176 !qeth_card_hw_is_reachable(card))
2177 return false;
2178 }
2179 return true;
2180 }
2181
2182 /**
2183 * qeth_bridgeport_allowed() - are any qeth_bridgeport functions allowed?
2184 * @card: qeth_card structure pointer
2185 *
2186 * qeth_bridgeport functionality is mutually exclusive with usage of the
2187 * VNIC Characteristics and dev2br address notifications.
2188 */
2189 bool qeth_bridgeport_allowed(struct qeth_card *card)
2190 {
2191 struct qeth_priv *priv = netdev_priv(card->dev);
2192
2193 return (!_qeth_l2_vnicc_is_in_use(card) &&
2194 !(priv->brport_features & BR_LEARNING_SYNC));
2195 }
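
/* Usage sketch (illustrative; the bridgeport sysfs handlers are assumed to
 * be the typical callers):
 *	if (!qeth_bridgeport_allowed(card))
 *		return -EBUSY;
 */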
2196
2197 /* recover user characteristic setting */
2198 static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
2199 bool enable)
2200 {
2201 u32 cmd = enable ? IPA_VNICC_ENABLE : IPA_VNICC_DISABLE;
2202
2203 if (card->options.vnicc.sup_chars & vnicc &&
2204 card->options.vnicc.set_char_sup & vnicc &&
2205 !qeth_l2_vnicc_set_char(card, vnicc, cmd))
2206 return false;
2207 card->options.vnicc.wanted_chars &= ~vnicc;
2208 card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc;
2209 return true;
2210 }
2211
2212 /* (re-)initialize VNICC */
2213 static void qeth_l2_vnicc_init(struct qeth_card *card)
2214 {
2215 u32 *timeout = &card->options.vnicc.learning_timeout;
2216 bool enable, error = false;
2217 unsigned int chars_len, i;
2218 unsigned long chars_tmp;
2219 u32 sup_cmds, vnicc;
2220
2221 QETH_CARD_TEXT(card, 2, "vniccini");
2222 /* reset rx_bcast */
2223 card->options.vnicc.rx_bcast_enabled = 0;
2224 /* initial query and storage of VNIC characteristics */
2225 if (qeth_l2_vnicc_query_chars(card)) {
2226 if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT ||
2227 *timeout != QETH_VNICC_DEFAULT_TIMEOUT)
2228 dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
2229 /* fail quietly if user didn't change the default config */
2230 card->options.vnicc.sup_chars = 0;
2231 card->options.vnicc.cur_chars = 0;
2232 card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
2233 return;
2234 }
2235 /* get supported commands for each supported characteristic */
2236 chars_tmp = card->options.vnicc.sup_chars;
2237 chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
2238 for_each_set_bit(i, &chars_tmp, chars_len) {
2239 vnicc = BIT(i);
2240 if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
2241 sup_cmds = 0;
2242 error = true;
2243 }
2244 if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
2245 (sup_cmds & IPA_VNICC_GET_TIMEOUT))
2246 card->options.vnicc.getset_timeout_sup |= vnicc;
2247 else
2248 card->options.vnicc.getset_timeout_sup &= ~vnicc;
2249 if ((sup_cmds & IPA_VNICC_ENABLE) &&
2250 (sup_cmds & IPA_VNICC_DISABLE))
2251 card->options.vnicc.set_char_sup |= vnicc;
2252 else
2253 card->options.vnicc.set_char_sup &= ~vnicc;
2254 }
2255 /* enforce assumed default values and recover settings, if changed */
2256 error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
2257 timeout);
2258 /* Change chars, if necessary */
2259 chars_tmp = card->options.vnicc.wanted_chars ^
2260 card->options.vnicc.cur_chars;
2261 chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
2262 for_each_set_bit(i, &chars_tmp, chars_len) {
2263 vnicc = BIT(i);
2264 enable = card->options.vnicc.wanted_chars & vnicc;
2265 error |= qeth_l2_vnicc_recover_char(card, vnicc, enable);
2266 }
2267 if (error)
2268 dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
2269 }
2270
2271 /* configure default values of VNIC characteristics */
2272 static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
2273 {
2274 /* characteristics values */
2275 card->options.vnicc.sup_chars = QETH_VNICC_ALL;
2276 card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT;
2277 card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT;
2278 /* supported commands */
2279 card->options.vnicc.set_char_sup = QETH_VNICC_ALL;
2280 card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING;
2281 /* settings wanted by users */
2282 card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
2283 }
2284
2285 static const struct device_type qeth_l2_devtype = {
2286 .name = "qeth_layer2",
2287 .groups = qeth_l2_attr_groups,
2288 };
2289
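/* Discipline setup hook: initialize VNICC defaults, the sbp lock and the
 * rx_mode work, and attach the layer-2 sysfs attribute groups.
 */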
2290 static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
2291 {
2292 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2293 int rc;
2294
2295 qeth_l2_vnicc_set_defaults(card);
2296 mutex_init(&card->sbp_lock);
2297
2298 if (gdev->dev.type) {
2299 rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups);
2300 if (rc)
2301 return rc;
2302 } else {
2303 gdev->dev.type = &qeth_l2_devtype;
2304 }
2305
2306 INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
2307 return 0;
2308 }
2309
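/* Discipline removal hook: stop all worker threads, take the card offline if
 * needed, drop the br2dev reference and unregister the net_device.
 */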
2310 static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
2311 {
2312 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2313 struct qeth_priv *priv;
2314
2315 if (gdev->dev.type != &qeth_l2_devtype)
2316 device_remove_groups(&gdev->dev, qeth_l2_attr_groups);
2317
2318 qeth_set_allowed_threads(card, 0, 1);
2319 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
2320
2321 if (gdev->state == CCWGROUP_ONLINE)
2322 qeth_set_offline(card, card->discipline, false);
2323
2324 if (card->dev->reg_state == NETREG_REGISTERED) {
2325 priv = netdev_priv(card->dev);
2326 if (priv->brport_features & BR_LEARNING_SYNC) {
2327 rtnl_lock();
2328 qeth_l2_br2dev_put();
2329 rtnl_unlock();
2330 }
2331 unregister_netdev(card->dev);
2332 }
2333 }
2334
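/* Discipline online hook: (re-)detect bridgeport and dev2br support, register
 * the MAC address, initialize VNICC and bring up or re-attach the net_device.
 */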
2335 static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
2336 {
2337 struct net_device *dev = card->dev;
2338 int rc = 0;
2339
2340 qeth_l2_detect_dev2br_support(card);
2341
2342 mutex_lock(&card->sbp_lock);
2343 qeth_bridgeport_query_support(card);
2344 if (card->options.sbp.supported_funcs) {
2345 qeth_l2_setup_bridgeport_attrs(card);
2346 dev_info(&card->gdev->dev,
2347 "The device represents a Bridge Capable Port\n");
2348 }
2349 mutex_unlock(&card->sbp_lock);
2350
2351 qeth_l2_register_dev_addr(card);
2352
2353 /* for the rx_bcast characteristic, init VNICC after setmac */
2354 qeth_l2_vnicc_init(card);
2355
2356 qeth_l2_trace_features(card);
2357
2358 /* softsetup */
2359 QETH_CARD_TEXT(card, 2, "softsetp");
2360
2361 card->state = CARD_STATE_SOFTSETUP;
2362
2363 qeth_set_allowed_threads(card, 0xffffffff, 0);
2364
2365 if (dev->reg_state != NETREG_REGISTERED) {
2366 rc = qeth_l2_setup_netdev(card);
2367 if (rc)
2368 goto err_setup;
2369
2370 if (carrier_ok)
2371 netif_carrier_on(dev);
2372 } else {
2373 rtnl_lock();
2374 rc = qeth_set_real_num_tx_queues(card,
2375 qeth_tx_actual_queues(card));
2376 if (rc) {
2377 rtnl_unlock();
2378 goto err_set_queues;
2379 }
2380
2381 if (carrier_ok)
2382 netif_carrier_on(dev);
2383 else
2384 netif_carrier_off(dev);
2385
2386 netif_device_attach(dev);
2387 qeth_enable_hw_features(dev);
2388 qeth_l2_enable_brport_features(card);
2389
2390 if (card->info.open_when_online) {
2391 card->info.open_when_online = 0;
2392 dev_open(dev, NULL);
2393 }
2394 rtnl_unlock();
2395 }
2396 return 0;
2397
2398 err_set_queues:
2399 err_setup:
2400 qeth_set_allowed_threads(card, 0, 1);
2401 card->state = CARD_STATE_DOWN;
2402 return rc;
2403 }
2404
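/* Discipline offline hook: stop workers, drain the pending rx_mode cache,
 * reset the PNSO mode and flush learned dev2br FDB entries if needed.
 */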
2405 static void qeth_l2_set_offline(struct qeth_card *card)
2406 {
2407 struct qeth_priv *priv = netdev_priv(card->dev);
2408
2409 qeth_set_allowed_threads(card, 0, 1);
2410 qeth_l2_drain_rx_mode_cache(card);
2411
2412 if (card->state == CARD_STATE_SOFTSETUP)
2413 card->state = CARD_STATE_DOWN;
2414
2415 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
2416 if (priv->brport_features & BR_LEARNING_SYNC)
2417 qeth_l2_dev2br_fdb_flush(card);
2418 }
2419
2420 /* Returns zero if the command is successfully "consumed" */
2421 static int qeth_l2_control_event(struct qeth_card *card,
2422 struct qeth_ipa_cmd *cmd)
2423 {
2424 switch (cmd->hdr.command) {
2425 case IPA_CMD_SETBRIDGEPORT_OSA:
2426 case IPA_CMD_SETBRIDGEPORT_IQD:
2427 if (cmd->data.sbp.hdr.command_code ==
2428 IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
2429 qeth_bridge_state_change(card, cmd);
2430 return 0;
2431 }
2432
2433 return 1;
2434 case IPA_CMD_ADDRESS_CHANGE_NOTIF:
2435 qeth_addr_change_event(card, cmd);
2436 return 0;
2437 default:
2438 return 1;
2439 }
2440 }
2441
2442 const struct qeth_discipline qeth_l2_discipline = {
2443 .setup = qeth_l2_probe_device,
2444 .remove = qeth_l2_remove_device,
2445 .set_online = qeth_l2_set_online,
2446 .set_offline = qeth_l2_set_offline,
2447 .control_event_handler = qeth_l2_control_event,
2448 };
2449 EXPORT_SYMBOL_GPL(qeth_l2_discipline);
2450
2451 static int __init qeth_l2_init(void)
2452 {
2453 pr_info("register layer 2 discipline\n");
2454 refcount_set(&qeth_l2_switchdev_notify_refcnt, 0);
2455 return 0;
2456 }
2457
2458 static void __exit qeth_l2_exit(void)
2459 {
2460 pr_info("unregister layer 2 discipline\n");
2461 }
2462
2463 module_init(qeth_l2_init);
2464 module_exit(qeth_l2_exit);
2465 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
2466 MODULE_DESCRIPTION("qeth layer 2 discipline");
2467 MODULE_LICENSE("GPL");
2468