1 /*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28 #include <linux/if_ether.h>
29 #include <linux/delay.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32
33 #include "e1000_mac.h"
34
35 #include "igb.h"
36
37 static s32 igb_set_default_fc(struct e1000_hw *hw);
38 static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
39
40 /**
41 * igb_get_bus_info_pcie - Get PCIe bus information
42 * @hw: pointer to the HW structure
43 *
44 * Determines and stores the system bus information for a particular
45 * network interface. The following bus information is determined and stored:
46 * bus speed, bus width, type (PCIe), and PCIe function.
47 **/
48 s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
49 {
50 struct e1000_bus_info *bus = &hw->bus;
51 s32 ret_val;
52 u32 reg;
53 u16 pcie_link_status;
54
55 bus->type = e1000_bus_type_pci_express;
56
57 ret_val = igb_read_pcie_cap_reg(hw,
58 PCI_EXP_LNKSTA,
59 &pcie_link_status);
60 if (ret_val) {
61 bus->width = e1000_bus_width_unknown;
62 bus->speed = e1000_bus_speed_unknown;
63 } else {
64 switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
65 case PCI_EXP_LNKSTA_CLS_2_5GB:
66 bus->speed = e1000_bus_speed_2500;
67 break;
68 case PCI_EXP_LNKSTA_CLS_5_0GB:
69 bus->speed = e1000_bus_speed_5000;
70 break;
71 default:
72 bus->speed = e1000_bus_speed_unknown;
73 break;
74 }
75
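/*
 * The negotiated link width field reports the lane count directly
 * (1, 2, 4, ...), which lines up with the e1000_bus_width enumeration
 * values, so the raw field can be cast below.
 */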
76 bus->width = (enum e1000_bus_width)((pcie_link_status &
77 PCI_EXP_LNKSTA_NLW) >>
78 PCI_EXP_LNKSTA_NLW_SHIFT);
79 }
80
81 reg = rd32(E1000_STATUS);
82 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
83
84 return 0;
85 }
86
87 /**
88 * igb_clear_vfta - Clear VLAN filter table
89 * @hw: pointer to the HW structure
90 *
91 * Clears the register array which contains the VLAN filter table by
92 * setting all the values to 0.
93 **/
94 void igb_clear_vfta(struct e1000_hw *hw)
95 {
96 u32 offset;
97
98 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
99 array_wr32(E1000_VFTA, offset, 0);
100 wrfl();
101 }
102 }
103
104 /**
105 * igb_write_vfta - Write value to VLAN filter table
106 * @hw: pointer to the HW structure
107 * @offset: register offset in VLAN filter table
108 * @value: register value written to VLAN filter table
109 *
110 * Writes value at the given offset in the register array which stores
111 * the VLAN filter table.
112 **/
113 static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
114 {
115 array_wr32(E1000_VFTA, offset, value);
116 wrfl();
117 }
118
119 /**
120 * igb_init_rx_addrs - Initialize receive addresses
121 * @hw: pointer to the HW structure
122 * @rar_count: receive address registers
123 *
124 * Sets up the receive address registers by setting the base receive address
125 * register to the device's MAC address and clearing all the other receive
126 * address registers to 0.
127 **/
128 void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
129 {
130 u32 i;
131 u8 mac_addr[ETH_ALEN] = {0};
132
133 /* Setup the receive address */
134 hw_dbg("Programming MAC Address into RAR[0]\n");
135
136 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
137
138 /* Zero out the other (rar_entry_count - 1) receive addresses */
139 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
140 for (i = 1; i < rar_count; i++)
141 hw->mac.ops.rar_set(hw, mac_addr, i);
142 }
143
144 /**
145 * igb_vfta_set - enable or disable vlan in VLAN filter table
146 * @hw: pointer to the HW structure
147 * @vid: VLAN id to add or remove
148 * @add: if true add filter, if false remove
149 *
150 * Sets or clears a bit in the VLAN filter table array based on VLAN id
151 * and if we are adding or removing the filter
152 **/
153 s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
154 {
155 u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
156 u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
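/* e.g. VID 1003 selects VFTA word 31 (1003 >> 5) and bit 11 (1003 & 0x1F) */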
157 u32 vfta = array_rd32(E1000_VFTA, index);
158 s32 ret_val = 0;
159
160 /* bit was set/cleared before we started */
161 if ((!!(vfta & mask)) == add) {
162 ret_val = -E1000_ERR_CONFIG;
163 } else {
164 if (add)
165 vfta |= mask;
166 else
167 vfta &= ~mask;
168 }
169
170 igb_write_vfta(hw, index, vfta);
171
172 return ret_val;
173 }
174
175 /**
176 * igb_check_alt_mac_addr - Check for alternate MAC addr
177 * @hw: pointer to the HW structure
178 *
179 * Checks the NVM for an alternate MAC address. An alternate MAC address
180 * can be set up by pre-boot software and must be treated like a permanent
181 * address and must override the actual permanent MAC address. If an
182 * alternate MAC address is found it is saved in the hw struct and
183 * programmed into RAR0 and the function returns success, otherwise the
184 * function returns an error.
185 **/
186 s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
187 {
188 u32 i;
189 s32 ret_val = 0;
190 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
191 u8 alt_mac_addr[ETH_ALEN];
192
193 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
194 &nvm_alt_mac_addr_offset);
195 if (ret_val) {
196 hw_dbg("NVM Read Error\n");
197 goto out;
198 }
199
200 if (nvm_alt_mac_addr_offset == 0xFFFF) {
201 /* There is no Alternate MAC Address */
202 goto out;
203 }
204
205 if (hw->bus.func == E1000_FUNC_1)
206 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
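/* Each port's alternate address occupies ETH_ALEN/2 NVM words, so the
 * LAN1 copy starts E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 words past the base.
 */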
207 for (i = 0; i < ETH_ALEN; i += 2) {
208 offset = nvm_alt_mac_addr_offset + (i >> 1);
209 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
210 if (ret_val) {
211 hw_dbg("NVM Read Error\n");
212 goto out;
213 }
214
215 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
216 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
217 }
218
219 /* if multicast bit is set, the alternate address will not be used */
220 if (alt_mac_addr[0] & 0x01) {
221 hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
222 goto out;
223 }
224
225 /*
226 * We have a valid alternate MAC address, and we want to treat it the
227 * same as the normal permanent MAC address stored by the HW into the
228 * RAR. Do this by mapping this address into RAR0.
229 */
230 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
231
232 out:
233 return ret_val;
234 }
235
236 /**
237 * igb_rar_set - Set receive address register
238 * @hw: pointer to the HW structure
239 * @addr: pointer to the receive address
240 * @index: receive address array register
241 *
242 * Sets the receive address array register at index to the address passed
243 * in by addr.
244 **/
245 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
246 {
247 u32 rar_low, rar_high;
248
249 /*
250 * HW expects these in little endian so we reverse the byte order
251 * from network order (big endian) to little endian
252 */
253 rar_low = ((u32) addr[0] |
254 ((u32) addr[1] << 8) |
255 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
256
257 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
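/* e.g. 00:A0:C9:12:34:56 packs to rar_low = 0x12C9A000 and
 * rar_high = 0x00005634 (before the AV bit is set below)
 */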
258
259 /* If MAC address zero, no need to set the AV bit */
260 if (rar_low || rar_high)
261 rar_high |= E1000_RAH_AV;
262
263 /*
264 * Some bridges will combine consecutive 32-bit writes into
265 * a single burst write, which will malfunction on some parts.
266 * The flushes avoid this.
267 */
268 wr32(E1000_RAL(index), rar_low);
269 wrfl();
270 wr32(E1000_RAH(index), rar_high);
271 wrfl();
272 }
273
274 /**
275 * igb_mta_set - Set multicast filter table address
276 * @hw: pointer to the HW structure
277 * @hash_value: determines the MTA register and bit to set
278 *
279 * The multicast table address is a register array of 32-bit registers.
280 * The hash_value is used to determine what register the bit is in, the
281 * current value is read, the new bit is OR'd in and the new value is
282 * written back into the register.
283 **/
284 void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
285 {
286 u32 hash_bit, hash_reg, mta;
287
288 /*
289 * The MTA is a register array of 32-bit registers. It is
290 * treated like an array of (32*mta_reg_count) bits. We want to
291 * set bit BitArray[hash_value]. So we figure out what register
292 * the bit is in, read it, OR in the new bit, then write
293 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
294 * mask to bits 31:5 of the hash value which gives us the
295 * register we're modifying. The hash bit within that register
296 * is determined by the lower 5 bits of the hash value.
297 */
298 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
299 hash_bit = hash_value & 0x1F;
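/* e.g. the case-0 hash 0x563 from the example in igb_hash_mc_addr()
 * selects MTA register 43 and bit 3, assuming a 128-register MTA
 */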
300
301 mta = array_rd32(E1000_MTA, hash_reg);
302
303 mta |= (1 << hash_bit);
304
305 array_wr32(E1000_MTA, hash_reg, mta);
306 wrfl();
307 }
308
309 /**
310 * igb_hash_mc_addr - Generate a multicast hash value
311 * @hw: pointer to the HW structure
312 * @mc_addr: pointer to a multicast address
313 *
314 * Generates a multicast address hash value which is used to determine
315 * the multicast filter table array address and new table value. See
316 * igb_mta_set()
317 **/
318 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
319 {
320 u32 hash_value, hash_mask;
321 u8 bit_shift = 0;
322
323 /* Register count multiplied by bits per register */
324 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
325
326 /*
327 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
328 * where 0xFF would still fall within the hash mask.
329 */
330 while (hash_mask >> bit_shift != 0xFF)
331 bit_shift++;
332
333 /*
334 * The portion of the address that is used for the hash table
335 * is determined by the mc_filter_type setting.
336 * The algorithm is such that there is a total of 8 bits of shifting.
337 * The bit_shift for a mc_filter_type of 0 represents the number of
338 * left-shifts where the MSB of mc_addr[5] would still fall within
339 * the hash_mask. Case 0 does this exactly. Since there are a total
340 * of 8 bits of shifting, then mc_addr[4] will shift right the
341 * remaining number of bits. Thus 8 - bit_shift. The rest of the
342 * cases are a variation of this algorithm...essentially raising the
343 * number of bits to shift mc_addr[5] left, while still keeping the
344 * 8-bit shifting total.
345 *
346 * For example, given the following Destination MAC Address and an
347 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
348 * we can see that the bit_shift for case 0 is 4. These are the hash
349 * values resulting from each mc_filter_type...
350 * [0] [1] [2] [3] [4] [5]
351 * 01 AA 00 12 34 56
352 * LSB MSB
353 *
354 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
355 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
356 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
357 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
358 */
359 switch (hw->mac.mc_filter_type) {
360 default:
361 case 0:
362 break;
363 case 1:
364 bit_shift += 1;
365 break;
366 case 2:
367 bit_shift += 2;
368 break;
369 case 3:
370 bit_shift += 4;
371 break;
372 }
373
374 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
375 (((u16) mc_addr[5]) << bit_shift)));
376
377 return hash_value;
378 }
379
380 /**
381 * igb_update_mc_addr_list - Update Multicast addresses
382 * @hw: pointer to the HW structure
383 * @mc_addr_list: array of multicast addresses to program
384 * @mc_addr_count: number of multicast addresses to program
385 *
386 * Updates the entire Multicast Table Array.
387 * The caller must have a packed mc_addr_list of multicast addresses.
388 **/
389 void igb_update_mc_addr_list(struct e1000_hw *hw,
390 u8 *mc_addr_list, u32 mc_addr_count)
391 {
392 u32 hash_value, hash_bit, hash_reg;
393 int i;
394
395 /* clear mta_shadow */
396 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
397
398 /* update mta_shadow from mc_addr_list */
399 for (i = 0; (u32) i < mc_addr_count; i++) {
400 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
401
402 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
403 hash_bit = hash_value & 0x1F;
404
405 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
406 mc_addr_list += (ETH_ALEN);
407 }
408
409 /* replace the entire MTA table */
410 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
411 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
412 wrfl();
413 }
414
415 /**
416 * igb_clear_hw_cntrs_base - Clear base hardware counters
417 * @hw: pointer to the HW structure
418 *
419 * Clears the base hardware counters by reading the counter registers.
420 **/
421 void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
422 {
423 rd32(E1000_CRCERRS);
424 rd32(E1000_SYMERRS);
425 rd32(E1000_MPC);
426 rd32(E1000_SCC);
427 rd32(E1000_ECOL);
428 rd32(E1000_MCC);
429 rd32(E1000_LATECOL);
430 rd32(E1000_COLC);
431 rd32(E1000_DC);
432 rd32(E1000_SEC);
433 rd32(E1000_RLEC);
434 rd32(E1000_XONRXC);
435 rd32(E1000_XONTXC);
436 rd32(E1000_XOFFRXC);
437 rd32(E1000_XOFFTXC);
438 rd32(E1000_FCRUC);
439 rd32(E1000_GPRC);
440 rd32(E1000_BPRC);
441 rd32(E1000_MPRC);
442 rd32(E1000_GPTC);
443 rd32(E1000_GORCL);
444 rd32(E1000_GORCH);
445 rd32(E1000_GOTCL);
446 rd32(E1000_GOTCH);
447 rd32(E1000_RNBC);
448 rd32(E1000_RUC);
449 rd32(E1000_RFC);
450 rd32(E1000_ROC);
451 rd32(E1000_RJC);
452 rd32(E1000_TORL);
453 rd32(E1000_TORH);
454 rd32(E1000_TOTL);
455 rd32(E1000_TOTH);
456 rd32(E1000_TPR);
457 rd32(E1000_TPT);
458 rd32(E1000_MPTC);
459 rd32(E1000_BPTC);
460 }
461
462 /**
463 * igb_check_for_copper_link - Check for link (Copper)
464 * @hw: pointer to the HW structure
465 *
466 * Checks to see if the link status of the hardware has changed. If a
467 * change in link status has been detected, then we read the PHY registers
468 * to get the current speed/duplex if link exists.
469 **/
470 s32 igb_check_for_copper_link(struct e1000_hw *hw)
471 {
472 struct e1000_mac_info *mac = &hw->mac;
473 s32 ret_val;
474 bool link;
475
476 /*
477 * We only want to go out to the PHY registers to see if Auto-Neg
478 * has completed and/or if our link status has changed. The
479 * get_link_status flag is set upon receiving a Link Status
480 * Change or Rx Sequence Error interrupt.
481 */
482 if (!mac->get_link_status) {
483 ret_val = 0;
484 goto out;
485 }
486
487 /*
488 * First we want to see if the MII Status Register reports
489 * link. If so, then we want to get the current speed/duplex
490 * of the PHY.
491 */
492 ret_val = igb_phy_has_link(hw, 1, 0, &link);
493 if (ret_val)
494 goto out;
495
496 if (!link)
497 goto out; /* No link detected */
498
499 mac->get_link_status = false;
500
501 /*
502 * Check if there was DownShift; this must be checked
503 * immediately after link-up.
504 */
505 igb_check_downshift(hw);
506
507 /*
508 * If we are forcing speed/duplex, then we simply return since
509 * we have already determined whether we have link or not.
510 */
511 if (!mac->autoneg) {
512 ret_val = -E1000_ERR_CONFIG;
513 goto out;
514 }
515
516 /*
517 * Auto-Neg is enabled. Auto Speed Detection takes care
518 * of MAC speed/duplex configuration. So we only need to
519 * configure Collision Distance in the MAC.
520 */
521 igb_config_collision_dist(hw);
522
523 /*
524 * Configure Flow Control now that Auto-Neg has completed.
525 * First, we need to restore the desired flow control
526 * settings because we may have had to re-autoneg with a
527 * different link partner.
528 */
529 ret_val = igb_config_fc_after_link_up(hw);
530 if (ret_val)
531 hw_dbg("Error configuring flow control\n");
532
533 out:
534 return ret_val;
535 }
536
537 /**
538 * igb_setup_link - Setup flow control and link settings
539 * @hw: pointer to the HW structure
540 *
541 * Determines which flow control settings to use, then configures flow
542 * control. Calls the appropriate media-specific link configuration
543 * function. Assuming the adapter has a valid link partner, a valid link
544 * should be established. Assumes the hardware has previously been reset
545 * and the transmitter and receiver are not enabled.
546 **/
547 s32 igb_setup_link(struct e1000_hw *hw)
548 {
549 s32 ret_val = 0;
550
551 /*
552 * In the case of the phy reset being blocked, we already have a link.
553 * We do not need to set it up again.
554 */
555 if (igb_check_reset_block(hw))
556 goto out;
557
558 /*
559 * If requested flow control is set to default, set flow control
560 * based on the EEPROM flow control settings.
561 */
562 if (hw->fc.requested_mode == e1000_fc_default) {
563 ret_val = igb_set_default_fc(hw);
564 if (ret_val)
565 goto out;
566 }
567
568 /*
569 * We want to save off the original Flow Control configuration just
570 * in case we get disconnected and then reconnected into a different
571 * hub or switch with different Flow Control capabilities.
572 */
573 hw->fc.current_mode = hw->fc.requested_mode;
574
575 hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
576
577 /* Call the necessary media_type subroutine to configure the link. */
578 ret_val = hw->mac.ops.setup_physical_interface(hw);
579 if (ret_val)
580 goto out;
581
582 /*
583 * Initialize the flow control address, type, and PAUSE timer
584 * registers to their default values. This is done even if flow
585 * control is disabled, because it does not hurt anything to
586 * initialize these registers.
587 */
588 hw_dbg("Initializing the Flow Control address, type and timer regs\n");
589 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
590 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
591 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
592
593 wr32(E1000_FCTTV, hw->fc.pause_time);
594
595 ret_val = igb_set_fc_watermarks(hw);
596
597 out:
598 return ret_val;
599 }
600
601 /**
602 * igb_config_collision_dist - Configure collision distance
603 * @hw: pointer to the HW structure
604 *
605 * Configures the collision distance to the default value and is used
606 * during link setup. Currently no func pointer exists and all
607 * implementations are handled in the generic version of this function.
608 **/
609 void igb_config_collision_dist(struct e1000_hw *hw)
610 {
611 u32 tctl;
612
613 tctl = rd32(E1000_TCTL);
614
615 tctl &= ~E1000_TCTL_COLD;
616 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
617
618 wr32(E1000_TCTL, tctl);
619 wrfl();
620 }
621
622 /**
623 * igb_set_fc_watermarks - Set flow control high/low watermarks
624 * @hw: pointer to the HW structure
625 *
626 * Sets the flow control high/low threshold (watermark) registers. If
627 * flow control XON frame transmission is enabled, then set XON frame
628 * transmission as well.
629 **/
630 static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
631 {
632 s32 ret_val = 0;
633 u32 fcrtl = 0, fcrth = 0;
634
635 /*
636 * Set the flow control receive threshold registers. Normally,
637 * these registers will be set to a default threshold that may be
638 * adjusted later by the driver's runtime code. However, if the
639 * ability to transmit pause frames is not enabled, then these
640 * registers will be set to 0.
641 */
642 if (hw->fc.current_mode & e1000_fc_tx_pause) {
643 /*
644 * We need to set up the Receive Threshold high and low water
645 * marks as well as (optionally) enabling the transmission of
646 * XON frames.
647 */
648 fcrtl = hw->fc.low_water;
649 if (hw->fc.send_xon)
650 fcrtl |= E1000_FCRTL_XONE;
651
652 fcrth = hw->fc.high_water;
653 }
654 wr32(E1000_FCRTL, fcrtl);
655 wr32(E1000_FCRTH, fcrth);
656
657 return ret_val;
658 }
659
660 /**
661 * igb_set_default_fc - Set flow control default values
662 * @hw: pointer to the HW structure
663 *
664 * Read the EEPROM for the default values for flow control and store the
665 * values.
666 **/
667 static s32 igb_set_default_fc(struct e1000_hw *hw)
668 {
669 s32 ret_val = 0;
670 u16 nvm_data;
671
672 /*
673 * Read and store word 0x0F of the EEPROM. This word contains bits
674 * that determine the hardware's default PAUSE (flow control) mode,
675 * a bit that determines whether the HW defaults to enabling or
676 * disabling auto-negotiation, and the direction of the
677 * SW defined pins. If there is no SW over-ride of the flow
678 * control setting, then the variable hw->fc will
679 * be initialized based on a value in the EEPROM.
680 */
681 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
682
683 if (ret_val) {
684 hw_dbg("NVM Read Error\n");
685 goto out;
686 }
687
688 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
689 hw->fc.requested_mode = e1000_fc_none;
690 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
691 NVM_WORD0F_ASM_DIR)
692 hw->fc.requested_mode = e1000_fc_tx_pause;
693 else
694 hw->fc.requested_mode = e1000_fc_full;
695
696 out:
697 return ret_val;
698 }
699
700 /**
701 * igb_force_mac_fc - Force the MAC's flow control settings
702 * @hw: pointer to the HW structure
703 *
704 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
705 * device control register to reflect the adapter settings. TFCE and RFCE
706 * need to be explicitly set by software when a copper PHY is used because
707 * autonegotiation is managed by the PHY rather than the MAC. Software must
708 * also configure these bits when link is forced on a fiber connection.
709 **/
710 s32 igb_force_mac_fc(struct e1000_hw *hw)
711 {
712 u32 ctrl;
713 s32 ret_val = 0;
714
715 ctrl = rd32(E1000_CTRL);
716
717 /*
718 * Because we didn't get link via the internal auto-negotiation
719 * mechanism (we either forced link or we got link via PHY
720 * auto-neg), we have to manually enable/disable transmit and
721 * receive flow control.
722 *
723 * The "Case" statement below enables/disable flow control
724 * according to the "hw->fc.current_mode" parameter.
725 *
726 * The possible values of the "fc" parameter are:
727 * 0: Flow control is completely disabled
728 * 1: Rx flow control is enabled (we can receive pause
729 * frames but not send pause frames).
730 * 2: Tx flow control is enabled (we can send pause frames
731 * but we do not receive pause frames).
732 * 3: Both Rx and Tx flow control (symmetric) are enabled.
733 * other: No other values should be possible at this point.
734 */
735 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
736
737 switch (hw->fc.current_mode) {
738 case e1000_fc_none:
739 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
740 break;
741 case e1000_fc_rx_pause:
742 ctrl &= (~E1000_CTRL_TFCE);
743 ctrl |= E1000_CTRL_RFCE;
744 break;
745 case e1000_fc_tx_pause:
746 ctrl &= (~E1000_CTRL_RFCE);
747 ctrl |= E1000_CTRL_TFCE;
748 break;
749 case e1000_fc_full:
750 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
751 break;
752 default:
753 hw_dbg("Flow control param set incorrectly\n");
754 ret_val = -E1000_ERR_CONFIG;
755 goto out;
756 }
757
758 wr32(E1000_CTRL, ctrl);
759
760 out:
761 return ret_val;
762 }
763
764 /**
765 * igb_config_fc_after_link_up - Configures flow control after link
766 * @hw: pointer to the HW structure
767 *
768 * Checks the status of auto-negotiation after link up to ensure that the
769 * speed and duplex were not forced. If the link needed to be forced, then
770 * flow control needs to be forced also. If auto-negotiation is enabled
771 * and did not fail, then we configure flow control based on our link
772 * partner.
773 **/
774 s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
775 {
776 struct e1000_mac_info *mac = &hw->mac;
777 s32 ret_val = 0;
778 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
779 u16 speed, duplex;
780
781 /*
782 * Check for the case where we have fiber media and auto-neg failed
783 * so we had to force link. In this case, we need to force the
784 * configuration of the MAC to match the "fc" parameter.
785 */
786 if (mac->autoneg_failed) {
787 if (hw->phy.media_type == e1000_media_type_internal_serdes)
788 ret_val = igb_force_mac_fc(hw);
789 } else {
790 if (hw->phy.media_type == e1000_media_type_copper)
791 ret_val = igb_force_mac_fc(hw);
792 }
793
794 if (ret_val) {
795 hw_dbg("Error forcing flow control settings\n");
796 goto out;
797 }
798
799 /*
800 * Check for the case where we have copper media and auto-neg is
801 * enabled. In this case, we need to check and see if Auto-Neg
802 * has completed, and if so, how the PHY and link partner have
803 * flow control configured.
804 */
805 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
806 /*
807 * Read the MII Status Register and check to see if AutoNeg
808 * has completed. We read this twice because this reg has
809 * some "sticky" (latched) bits.
810 */
811 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
812 &mii_status_reg);
813 if (ret_val)
814 goto out;
815 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
816 &mii_status_reg);
817 if (ret_val)
818 goto out;
819
820 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
821 hw_dbg("Copper PHY and Auto Neg "
822 "has not completed.\n");
823 goto out;
824 }
825
826 /*
827 * The AutoNeg process has completed, so we now need to
828 * read both the Auto Negotiation Advertisement
829 * Register (Address 4) and the Auto-Negotiation Base
830 * Page Ability Register (Address 5) to determine how
831 * flow control was negotiated.
832 */
833 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
834 &mii_nway_adv_reg);
835 if (ret_val)
836 goto out;
837 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
838 &mii_nway_lp_ability_reg);
839 if (ret_val)
840 goto out;
841
842 /*
843 * Two bits in the Auto Negotiation Advertisement Register
844 * (Address 4) and two bits in the Auto Negotiation Base
845 * Page Ability Register (Address 5) determine flow control
846 * for both the PHY and the link partner. The following
847 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
848 * 1999, describes these PAUSE resolution bits and how flow
849 * control is determined based upon these settings.
850 * NOTE: DC = Don't Care
851 *
852 * LOCAL DEVICE | LINK PARTNER
853 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
854 *-------|---------|-------|---------|--------------------
855 * 0 | 0 | DC | DC | e1000_fc_none
856 * 0 | 1 | 0 | DC | e1000_fc_none
857 * 0 | 1 | 1 | 0 | e1000_fc_none
858 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
859 * 1 | 0 | 0 | DC | e1000_fc_none
860 * 1 | DC | 1 | DC | e1000_fc_full
861 * 1 | 1 | 0 | 0 | e1000_fc_none
862 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
863 *
864 * Are both PAUSE bits set to 1? If so, this implies
865 * Symmetric Flow Control is enabled at both ends. The
866 * ASM_DIR bits are irrelevant per the spec.
867 *
868 * For Symmetric Flow Control:
869 *
870 * LOCAL DEVICE | LINK PARTNER
871 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
872 *-------|---------|-------|---------|--------------------
873 * 1 | DC | 1 | DC | E1000_fc_full
874 *
875 */
876 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
877 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
878 /*
879 * Now we need to check if the user selected RX ONLY
880 * of pause frames. In this case, we had to advertise
881 * FULL flow control because we could not advertise RX
882 * ONLY. Hence, we must now check to see if we need to
883 * turn OFF the TRANSMISSION of PAUSE frames.
884 */
885 if (hw->fc.requested_mode == e1000_fc_full) {
886 hw->fc.current_mode = e1000_fc_full;
887 hw_dbg("Flow Control = FULL.\r\n");
888 } else {
889 hw->fc.current_mode = e1000_fc_rx_pause;
890 hw_dbg("Flow Control = "
891 "RX PAUSE frames only.\r\n");
892 }
893 }
894 /*
895 * For receiving PAUSE frames ONLY.
896 *
897 * LOCAL DEVICE | LINK PARTNER
898 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
899 *-------|---------|-------|---------|--------------------
900 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
901 */
902 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
903 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
904 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
905 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
906 hw->fc.current_mode = e1000_fc_tx_pause;
907 hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
908 }
909 /*
910 * For transmitting PAUSE frames ONLY.
911 *
912 * LOCAL DEVICE | LINK PARTNER
913 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
914 *-------|---------|-------|---------|--------------------
915 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
916 */
917 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
918 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
919 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
920 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
921 hw->fc.current_mode = e1000_fc_rx_pause;
922 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
923 }
924 /*
925 * Per the IEEE spec, at this point flow control should be
926 * disabled. However, we want to consider that we could
927 * be connected to a legacy switch that doesn't advertise
928 * desired flow control, but can be forced on the link
929 * partner. So if we advertised no flow control, that is
930 * what we will resolve to. If we advertised some kind of
931 * receive capability (Rx Pause Only or Full Flow Control)
932 * and the link partner advertised none, we will configure
933 * ourselves to enable Rx Flow Control only. We can do
934 * this safely for two reasons: If the link partner really
935 * didn't want flow control enabled, and we enable Rx, no
936 * harm done since we won't be receiving any PAUSE frames
937 * anyway. If the intent on the link partner was to have
938 * flow control enabled, then by us enabling RX only, we
939 * can at least receive pause frames and process them.
940 * This is a good idea because in most cases, since we are
941 * predominantly a server NIC, more often than not we will
942 * be asked to delay transmission of packets rather than to ask
943 * our link partner to pause transmission of frames.
944 */
945 else if ((hw->fc.requested_mode == e1000_fc_none ||
946 hw->fc.requested_mode == e1000_fc_tx_pause) ||
947 hw->fc.strict_ieee) {
948 hw->fc.current_mode = e1000_fc_none;
949 hw_dbg("Flow Control = NONE.\r\n");
950 } else {
951 hw->fc.current_mode = e1000_fc_rx_pause;
952 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
953 }
954
955 /*
956 * Now we need to do one last check... If we auto-
957 * negotiated to HALF DUPLEX, flow control should not be
958 * enabled per IEEE 802.3 spec.
959 */
960 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
961 if (ret_val) {
962 hw_dbg("Error getting link speed and duplex\n");
963 goto out;
964 }
965
966 if (duplex == HALF_DUPLEX)
967 hw->fc.current_mode = e1000_fc_none;
968
969 /*
970 * Now we call a subroutine to actually force the MAC
971 * controller to use the correct flow control settings.
972 */
973 ret_val = igb_force_mac_fc(hw);
974 if (ret_val) {
975 hw_dbg("Error forcing flow control settings\n");
976 goto out;
977 }
978 }
979
980 out:
981 return ret_val;
982 }
983
984 /**
985 * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
986 * @hw: pointer to the HW structure
987 * @speed: stores the current speed
988 * @duplex: stores the current duplex
989 *
990 * Read the status register for the current speed/duplex and store the current
991 * speed and duplex for copper connections.
992 **/
993 s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
994 u16 *duplex)
995 {
996 u32 status;
997
998 status = rd32(E1000_STATUS);
999 if (status & E1000_STATUS_SPEED_1000) {
1000 *speed = SPEED_1000;
1001 hw_dbg("1000 Mbs, ");
1002 } else if (status & E1000_STATUS_SPEED_100) {
1003 *speed = SPEED_100;
1004 hw_dbg("100 Mbs, ");
1005 } else {
1006 *speed = SPEED_10;
1007 hw_dbg("10 Mbs, ");
1008 }
1009
1010 if (status & E1000_STATUS_FD) {
1011 *duplex = FULL_DUPLEX;
1012 hw_dbg("Full Duplex\n");
1013 } else {
1014 *duplex = HALF_DUPLEX;
1015 hw_dbg("Half Duplex\n");
1016 }
1017
1018 return 0;
1019 }
1020
1021 /**
1022 * igb_get_hw_semaphore - Acquire hardware semaphore
1023 * @hw: pointer to the HW structure
1024 *
1025 * Acquire the HW semaphore to access the PHY or NVM
1026 **/
1027 s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1028 {
1029 u32 swsm;
1030 s32 ret_val = 0;
1031 s32 timeout = hw->nvm.word_size + 1;
1032 s32 i = 0;
1033
1034 /* Get the SW semaphore */
1035 while (i < timeout) {
1036 swsm = rd32(E1000_SWSM);
1037 if (!(swsm & E1000_SWSM_SMBI))
1038 break;
1039
1040 udelay(50);
1041 i++;
1042 }
1043
1044 if (i == timeout) {
1045 hw_dbg("Driver can't access device - SMBI bit is set.\n");
1046 ret_val = -E1000_ERR_NVM;
1047 goto out;
1048 }
1049
1050 /* Get the FW semaphore. */
1051 for (i = 0; i < timeout; i++) {
1052 swsm = rd32(E1000_SWSM);
1053 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1054
1055 /* Semaphore acquired if bit latched */
1056 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1057 break;
1058
1059 udelay(50);
1060 }
1061
1062 if (i == timeout) {
1063 /* Release semaphores */
1064 igb_put_hw_semaphore(hw);
1065 hw_dbg("Driver can't access the NVM\n");
1066 ret_val = -E1000_ERR_NVM;
1067 goto out;
1068 }
1069
1070 out:
1071 return ret_val;
1072 }
1073
1074 /**
1075 * igb_put_hw_semaphore - Release hardware semaphore
1076 * @hw: pointer to the HW structure
1077 *
1078 * Release hardware semaphore used to access the PHY or NVM
1079 **/
1080 void igb_put_hw_semaphore(struct e1000_hw *hw)
1081 {
1082 u32 swsm;
1083
1084 swsm = rd32(E1000_SWSM);
1085
1086 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1087
1088 wr32(E1000_SWSM, swsm);
1089 }
1090
1091 /**
1092 * igb_get_auto_rd_done - Check for auto read completion
1093 * @hw: pointer to the HW structure
1094 *
1095 * Check EEPROM for Auto Read done bit.
1096 **/
1097 s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1098 {
1099 s32 i = 0;
1100 s32 ret_val = 0;
1101
1102
1103 while (i < AUTO_READ_DONE_TIMEOUT) {
1104 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1105 break;
1106 msleep(1);
1107 i++;
1108 }
1109
1110 if (i == AUTO_READ_DONE_TIMEOUT) {
1111 hw_dbg("Auto read by HW from NVM has not completed.\n");
1112 ret_val = -E1000_ERR_RESET;
1113 goto out;
1114 }
1115
1116 out:
1117 return ret_val;
1118 }
1119
1120 /**
1121 * igb_valid_led_default - Verify a valid default LED config
1122 * @hw: pointer to the HW structure
1123 * @data: pointer to the NVM (EEPROM)
1124 *
1125 * Read the EEPROM for the current default LED configuration. If the
1126 * LED configuration is not valid, set to a valid LED configuration.
1127 **/
1128 static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1129 {
1130 s32 ret_val;
1131
1132 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1133 if (ret_val) {
1134 hw_dbg("NVM Read Error\n");
1135 goto out;
1136 }
1137
1138 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1139 switch(hw->phy.media_type) {
1140 case e1000_media_type_internal_serdes:
1141 *data = ID_LED_DEFAULT_82575_SERDES;
1142 break;
1143 case e1000_media_type_copper:
1144 default:
1145 *data = ID_LED_DEFAULT;
1146 break;
1147 }
1148 }
1149 out:
1150 return ret_val;
1151 }
1152
1153 /**
1154 * igb_id_led_init - Initialize the LED ID settings from the NVM
1155 * @hw: pointer to the HW structure
1156 *
1157 **/
1158 s32 igb_id_led_init(struct e1000_hw *hw)
1159 {
1160 struct e1000_mac_info *mac = &hw->mac;
1161 s32 ret_val;
1162 const u32 ledctl_mask = 0x000000FF;
1163 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1164 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1165 u16 data, i, temp;
1166 const u16 led_mask = 0x0F;
1167
1168 ret_val = igb_valid_led_default(hw, &data);
1169 if (ret_val)
1170 goto out;
1171
1172 mac->ledctl_default = rd32(E1000_LEDCTL);
1173 mac->ledctl_mode1 = mac->ledctl_default;
1174 mac->ledctl_mode2 = mac->ledctl_default;
1175
1176 for (i = 0; i < 4; i++) {
1177 temp = (data >> (i << 2)) & led_mask;
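/* Each LED i has a 4-bit ID field in the NVM word (shifted out by
 * i << 2) and an 8-bit mode field in LEDCTL (shifted by i << 3).
 */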
1178 switch (temp) {
1179 case ID_LED_ON1_DEF2:
1180 case ID_LED_ON1_ON2:
1181 case ID_LED_ON1_OFF2:
1182 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1183 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1184 break;
1185 case ID_LED_OFF1_DEF2:
1186 case ID_LED_OFF1_ON2:
1187 case ID_LED_OFF1_OFF2:
1188 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1189 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1190 break;
1191 default:
1192 /* Do nothing */
1193 break;
1194 }
1195 switch (temp) {
1196 case ID_LED_DEF1_ON2:
1197 case ID_LED_ON1_ON2:
1198 case ID_LED_OFF1_ON2:
1199 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1200 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1201 break;
1202 case ID_LED_DEF1_OFF2:
1203 case ID_LED_ON1_OFF2:
1204 case ID_LED_OFF1_OFF2:
1205 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1206 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1207 break;
1208 default:
1209 /* Do nothing */
1210 break;
1211 }
1212 }
1213
1214 out:
1215 return ret_val;
1216 }
1217
1218 /**
1219 * igb_cleanup_led - Set LED config to default operation
1220 * @hw: pointer to the HW structure
1221 *
1222 * Remove the current LED configuration and set the LED configuration
1223 * to the default value, saved from the EEPROM.
1224 **/
1225 s32 igb_cleanup_led(struct e1000_hw *hw)
1226 {
1227 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1228 return 0;
1229 }
1230
1231 /**
1232 * igb_blink_led - Blink LED
1233 * @hw: pointer to the HW structure
1234 *
1235 * Blink the LEDs which are set to be on.
1236 **/
1237 s32 igb_blink_led(struct e1000_hw *hw)
1238 {
1239 u32 ledctl_blink = 0;
1240 u32 i;
1241
1242 /*
1243 * set the blink bit for each LED that's "on" (0x0E)
1244 * in ledctl_mode2
1245 */
1246 ledctl_blink = hw->mac.ledctl_mode2;
1247 for (i = 0; i < 4; i++)
1248 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1249 E1000_LEDCTL_MODE_LED_ON)
1250 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1251 (i * 8));
1252
1253 wr32(E1000_LEDCTL, ledctl_blink);
1254
1255 return 0;
1256 }
1257
1258 /**
1259 * igb_led_off - Turn LED off
1260 * @hw: pointer to the HW structure
1261 *
1262 * Turn LED off.
1263 **/
1264 s32 igb_led_off(struct e1000_hw *hw)
1265 {
1266 switch (hw->phy.media_type) {
1267 case e1000_media_type_copper:
1268 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1269 break;
1270 default:
1271 break;
1272 }
1273
1274 return 0;
1275 }
1276
1277 /**
1278 * igb_disable_pcie_master - Disables PCI-express master access
1279 * @hw: pointer to the HW structure
1280 *
1281 * Returns 0 if successful, else returns -10
1282 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
1283 * caused the master requests to be disabled.
1284 *
1285 * Disables PCI-Express master access and verifies there are no pending
1286 * requests.
1287 **/
1288 s32 igb_disable_pcie_master(struct e1000_hw *hw)
1289 {
1290 u32 ctrl;
1291 s32 timeout = MASTER_DISABLE_TIMEOUT;
1292 s32 ret_val = 0;
1293
1294 if (hw->bus.type != e1000_bus_type_pci_express)
1295 goto out;
1296
1297 ctrl = rd32(E1000_CTRL);
1298 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1299 wr32(E1000_CTRL, ctrl);
1300
1301 while (timeout) {
1302 if (!(rd32(E1000_STATUS) &
1303 E1000_STATUS_GIO_MASTER_ENABLE))
1304 break;
1305 udelay(100);
1306 timeout--;
1307 }
1308
1309 if (!timeout) {
1310 hw_dbg("Master requests are pending.\n");
1311 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1312 goto out;
1313 }
1314
1315 out:
1316 return ret_val;
1317 }
1318
1319 /**
1320 * igb_validate_mdi_setting - Verify MDI/MDIx settings
1321 * @hw: pointer to the HW structure
1322 *
1323 * Verifies that when auto-negotiation is not used, MDI/MDIx is correctly
1324 * set; if not, the setting is forced to MDI mode only.
1325 **/
1326 s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1327 {
1328 s32 ret_val = 0;
1329
1330 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1331 hw_dbg("Invalid MDI setting detected\n");
1332 hw->phy.mdix = 1;
1333 ret_val = -E1000_ERR_CONFIG;
1334 goto out;
1335 }
1336
1337 out:
1338 return ret_val;
1339 }
1340
1341 /**
1342 * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
1343 * @hw: pointer to the HW structure
1344 * @reg: 32bit register offset such as E1000_SCTL
1345 * @offset: register offset to write to
1346 * @data: data to write at register offset
1347 *
1348 * Writes an address/data control type register. There are several of these
1349 * and they all have the format address << 8 | data and bit 31 is polled for
1350 * completion.
1351 **/
1352 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1353 u32 offset, u8 data)
1354 {
1355 u32 i, regvalue = 0;
1356 s32 ret_val = 0;
1357
1358 /* Set up the address and data */
1359 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1360 wr32(reg, regvalue);
1361
1362 /* Poll the ready bit to see if the write completed */
1363 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1364 udelay(5);
1365 regvalue = rd32(reg);
1366 if (regvalue & E1000_GEN_CTL_READY)
1367 break;
1368 }
1369 if (!(regvalue & E1000_GEN_CTL_READY)) {
1370 hw_dbg("Reg %08x did not indicate ready\n", reg);
1371 ret_val = -E1000_ERR_PHY;
1372 goto out;
1373 }
1374
1375 out:
1376 return ret_val;
1377 }
1378
1379 /**
1380 * igb_enable_mng_pass_thru - Enable processing of ARPs
1381 * @hw: pointer to the HW structure
1382 *
1383 * Verifies the hardware needs to leave the interface enabled so that frames
1384 * can be directed to and from the management interface.
1385 **/
1386 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1387 {
1388 u32 manc;
1389 u32 fwsm, factps;
1390 bool ret_val = false;
1391
1392 if (!hw->mac.asf_firmware_present)
1393 goto out;
1394
1395 manc = rd32(E1000_MANC);
1396
1397 if (!(manc & E1000_MANC_RCV_TCO_EN))
1398 goto out;
1399
1400 if (hw->mac.arc_subsystem_valid) {
1401 fwsm = rd32(E1000_FWSM);
1402 factps = rd32(E1000_FACTPS);
1403
1404 if (!(factps & E1000_FACTPS_MNGCG) &&
1405 ((fwsm & E1000_FWSM_MODE_MASK) ==
1406 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1407 ret_val = true;
1408 goto out;
1409 }
1410 } else {
1411 if ((manc & E1000_MANC_SMBUS_EN) &&
1412 !(manc & E1000_MANC_ASF_EN)) {
1413 ret_val = true;
1414 goto out;
1415 }
1416 }
1417
1418 out:
1419 return ret_val;
1420 }
1421