Lines matching refs:pf

61 struct ice_pf *pf = container_of(hw, struct ice_pf, hw); in ice_hw_to_dev() local
63 return &pf->pdev->dev; in ice_hw_to_dev()
70 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
72 static void ice_vsi_release_all(struct ice_pf *pf);
74 static int ice_rebuild_channels(struct ice_pf *pf);
109 static void ice_check_for_hang_subtask(struct ice_pf *pf) in ice_check_for_hang_subtask() argument
117 ice_for_each_vsi(pf, v) in ice_check_for_hang_subtask()
118 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { in ice_check_for_hang_subtask()
119 vsi = pf->vsi[v]; in ice_check_for_hang_subtask()
172 static int ice_init_mac_fltr(struct ice_pf *pf) in ice_init_mac_fltr() argument
177 vsi = ice_get_main_vsi(pf); in ice_init_mac_fltr()
307 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_devlink_port() local
309 if (!ice_is_switchdev_running(pf)) in ice_get_devlink_port()
312 return &pf->devlink_port; in ice_get_devlink_port()
327 struct ice_pf *pf = vsi->back; in ice_vsi_sync_fltr() local
328 struct ice_hw *hw = &pf->hw; in ice_vsi_sync_fltr()
415 if (!ice_is_dflt_vsi_in_use(pf->first_sw)) { in ice_vsi_sync_fltr()
416 err = ice_set_dflt_vsi(pf->first_sw, vsi); in ice_vsi_sync_fltr()
429 if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) { in ice_vsi_sync_fltr()
430 err = ice_clear_dflt_vsi(pf->first_sw); in ice_vsi_sync_fltr()
462 static void ice_sync_fltr_subtask(struct ice_pf *pf) in ice_sync_fltr_subtask() argument
466 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) in ice_sync_fltr_subtask()
469 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
471 ice_for_each_vsi(pf, v) in ice_sync_fltr_subtask()
472 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && in ice_sync_fltr_subtask()
473 ice_vsi_sync_fltr(pf->vsi[v])) { in ice_sync_fltr_subtask()
475 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
485 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) in ice_pf_dis_all_vsi() argument
490 ice_for_each_vsi(pf, v) in ice_pf_dis_all_vsi()
491 if (pf->vsi[v]) in ice_pf_dis_all_vsi()
492 ice_dis_vsi(pf->vsi[v], locked); in ice_pf_dis_all_vsi()
495 pf->pf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
498 pf->vf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
510 static void ice_clear_sw_switch_recipes(struct ice_pf *pf) in ice_clear_sw_switch_recipes() argument
515 recp = pf->hw.switch_info->recp_list; in ice_clear_sw_switch_recipes()
528 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_prepare_for_reset() argument
530 struct ice_hw *hw = &pf->hw; in ice_prepare_for_reset()
535 dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type); in ice_prepare_for_reset()
538 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) in ice_prepare_for_reset()
541 ice_unplug_aux_dev(pf); in ice_prepare_for_reset()
545 ice_vc_notify_reset(pf); in ice_prepare_for_reset()
548 mutex_lock(&pf->vfs.table_lock); in ice_prepare_for_reset()
549 ice_for_each_vf(pf, bkt, vf) in ice_prepare_for_reset()
551 mutex_unlock(&pf->vfs.table_lock); in ice_prepare_for_reset()
553 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_prepare_for_reset()
555 ice_clear_sw_switch_recipes(pf); in ice_prepare_for_reset()
559 vsi = ice_get_main_vsi(pf); in ice_prepare_for_reset()
568 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_prepare_for_reset()
584 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_prepare_for_reset()
593 ice_pf_dis_all_vsi(pf, false); in ice_prepare_for_reset()
595 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_prepare_for_reset()
596 ice_ptp_prepare_for_reset(pf); in ice_prepare_for_reset()
598 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_prepare_for_reset()
599 ice_gnss_exit(pf); in ice_prepare_for_reset()
606 set_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_prepare_for_reset()
614 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_do_reset() argument
616 struct device *dev = ice_pf_to_dev(pf); in ice_do_reset()
617 struct ice_hw *hw = &pf->hw; in ice_do_reset()
621 ice_prepare_for_reset(pf, reset_type); in ice_do_reset()
626 set_bit(ICE_RESET_FAILED, pf->state); in ice_do_reset()
627 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_do_reset()
628 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
629 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
630 clear_bit(ICE_CORER_REQ, pf->state); in ice_do_reset()
631 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_do_reset()
632 wake_up(&pf->reset_wait_queue); in ice_do_reset()
641 pf->pfr_count++; in ice_do_reset()
642 ice_rebuild(pf, reset_type); in ice_do_reset()
643 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
644 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
645 wake_up(&pf->reset_wait_queue); in ice_do_reset()
646 ice_reset_all_vfs(pf); in ice_do_reset()
654 static void ice_reset_subtask(struct ice_pf *pf) in ice_reset_subtask() argument
668 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_reset_subtask()
670 if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) in ice_reset_subtask()
672 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) in ice_reset_subtask()
674 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) in ice_reset_subtask()
679 ice_prepare_for_reset(pf, reset_type); in ice_reset_subtask()
682 if (ice_check_reset(&pf->hw)) { in ice_reset_subtask()
683 set_bit(ICE_RESET_FAILED, pf->state); in ice_reset_subtask()
686 pf->hw.reset_ongoing = false; in ice_reset_subtask()
687 ice_rebuild(pf, reset_type); in ice_reset_subtask()
691 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_reset_subtask()
692 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_reset_subtask()
693 clear_bit(ICE_PFR_REQ, pf->state); in ice_reset_subtask()
694 clear_bit(ICE_CORER_REQ, pf->state); in ice_reset_subtask()
695 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_reset_subtask()
696 wake_up(&pf->reset_wait_queue); in ice_reset_subtask()
697 ice_reset_all_vfs(pf); in ice_reset_subtask()
704 if (test_bit(ICE_PFR_REQ, pf->state)) in ice_reset_subtask()
706 if (test_bit(ICE_CORER_REQ, pf->state)) in ice_reset_subtask()
708 if (test_bit(ICE_GLOBR_REQ, pf->state)) in ice_reset_subtask()
715 if (!test_bit(ICE_DOWN, pf->state) && in ice_reset_subtask()
716 !test_bit(ICE_CFG_BUSY, pf->state)) { in ice_reset_subtask()
717 ice_do_reset(pf, reset_type); in ice_reset_subtask()
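
ice_prepare_for_reset(), ice_do_reset() and ice_reset_subtask() coordinate purely through atomic bits in pf->state: a request bit (ICE_PFR_REQ/ICE_CORER_REQ/ICE_GLOBR_REQ) asks for a reset, ICE_PREPARED_FOR_RESET makes the prepare step idempotent, and waiters on pf->reset_wait_queue are woken once the bits are cleared. A minimal userspace sketch of that handshake, with C11 atomics standing in for the kernel's bitops and all names invented for illustration:

/* Hypothetical sketch of the pf->state bit handshake; C11 atomics stand
 * in for the kernel's set_bit()/test_bit()/clear_bit() helpers. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { PFR_REQ, PREPARED_FOR_RESET };

static atomic_ulong state;

static void set_bit_(int b)   { atomic_fetch_or(&state, 1UL << b); }
static void clear_bit_(int b) { atomic_fetch_and(&state, ~(1UL << b)); }
static bool test_bit_(int b)  { return atomic_load(&state) & (1UL << b); }

static void prepare_for_reset(void)
{
	if (test_bit_(PREPARED_FOR_RESET))
		return;                /* already quiesced; prepare is idempotent */
	/* ... disable VSIs, notify VFs, save PHY/PTP state ... */
	set_bit_(PREPARED_FOR_RESET);
}

static void reset_subtask(void)
{
	if (!test_bit_(PFR_REQ))
		return;                /* no reset requested */
	prepare_for_reset();
	/* ... trigger PF reset, then rebuild ... */
	clear_bit_(PREPARED_FOR_RESET);
	clear_bit_(PFR_REQ);           /* wake_up(&pf->reset_wait_queue) in the driver */
}

int main(void)
{
	set_bit_(PFR_REQ);             /* cf. ice_schedule_reset() */
	reset_subtask();               /* runs from the service task */
	printf("state after reset: %#lx\n", atomic_load(&state));
	return 0;
}
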
919 static void ice_set_dflt_mib(struct ice_pf *pf) in ice_set_dflt_mib() argument
921 struct device *dev = ice_pf_to_dev(pf); in ice_set_dflt_mib()
925 struct ice_hw *hw = &pf->hw; in ice_set_dflt_mib()
1003 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err) in ice_check_phy_fw_load() argument
1006 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1010 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) in ice_check_phy_fw_load()
1014 …dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and… in ice_check_phy_fw_load()
1015 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1027 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) in ice_check_module_power() argument
1032 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1039 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) in ice_check_module_power()
1043 …dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cann… in ice_check_module_power()
1044 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1046 …dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cann… in ice_check_module_power()
1047 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1059 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err) in ice_check_link_cfg_err() argument
1061 ice_check_module_power(pf, link_cfg_err); in ice_check_link_cfg_err()
1062 ice_check_phy_fw_load(pf, link_cfg_err); in ice_check_link_cfg_err()
1075 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, in ice_link_event() argument
1078 struct device *dev = ice_pf_to_dev(pf); in ice_link_event()
1100 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_link_event()
1108 vsi = ice_get_main_vsi(pf); in ice_link_event()
1113 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && in ice_link_event()
1115 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_link_event()
1123 if (!ice_is_e810(&pf->hw)) in ice_link_event()
1124 ice_ptp_link_change(pf, pf->hw.pf_id, link_up); in ice_link_event()
1126 if (ice_is_dcb_active(pf)) { in ice_link_event()
1127 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_link_event()
1128 ice_dcb_rebuild(pf); in ice_link_event()
1131 ice_set_dflt_mib(pf); in ice_link_event()
1136 ice_vc_notify_link_state(pf); in ice_link_event()
1145 static void ice_watchdog_subtask(struct ice_pf *pf) in ice_watchdog_subtask() argument
1150 if (test_bit(ICE_DOWN, pf->state) || in ice_watchdog_subtask()
1151 test_bit(ICE_CFG_BUSY, pf->state)) in ice_watchdog_subtask()
1156 pf->serv_tmr_prev + pf->serv_tmr_period)) in ice_watchdog_subtask()
1159 pf->serv_tmr_prev = jiffies; in ice_watchdog_subtask()
1164 ice_update_pf_stats(pf); in ice_watchdog_subtask()
1165 ice_for_each_vsi(pf, i) in ice_watchdog_subtask()
1166 if (pf->vsi[i] && pf->vsi[i]->netdev) in ice_watchdog_subtask()
1167 ice_update_vsi_stats(pf->vsi[i]); in ice_watchdog_subtask()
1205 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_handle_link_event() argument
1212 port_info = pf->hw.port_info; in ice_handle_link_event()
1216 status = ice_link_event(pf, port_info, in ice_handle_link_event()
1220 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", in ice_handle_link_event()
1257 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, in ice_aq_wait_for_event() argument
1260 struct device *dev = ice_pf_to_dev(pf); in ice_aq_wait_for_event()
1275 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1276 hlist_add_head(&task->entry, &pf->aq_wait_list); in ice_aq_wait_for_event()
1277 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1281 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state, in ice_aq_wait_for_event()
1304 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1306 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1330 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, in ice_aq_check_events() argument
1336 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1337 hlist_for_each_entry(task, &pf->aq_wait_list, entry) { in ice_aq_check_events()
1355 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1358 wake_up(&pf->aq_wait_queue); in ice_aq_check_events()
1368 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) in ice_aq_cancel_waiting_tasks() argument
1372 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1373 hlist_for_each_entry(task, &pf->aq_wait_list, entry) in ice_aq_cancel_waiting_tasks()
1375 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1377 wake_up(&pf->aq_wait_queue); in ice_aq_cancel_waiting_tasks()
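
ice_aq_wait_for_event() parks a task node on pf->aq_wait_list under pf->aq_wait_lock and sleeps on pf->aq_wait_queue; ice_aq_check_events(), called from the control-queue clean path, marks matching opcodes complete and wakes the queue; ice_aq_cancel_waiting_tasks() does the same with a cancel status at teardown. A rough pthread approximation of that shape (no timeout handling, invented names):

/* Rough pthread sketch of the aq_wait pattern: waiters park per-opcode
 * nodes on a shared list; the clean path marks matches and broadcasts.
 * The driver uses a spinlock-protected hlist plus
 * wait_event_interruptible_timeout(). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct aq_task {
	unsigned short opcode;
	bool done;
	struct aq_task *next;
};

static pthread_mutex_t aq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t aq_cv = PTHREAD_COND_INITIALIZER;
static struct aq_task *aq_list;

static void check_events(unsigned short opcode)  /* cf. ice_aq_check_events() */
{
	pthread_mutex_lock(&aq_lock);
	for (struct aq_task *t = aq_list; t; t = t->next)
		if (t->opcode == opcode)
			t->done = true;
	pthread_mutex_unlock(&aq_lock);
	pthread_cond_broadcast(&aq_cv);          /* cf. wake_up(&pf->aq_wait_queue) */
}

static void *event_thread(void *arg)
{
	check_events(*(unsigned short *)arg);    /* the awaited response arrives */
	return NULL;
}

int main(void)
{
	struct aq_task task = { .opcode = 0x42 };  /* arbitrary demo opcode */
	unsigned short opcode = task.opcode;
	pthread_t t;

	/* register before triggering the event, just as the driver
	 * registers before sending the AdminQ command */
	pthread_mutex_lock(&aq_lock);
	task.next = aq_list;
	aq_list = &task;
	pthread_mutex_unlock(&aq_lock);

	pthread_create(&t, NULL, event_thread, &opcode);

	pthread_mutex_lock(&aq_lock);            /* cf. ice_aq_wait_for_event() */
	while (!task.done)
		pthread_cond_wait(&aq_cv, &aq_lock);
	aq_list = task.next;                     /* unlink (sole waiter here) */
	pthread_mutex_unlock(&aq_lock);

	pthread_join(t, NULL);
	puts("event received");
	return 0;
}
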
1385 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) in __ice_clean_ctrlq() argument
1387 struct device *dev = ice_pf_to_dev(pf); in __ice_clean_ctrlq()
1389 struct ice_hw *hw = &pf->hw; in __ice_clean_ctrlq()
1396 if (test_bit(ICE_RESET_FAILED, pf->state)) in __ice_clean_ctrlq()
1485 ice_aq_check_events(pf, opcode, &event); in __ice_clean_ctrlq()
1489 if (ice_handle_link_event(pf, &event)) in __ice_clean_ctrlq()
1493 ice_vf_lan_overflow_event(pf, &event); in __ice_clean_ctrlq()
1496 if (!ice_is_malicious_vf(pf, &event, i, pending)) in __ice_clean_ctrlq()
1497 ice_vc_process_vf_msg(pf, &event); in __ice_clean_ctrlq()
1503 ice_dcb_process_lldp_set_mib_change(pf, &event); in __ice_clean_ctrlq()
1536 static void ice_clean_adminq_subtask(struct ice_pf *pf) in ice_clean_adminq_subtask() argument
1538 struct ice_hw *hw = &pf->hw; in ice_clean_adminq_subtask()
1540 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_clean_adminq_subtask()
1543 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) in ice_clean_adminq_subtask()
1546 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_clean_adminq_subtask()
1554 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); in ice_clean_adminq_subtask()
1563 static void ice_clean_mailboxq_subtask(struct ice_pf *pf) in ice_clean_mailboxq_subtask() argument
1565 struct ice_hw *hw = &pf->hw; in ice_clean_mailboxq_subtask()
1567 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) in ice_clean_mailboxq_subtask()
1570 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) in ice_clean_mailboxq_subtask()
1573 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_clean_mailboxq_subtask()
1576 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); in ice_clean_mailboxq_subtask()
1585 static void ice_clean_sbq_subtask(struct ice_pf *pf) in ice_clean_sbq_subtask() argument
1587 struct ice_hw *hw = &pf->hw; in ice_clean_sbq_subtask()
1591 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1595 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) in ice_clean_sbq_subtask()
1598 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) in ice_clean_sbq_subtask()
1601 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1604 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); in ice_clean_sbq_subtask()
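
All three clean subtasks follow the same drain/clear/re-drain sequence: bail if the pending bit is clear, drain the queue, clear the bit, then drain once more. The final drain closes the window in which an event arrives between the drain and the clear, and would otherwise sit unannounced until the next interrupt. A tiny self-contained sketch of the sequence (names invented):

/* Sketch of the drain/clear/re-drain sequence in the *_subtask helpers. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;
static atomic_int queued;

static void drain_queue(void)
{
	int n = atomic_exchange(&queued, 0);

	if (n)
		printf("processed %d event(s)\n", n);
}

static void clean_queue_subtask(void)
{
	if (!atomic_load(&pending))
		return;
	drain_queue();
	atomic_store(&pending, 0);
	/* an event may have landed between drain_queue() and the clear;
	 * drain once more so it is not stranded until the next interrupt */
	drain_queue();
}

int main(void)
{
	atomic_store(&queued, 2);
	atomic_store(&pending, 1);
	clean_queue_subtask();
	return 0;
}
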
1615 void ice_service_task_schedule(struct ice_pf *pf) in ice_service_task_schedule() argument
1617 if (!test_bit(ICE_SERVICE_DIS, pf->state) && in ice_service_task_schedule()
1618 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && in ice_service_task_schedule()
1619 !test_bit(ICE_NEEDS_RESTART, pf->state)) in ice_service_task_schedule()
1620 queue_work(ice_wq, &pf->serv_task); in ice_service_task_schedule()
1627 static void ice_service_task_complete(struct ice_pf *pf) in ice_service_task_complete() argument
1629 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); in ice_service_task_complete()
1633 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_complete()
1643 static int ice_service_task_stop(struct ice_pf *pf) in ice_service_task_stop() argument
1647 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_stop()
1649 if (pf->serv_tmr.function) in ice_service_task_stop()
1650 del_timer_sync(&pf->serv_tmr); in ice_service_task_stop()
1651 if (pf->serv_task.func) in ice_service_task_stop()
1652 cancel_work_sync(&pf->serv_task); in ice_service_task_stop()
1654 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_stop()
1664 static void ice_service_task_restart(struct ice_pf *pf) in ice_service_task_restart() argument
1666 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_restart()
1667 ice_service_task_schedule(pf); in ice_service_task_restart()
1676 struct ice_pf *pf = from_timer(pf, t, serv_tmr); in ice_service_timer() local
1678 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); in ice_service_timer()
1679 ice_service_task_schedule(pf); in ice_service_timer()
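
ice_service_task_schedule() relies on test_and_set_bit(ICE_SERVICE_SCHED, ...) so that the self-rearming timer, interrupt paths and direct callers can all request the service task without double-queueing it; ice_service_task_complete() clears the bit when a pass finishes. A compact sketch of that single-flight guard (C11 atomics in place of kernel bitops, names invented):

/* Sketch: single-flight work scheduling guarded by an atomic flag,
 * modeled on ICE_SERVICE_SCHED. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sched;     /* cf. ICE_SERVICE_SCHED in pf->state */
static int queued_count;

static void queue_work_once(void)   /* cf. ice_service_task_schedule() */
{
	if (atomic_exchange(&sched, true))
		return;                     /* already queued; nothing to do */
	queued_count++;                     /* stand-in for queue_work() */
}

static void service_task(void)      /* cf. ice_service_task() */
{
	/* ... run subtasks ... */
	atomic_store(&sched, false);        /* cf. ice_service_task_complete() */
}

int main(void)
{
	queue_work_once();  /* timer */
	queue_work_once();  /* interrupt: coalesced, no second queueing */
	service_task();
	queue_work_once();  /* after completion a new pass can be queued */
	printf("work queued %d time(s)\n", queued_count);
	return 0;
}
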
1692 static void ice_handle_mdd_event(struct ice_pf *pf) in ice_handle_mdd_event() argument
1694 struct device *dev = ice_pf_to_dev(pf); in ice_handle_mdd_event()
1695 struct ice_hw *hw = &pf->hw; in ice_handle_mdd_event()
1700 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { in ice_handle_mdd_event()
1704 ice_print_vfs_mdd_events(pf); in ice_handle_mdd_event()
1720 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1737 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1754 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1764 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1771 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1778 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1785 mutex_lock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1786 ice_for_each_vf(pf, bkt, vf) { in ice_handle_mdd_event()
1791 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1792 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1801 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1802 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1811 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1812 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1821 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1822 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1830 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { in ice_handle_mdd_event()
1839 mutex_unlock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1841 ice_print_vfs_mdd_events(pf); in ice_handle_mdd_event()
1929 struct ice_pf *pf = pi->hw->back; in ice_init_nvm_phy_type() local
1940 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); in ice_init_nvm_phy_type()
1944 pf->nvm_phy_type_hi = pcaps->phy_type_high; in ice_init_nvm_phy_type()
1945 pf->nvm_phy_type_lo = pcaps->phy_type_low; in ice_init_nvm_phy_type()
1961 struct ice_pf *pf = pi->hw->back; in ice_init_link_dflt_override() local
1963 ldo = &pf->link_dflt_override; in ice_init_link_dflt_override()
1973 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); in ice_init_link_dflt_override()
1974 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); in ice_init_link_dflt_override()
1999 struct ice_pf *pf = pi->hw->back; in ice_init_phy_cfg_dflt_override() local
2001 ldo = &pf->link_dflt_override; in ice_init_phy_cfg_dflt_override()
2009 cfg->phy_type_low = pf->nvm_phy_type_lo & in ice_init_phy_cfg_dflt_override()
2011 cfg->phy_type_high = pf->nvm_phy_type_hi & in ice_init_phy_cfg_dflt_override()
2017 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); in ice_init_phy_cfg_dflt_override()
2038 struct ice_pf *pf = pi->hw->back; in ice_init_phy_user_cfg() local
2055 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); in ice_init_phy_user_cfg()
2065 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); in ice_init_phy_user_cfg()
2072 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { in ice_init_phy_user_cfg()
2087 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); in ice_init_phy_user_cfg()
2108 struct ice_pf *pf = vsi->back; in ice_configure_phy() local
2117 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && in ice_configure_phy()
2121 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) in ice_configure_phy()
2207 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); in ice_configure_phy()
2225 static void ice_check_media_subtask(struct ice_pf *pf) in ice_check_media_subtask() argument
2232 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) in ice_check_media_subtask()
2235 vsi = ice_get_main_vsi(pf); in ice_check_media_subtask()
2245 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_check_media_subtask()
2248 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) in ice_check_media_subtask()
2260 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_check_media_subtask()
2274 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); in ice_service_task() local
2280 ice_reset_subtask(pf); in ice_service_task()
2283 if (ice_is_reset_in_progress(pf->state) || in ice_service_task()
2284 test_bit(ICE_SUSPENDED, pf->state) || in ice_service_task()
2285 test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_service_task()
2286 ice_service_task_complete(pf); in ice_service_task()
2290 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { in ice_service_task()
2297 swap(event->reg, pf->oicr_err_reg); in ice_service_task()
2298 ice_send_event_to_aux(pf, event); in ice_service_task()
2303 if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { in ice_service_task()
2305 ice_plug_aux_dev(pf); in ice_service_task()
2312 if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) in ice_service_task()
2313 ice_unplug_aux_dev(pf); in ice_service_task()
2316 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { in ice_service_task()
2322 ice_send_event_to_aux(pf, event); in ice_service_task()
2327 ice_clean_adminq_subtask(pf); in ice_service_task()
2328 ice_check_media_subtask(pf); in ice_service_task()
2329 ice_check_for_hang_subtask(pf); in ice_service_task()
2330 ice_sync_fltr_subtask(pf); in ice_service_task()
2331 ice_handle_mdd_event(pf); in ice_service_task()
2332 ice_watchdog_subtask(pf); in ice_service_task()
2334 if (ice_is_safe_mode(pf)) { in ice_service_task()
2335 ice_service_task_complete(pf); in ice_service_task()
2339 ice_process_vflr_event(pf); in ice_service_task()
2340 ice_clean_mailboxq_subtask(pf); in ice_service_task()
2341 ice_clean_sbq_subtask(pf); in ice_service_task()
2342 ice_sync_arfs_fltrs(pf); in ice_service_task()
2343 ice_flush_fdir_ctx(pf); in ice_service_task()
2346 ice_service_task_complete(pf); in ice_service_task()
2352 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || in ice_service_task()
2353 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || in ice_service_task()
2354 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_service_task()
2355 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || in ice_service_task()
2356 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || in ice_service_task()
2357 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || in ice_service_task()
2358 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_service_task()
2359 mod_timer(&pf->serv_tmr, jiffies); in ice_service_task()
2387 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) in ice_schedule_reset() argument
2389 struct device *dev = ice_pf_to_dev(pf); in ice_schedule_reset()
2392 if (test_bit(ICE_RESET_FAILED, pf->state)) { in ice_schedule_reset()
2397 if (ice_is_reset_in_progress(pf->state)) { in ice_schedule_reset()
2402 ice_unplug_aux_dev(pf); in ice_schedule_reset()
2406 set_bit(ICE_PFR_REQ, pf->state); in ice_schedule_reset()
2409 set_bit(ICE_CORER_REQ, pf->state); in ice_schedule_reset()
2412 set_bit(ICE_GLOBR_REQ, pf->state); in ice_schedule_reset()
2418 ice_service_task_schedule(pf); in ice_schedule_reset()
2474 struct ice_pf *pf = vsi->back; in ice_vsi_req_irq_msix() local
2482 dev = ice_pf_to_dev(pf); in ice_vsi_req_irq_msix()
2486 irq_num = pf->msix_entries[base + vector].vector; in ice_vsi_req_irq_msix()
2542 irq_num = pf->msix_entries[base + vector].vector; in ice_vsi_req_irq_msix()
2629 struct ice_pf *pf = vsi->back; in ice_prepare_xdp_rings() local
2631 .qs_mutex = &pf->avail_q_mutex, in ice_prepare_xdp_rings()
2632 .pf_map = pf->avail_txqs, in ice_prepare_xdp_rings()
2633 .pf_map_size = pf->max_pf_txqs, in ice_prepare_xdp_rings()
2644 dev = ice_pf_to_dev(pf); in ice_prepare_xdp_rings()
2701 if (ice_is_reset_in_progress(pf->state)) in ice_prepare_xdp_rings()
2739 mutex_lock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2741 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_prepare_xdp_rings()
2744 mutex_unlock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2760 struct ice_pf *pf = vsi->back; in ice_destroy_xdp_rings() local
2768 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) in ice_destroy_xdp_rings()
2784 mutex_lock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2786 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_destroy_xdp_rings()
2789 mutex_unlock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2801 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); in ice_destroy_xdp_rings()
2807 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) in ice_destroy_xdp_rings()
2975 static void ice_ena_misc_vector(struct ice_pf *pf) in ice_ena_misc_vector() argument
2977 struct ice_hw *hw = &pf->hw; in ice_ena_misc_vector()
3004 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), in ice_ena_misc_vector()
3015 struct ice_pf *pf = (struct ice_pf *)data; in ice_misc_intr() local
3016 struct ice_hw *hw = &pf->hw; in ice_misc_intr()
3021 dev = ice_pf_to_dev(pf); in ice_misc_intr()
3022 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3023 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3024 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3031 pf->sw_int_count++; in ice_misc_intr()
3036 set_bit(ICE_MDD_EVENT_PENDING, pf->state); in ice_misc_intr()
3040 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { in ice_misc_intr()
3047 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); in ice_misc_intr()
3060 pf->corer_count++; in ice_misc_intr()
3062 pf->globr_count++; in ice_misc_intr()
3064 pf->empr_count++; in ice_misc_intr()
3071 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_misc_intr()
3073 set_bit(ICE_CORER_RECV, pf->state); in ice_misc_intr()
3075 set_bit(ICE_GLOBR_RECV, pf->state); in ice_misc_intr()
3077 set_bit(ICE_EMPR_RECV, pf->state); in ice_misc_intr()
3098 ice_ptp_process_ts(pf); in ice_misc_intr()
3106 pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | in ice_misc_intr()
3110 kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work); in ice_misc_intr()
3115 pf->oicr_err_reg |= oicr; in ice_misc_intr()
3116 set_bit(ICE_AUX_ERR_PENDING, pf->state); in ice_misc_intr()
3129 set_bit(ICE_PFR_REQ, pf->state); in ice_misc_intr()
3130 ice_service_task_schedule(pf); in ice_misc_intr()
3135 ice_service_task_schedule(pf); in ice_misc_intr()
3169 static void ice_free_irq_msix_misc(struct ice_pf *pf) in ice_free_irq_msix_misc() argument
3171 struct ice_hw *hw = &pf->hw; in ice_free_irq_msix_misc()
3179 if (pf->msix_entries) { in ice_free_irq_msix_misc()
3180 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); in ice_free_irq_msix_misc()
3181 devm_free_irq(ice_pf_to_dev(pf), in ice_free_irq_msix_misc()
3182 pf->msix_entries[pf->oicr_idx].vector, pf); in ice_free_irq_msix_misc()
3185 pf->num_avail_sw_msix += 1; in ice_free_irq_msix_misc()
3186 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); in ice_free_irq_msix_misc()
3228 static int ice_req_irq_msix_misc(struct ice_pf *pf) in ice_req_irq_msix_misc() argument
3230 struct device *dev = ice_pf_to_dev(pf); in ice_req_irq_msix_misc()
3231 struct ice_hw *hw = &pf->hw; in ice_req_irq_msix_misc()
3234 if (!pf->int_name[0]) in ice_req_irq_msix_misc()
3235 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", in ice_req_irq_msix_misc()
3242 if (ice_is_reset_in_progress(pf->state)) in ice_req_irq_msix_misc()
3246 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); in ice_req_irq_msix_misc()
3250 pf->num_avail_sw_msix -= 1; in ice_req_irq_msix_misc()
3251 pf->oicr_idx = (u16)oicr_idx; in ice_req_irq_msix_misc()
3253 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, in ice_req_irq_msix_misc()
3254 ice_misc_intr, 0, pf->int_name, pf); in ice_req_irq_msix_misc()
3257 pf->int_name, err); in ice_req_irq_msix_misc()
3258 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); in ice_req_irq_msix_misc()
3259 pf->num_avail_sw_msix += 1; in ice_req_irq_msix_misc()
3264 ice_ena_misc_vector(pf); in ice_req_irq_msix_misc()
3266 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); in ice_req_irq_msix_misc()
3267 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), in ice_req_irq_msix_misc()
3302 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_ops() local
3304 if (ice_is_safe_mode(pf)) { in ice_set_ops()
3311 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; in ice_set_ops()
3321 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_netdev_features() local
3322 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); in ice_set_netdev_features()
3328 if (ice_is_safe_mode(pf)) { in ice_set_netdev_features()
3467 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_pf_vsi_setup() argument
3469 return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL); in ice_pf_vsi_setup()
3473 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, in ice_chnl_vsi_setup() argument
3476 return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch); in ice_chnl_vsi_setup()
3488 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_ctrl_vsi_setup() argument
3490 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL); in ice_ctrl_vsi_setup()
3502 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_lb_vsi_setup() argument
3504 return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL); in ice_lb_vsi_setup()
3667 static void ice_tc_indir_block_remove(struct ice_pf *pf) in ice_tc_indir_block_remove() argument
3669 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); in ice_tc_indir_block_remove()
3702 static int ice_setup_pf_sw(struct ice_pf *pf) in ice_setup_pf_sw() argument
3704 struct device *dev = ice_pf_to_dev(pf); in ice_setup_pf_sw()
3705 bool dvm = ice_is_dvm_ena(&pf->hw); in ice_setup_pf_sw()
3709 if (ice_is_reset_in_progress(pf->state)) in ice_setup_pf_sw()
3712 status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_setup_pf_sw()
3716 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); in ice_setup_pf_sw()
3745 status = ice_init_mac_fltr(pf); in ice_setup_pf_sw()
3792 u16 ice_get_avail_txq_count(struct ice_pf *pf) in ice_get_avail_txq_count() argument
3794 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, in ice_get_avail_txq_count()
3795 pf->max_pf_txqs); in ice_get_avail_txq_count()
3802 u16 ice_get_avail_rxq_count(struct ice_pf *pf) in ice_get_avail_rxq_count() argument
3804 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, in ice_get_avail_rxq_count()
3805 pf->max_pf_rxqs); in ice_get_avail_rxq_count()
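
Both counters reduce to taking pf->avail_q_mutex and counting clear bits in a bitmap sized to max_pf_txqs/max_pf_rxqs; a set bit marks a claimed queue, which is presumably why the XDP unwind paths above return queues with clear_bit(). A userspace approximation with simplified stand-ins for the kernel bitmap API:

/* Userspace sketch of the avail_txqs/avail_rxqs bookkeeping: a
 * mutex-protected bitmap in which a set bit marks a claimed queue, so
 * "available" is the count of clear bits (cf. ice_get_avail_q_count()). */
#include <pthread.h>
#include <stdio.h>

#define MAX_QUEUES 64

static pthread_mutex_t q_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long qmap;  /* one 64-bit word suffices for the demo */

static void claim_queue(int q)   { qmap |=  1ULL << q; }
static void release_queue(int q) { qmap &= ~(1ULL << q); }

static int avail_q_count(void)
{
	int count = 0;

	pthread_mutex_lock(&q_mutex);
	for (int q = 0; q < MAX_QUEUES; q++)
		if (!(qmap & (1ULL << q)))
			count++;
	pthread_mutex_unlock(&q_mutex);
	return count;
}

int main(void)
{
	pthread_mutex_lock(&q_mutex);
	claim_queue(0);
	claim_queue(1);
	pthread_mutex_unlock(&q_mutex);
	printf("available: %d\n", avail_q_count());  /* 62 */

	pthread_mutex_lock(&q_mutex);
	release_queue(1);   /* cf. the clear_bit() unwind in the XDP paths */
	pthread_mutex_unlock(&q_mutex);
	printf("available: %d\n", avail_q_count());  /* 63 */
	return 0;
}
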
3812 static void ice_deinit_pf(struct ice_pf *pf) in ice_deinit_pf() argument
3814 ice_service_task_stop(pf); in ice_deinit_pf()
3815 mutex_destroy(&pf->adev_mutex); in ice_deinit_pf()
3816 mutex_destroy(&pf->sw_mutex); in ice_deinit_pf()
3817 mutex_destroy(&pf->tc_mutex); in ice_deinit_pf()
3818 mutex_destroy(&pf->avail_q_mutex); in ice_deinit_pf()
3819 mutex_destroy(&pf->vfs.table_lock); in ice_deinit_pf()
3821 if (pf->avail_txqs) { in ice_deinit_pf()
3822 bitmap_free(pf->avail_txqs); in ice_deinit_pf()
3823 pf->avail_txqs = NULL; in ice_deinit_pf()
3826 if (pf->avail_rxqs) { in ice_deinit_pf()
3827 bitmap_free(pf->avail_rxqs); in ice_deinit_pf()
3828 pf->avail_rxqs = NULL; in ice_deinit_pf()
3831 if (pf->ptp.clock) in ice_deinit_pf()
3832 ptp_clock_unregister(pf->ptp.clock); in ice_deinit_pf()
3839 static void ice_set_pf_caps(struct ice_pf *pf) in ice_set_pf_caps() argument
3841 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; in ice_set_pf_caps()
3843 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3845 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3846 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3848 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3849 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
3851 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
3852 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, in ice_set_pf_caps()
3855 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
3857 set_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
3859 clear_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
3866 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_set_pf_caps()
3867 set_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
3869 ice_alloc_fd_guar_item(&pf->hw, &unused, in ice_set_pf_caps()
3872 ice_alloc_fd_shrd_item(&pf->hw, &unused, in ice_set_pf_caps()
3876 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
3878 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
3880 pf->max_pf_txqs = func_caps->common_cap.num_txq; in ice_set_pf_caps()
3881 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; in ice_set_pf_caps()
3888 static int ice_init_pf(struct ice_pf *pf) in ice_init_pf() argument
3890 ice_set_pf_caps(pf); in ice_init_pf()
3892 mutex_init(&pf->sw_mutex); in ice_init_pf()
3893 mutex_init(&pf->tc_mutex); in ice_init_pf()
3894 mutex_init(&pf->adev_mutex); in ice_init_pf()
3896 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_init_pf()
3897 spin_lock_init(&pf->aq_wait_lock); in ice_init_pf()
3898 init_waitqueue_head(&pf->aq_wait_queue); in ice_init_pf()
3900 init_waitqueue_head(&pf->reset_wait_queue); in ice_init_pf()
3903 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_init_pf()
3904 pf->serv_tmr_period = HZ; in ice_init_pf()
3905 INIT_WORK(&pf->serv_task, ice_service_task); in ice_init_pf()
3906 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_init_pf()
3908 mutex_init(&pf->avail_q_mutex); in ice_init_pf()
3909 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); in ice_init_pf()
3910 if (!pf->avail_txqs) in ice_init_pf()
3913 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); in ice_init_pf()
3914 if (!pf->avail_rxqs) { in ice_init_pf()
3915 bitmap_free(pf->avail_txqs); in ice_init_pf()
3916 pf->avail_txqs = NULL; in ice_init_pf()
3920 mutex_init(&pf->vfs.table_lock); in ice_init_pf()
3921 hash_init(pf->vfs.table); in ice_init_pf()
3933 static int ice_ena_msix_range(struct ice_pf *pf) in ice_ena_msix_range() argument
3936 struct device *dev = ice_pf_to_dev(pf); in ice_ena_msix_range()
3939 v_left = pf->hw.func_caps.common_cap.num_msix_vectors; in ice_ena_msix_range()
3950 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_ena_msix_range()
3972 pf->num_lan_msix = needed; in ice_ena_msix_range()
3977 if (ice_is_rdma_ena(pf)) { in ice_ena_msix_range()
3981 pf->num_rdma_msix = needed; in ice_ena_msix_range()
3986 pf->msix_entries = devm_kcalloc(dev, v_budget, in ice_ena_msix_range()
3987 sizeof(*pf->msix_entries), GFP_KERNEL); in ice_ena_msix_range()
3988 if (!pf->msix_entries) { in ice_ena_msix_range()
3994 pf->msix_entries[i].entry = i; in ice_ena_msix_range()
3997 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, in ice_ena_msix_range()
4011 pci_disable_msix(pf->pdev); in ice_ena_msix_range()
4018 if (ice_is_rdma_ena(pf)) { in ice_ena_msix_range()
4029 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_ena_msix_range()
4031 pf->num_rdma_msix = 0; in ice_ena_msix_range()
4032 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; in ice_ena_msix_range()
4038 pf->num_rdma_msix = v_min_rdma; in ice_ena_msix_range()
4039 pf->num_lan_msix = v_remain - v_min_rdma; in ice_ena_msix_range()
4044 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + in ice_ena_msix_range()
4046 pf->num_lan_msix = v_remain - pf->num_rdma_msix; in ice_ena_msix_range()
4050 pf->num_lan_msix); in ice_ena_msix_range()
4052 if (ice_is_rdma_ena(pf)) in ice_ena_msix_range()
4054 pf->num_rdma_msix); in ice_ena_msix_range()
4061 devm_kfree(dev, pf->msix_entries); in ice_ena_msix_range()
4069 pf->num_rdma_msix = 0; in ice_ena_msix_range()
4070 pf->num_lan_msix = 0; in ice_ena_msix_range()
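
When pci_enable_msix_range() grants fewer vectors than budgeted, ice_ena_msix_range() re-splits what remains after the miscellaneous/OICR vector between LAN and RDMA; per the lines above, RDMA keeps its AEQ vectors plus half of the rest and LAN takes the remainder. A standalone sketch of that arithmetic; the constants and branch thresholds below are simplified placeholders, and only the final split mirrors the code:

/* Sketch of the vector re-split on MSI-X shortfall. Constants are
 * placeholders for the demo, not the driver's real values. */
#include <stdio.h>

#define MISC_MSIX          1   /* OICR / "other interrupt cause" vector */
#define RDMA_NUM_AEQ_MSIX  4   /* placeholder for ICE_RDMA_NUM_AEQ_MSIX */
#define MIN_LAN_TXRX_MSIX  1   /* placeholder for ICE_MIN_LAN_TXRX_MSIX */
#define MIN_RDMA_MSIX      2   /* placeholder minimum for RDMA */

static void split_vectors(int v_actual, int *lan, int *rdma)
{
	int v_remain = v_actual - MISC_MSIX;

	if (v_remain < MIN_LAN_TXRX_MSIX + MIN_RDMA_MSIX) {
		*rdma = 0;                     /* too few: RDMA disabled entirely */
		*lan = MIN_LAN_TXRX_MSIX;
	} else if (v_remain < MIN_LAN_TXRX_MSIX + RDMA_NUM_AEQ_MSIX + 1) {
		*rdma = MIN_RDMA_MSIX;         /* bare-minimum RDMA */
		*lan = v_remain - MIN_RDMA_MSIX;
	} else {
		/* RDMA keeps its AEQ vectors plus half of what is left */
		*rdma = (v_remain - RDMA_NUM_AEQ_MSIX) / 2 + RDMA_NUM_AEQ_MSIX;
		*lan = v_remain - *rdma;
	}
}

int main(void)
{
	int lan, rdma;

	split_vectors(9, &lan, &rdma);
	printf("9 granted -> lan=%d rdma=%d misc=%d\n", lan, rdma, MISC_MSIX);
	return 0;
}
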
4078 static void ice_dis_msix(struct ice_pf *pf) in ice_dis_msix() argument
4080 pci_disable_msix(pf->pdev); in ice_dis_msix()
4081 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); in ice_dis_msix()
4082 pf->msix_entries = NULL; in ice_dis_msix()
4089 static void ice_clear_interrupt_scheme(struct ice_pf *pf) in ice_clear_interrupt_scheme() argument
4091 ice_dis_msix(pf); in ice_clear_interrupt_scheme()
4093 if (pf->irq_tracker) { in ice_clear_interrupt_scheme()
4094 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); in ice_clear_interrupt_scheme()
4095 pf->irq_tracker = NULL; in ice_clear_interrupt_scheme()
4103 static int ice_init_interrupt_scheme(struct ice_pf *pf) in ice_init_interrupt_scheme() argument
4107 vectors = ice_ena_msix_range(pf); in ice_init_interrupt_scheme()
4113 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), in ice_init_interrupt_scheme()
4114 struct_size(pf->irq_tracker, list, vectors), in ice_init_interrupt_scheme()
4116 if (!pf->irq_tracker) { in ice_init_interrupt_scheme()
4117 ice_dis_msix(pf); in ice_init_interrupt_scheme()
4122 pf->num_avail_sw_msix = (u16)vectors; in ice_init_interrupt_scheme()
4123 pf->irq_tracker->num_entries = (u16)vectors; in ice_init_interrupt_scheme()
4124 pf->irq_tracker->end = pf->irq_tracker->num_entries; in ice_init_interrupt_scheme()
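
The irq_tracker allocation uses struct_size(pf->irq_tracker, list, vectors) to size a structure that ends in a flexible array member. A plain-C illustration of what that computes (the field layout is invented for the demo; the kernel macro additionally guards against multiplication overflow):

/* Plain-C illustration of the struct_size() computation: header plus
 * n flexible-array elements. */
#include <stdio.h>
#include <stdlib.h>

struct irq_tracker {
	unsigned short num_entries;
	unsigned short end;
	unsigned short list[];   /* flexible array member */
};

int main(void)
{
	unsigned short vectors = 8;
	size_t size = sizeof(struct irq_tracker) +
		      (size_t)vectors * sizeof(unsigned short);
	struct irq_tracker *t = calloc(1, size);

	if (!t)
		return 1;
	t->num_entries = vectors;
	t->end = t->num_entries;   /* mirrors the two assignments above */
	printf("allocated %zu bytes for %u entries\n", size, vectors);
	free(t);
	return 0;
}
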
4161 struct ice_pf *pf = vsi->back; in ice_vsi_recfg_qs() local
4167 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { in ice_vsi_recfg_qs()
4182 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); in ice_vsi_recfg_qs()
4188 ice_pf_dcb_recfg(pf); in ice_vsi_recfg_qs()
4191 clear_bit(ICE_CFG_BUSY, pf->state); in ice_vsi_recfg_qs()
4202 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) in ice_set_safe_mode_vlan_cfg() argument
4204 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_set_safe_mode_vlan_cfg()
4216 hw = &pf->hw; in ice_set_safe_mode_vlan_cfg()
4255 struct ice_pf *pf = hw->back; in ice_log_pkg_init() local
4258 dev = ice_pf_to_dev(pf); in ice_log_pkg_init()
4338 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) in ice_load_pkg() argument
4341 struct device *dev = ice_pf_to_dev(pf); in ice_load_pkg()
4342 struct ice_hw *hw = &pf->hw; in ice_load_pkg()
4359 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4366 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4377 static void ice_verify_cacheline_size(struct ice_pf *pf) in ice_verify_cacheline_size() argument
4379 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) in ice_verify_cacheline_size()
4380 …dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts… in ice_verify_cacheline_size()
4390 static int ice_send_version(struct ice_pf *pf) in ice_send_version() argument
4400 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); in ice_send_version()
4409 static int ice_init_fdir(struct ice_pf *pf) in ice_init_fdir() argument
4411 struct device *dev = ice_pf_to_dev(pf); in ice_init_fdir()
4418 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); in ice_init_fdir()
4430 mutex_init(&pf->hw.fdir_fltr_lock); in ice_init_fdir()
4432 err = ice_fdir_create_dflt_rules(pf); in ice_init_fdir()
4439 ice_fdir_release_flows(&pf->hw); in ice_init_fdir()
4443 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_init_fdir()
4444 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_init_fdir()
4445 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_init_fdir()
4454 static char *ice_get_opt_fw_name(struct ice_pf *pf) in ice_get_opt_fw_name() argument
4459 struct pci_dev *pdev = pf->pdev; in ice_get_opt_fw_name()
4484 static void ice_request_fw(struct ice_pf *pf) in ice_request_fw() argument
4486 char *opt_fw_filename = ice_get_opt_fw_name(pf); in ice_request_fw()
4488 struct device *dev = ice_pf_to_dev(pf); in ice_request_fw()
4503 ice_load_pkg(firmware, pf); in ice_request_fw()
4517 ice_load_pkg(firmware, pf); in ice_request_fw()
4525 static void ice_print_wake_reason(struct ice_pf *pf) in ice_print_wake_reason() argument
4527 u32 wus = pf->wakeup_reason; in ice_print_wake_reason()
4545 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); in ice_print_wake_reason()
4552 static int ice_register_netdev(struct ice_pf *pf) in ice_register_netdev() argument
4557 vsi = ice_get_main_vsi(pf); in ice_register_netdev()
4568 err = ice_devlink_create_pf_port(pf); in ice_register_netdev()
4572 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); in ice_register_netdev()
4596 struct ice_pf *pf; in ice_probe() local
4618 pf = ice_allocate_pf(dev); in ice_probe()
4619 if (!pf) in ice_probe()
4623 pf->aux_idx = -1; in ice_probe()
4635 pf->pdev = pdev; in ice_probe()
4636 pci_set_drvdata(pdev, pf); in ice_probe()
4637 set_bit(ICE_DOWN, pf->state); in ice_probe()
4639 set_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
4641 hw = &pf->hw; in ice_probe()
4645 hw->back = pf; in ice_probe()
4655 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); in ice_probe()
4669 ice_init_feature_support(pf); in ice_probe()
4671 ice_request_fw(pf); in ice_probe()
4677 if (ice_is_safe_mode(pf)) { in ice_probe()
4688 err = ice_init_pf(pf); in ice_probe()
4694 ice_devlink_init_regions(pf); in ice_probe()
4696 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; in ice_probe()
4697 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; in ice_probe()
4698 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in ice_probe()
4699 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; in ice_probe()
4701 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { in ice_probe()
4702 pf->hw.udp_tunnel_nic.tables[i].n_entries = in ice_probe()
4703 pf->hw.tnl.valid_count[TNL_VXLAN]; in ice_probe()
4704 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = in ice_probe()
4708 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { in ice_probe()
4709 pf->hw.udp_tunnel_nic.tables[i].n_entries = in ice_probe()
4710 pf->hw.tnl.valid_count[TNL_GENEVE]; in ice_probe()
4711 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = in ice_probe()
4716 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; in ice_probe()
4717 if (!pf->num_alloc_vsi) { in ice_probe()
4721 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in ice_probe()
4722 dev_warn(&pf->pdev->dev, in ice_probe()
4724 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in ice_probe()
4725 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in ice_probe()
4728 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), in ice_probe()
4730 if (!pf->vsi) { in ice_probe()
4735 err = ice_init_interrupt_scheme(pf); in ice_probe()
4747 err = ice_req_irq_msix_misc(pf); in ice_probe()
4754 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); in ice_probe()
4755 if (!pf->first_sw) { in ice_probe()
4761 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; in ice_probe()
4763 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; in ice_probe()
4765 pf->first_sw->pf = pf; in ice_probe()
4768 pf->first_sw->sw_id = hw->port_info->sw_id; in ice_probe()
4770 err = ice_setup_pf_sw(pf); in ice_probe()
4776 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
4779 err = ice_send_version(pf); in ice_probe()
4787 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_probe()
4789 err = ice_init_link_events(pf->hw.port_info); in ice_probe()
4796 err = ice_init_nvm_phy_type(pf->hw.port_info); in ice_probe()
4801 err = ice_update_link_info(pf->hw.port_info); in ice_probe()
4805 ice_init_link_dflt_override(pf->hw.port_info); in ice_probe()
4807 ice_check_link_cfg_err(pf, in ice_probe()
4808 pf->hw.port_info->phy.link_info.link_cfg_err); in ice_probe()
4811 if (pf->hw.port_info->phy.link_info.link_info & in ice_probe()
4814 err = ice_init_phy_user_cfg(pf->hw.port_info); in ice_probe()
4818 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { in ice_probe()
4819 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_probe()
4825 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_probe()
4828 ice_verify_cacheline_size(pf); in ice_probe()
4831 pf->wakeup_reason = rd32(hw, PFPM_WUS); in ice_probe()
4834 ice_print_wake_reason(pf); in ice_probe()
4842 if (ice_is_safe_mode(pf)) { in ice_probe()
4843 ice_set_safe_mode_vlan_cfg(pf); in ice_probe()
4848 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_probe()
4849 ice_ptp_init(pf); in ice_probe()
4851 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_probe()
4852 ice_gnss_init(pf); in ice_probe()
4855 if (ice_init_fdir(pf)) in ice_probe()
4859 if (ice_init_pf_dcb(pf, false)) { in ice_probe()
4860 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_probe()
4861 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); in ice_probe()
4863 ice_cfg_lldp_mib_change(&pf->hw, true); in ice_probe()
4866 if (ice_init_lag(pf)) in ice_probe()
4870 pcie_print_link_status(pf->pdev); in ice_probe()
4873 err = ice_register_netdev(pf); in ice_probe()
4877 err = ice_devlink_register_params(pf); in ice_probe()
4882 clear_bit(ICE_DOWN, pf->state); in ice_probe()
4883 if (ice_is_rdma_ena(pf)) { in ice_probe()
4884 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); in ice_probe()
4885 if (pf->aux_idx < 0) { in ice_probe()
4891 err = ice_init_rdma(pf); in ice_probe()
4901 ice_devlink_register(pf); in ice_probe()
4905 pf->adev = NULL; in ice_probe()
4906 ida_free(&ice_aux_ida, pf->aux_idx); in ice_probe()
4908 ice_devlink_unregister_params(pf); in ice_probe()
4911 ice_vsi_release_all(pf); in ice_probe()
4913 set_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
4914 set_bit(ICE_DOWN, pf->state); in ice_probe()
4915 devm_kfree(dev, pf->first_sw); in ice_probe()
4917 ice_free_irq_msix_misc(pf); in ice_probe()
4919 ice_clear_interrupt_scheme(pf); in ice_probe()
4921 devm_kfree(dev, pf->vsi); in ice_probe()
4923 ice_deinit_pf(pf); in ice_probe()
4924 ice_devlink_destroy_regions(pf); in ice_probe()
4938 static void ice_set_wake(struct ice_pf *pf) in ice_set_wake() argument
4940 struct ice_hw *hw = &pf->hw; in ice_set_wake()
4941 bool wol = pf->wol_ena; in ice_set_wake()
4961 static void ice_setup_mc_magic_wake(struct ice_pf *pf) in ice_setup_mc_magic_wake() argument
4963 struct device *dev = ice_pf_to_dev(pf); in ice_setup_mc_magic_wake()
4964 struct ice_hw *hw = &pf->hw; in ice_setup_mc_magic_wake()
4970 if (!pf->wol_ena) in ice_setup_mc_magic_wake()
4973 vsi = ice_get_main_vsi(pf); in ice_setup_mc_magic_wake()
4999 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_remove() local
5002 ice_devlink_unregister(pf); in ice_remove()
5004 if (!ice_is_reset_in_progress(pf->state)) in ice_remove()
5009 ice_tc_indir_block_remove(pf); in ice_remove()
5011 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { in ice_remove()
5012 set_bit(ICE_VF_RESETS_DISABLED, pf->state); in ice_remove()
5013 ice_free_vfs(pf); in ice_remove()
5016 ice_service_task_stop(pf); in ice_remove()
5018 ice_aq_cancel_waiting_tasks(pf); in ice_remove()
5019 ice_unplug_aux_dev(pf); in ice_remove()
5020 if (pf->aux_idx >= 0) in ice_remove()
5021 ida_free(&ice_aux_ida, pf->aux_idx); in ice_remove()
5022 ice_devlink_unregister_params(pf); in ice_remove()
5023 set_bit(ICE_DOWN, pf->state); in ice_remove()
5025 ice_deinit_lag(pf); in ice_remove()
5026 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_remove()
5027 ice_ptp_release(pf); in ice_remove()
5028 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_remove()
5029 ice_gnss_exit(pf); in ice_remove()
5030 if (!ice_is_safe_mode(pf)) in ice_remove()
5031 ice_remove_arfs(pf); in ice_remove()
5032 ice_setup_mc_magic_wake(pf); in ice_remove()
5033 ice_vsi_release_all(pf); in ice_remove()
5034 mutex_destroy(&(&pf->hw)->fdir_fltr_lock); in ice_remove()
5035 ice_set_wake(pf); in ice_remove()
5036 ice_free_irq_msix_misc(pf); in ice_remove()
5037 ice_for_each_vsi(pf, i) { in ice_remove()
5038 if (!pf->vsi[i]) in ice_remove()
5040 ice_vsi_free_q_vectors(pf->vsi[i]); in ice_remove()
5042 ice_deinit_pf(pf); in ice_remove()
5043 ice_devlink_destroy_regions(pf); in ice_remove()
5044 ice_deinit_hw(&pf->hw); in ice_remove()
5050 ice_reset(&pf->hw, ICE_RESET_PFR); in ice_remove()
5052 ice_clear_interrupt_scheme(pf); in ice_remove()
5063 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_shutdown() local
5068 pci_wake_from_d3(pdev, pf->wol_ena); in ice_shutdown()
5080 static void ice_prepare_for_shutdown(struct ice_pf *pf) in ice_prepare_for_shutdown() argument
5082 struct ice_hw *hw = &pf->hw; in ice_prepare_for_shutdown()
5087 ice_vc_notify_reset(pf); in ice_prepare_for_shutdown()
5089 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); in ice_prepare_for_shutdown()
5092 ice_pf_dis_all_vsi(pf, false); in ice_prepare_for_shutdown()
5094 ice_for_each_vsi(pf, v) in ice_prepare_for_shutdown()
5095 if (pf->vsi[v]) in ice_prepare_for_shutdown()
5096 pf->vsi[v]->vsi_num = 0; in ice_prepare_for_shutdown()
5111 static int ice_reinit_interrupt_scheme(struct ice_pf *pf) in ice_reinit_interrupt_scheme() argument
5113 struct device *dev = ice_pf_to_dev(pf); in ice_reinit_interrupt_scheme()
5120 ret = ice_init_interrupt_scheme(pf); in ice_reinit_interrupt_scheme()
5127 ice_for_each_vsi(pf, v) { in ice_reinit_interrupt_scheme()
5128 if (!pf->vsi[v]) in ice_reinit_interrupt_scheme()
5131 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5134 ice_vsi_map_rings_to_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5137 ret = ice_req_irq_msix_misc(pf); in ice_reinit_interrupt_scheme()
5148 if (pf->vsi[v]) in ice_reinit_interrupt_scheme()
5149 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5164 struct ice_pf *pf; in ice_suspend() local
5167 pf = pci_get_drvdata(pdev); in ice_suspend()
5169 if (!ice_pf_state_is_nominal(pf)) { in ice_suspend()
5180 disabled = ice_service_task_stop(pf); in ice_suspend()
5182 ice_unplug_aux_dev(pf); in ice_suspend()
5185 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { in ice_suspend()
5187 ice_service_task_restart(pf); in ice_suspend()
5191 if (test_bit(ICE_DOWN, pf->state) || in ice_suspend()
5192 ice_is_reset_in_progress(pf->state)) { in ice_suspend()
5195 ice_service_task_restart(pf); in ice_suspend()
5199 ice_setup_mc_magic_wake(pf); in ice_suspend()
5201 ice_prepare_for_shutdown(pf); in ice_suspend()
5203 ice_set_wake(pf); in ice_suspend()
5210 ice_free_irq_msix_misc(pf); in ice_suspend()
5211 ice_for_each_vsi(pf, v) { in ice_suspend()
5212 if (!pf->vsi[v]) in ice_suspend()
5214 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_suspend()
5216 ice_clear_interrupt_scheme(pf); in ice_suspend()
5219 pci_wake_from_d3(pdev, pf->wol_ena); in ice_suspend()
5232 struct ice_pf *pf; in ice_resume() local
5249 pf = pci_get_drvdata(pdev); in ice_resume()
5250 hw = &pf->hw; in ice_resume()
5252 pf->wakeup_reason = rd32(hw, PFPM_WUS); in ice_resume()
5253 ice_print_wake_reason(pf); in ice_resume()
5258 ret = ice_reinit_interrupt_scheme(pf); in ice_resume()
5262 clear_bit(ICE_DOWN, pf->state); in ice_resume()
5266 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_resume()
5268 if (ice_schedule_reset(pf, reset_type)) in ice_resume()
5271 clear_bit(ICE_SUSPENDED, pf->state); in ice_resume()
5272 ice_service_task_restart(pf); in ice_resume()
5275 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_resume()
5292 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_detected() local
5294 if (!pf) { in ice_pci_err_detected()
5300 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_detected()
5301 ice_service_task_stop(pf); in ice_pci_err_detected()
5303 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_detected()
5304 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_detected()
5305 ice_prepare_for_reset(pf, ICE_RESET_PFR); in ice_pci_err_detected()
5321 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_slot_reset() local
5338 reg = rd32(&pf->hw, GLGEN_RTRIG); in ice_pci_err_slot_reset()
5363 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_resume() local
5365 if (!pf) { in ice_pci_err_resume()
5371 if (test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_resume()
5379 ice_do_reset(pf, ICE_RESET_PFR); in ice_pci_err_resume()
5380 ice_service_task_restart(pf); in ice_pci_err_resume()
5381 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_pci_err_resume()
5390 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_reset_prepare() local
5392 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_reset_prepare()
5393 ice_service_task_stop(pf); in ice_pci_err_reset_prepare()
5395 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_reset_prepare()
5396 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_reset_prepare()
5397 ice_prepare_for_reset(pf, ICE_RESET_PFR); in ice_pci_err_reset_prepare()
5528 struct ice_pf *pf = vsi->back; in ice_set_mac_address() local
5529 struct ice_hw *hw = &pf->hw; in ice_set_mac_address()
5546 if (test_bit(ICE_DOWN, pf->state) || in ice_set_mac_address()
5547 ice_is_reset_in_progress(pf->state)) { in ice_set_mac_address()
5553 if (ice_chnl_dmac_fltr_cnt(pf)) { in ice_set_mac_address()
5958 struct ice_pf *pf = vsi->back; in ice_set_features() local
5968 if (ice_is_reset_in_progress(pf->state)) { in ice_set_features()
5997 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { in ice_set_features()
5998 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); in ice_set_features()
6004 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_set_features()
6006 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_set_features()
6210 struct ice_pf *pf = vsi->back; in ice_up_complete() local
6233 if (!ice_is_e810(&pf->hw)) in ice_up_complete()
6234 ice_ptp_link_change(pf, pf->hw.pf_id, true); in ice_up_complete()
6241 ice_service_task_schedule(pf); in ice_up_complete()
6375 struct ice_pf *pf = vsi->back; in ice_update_vsi_stats() local
6378 test_bit(ICE_CFG_BUSY, pf->state)) in ice_update_vsi_stats()
6394 cur_ns->rx_crc_errors = pf->stats.crc_errors; in ice_update_vsi_stats()
6395 cur_ns->rx_errors = pf->stats.crc_errors + in ice_update_vsi_stats()
6396 pf->stats.illegal_bytes + in ice_update_vsi_stats()
6397 pf->stats.rx_len_errors + in ice_update_vsi_stats()
6398 pf->stats.rx_undersize + in ice_update_vsi_stats()
6399 pf->hw_csum_rx_error + in ice_update_vsi_stats()
6400 pf->stats.rx_jabber + in ice_update_vsi_stats()
6401 pf->stats.rx_fragments + in ice_update_vsi_stats()
6402 pf->stats.rx_oversize; in ice_update_vsi_stats()
6403 cur_ns->rx_length_errors = pf->stats.rx_len_errors; in ice_update_vsi_stats()
6405 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; in ice_update_vsi_stats()
6413 void ice_update_pf_stats(struct ice_pf *pf) in ice_update_pf_stats() argument
6416 struct ice_hw *hw = &pf->hw; in ice_update_pf_stats()
6421 prev_ps = &pf->stats_prev; in ice_update_pf_stats()
6422 cur_ps = &pf->stats; in ice_update_pf_stats()
6424 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6428 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6432 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6436 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6440 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, in ice_update_pf_stats()
6444 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6448 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6452 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6456 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6460 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6464 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6467 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6470 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6473 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6476 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6479 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6482 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6485 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6488 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6491 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6494 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6497 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6500 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6503 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6510 pf->stat_prev_loaded, &prev_ps->fd_sb_match, in ice_update_pf_stats()
6512 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6515 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6518 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6521 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6524 ice_update_dcb_stats(pf); in ice_update_pf_stats()
6526 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6529 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6532 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6536 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6540 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6543 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6546 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6549 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6552 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6555 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; in ice_update_pf_stats()
6557 pf->stat_prev_loaded = true; in ice_update_pf_stats()
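Every GLPRT_*/PRTRPB_* read above funnels through the same rollover-safe accumulation: the hardware counters are free-running (they are not cleared by a PF reset), so the driver latches a baseline on the first read and from then on adds deltas, compensating when a 40-bit counter wraps. Below is a minimal userspace model of that pattern; the names are invented, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STAT40_MASK ((1ULL << 40) - 1)

/* Model of the ice_stat_update40()-style helper: accumulate the delta
 * between consecutive raw reads of a free-running 40-bit counter.
 */
static void stat_update40(uint64_t new_raw, bool prev_loaded,
                          uint64_t *prev_raw, uint64_t *total)
{
    new_raw &= STAT40_MASK;

    if (!prev_loaded)
        *prev_raw = new_raw;            /* first read: baseline only */

    if (new_raw >= *prev_raw)
        *total += new_raw - *prev_raw;
    else                                /* counter wrapped past 2^40 */
        *total += (new_raw + (1ULL << 40)) - *prev_raw;

    *prev_raw = new_raw;                /* prepare for the next read */
}

int main(void)
{
    uint64_t prev = 0, total = 0;

    stat_update40(STAT40_MASK - 5, false, &prev, &total);  /* baseline */
    stat_update40(10, true, &prev, &total);                /* wraps */
    printf("accumulated: %llu\n", (unsigned long long)total); /* 16 */
    return 0;
}

This mirrors why pf->stat_prev_loaded exists and why it is only set true at the very end of the function: until the first full pass completes, every counter's baseline is untrusted.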
6763 struct ice_pf *pf = vsi->back; in ice_vsi_open_ctrl() local
6767 dev = ice_pf_to_dev(pf); in ice_vsi_open_ctrl()
6819 struct ice_pf *pf = vsi->back; in ice_vsi_open() local
6836 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); in ice_vsi_open()
6874 static void ice_vsi_release_all(struct ice_pf *pf) in ice_vsi_release_all() argument
6878 if (!pf->vsi) in ice_vsi_release_all()
6881 ice_for_each_vsi(pf, i) { in ice_vsi_release_all()
6882 if (!pf->vsi[i]) in ice_vsi_release_all()
6885 if (pf->vsi[i]->type == ICE_VSI_CHNL) in ice_vsi_release_all()
6888 err = ice_vsi_release(pf->vsi[i]); in ice_vsi_release_all()
6890 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", in ice_vsi_release_all()
6891 i, err, pf->vsi[i]->vsi_num); in ice_vsi_release_all()
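The loop above is deliberately best-effort: a failing ice_vsi_release() only produces a debug message and the walk continues, so one stuck VSI cannot strand the others during teardown (note it also skips ICE_VSI_CHNL entries, which are torn down through their parent channel). A standalone sketch of that shape, assuming a sparse table like pf->vsi[]; every name here is a stand-in.

#include <stdio.h>

struct fake_vsi { int vsi_num; };

/* Pretend release that fails for odd-numbered VSIs. */
static int fake_vsi_release(struct fake_vsi *vsi)
{
    return (vsi->vsi_num & 1) ? -5 /* -EIO */ : 0;
}

static void release_all(struct fake_vsi **vsi, int count)
{
    for (int i = 0; i < count; i++) {
        int err;

        if (!vsi[i])
            continue;       /* sparse array, like pf->vsi[] */

        err = fake_vsi_release(vsi[i]);
        if (err)            /* log, but keep releasing */
            fprintf(stderr,
                    "failed to release vsi[%d], err %d, vsi_num = %d\n",
                    i, err, vsi[i]->vsi_num);
    }
}

int main(void)
{
    struct fake_vsi a = { 0 }, b = { 1 };
    struct fake_vsi *table[3] = { &a, NULL, &b };

    release_all(table, 3);
    return 0;
}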
6902 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) in ice_vsi_rebuild_by_type() argument
6904 struct device *dev = ice_pf_to_dev(pf); in ice_vsi_rebuild_by_type()
6907 ice_for_each_vsi(pf, i) { in ice_vsi_rebuild_by_type()
6908 struct ice_vsi *vsi = pf->vsi[i]; in ice_vsi_rebuild_by_type()
6922 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
6932 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
6953 static void ice_update_pf_netdev_link(struct ice_pf *pf) in ice_update_pf_netdev_link() argument
6958 ice_for_each_vsi(pf, i) { in ice_update_pf_netdev_link()
6959 struct ice_vsi *vsi = pf->vsi[i]; in ice_update_pf_netdev_link()
6964 ice_get_link_status(pf->vsi[i]->port_info, &link_up); in ice_update_pf_netdev_link()
6966 netif_carrier_on(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
6967 netif_tx_wake_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
6969 netif_carrier_off(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
6970 netif_tx_stop_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
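After a rebuild the carrier state is re-derived from a fresh link query instead of trusting whatever the netdev believed before the reset, and the Tx queues are woken or stopped to match. The same control flow, runnable standalone with the kernel netif_* calls reduced to printf stubs (everything here is a stand-in):

#include <stdbool.h>
#include <stdio.h>

static void carrier_on(const char *nd)
{
    printf("%s: carrier on, wake all Tx queues\n", nd);
}

static void carrier_off(const char *nd)
{
    printf("%s: carrier off, stop all Tx queues\n", nd);
}

/* Stands in for ice_get_link_status(); pretend only port 0 has link. */
static bool get_link_status(int port)
{
    return port == 0;
}

static void update_netdev_link(const char *const *netdevs,
                               const int *ports, int n)
{
    for (int i = 0; i < n; i++) {
        if (get_link_status(ports[i]))
            carrier_on(netdevs[i]);
        else
            carrier_off(netdevs[i]);
    }
}

int main(void)
{
    const char *const names[] = { "eth0", "eth1" };
    const int ports[] = { 0, 1 };

    update_netdev_link(names, ports, 2);
    return 0;
}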
6985 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_rebuild() argument
6987 struct device *dev = ice_pf_to_dev(pf); in ice_rebuild()
6988 struct ice_hw *hw = &pf->hw; in ice_rebuild()
6992 if (test_bit(ICE_DOWN, pf->state)) in ice_rebuild()
7003 pf->fw_emp_reset_disabled = false; in ice_rebuild()
7015 if (!ice_is_safe_mode(pf)) { in ice_rebuild()
7021 ice_load_pkg(NULL, pf); in ice_rebuild()
7030 if (pf->first_sw->dflt_vsi_ena) in ice_rebuild()
7033 pf->first_sw->dflt_vsi = NULL; in ice_rebuild()
7034 pf->first_sw->dflt_vsi_ena = false; in ice_rebuild()
7058 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_rebuild()
7067 err = ice_req_irq_msix_misc(pf); in ice_rebuild()
7073 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7088 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_rebuild()
7089 ice_dcb_rebuild(pf); in ice_rebuild()
7095 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_rebuild()
7096 ice_ptp_reset(pf); in ice_rebuild()
7098 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_rebuild()
7099 ice_gnss_init(pf); in ice_rebuild()
7102 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); in ice_rebuild()
7109 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_rebuild()
7110 ice_ptp_cfg_timestamp(pf, false); in ice_rebuild()
7112 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); in ice_rebuild()
7119 err = ice_rebuild_channels(pf); in ice_rebuild()
7128 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7129 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); in ice_rebuild()
7140 ice_fdir_replay_fltrs(pf); in ice_rebuild()
7142 ice_rebuild_arfs(pf); in ice_rebuild()
7145 ice_update_pf_netdev_link(pf); in ice_rebuild()
7148 err = ice_send_version(pf); in ice_rebuild()
7158 clear_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7160 ice_plug_aux_dev(pf); in ice_rebuild()
7168 set_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7171 set_bit(ICE_NEEDS_RESTART, pf->state); in ice_rebuild()
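ice_rebuild() is a long run of ordered bring-up steps (control queues, misc IRQ, flow director, DCB, PTP/GNSS, then the per-type VSI rebuilds), and the final lines above are its shared error tail: ICE_RESET_FAILED plus ICE_NEEDS_RESTART. The usual kernel shape for such a sequence is goto-based unwinding; here is a minimal standalone model of that shape, with invented step names rather than a reconstruction of the real function.

#include <stdbool.h>
#include <stdio.h>

static int step(const char *name, bool ok)
{
    printf("rebuild step: %s\n", name);
    return ok ? 0 : -1;
}

static int rebuild(bool irq_ok, bool vsi_ok)
{
    int err;

    err = step("reinit control queues", true);
    if (err)
        goto err_init_ctrlq;

    err = step("request misc IRQ", irq_ok);
    if (err)
        goto err_init_ctrlq;    /* nothing later to unwind yet */

    err = step("rebuild PF VSIs", vsi_ok);
    if (err)
        goto err_vsi_rebuild;

    printf("rebuild done, clear ICE_RESET_FAILED\n");
    return 0;

err_vsi_rebuild:
    printf("unwind: release VSIs\n");
err_init_ctrlq:
    printf("set ICE_RESET_FAILED and ICE_NEEDS_RESTART\n");
    return err;
}

int main(void)
{
    rebuild(true, false);   /* exercise the error tail */
    return 0;
}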
7198 struct ice_pf *pf = vsi->back; in ice_change_mtu() local
7219 if (ice_is_reset_in_progress(pf->state)) { in ice_change_mtu()
7251 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); in ice_change_mtu()
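The MTU path cannot run concurrently with a reset, so the function polls ice_is_reset_in_progress() a bounded number of times with a short sleep before giving up with -EBUSY, and on success flags ICE_FLAG_MTU_CHANGED for the service task to act on. A runnable model of the bounded wait, with a fake predicate standing in for the state bits:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int fake_resets_left = 3;    /* pretend the reset clears shortly */

static bool reset_in_progress(void)
{
    return fake_resets_left-- > 0;
}

static int wait_for_reset(void)
{
    int count = 0;

    while (reset_in_progress()) {
        if (++count >= 100)
            return -EBUSY;  /* device stayed busy too long */
        usleep(1000);       /* usleep_range() in the kernel */
    }
    return 0;
}

int main(void)
{
    printf("wait_for_reset() = %d\n", wait_for_reset());
    return 0;
}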
7265 struct ice_pf *pf = np->vsi->back; in ice_eth_ioctl() local
7269 return ice_ptp_get_ts_config(pf, ifr); in ice_eth_ioctl()
7271 return ice_ptp_set_ts_config(pf, ifr); in ice_eth_ioctl()
7442 struct ice_pf *pf = vsi->back; in ice_bridge_getlink() local
7445 bmode = pf->first_sw->bridge_mode; in ice_bridge_getlink()
7513 struct ice_pf *pf = np->vsi->back; in ice_bridge_setlink() local
7515 struct ice_hw *hw = &pf->hw; in ice_bridge_setlink()
7519 pf_sw = pf->first_sw; in ice_bridge_setlink()
7537 ice_for_each_vsi(pf, v) { in ice_bridge_setlink()
7538 if (!pf->vsi[v]) in ice_bridge_setlink()
7540 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); in ice_bridge_setlink()
7575 struct ice_pf *pf = vsi->back; in ice_tx_timeout() local
7578 pf->tx_timeout_count++; in ice_tx_timeout()
7584 if (ice_is_pfc_causing_hung_q(pf, txqueue)) { in ice_tx_timeout()
7585 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", in ice_tx_timeout()
7601 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) in ice_tx_timeout()
7602 pf->tx_timeout_recovery_level = 1; in ice_tx_timeout()
7603 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + in ice_tx_timeout()
7608 struct ice_hw *hw = &pf->hw; in ice_tx_timeout()
7621 pf->tx_timeout_last_recovery = jiffies; in ice_tx_timeout()
7623 pf->tx_timeout_recovery_level, txqueue); in ice_tx_timeout()
7625 switch (pf->tx_timeout_recovery_level) { in ice_tx_timeout()
7627 set_bit(ICE_PFR_REQ, pf->state); in ice_tx_timeout()
7630 set_bit(ICE_CORER_REQ, pf->state); in ice_tx_timeout()
7633 set_bit(ICE_GLOBR_REQ, pf->state); in ice_tx_timeout()
7637 set_bit(ICE_DOWN, pf->state); in ice_tx_timeout()
7639 set_bit(ICE_SERVICE_DIS, pf->state); in ice_tx_timeout()
7643 ice_service_task_schedule(pf); in ice_tx_timeout()
7644 pf->tx_timeout_recovery_level++; in ice_tx_timeout()
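The policy above escalates with every timeout: a PF reset first, then a CORE reset, then a GLOBAL reset, and finally giving up via ICE_DOWN and ICE_SERVICE_DIS; a quiet period of HZ * 20 drops the level back to 1, and the time_before() branch returns early for back-to-back timeouts. A compact runnable model of that escalation, reproducing the kernel's wrap-safe time_after() comparison inline; the names are illustrative.

#include <stdio.h>

#define HZ 100
/* Same wrap-safe trick as the kernel macro: signed difference. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

static unsigned long jiffies;       /* fake clock */
static unsigned long last_recovery;
static int recovery_level = 1;

static void tx_timeout(void)
{
    if (time_after(jiffies, last_recovery + HZ * 20))
        recovery_level = 1;         /* been quiet: start gently */

    switch (recovery_level) {
    case 1:
        puts("request PF reset (ICE_PFR_REQ)");
        break;
    case 2:
        puts("request CORE reset (ICE_CORER_REQ)");
        break;
    case 3:
        puts("request GLOBAL reset (ICE_GLOBR_REQ)");
        break;
    default:
        puts("unrecoverable: ICE_DOWN + ICE_SERVICE_DIS");
        return;
    }

    last_recovery = jiffies;
    recovery_level++;
}

int main(void)
{
    tx_timeout();           /* level 1: PF reset */
    jiffies += HZ;          /* 1s later, inside the window */
    tx_timeout();           /* escalates: CORE reset */
    jiffies += HZ * 30;     /* long quiet period */
    tx_timeout();           /* back to level 1: PF reset */
    return 0;
}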
7708 struct ice_pf *pf = vsi->back; in ice_validate_mqprio_qopt() local
7722 dev = ice_pf_to_dev(pf); in ice_validate_mqprio_qopt()
7838 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) in ice_add_vsi_to_fdir() argument
7840 struct device *dev = ice_pf_to_dev(pf); in ice_add_vsi_to_fdir()
7848 hw = &pf->hw; in ice_add_vsi_to_fdir()
7902 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) in ice_add_channel() argument
7904 struct device *dev = ice_pf_to_dev(pf); in ice_add_channel()
7912 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); in ice_add_channel()
7918 ice_add_vsi_to_fdir(pf, vsi); in ice_add_channel()
8020 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_hw_channel() argument
8023 struct device *dev = ice_pf_to_dev(pf); in ice_setup_hw_channel()
8029 ret = ice_add_channel(pf, sw_id, ch); in ice_setup_hw_channel()
8058 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_channel() argument
8061 struct device *dev = ice_pf_to_dev(pf); in ice_setup_channel()
8070 sw_id = pf->first_sw->sw_id; in ice_setup_channel()
8073 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); in ice_setup_channel()
8111 struct ice_pf *pf = vsi->back; in ice_create_q_channel() local
8117 dev = ice_pf_to_dev(pf); in ice_create_q_channel()
8129 if (!ice_setup_channel(pf, vsi, ch)) { in ice_create_q_channel()
8159 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) in ice_rem_all_chnl_fltrs() argument
8166 &pf->tc_flower_fltr_list, in ice_rem_all_chnl_fltrs()
8178 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); in ice_rem_all_chnl_fltrs()
8181 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", in ice_rem_all_chnl_fltrs()
8184 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", in ice_rem_all_chnl_fltrs()
8194 pf->num_dmac_chnl_fltrs--; in ice_rem_all_chnl_fltrs()
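The bulk removal above is tolerant of rules that have already vanished: -ENOENT from ice_rem_adv_rule_by_id() is only a debug message, any other status is a real error, and the software bookkeeping (the list node, counters such as pf->num_dmac_chnl_fltrs) appears to be dropped either way so teardown converges. A small model of that tolerance, with a fake rule table:

#include <errno.h>
#include <stdio.h>

/* Pretend rule 7 was already removed by firmware or a prior pass. */
static int rem_adv_rule_by_id(int rule_id)
{
    return (rule_id == 7) ? -ENOENT : 0;
}

static void rem_all_fltrs(const int *rules, int n)
{
    for (int i = 0; i < n; i++) {
        int status = rem_adv_rule_by_id(rules[i]);

        if (status == -ENOENT)
            fprintf(stderr,
                    "filter (rule_id %d) does not exist\n",
                    rules[i]);
        else if (status)
            fprintf(stderr,
                    "failed to delete filter, status %d\n",
                    status);
        /* software counters would be decremented here either way */
    }
}

int main(void)
{
    const int rules[] = { 3, 7, 11 };

    rem_all_fltrs(rules, 3);
    return 0;
}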
8213 struct ice_pf *pf = vsi->back; in ice_remove_q_channels() local
8218 ice_rem_all_chnl_fltrs(pf); in ice_remove_q_channels()
8222 struct ice_hw *hw = &pf->hw; in ice_remove_q_channels()
8260 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); in ice_remove_q_channels()
8290 static int ice_rebuild_channels(struct ice_pf *pf) in ice_rebuild_channels() argument
8292 struct device *dev = ice_pf_to_dev(pf); in ice_rebuild_channels()
8300 main_vsi = ice_get_main_vsi(pf); in ice_rebuild_channels()
8304 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || in ice_rebuild_channels()
8319 ice_for_each_vsi(pf, i) { in ice_rebuild_channels()
8322 vsi = pf->vsi[i]; in ice_rebuild_channels()
8339 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_rebuild_channels()
8342 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_rebuild_channels()
8406 struct ice_pf *pf = vsi->back; in ice_create_q_channels() local
8436 dev_err(ice_pf_to_dev(pf), in ice_create_q_channels()
8443 dev_dbg(ice_pf_to_dev(pf), in ice_create_q_channels()
8464 struct ice_pf *pf = vsi->back; in ice_setup_tc_mqprio_qdisc() local
8471 dev = ice_pf_to_dev(pf); in ice_setup_tc_mqprio_qdisc()
8476 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
8496 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
8502 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_setup_tc_mqprio_qdisc()
8518 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_setup_tc_mqprio_qdisc()
8521 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
8522 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), in ice_setup_tc_mqprio_qdisc()
8524 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), in ice_setup_tc_mqprio_qdisc()
8561 clear_bit(ICE_RESET_FAILED, pf->state); in ice_setup_tc_mqprio_qdisc()
8577 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
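When mqprio is being torn down (the !hw && !ICE_FLAG_TC_MQPRIO case above), the requested default queue counts are re-derived as the minimum of what the device still has available and the number of online CPUs. That sizing rule in a few lines of portable C; the available counts below are made up.

#include <stdio.h>
#include <unistd.h>

static int min_int(int a, int b)
{
    return a < b ? a : b;
}

int main(void)
{
    int avail_txq = 256, avail_rxq = 64;    /* device-side leftovers */
    int cpus = (int)sysconf(_SC_NPROCESSORS_ONLN); /* num_online_cpus() */

    printf("req_txq = %d, req_rxq = %d\n",
           min_int(avail_txq, cpus), min_int(avail_rxq, cpus));
    return 0;
}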
8630 struct ice_pf *pf = np->vsi->back; in ice_setup_tc() local
8641 mutex_lock(&pf->tc_mutex); in ice_setup_tc()
8643 mutex_unlock(&pf->tc_mutex); in ice_setup_tc()
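All TC reconfiguration is serialized behind pf->tc_mutex, since mqprio setup rebuilds VSIs and cannot tolerate a concurrent caller. The same shape in portable C, with pthreads standing in for the kernel mutex API (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tc_mutex = PTHREAD_MUTEX_INITIALIZER;

static int setup_tc_mqprio(void)
{
    puts("reconfiguring traffic classes");
    return 0;
}

static int setup_tc(void)
{
    int err;

    pthread_mutex_lock(&tc_mutex);  /* one reconfig at a time */
    err = setup_tc_mqprio();
    pthread_mutex_unlock(&tc_mutex);
    return err;
}

int main(void)
{
    return setup_tc();
}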
8781 struct ice_pf *pf = np->vsi->back; in ice_open() local
8783 if (ice_is_reset_in_progress(pf->state)) { in ice_open()
8804 struct ice_pf *pf = vsi->back; in ice_open_internal() local
8808 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_open_internal()
8822 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_open_internal()
8826 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
8827 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { in ice_open_internal()
8843 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
8872 struct ice_pf *pf = vsi->back; in ice_stop() local
8874 if (ice_is_reset_in_progress(pf->state)) { in ice_stop()
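ice_open() and ice_stop() both begin with the same guard: refuse to bring the interface up or down while a reset is in flight, since the rebuild path owns the queues at that point. A compact model of the guard, with a plain bitmask standing in for pf->state:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define RESET_PENDING 0x1UL     /* stand-in for the ICE_*_REQ state bits */

static bool reset_in_progress(unsigned long state)
{
    return state & RESET_PENDING;
}

static int netdev_open(unsigned long state)
{
    if (reset_in_progress(state)) {
        fprintf(stderr, "can't open device while reset is in progress\n");
        return -EBUSY;
    }
    puts("device opened");
    return 0;
}

int main(void)
{
    netdev_open(RESET_PENDING); /* -EBUSY path */
    netdev_open(0);             /* normal path */
    return 0;
}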