Lines matching refs: smi_info (Linux IPMI system-interface driver, ipmi_si_intf.c)

126 struct smi_info {  struct
147 int (*oem_data_avail_handler)(struct smi_info *smi_info); argument
262 static int try_smi_init(struct smi_info *smi); argument
263 static void cleanup_one_si(struct smi_info *smi_info);
267 void debug_timestamp(struct smi_info *smi_info, char *msg) in debug_timestamp() argument
272 dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n", in debug_timestamp()
276 #define debug_timestamp(smi_info, x) argument
285 static void deliver_recv_msg(struct smi_info *smi_info, in deliver_recv_msg() argument
289 ipmi_smi_msg_received(smi_info->intf, msg); in deliver_recv_msg()
292 static void return_hosed_msg(struct smi_info *smi_info, int cCode) in return_hosed_msg() argument
294 struct ipmi_smi_msg *msg = smi_info->curr_msg; in return_hosed_msg()
306 smi_info->curr_msg = NULL; in return_hosed_msg()
307 deliver_recv_msg(smi_info, msg); in return_hosed_msg()
310 static enum si_sm_result start_next_msg(struct smi_info *smi_info) in start_next_msg() argument
314 if (!smi_info->waiting_msg) { in start_next_msg()
315 smi_info->curr_msg = NULL; in start_next_msg()
320 smi_info->curr_msg = smi_info->waiting_msg; in start_next_msg()
321 smi_info->waiting_msg = NULL; in start_next_msg()
322 debug_timestamp(smi_info, "Start2"); in start_next_msg()
324 0, smi_info); in start_next_msg()
329 err = smi_info->handlers->start_transaction( in start_next_msg()
330 smi_info->si_sm, in start_next_msg()
331 smi_info->curr_msg->data, in start_next_msg()
332 smi_info->curr_msg->data_size); in start_next_msg()
334 return_hosed_msg(smi_info, err); in start_next_msg()
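
The start_next_msg() lines above show the driver's single-slot handoff: the one queued waiting_msg is promoted to curr_msg and fed to the state machine, and a failed start_transaction() is bounced back through return_hosed_msg(). The following is a minimal, standalone sketch of that handoff pattern only; the toy_* names and the stand-in start routine are illustrative, not the kernel's code.

#include <stdio.h>

/* Toy stand-ins for the driver's message and state-machine objects. */
struct toy_msg { const char *data; int rc; };

struct toy_smi {
	struct toy_msg *waiting_msg;   /* at most one queued message */
	struct toy_msg *curr_msg;      /* message currently being processed */
};

/* Stand-in for starting a transaction; fails when the message is empty. */
static int toy_start_transaction(struct toy_msg *m)
{
	return (m->data == NULL) ? -1 : 0;
}

/* Hand a broken message back to its sender with an error code. */
static void toy_return_hosed(struct toy_smi *s, int err)
{
	s->curr_msg->rc = err;
	printf("bounced message with error %d\n", err);
	s->curr_msg = NULL;
}

/* The handoff: promote waiting_msg to curr_msg, start it, bounce on error. */
static void toy_start_next_msg(struct toy_smi *s)
{
	if (!s->waiting_msg) {
		s->curr_msg = NULL;
		return;
	}
	s->curr_msg = s->waiting_msg;
	s->waiting_msg = NULL;
	if (toy_start_transaction(s->curr_msg))
		toy_return_hosed(s, -1);
	else
		printf("started '%s'\n", s->curr_msg->data);
}

int main(void)
{
	struct toy_msg ok = { "get-device-id", 0 };
	struct toy_msg bad = { NULL, 0 };
	struct toy_smi smi = { &ok, NULL };

	toy_start_next_msg(&smi);   /* promoted and started */
	smi.waiting_msg = &bad;
	toy_start_next_msg(&smi);   /* start fails, message is bounced back */
	return 0;
}
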
342 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) in smi_mod_timer() argument
344 if (!smi_info->timer_can_start) in smi_mod_timer()
346 smi_info->last_timeout_jiffies = jiffies; in smi_mod_timer()
347 mod_timer(&smi_info->si_timer, new_val); in smi_mod_timer()
348 smi_info->timer_running = true; in smi_mod_timer()
354 static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, in start_new_msg() argument
357 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); in start_new_msg()
359 if (smi_info->thread) in start_new_msg()
360 wake_up_process(smi_info->thread); in start_new_msg()
362 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); in start_new_msg()
365 static void start_check_enables(struct smi_info *smi_info) in start_check_enables() argument
372 start_new_msg(smi_info, msg, 2); in start_check_enables()
373 smi_info->si_state = SI_CHECKING_ENABLES; in start_check_enables()
376 static void start_clear_flags(struct smi_info *smi_info) in start_clear_flags() argument
385 start_new_msg(smi_info, msg, 3); in start_clear_flags()
386 smi_info->si_state = SI_CLEARING_FLAGS; in start_clear_flags()
389 static void start_getting_msg_queue(struct smi_info *smi_info) in start_getting_msg_queue() argument
391 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); in start_getting_msg_queue()
392 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; in start_getting_msg_queue()
393 smi_info->curr_msg->data_size = 2; in start_getting_msg_queue()
395 start_new_msg(smi_info, smi_info->curr_msg->data, in start_getting_msg_queue()
396 smi_info->curr_msg->data_size); in start_getting_msg_queue()
397 smi_info->si_state = SI_GETTING_MESSAGES; in start_getting_msg_queue()
400 static void start_getting_events(struct smi_info *smi_info) in start_getting_events() argument
402 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); in start_getting_events()
403 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; in start_getting_events()
404 smi_info->curr_msg->data_size = 2; in start_getting_events()
406 start_new_msg(smi_info, smi_info->curr_msg->data, in start_getting_events()
407 smi_info->curr_msg->data_size); in start_getting_events()
408 smi_info->si_state = SI_GETTING_EVENTS; in start_getting_events()
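
start_getting_msg_queue() and start_getting_events() both build the same two-byte request: the application netfn shifted into the top bits of byte 0 and the command code in byte 1. Below is a small standalone sketch of that encoding; the TOY_* constants are modeled on the standard IPMI App netfn/commands but should be treated as illustrative here.

#include <stdio.h>

/* Values modeled on the standard IPMI App netfn/commands (illustrative). */
#define TOY_NETFN_APP_REQUEST		0x06
#define TOY_GET_MSG_CMD			0x33
#define TOY_READ_EVENT_MSG_BUFFER_CMD	0x35

/* Encode the two-byte request shape used by the start_getting_* helpers:
 * byte 0 carries the netfn in its upper six bits, byte 1 the command. */
static unsigned int encode_req(unsigned char *buf,
			       unsigned char netfn, unsigned char cmd)
{
	buf[0] = (unsigned char)(netfn << 2);
	buf[1] = cmd;
	return 2;   /* data_size */
}

int main(void)
{
	unsigned char req[2];
	unsigned int n;

	n = encode_req(req, TOY_NETFN_APP_REQUEST, TOY_GET_MSG_CMD);
	printf("len=%u bytes=%02x %02x\n", n, req[0], req[1]);
	n = encode_req(req, TOY_NETFN_APP_REQUEST, TOY_READ_EVENT_MSG_BUFFER_CMD);
	printf("len=%u bytes=%02x %02x\n", n, req[0], req[1]);
	return 0;
}
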
420 static inline bool disable_si_irq(struct smi_info *smi_info) in disable_si_irq() argument
422 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { in disable_si_irq()
423 smi_info->interrupt_disabled = true; in disable_si_irq()
424 start_check_enables(smi_info); in disable_si_irq()
430 static inline bool enable_si_irq(struct smi_info *smi_info) in enable_si_irq() argument
432 if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) { in enable_si_irq()
433 smi_info->interrupt_disabled = false; in enable_si_irq()
434 start_check_enables(smi_info); in enable_si_irq()
446 static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) in alloc_msg_handle_irq() argument
452 if (!disable_si_irq(smi_info)) in alloc_msg_handle_irq()
453 smi_info->si_state = SI_NORMAL; in alloc_msg_handle_irq()
454 } else if (enable_si_irq(smi_info)) { in alloc_msg_handle_irq()
461 static void handle_flags(struct smi_info *smi_info) in handle_flags() argument
464 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { in handle_flags()
466 smi_inc_stat(smi_info, watchdog_pretimeouts); in handle_flags()
468 start_clear_flags(smi_info); in handle_flags()
469 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; in handle_flags()
470 ipmi_smi_watchdog_pretimeout(smi_info->intf); in handle_flags()
471 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { in handle_flags()
473 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); in handle_flags()
474 if (!smi_info->curr_msg) in handle_flags()
477 start_getting_msg_queue(smi_info); in handle_flags()
478 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { in handle_flags()
480 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); in handle_flags()
481 if (!smi_info->curr_msg) in handle_flags()
484 start_getting_events(smi_info); in handle_flags()
485 } else if (smi_info->msg_flags & OEM_DATA_AVAIL && in handle_flags()
486 smi_info->oem_data_avail_handler) { in handle_flags()
487 if (smi_info->oem_data_avail_handler(smi_info)) in handle_flags()
490 smi_info->si_state = SI_NORMAL; in handle_flags()
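
The handle_flags() lines encode a fixed priority among the BMC flag bits: watchdog pre-timeout first, then the receive-message queue, then the event buffer, then OEM data, otherwise back to SI_NORMAL. A compact standalone sketch of that priority dispatch follows; the flag values and messages are illustrative stand-ins, not the driver's definitions.

#include <stdio.h>

/* Illustrative flag bits; the real values live in the driver headers. */
#define TOY_WDT_PRE_TIMEOUT	0x08
#define TOY_RECV_MSG_AVAIL	0x01
#define TOY_EVENT_BUF_FULL	0x02
#define TOY_OEM_DATA_AVAIL	0x20

static void dispatch_flags(unsigned char flags)
{
	/* Highest-priority condition wins; the remaining bits are handled
	 * on a later pass, after the chosen transaction completes. */
	if (flags & TOY_WDT_PRE_TIMEOUT)
		printf("watchdog pre-timeout: clear flags, notify watchdog\n");
	else if (flags & TOY_RECV_MSG_AVAIL)
		printf("message available: fetch the receive queue\n");
	else if (flags & TOY_EVENT_BUF_FULL)
		printf("event buffer full: read event messages\n");
	else if (flags & TOY_OEM_DATA_AVAIL)
		printf("OEM data: hand off to the OEM handler\n");
	else
		printf("nothing pending: back to normal state\n");
}

int main(void)
{
	dispatch_flags(TOY_RECV_MSG_AVAIL | TOY_EVENT_BUF_FULL);
	dispatch_flags(0);
	return 0;
}
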
499 static u8 current_global_enables(struct smi_info *smi_info, u8 base, in current_global_enables() argument
504 if (smi_info->supports_event_msg_buff) in current_global_enables()
507 if (((smi_info->io.irq && !smi_info->interrupt_disabled) || in current_global_enables()
508 smi_info->cannot_disable_irq) && in current_global_enables()
509 !smi_info->irq_enable_broken) in current_global_enables()
512 if (smi_info->supports_event_msg_buff && in current_global_enables()
513 smi_info->io.irq && !smi_info->interrupt_disabled && in current_global_enables()
514 !smi_info->irq_enable_broken) in current_global_enables()
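
current_global_enables() derives the global-enables byte from driver state: the event-message-buffer bit when the BMC supports it, and the receive-message-interrupt bit only when an IRQ is present, enabled, and not known to be broken. The sketch below is a simplified standalone version of that computation (it omits the extra event-message-interrupt clause); the struct and bit names are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative enable bits (modeled on the IPMI global-enables byte). */
#define EN_RCV_MSG_INTR   0x01
#define EN_EVT_MSG_BUFF   0x04

struct toy_caps {
	bool supports_event_msg_buff;
	bool have_irq;
	bool interrupt_disabled;
	bool cannot_disable_irq;
	bool irq_enable_broken;
};

/* Compute the enables byte and report whether interrupts end up on. */
static unsigned char toy_global_enables(const struct toy_caps *c,
					unsigned char base, bool *irq_on)
{
	unsigned char enables = base;

	if (c->supports_event_msg_buff)
		enables |= EN_EVT_MSG_BUFF;

	if (((c->have_irq && !c->interrupt_disabled) || c->cannot_disable_irq) &&
	    !c->irq_enable_broken)
		enables |= EN_RCV_MSG_INTR;

	*irq_on = (enables & EN_RCV_MSG_INTR) != 0;
	return enables;
}

int main(void)
{
	struct toy_caps c = { true, true, false, false, false };
	bool irq_on;
	unsigned char e = toy_global_enables(&c, 0, &irq_on);

	printf("enables=0x%02x irq_on=%d\n", e, (int)irq_on);
	return 0;
}
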
522 static void check_bt_irq(struct smi_info *smi_info, bool irq_on) in check_bt_irq() argument
524 u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG); in check_bt_irq()
532 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, in check_bt_irq()
535 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0); in check_bt_irq()
538 static void handle_transaction_done(struct smi_info *smi_info) in handle_transaction_done() argument
542 debug_timestamp(smi_info, "Done"); in handle_transaction_done()
543 switch (smi_info->si_state) { in handle_transaction_done()
545 if (!smi_info->curr_msg) in handle_transaction_done()
548 smi_info->curr_msg->rsp_size in handle_transaction_done()
549 = smi_info->handlers->get_result( in handle_transaction_done()
550 smi_info->si_sm, in handle_transaction_done()
551 smi_info->curr_msg->rsp, in handle_transaction_done()
559 msg = smi_info->curr_msg; in handle_transaction_done()
560 smi_info->curr_msg = NULL; in handle_transaction_done()
561 deliver_recv_msg(smi_info, msg); in handle_transaction_done()
570 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); in handle_transaction_done()
573 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
579 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
581 smi_info->msg_flags = msg[3]; in handle_transaction_done()
582 handle_flags(smi_info); in handle_transaction_done()
592 smi_info->handlers->get_result(smi_info->si_sm, msg, 3); in handle_transaction_done()
595 dev_warn_ratelimited(smi_info->io.dev, in handle_transaction_done()
598 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
604 smi_info->curr_msg->rsp_size in handle_transaction_done()
605 = smi_info->handlers->get_result( in handle_transaction_done()
606 smi_info->si_sm, in handle_transaction_done()
607 smi_info->curr_msg->rsp, in handle_transaction_done()
615 msg = smi_info->curr_msg; in handle_transaction_done()
616 smi_info->curr_msg = NULL; in handle_transaction_done()
622 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; in handle_transaction_done()
623 handle_flags(smi_info); in handle_transaction_done()
625 smi_inc_stat(smi_info, events); in handle_transaction_done()
633 handle_flags(smi_info); in handle_transaction_done()
635 deliver_recv_msg(smi_info, msg); in handle_transaction_done()
642 smi_info->curr_msg->rsp_size in handle_transaction_done()
643 = smi_info->handlers->get_result( in handle_transaction_done()
644 smi_info->si_sm, in handle_transaction_done()
645 smi_info->curr_msg->rsp, in handle_transaction_done()
653 msg = smi_info->curr_msg; in handle_transaction_done()
654 smi_info->curr_msg = NULL; in handle_transaction_done()
660 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; in handle_transaction_done()
661 handle_flags(smi_info); in handle_transaction_done()
663 smi_inc_stat(smi_info, incoming_messages); in handle_transaction_done()
671 handle_flags(smi_info); in handle_transaction_done()
673 deliver_recv_msg(smi_info, msg); in handle_transaction_done()
685 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); in handle_transaction_done()
687 dev_warn_ratelimited(smi_info->io.dev, in handle_transaction_done()
691 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
694 enables = current_global_enables(smi_info, 0, &irq_on); in handle_transaction_done()
695 if (smi_info->io.si_type == SI_BT) in handle_transaction_done()
697 check_bt_irq(smi_info, irq_on); in handle_transaction_done()
703 smi_info->handlers->start_transaction( in handle_transaction_done()
704 smi_info->si_sm, msg, 3); in handle_transaction_done()
705 smi_info->si_state = SI_SETTING_ENABLES; in handle_transaction_done()
706 } else if (smi_info->supports_event_msg_buff) { in handle_transaction_done()
707 smi_info->curr_msg = ipmi_alloc_smi_msg(); in handle_transaction_done()
708 if (!smi_info->curr_msg) { in handle_transaction_done()
709 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
712 start_getting_events(smi_info); in handle_transaction_done()
714 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
723 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); in handle_transaction_done()
725 dev_warn_ratelimited(smi_info->io.dev, in handle_transaction_done()
729 if (smi_info->supports_event_msg_buff) { in handle_transaction_done()
730 smi_info->curr_msg = ipmi_alloc_smi_msg(); in handle_transaction_done()
731 if (!smi_info->curr_msg) { in handle_transaction_done()
732 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
735 start_getting_events(smi_info); in handle_transaction_done()
737 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
749 static enum si_sm_result smi_event_handler(struct smi_info *smi_info, in smi_event_handler() argument
763 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); in smi_event_handler()
766 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); in smi_event_handler()
769 smi_inc_stat(smi_info, complete_transactions); in smi_event_handler()
771 handle_transaction_done(smi_info); in smi_event_handler()
774 smi_inc_stat(smi_info, hosed_count); in smi_event_handler()
780 smi_info->si_state = SI_NORMAL; in smi_event_handler()
781 if (smi_info->curr_msg != NULL) { in smi_event_handler()
787 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); in smi_event_handler()
796 if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) { in smi_event_handler()
799 if (smi_info->si_state != SI_NORMAL) { in smi_event_handler()
804 smi_info->got_attn = true; in smi_event_handler()
806 smi_info->got_attn = false; in smi_event_handler()
807 smi_inc_stat(smi_info, attentions); in smi_event_handler()
819 start_new_msg(smi_info, msg, 2); in smi_event_handler()
820 smi_info->si_state = SI_GETTING_FLAGS; in smi_event_handler()
827 smi_inc_stat(smi_info, idles); in smi_event_handler()
829 si_sm_result = start_next_msg(smi_info); in smi_event_handler()
835 && (atomic_read(&smi_info->req_events))) { in smi_event_handler()
840 atomic_set(&smi_info->req_events, 0); in smi_event_handler()
848 if (smi_info->supports_event_msg_buff || smi_info->io.irq) { in smi_event_handler()
849 start_check_enables(smi_info); in smi_event_handler()
851 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); in smi_event_handler()
852 if (!smi_info->curr_msg) in smi_event_handler()
855 start_getting_events(smi_info); in smi_event_handler()
860 if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { in smi_event_handler()
862 if (del_timer(&smi_info->si_timer)) in smi_event_handler()
863 smi_info->timer_running = false; in smi_event_handler()
870 static void check_start_timer_thread(struct smi_info *smi_info) in check_start_timer_thread() argument
872 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { in check_start_timer_thread()
873 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); in check_start_timer_thread()
875 if (smi_info->thread) in check_start_timer_thread()
876 wake_up_process(smi_info->thread); in check_start_timer_thread()
878 start_next_msg(smi_info); in check_start_timer_thread()
879 smi_event_handler(smi_info, 0); in check_start_timer_thread()
885 struct smi_info *smi_info = send_info; in flush_messages() local
892 result = smi_event_handler(smi_info, 0); in flush_messages()
895 result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); in flush_messages()
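
flush_messages() drains pending work by calling the event handler in a loop until the state machine reports idle. A toy version of that drain loop, with a stand-in event handler and result codes (all names illustrative):

#include <stdio.h>

enum toy_result { TOY_BUSY, TOY_IDLE };

/* Stand-in for the state-machine event handler: pretends each call
 * retires one unit of pending work. */
static enum toy_result toy_event_handler(int *pending)
{
	if (*pending > 0) {
		(*pending)--;
		return TOY_BUSY;
	}
	return TOY_IDLE;
}

/* Drain loop in the style of flush_messages(): keep calling the handler
 * until it reports idle. */
static void toy_flush(int *pending)
{
	enum toy_result r = toy_event_handler(pending);

	while (r != TOY_IDLE)
		r = toy_event_handler(pending);
}

int main(void)
{
	int pending = 3;

	toy_flush(&pending);
	printf("pending after flush: %d\n", pending);
	return 0;
}
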
902 struct smi_info *smi_info = send_info; in sender() local
905 debug_timestamp(smi_info, "Enqueue"); in sender()
907 if (smi_info->run_to_completion) { in sender()
912 smi_info->waiting_msg = msg; in sender()
916 spin_lock_irqsave(&smi_info->si_lock, flags); in sender()
924 BUG_ON(smi_info->waiting_msg); in sender()
925 smi_info->waiting_msg = msg; in sender()
926 check_start_timer_thread(smi_info); in sender()
927 spin_unlock_irqrestore(&smi_info->si_lock, flags); in sender()
932 struct smi_info *smi_info = send_info; in set_run_to_completion() local
934 smi_info->run_to_completion = i_run_to_completion; in set_run_to_completion()
936 flush_messages(smi_info); in set_run_to_completion()
945 const struct smi_info *smi_info, in ipmi_thread_busy_wait() argument
950 if (smi_info->si_num < num_max_busy_us) in ipmi_thread_busy_wait()
951 max_busy_us = kipmid_max_busy_us[smi_info->si_num]; in ipmi_thread_busy_wait()
977 struct smi_info *smi_info = data; in ipmi_thread() local
986 spin_lock_irqsave(&(smi_info->si_lock), flags); in ipmi_thread()
987 smi_result = smi_event_handler(smi_info, 0); in ipmi_thread()
996 if (smi_result != SI_SM_IDLE && !smi_info->timer_running) in ipmi_thread()
997 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); in ipmi_thread()
999 spin_unlock_irqrestore(&(smi_info->si_lock), flags); in ipmi_thread()
1000 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, in ipmi_thread()
1011 if (smi_info->in_maintenance_mode) in ipmi_thread()
1016 if (atomic_read(&smi_info->need_watch)) { in ipmi_thread()
1033 struct smi_info *smi_info = send_info; in poll() local
1035 bool run_to_completion = smi_info->run_to_completion; in poll()
1043 spin_lock_irqsave(&smi_info->si_lock, flags); in poll()
1044 smi_event_handler(smi_info, 10); in poll()
1046 spin_unlock_irqrestore(&smi_info->si_lock, flags); in poll()
1051 struct smi_info *smi_info = send_info; in request_events() local
1053 if (!smi_info->has_event_buffer) in request_events()
1056 atomic_set(&smi_info->req_events, 1); in request_events()
1061 struct smi_info *smi_info = send_info; in set_need_watch() local
1067 atomic_set(&smi_info->need_watch, enable); in set_need_watch()
1068 spin_lock_irqsave(&smi_info->si_lock, flags); in set_need_watch()
1069 check_start_timer_thread(smi_info); in set_need_watch()
1070 spin_unlock_irqrestore(&smi_info->si_lock, flags); in set_need_watch()
1075 struct smi_info *smi_info = from_timer(smi_info, t, si_timer); in smi_timeout() local
1082 spin_lock_irqsave(&(smi_info->si_lock), flags); in smi_timeout()
1083 debug_timestamp(smi_info, "Timer"); in smi_timeout()
1086 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) in smi_timeout()
1088 smi_result = smi_event_handler(smi_info, time_diff); in smi_timeout()
1090 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { in smi_timeout()
1093 smi_inc_stat(smi_info, long_timeouts); in smi_timeout()
1102 smi_inc_stat(smi_info, short_timeouts); in smi_timeout()
1105 smi_inc_stat(smi_info, long_timeouts); in smi_timeout()
1111 smi_mod_timer(smi_info, timeout); in smi_timeout()
1113 smi_info->timer_running = false; in smi_timeout()
1114 spin_unlock_irqrestore(&(smi_info->si_lock), flags); in smi_timeout()
1119 struct smi_info *smi_info = data; in ipmi_si_irq_handler() local
1122 if (smi_info->io.si_type == SI_BT) in ipmi_si_irq_handler()
1124 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, in ipmi_si_irq_handler()
1128 spin_lock_irqsave(&(smi_info->si_lock), flags); in ipmi_si_irq_handler()
1130 smi_inc_stat(smi_info, interrupts); in ipmi_si_irq_handler()
1132 debug_timestamp(smi_info, "Interrupt"); in ipmi_si_irq_handler()
1134 smi_event_handler(smi_info, 0); in ipmi_si_irq_handler()
1135 spin_unlock_irqrestore(&(smi_info->si_lock), flags); in ipmi_si_irq_handler()
1142 struct smi_info *new_smi = send_info; in smi_start_processing()
1186 struct smi_info *smi = send_info; in get_smi_info()
1198 struct smi_info *smi_info = send_info; in set_maintenance_mode() local
1201 atomic_set(&smi_info->req_events, 0); in set_maintenance_mode()
1202 smi_info->in_maintenance_mode = enable; in set_maintenance_mode()
1282 static int wait_for_msg_done(struct smi_info *smi_info) in wait_for_msg_done() argument
1286 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); in wait_for_msg_done()
1291 smi_result = smi_info->handlers->event( in wait_for_msg_done()
1292 smi_info->si_sm, jiffies_to_usecs(1)); in wait_for_msg_done()
1294 smi_result = smi_info->handlers->event( in wait_for_msg_done()
1295 smi_info->si_sm, 0); in wait_for_msg_done()
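
wait_for_msg_done() polls the state machine, yielding for roughly a tick between calls, until the transaction leaves the call-again states; the real code bounds the wait with jiffies and treats a hosed result as failure. Below is a rough user-space sketch of that bounded polling loop with a stand-in state machine; nothing here is the driver's actual API.

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

enum toy_sm { TOY_CALL_AGAIN, TOY_DONE, TOY_HOSED };

/* Stand-in state machine: reports done after a fixed number of polls. */
static enum toy_sm toy_event(int *polls_left)
{
	return (--(*polls_left) > 0) ? TOY_CALL_AGAIN : TOY_DONE;
}

/* Poll until the state machine is done or hosed, sleeping ~1 ms per round,
 * giving up after max_polls rounds (the driver bounds this with jiffies). */
static int toy_wait_for_msg_done(int *polls_left, int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		enum toy_sm r = toy_event(polls_left);

		if (r == TOY_DONE)
			return 0;
		if (r == TOY_HOSED)
			return -1;
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
	return -1;   /* timed out */
}

int main(void)
{
	int polls = 5;

	printf("rv=%d\n", toy_wait_for_msg_done(&polls, 100));
	return 0;
}
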
1309 static int try_get_dev_id(struct smi_info *smi_info) in try_get_dev_id() argument
1329 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); in try_get_dev_id()
1331 rv = wait_for_msg_done(smi_info); in try_get_dev_id()
1335 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in try_get_dev_id()
1340 resp + 2, resp_len - 2, &smi_info->device_id); in try_get_dev_id()
1347 dev_warn_ratelimited(smi_info->io.dev, in try_get_dev_id()
1359 static int get_global_enables(struct smi_info *smi_info, u8 *enables) in get_global_enables() argument
1372 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); in get_global_enables()
1374 rv = wait_for_msg_done(smi_info); in get_global_enables()
1376 dev_warn(smi_info->io.dev, in get_global_enables()
1382 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in get_global_enables()
1389 dev_warn(smi_info->io.dev, in get_global_enables()
1406 static int set_global_enables(struct smi_info *smi_info, u8 enables) in set_global_enables() argument
1420 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); in set_global_enables()
1422 rv = wait_for_msg_done(smi_info); in set_global_enables()
1424 dev_warn(smi_info->io.dev, in set_global_enables()
1430 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in set_global_enables()
1436 dev_warn(smi_info->io.dev, in set_global_enables()
1456 static void check_clr_rcv_irq(struct smi_info *smi_info) in check_clr_rcv_irq() argument
1461 rv = get_global_enables(smi_info, &enables); in check_clr_rcv_irq()
1468 rv = set_global_enables(smi_info, enables); in check_clr_rcv_irq()
1472 dev_err(smi_info->io.dev, in check_clr_rcv_irq()
1482 dev_warn(smi_info->io.dev, in check_clr_rcv_irq()
1484 smi_info->cannot_disable_irq = true; in check_clr_rcv_irq()
1493 static void check_set_rcv_irq(struct smi_info *smi_info) in check_set_rcv_irq() argument
1498 if (!smi_info->io.irq) in check_set_rcv_irq()
1501 rv = get_global_enables(smi_info, &enables); in check_set_rcv_irq()
1504 rv = set_global_enables(smi_info, enables); in check_set_rcv_irq()
1508 dev_err(smi_info->io.dev, in check_set_rcv_irq()
1518 dev_warn(smi_info->io.dev, in check_set_rcv_irq()
1520 smi_info->cannot_disable_irq = true; in check_set_rcv_irq()
1521 smi_info->irq_enable_broken = true; in check_set_rcv_irq()
1525 static int try_enable_event_buffer(struct smi_info *smi_info) in try_enable_event_buffer() argument
1538 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); in try_enable_event_buffer()
1540 rv = wait_for_msg_done(smi_info); in try_enable_event_buffer()
1546 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in try_enable_event_buffer()
1560 smi_info->supports_event_msg_buff = true; in try_enable_event_buffer()
1567 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); in try_enable_event_buffer()
1569 rv = wait_for_msg_done(smi_info); in try_enable_event_buffer()
1575 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in try_enable_event_buffer()
1593 smi_info->supports_event_msg_buff = true; in try_enable_event_buffer()
1605 struct smi_info *smi_info = dev_get_drvdata(dev); \
1607 return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
1615 struct smi_info *smi_info = dev_get_drvdata(dev); in type_show() local
1617 return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]); in type_show()
1625 struct smi_info *smi_info = dev_get_drvdata(dev); in interrupts_enabled_show() local
1626 int enabled = smi_info->io.irq && !smi_info->interrupt_disabled; in interrupts_enabled_show()
1648 struct smi_info *smi_info = dev_get_drvdata(dev); in params_show() local
1652 si_to_str[smi_info->io.si_type], in params_show()
1653 addr_space_to_str[smi_info->io.addr_space], in params_show()
1654 smi_info->io.addr_data, in params_show()
1655 smi_info->io.regspacing, in params_show()
1656 smi_info->io.regsize, in params_show()
1657 smi_info->io.regshift, in params_show()
1658 smi_info->io.irq, in params_show()
1659 smi_info->io.slave_addr); in params_show()
1692 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) in oem_data_avail_to_receive_msg_avail() argument
1694 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | in oem_data_avail_to_receive_msg_avail()
1728 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) in setup_dell_poweredge_oem_data_handler() argument
1730 struct ipmi_device_id *id = &smi_info->device_id; in setup_dell_poweredge_oem_data_handler()
1735 smi_info->oem_data_avail_handler = in setup_dell_poweredge_oem_data_handler()
1740 smi_info->oem_data_avail_handler = in setup_dell_poweredge_oem_data_handler()
1747 static void return_hosed_msg_badsize(struct smi_info *smi_info) in return_hosed_msg_badsize() argument
1749 struct ipmi_smi_msg *msg = smi_info->curr_msg; in return_hosed_msg_badsize()
1756 smi_info->curr_msg = NULL; in return_hosed_msg_badsize()
1757 deliver_recv_msg(smi_info, msg); in return_hosed_msg_badsize()
1777 struct smi_info *smi_info = in; in dell_poweredge_bt_xaction_handler() local
1778 unsigned char *data = smi_info->curr_msg->data; in dell_poweredge_bt_xaction_handler()
1779 unsigned int size = smi_info->curr_msg->data_size; in dell_poweredge_bt_xaction_handler()
1784 return_hosed_msg_badsize(smi_info); in dell_poweredge_bt_xaction_handler()
1802 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) in setup_dell_poweredge_bt_xaction_handler() argument
1804 struct ipmi_device_id *id = &smi_info->device_id; in setup_dell_poweredge_bt_xaction_handler()
1806 smi_info->io.si_type == SI_BT) in setup_dell_poweredge_bt_xaction_handler()
1818 static void setup_oem_data_handler(struct smi_info *smi_info) in setup_oem_data_handler() argument
1820 setup_dell_poweredge_oem_data_handler(smi_info); in setup_oem_data_handler()
1823 static void setup_xaction_handlers(struct smi_info *smi_info) in setup_xaction_handlers() argument
1825 setup_dell_poweredge_bt_xaction_handler(smi_info); in setup_xaction_handlers()
1828 static void check_for_broken_irqs(struct smi_info *smi_info) in check_for_broken_irqs() argument
1830 check_clr_rcv_irq(smi_info); in check_for_broken_irqs()
1831 check_set_rcv_irq(smi_info); in check_for_broken_irqs()
1834 static inline void stop_timer_and_thread(struct smi_info *smi_info) in stop_timer_and_thread() argument
1836 if (smi_info->thread != NULL) { in stop_timer_and_thread()
1837 kthread_stop(smi_info->thread); in stop_timer_and_thread()
1838 smi_info->thread = NULL; in stop_timer_and_thread()
1841 smi_info->timer_can_start = false; in stop_timer_and_thread()
1842 del_timer_sync(&smi_info->si_timer); in stop_timer_and_thread()
1845 static struct smi_info *find_dup_si(struct smi_info *info) in find_dup_si()
1847 struct smi_info *e; in find_dup_si()
1870 struct smi_info *new_smi, *dup; in ipmi_si_add_smi()
1940 static int try_smi_init(struct smi_info *new_smi) in try_smi_init()
2090 struct smi_info *e; in init_ipmi_si()
2158 struct smi_info *smi_info = send_info; in shutdown_smi() local
2160 if (smi_info->dev_group_added) { in shutdown_smi()
2161 device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group); in shutdown_smi()
2162 smi_info->dev_group_added = false; in shutdown_smi()
2164 if (smi_info->io.dev) in shutdown_smi()
2165 dev_set_drvdata(smi_info->io.dev, NULL); in shutdown_smi()
2171 smi_info->interrupt_disabled = true; in shutdown_smi()
2172 if (smi_info->io.irq_cleanup) { in shutdown_smi()
2173 smi_info->io.irq_cleanup(&smi_info->io); in shutdown_smi()
2174 smi_info->io.irq_cleanup = NULL; in shutdown_smi()
2176 stop_timer_and_thread(smi_info); in shutdown_smi()
2190 while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { in shutdown_smi()
2191 poll(smi_info); in shutdown_smi()
2194 if (smi_info->handlers) in shutdown_smi()
2195 disable_si_irq(smi_info); in shutdown_smi()
2196 while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { in shutdown_smi()
2197 poll(smi_info); in shutdown_smi()
2200 if (smi_info->handlers) in shutdown_smi()
2201 smi_info->handlers->cleanup(smi_info->si_sm); in shutdown_smi()
2203 if (smi_info->io.io_cleanup) { in shutdown_smi()
2204 smi_info->io.io_cleanup(&smi_info->io); in shutdown_smi()
2205 smi_info->io.io_cleanup = NULL; in shutdown_smi()
2208 kfree(smi_info->si_sm); in shutdown_smi()
2209 smi_info->si_sm = NULL; in shutdown_smi()
2211 smi_info->intf = NULL; in shutdown_smi()
2218 static void cleanup_one_si(struct smi_info *smi_info) in cleanup_one_si() argument
2220 if (!smi_info) in cleanup_one_si()
2223 list_del(&smi_info->link); in cleanup_one_si()
2224 ipmi_unregister_smi(smi_info->intf); in cleanup_one_si()
2225 kfree(smi_info); in cleanup_one_si()
2230 struct smi_info *e; in ipmi_si_remove_by_dev()
2246 struct smi_info *e, *tmp_e; in ipmi_si_remove_by_data()
2267 struct smi_info *e, *tmp_e; in cleanup_ipmi_si()