Lines matching refs: devfreq — symbol cross-reference of the devfreq core (drivers/devfreq/devfreq.c). Each entry below gives the source line number, the matching source line, the enclosing function, and, where the cross-referencer resolved it, the role of the matched symbol on that line (argument, local, member).

67 static struct devfreq *find_device_devfreq(struct device *dev)  in find_device_devfreq()
69 struct devfreq *tmp_devfreq; in find_device_devfreq()
86 static unsigned long find_available_min_freq(struct devfreq *devfreq) in find_available_min_freq() argument
91 opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq); in find_available_min_freq()
100 static unsigned long find_available_max_freq(struct devfreq *devfreq) in find_available_max_freq() argument
105 opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq); in find_available_max_freq()
122 void devfreq_get_freq_range(struct devfreq *devfreq, in devfreq_get_freq_range() argument
126 unsigned long *freq_table = devfreq->freq_table; in devfreq_get_freq_range()
129 lockdep_assert_held(&devfreq->lock); in devfreq_get_freq_range()
136 if (freq_table[0] < freq_table[devfreq->max_state - 1]) { in devfreq_get_freq_range()
138 *max_freq = freq_table[devfreq->max_state - 1]; in devfreq_get_freq_range()
140 *min_freq = freq_table[devfreq->max_state - 1]; in devfreq_get_freq_range()
145 qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent, in devfreq_get_freq_range()
147 qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent, in devfreq_get_freq_range()
155 *min_freq = max(*min_freq, devfreq->scaling_min_freq); in devfreq_get_freq_range()
156 *max_freq = min(*max_freq, devfreq->scaling_max_freq); in devfreq_get_freq_range()
168 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq) in devfreq_get_freq_level() argument
172 for (lev = 0; lev < devfreq->max_state; lev++) in devfreq_get_freq_level()
173 if (freq == devfreq->freq_table[lev]) in devfreq_get_freq_level()
179 static int set_freq_table(struct devfreq *devfreq) in set_freq_table() argument
186 count = dev_pm_opp_get_opp_count(devfreq->dev.parent); in set_freq_table()
190 devfreq->max_state = count; in set_freq_table()
191 devfreq->freq_table = devm_kcalloc(devfreq->dev.parent, in set_freq_table()
192 devfreq->max_state, in set_freq_table()
193 sizeof(*devfreq->freq_table), in set_freq_table()
195 if (!devfreq->freq_table) in set_freq_table()
198 for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) { in set_freq_table()
199 opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq); in set_freq_table()
201 devm_kfree(devfreq->dev.parent, devfreq->freq_table); in set_freq_table()
205 devfreq->freq_table[i] = freq; in set_freq_table()
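
The set_freq_table() entries above show the usual way a devfreq frequency table is derived from the OPP library when the driver does not supply one. A minimal sketch of that pattern, assuming only the standard dev_pm_opp_* helpers (the function name and the caller-provided table are illustrative, not this file's code):

#include <linux/err.h>
#include <linux/pm_opp.h>

/* Walk the OPP table in ascending order: dev_pm_opp_find_freq_ceil() rounds
 * 'freq' up to the next available operating point and updates it in place,
 * so bumping 'freq' each iteration steps through every OPP exactly once. */
static int example_build_freq_table(struct device *dev,
                                    unsigned long *freq_table,
                                    unsigned int max_state)
{
        unsigned long freq = 0;
        unsigned int i;

        for (i = 0; i < max_state; i++, freq++) {
                struct dev_pm_opp *opp;

                opp = dev_pm_opp_find_freq_ceil(dev, &freq);
                if (IS_ERR(opp))
                        return PTR_ERR(opp);

                dev_pm_opp_put(opp);    /* drop the reference taken above */
                freq_table[i] = freq;   /* 'freq' now holds the OPP rate */
        }

        return 0;
}
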
216 int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) in devfreq_update_status() argument
221 lockdep_assert_held(&devfreq->lock); in devfreq_update_status()
225 if (!devfreq->previous_freq) in devfreq_update_status()
228 prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq); in devfreq_update_status()
234 devfreq->stats.time_in_state[prev_lev] += in devfreq_update_status()
235 cur_time - devfreq->stats.last_update; in devfreq_update_status()
237 lev = devfreq_get_freq_level(devfreq, freq); in devfreq_update_status()
244 devfreq->stats.trans_table[ in devfreq_update_status()
245 (prev_lev * devfreq->max_state) + lev]++; in devfreq_update_status()
246 devfreq->stats.total_trans++; in devfreq_update_status()
250 devfreq->stats.last_update = cur_time; in devfreq_update_status()
324 static int devfreq_notify_transition(struct devfreq *devfreq, in devfreq_notify_transition() argument
327 if (!devfreq) in devfreq_notify_transition()
332 srcu_notifier_call_chain(&devfreq->transition_notifier_list, in devfreq_notify_transition()
337 srcu_notifier_call_chain(&devfreq->transition_notifier_list, in devfreq_notify_transition()
347 static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq, in devfreq_set_target() argument
354 if (devfreq->profile->get_cur_freq) in devfreq_set_target()
355 devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq); in devfreq_set_target()
357 cur_freq = devfreq->previous_freq; in devfreq_set_target()
361 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE); in devfreq_set_target()
363 err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags); in devfreq_set_target()
366 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); in devfreq_set_target()
376 trace_devfreq_frequency(devfreq, new_freq, cur_freq); in devfreq_set_target()
379 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); in devfreq_set_target()
381 if (devfreq_update_status(devfreq, new_freq)) in devfreq_set_target()
382 dev_warn(&devfreq->dev, in devfreq_set_target()
385 devfreq->previous_freq = new_freq; in devfreq_set_target()
387 if (devfreq->suspend_freq) in devfreq_set_target()
388 devfreq->resume_freq = new_freq; in devfreq_set_target()
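
devfreq_set_target() brackets the driver's profile->target() callback with DEVFREQ_PRECHANGE/DEVFREQ_POSTCHANGE notifications and then updates the statistics. A sketch of what the driver-side callback conventionally looks like (OPP-based clock handling; the function name is hypothetical):

#include <linux/devfreq.h>
#include <linux/pm_opp.h>

/* Hypothetical .target() callback, invoked from devfreq_set_target(). */
static int example_target(struct device *dev, unsigned long *freq, u32 flags)
{
        struct dev_pm_opp *opp;

        /* Let the OPP library round '*freq' to a supported operating point,
         * honouring the DEVFREQ_FLAG_LEAST_UPPER_BOUND hint in 'flags'. */
        opp = devfreq_recommended_opp(dev, freq, flags);
        if (IS_ERR(opp))
                return PTR_ERR(opp);
        dev_pm_opp_put(opp);

        /* Program clocks/regulators for the chosen rate. */
        return dev_pm_opp_set_rate(dev, *freq);
}
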
403 int devfreq_update_target(struct devfreq *devfreq, unsigned long freq) in devfreq_update_target() argument
409 lockdep_assert_held(&devfreq->lock); in devfreq_update_target()
411 if (!devfreq->governor) in devfreq_update_target()
415 err = devfreq->governor->get_target_freq(devfreq, &freq); in devfreq_update_target()
418 devfreq_get_freq_range(devfreq, &min_freq, &max_freq); in devfreq_update_target()
429 return devfreq_set_target(devfreq, freq, flags); in devfreq_update_target()
442 int update_devfreq(struct devfreq *devfreq) in update_devfreq() argument
444 return devfreq_update_target(devfreq, 0L); in update_devfreq()
456 struct devfreq *devfreq = container_of(work, in devfreq_monitor() local
457 struct devfreq, work.work); in devfreq_monitor()
459 mutex_lock(&devfreq->lock); in devfreq_monitor()
460 err = update_devfreq(devfreq); in devfreq_monitor()
462 dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err); in devfreq_monitor()
464 if (devfreq->stop_polling) in devfreq_monitor()
467 queue_delayed_work(devfreq_wq, &devfreq->work, in devfreq_monitor()
468 msecs_to_jiffies(devfreq->profile->polling_ms)); in devfreq_monitor()
471 mutex_unlock(&devfreq->lock); in devfreq_monitor()
472 trace_devfreq_monitor(devfreq); in devfreq_monitor()
485 void devfreq_monitor_start(struct devfreq *devfreq) in devfreq_monitor_start() argument
487 if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) in devfreq_monitor_start()
490 mutex_lock(&devfreq->lock); in devfreq_monitor_start()
491 if (delayed_work_pending(&devfreq->work)) in devfreq_monitor_start()
494 switch (devfreq->profile->timer) { in devfreq_monitor_start()
496 INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); in devfreq_monitor_start()
499 INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor); in devfreq_monitor_start()
505 if (devfreq->profile->polling_ms) in devfreq_monitor_start()
506 queue_delayed_work(devfreq_wq, &devfreq->work, in devfreq_monitor_start()
507 msecs_to_jiffies(devfreq->profile->polling_ms)); in devfreq_monitor_start()
510 devfreq->stop_polling = false; in devfreq_monitor_start()
511 mutex_unlock(&devfreq->lock); in devfreq_monitor_start()
523 void devfreq_monitor_stop(struct devfreq *devfreq) in devfreq_monitor_stop() argument
525 if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) in devfreq_monitor_stop()
528 mutex_lock(&devfreq->lock); in devfreq_monitor_stop()
529 if (devfreq->stop_polling) { in devfreq_monitor_stop()
530 mutex_unlock(&devfreq->lock); in devfreq_monitor_stop()
534 devfreq->stop_polling = true; in devfreq_monitor_stop()
535 mutex_unlock(&devfreq->lock); in devfreq_monitor_stop()
536 cancel_delayed_work_sync(&devfreq->work); in devfreq_monitor_stop()
552 void devfreq_monitor_suspend(struct devfreq *devfreq) in devfreq_monitor_suspend() argument
554 mutex_lock(&devfreq->lock); in devfreq_monitor_suspend()
555 if (devfreq->stop_polling) { in devfreq_monitor_suspend()
556 mutex_unlock(&devfreq->lock); in devfreq_monitor_suspend()
560 devfreq_update_status(devfreq, devfreq->previous_freq); in devfreq_monitor_suspend()
561 devfreq->stop_polling = true; in devfreq_monitor_suspend()
562 mutex_unlock(&devfreq->lock); in devfreq_monitor_suspend()
564 if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) in devfreq_monitor_suspend()
567 cancel_delayed_work_sync(&devfreq->work); in devfreq_monitor_suspend()
579 void devfreq_monitor_resume(struct devfreq *devfreq) in devfreq_monitor_resume() argument
583 mutex_lock(&devfreq->lock); in devfreq_monitor_resume()
585 if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) in devfreq_monitor_resume()
588 if (!devfreq->stop_polling) in devfreq_monitor_resume()
591 if (!delayed_work_pending(&devfreq->work) && in devfreq_monitor_resume()
592 devfreq->profile->polling_ms) in devfreq_monitor_resume()
593 queue_delayed_work(devfreq_wq, &devfreq->work, in devfreq_monitor_resume()
594 msecs_to_jiffies(devfreq->profile->polling_ms)); in devfreq_monitor_resume()
597 devfreq->stats.last_update = get_jiffies_64(); in devfreq_monitor_resume()
598 devfreq->stop_polling = false; in devfreq_monitor_resume()
600 if (devfreq->profile->get_cur_freq && in devfreq_monitor_resume()
601 !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) in devfreq_monitor_resume()
602 devfreq->previous_freq = freq; in devfreq_monitor_resume()
605 mutex_unlock(&devfreq->lock); in devfreq_monitor_resume()
617 void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay) in devfreq_update_interval() argument
619 unsigned int cur_delay = devfreq->profile->polling_ms; in devfreq_update_interval()
622 mutex_lock(&devfreq->lock); in devfreq_update_interval()
623 devfreq->profile->polling_ms = new_delay; in devfreq_update_interval()
625 if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) in devfreq_update_interval()
628 if (devfreq->stop_polling) in devfreq_update_interval()
633 mutex_unlock(&devfreq->lock); in devfreq_update_interval()
634 cancel_delayed_work_sync(&devfreq->work); in devfreq_update_interval()
640 queue_delayed_work(devfreq_wq, &devfreq->work, in devfreq_update_interval()
641 msecs_to_jiffies(devfreq->profile->polling_ms)); in devfreq_update_interval()
647 mutex_unlock(&devfreq->lock); in devfreq_update_interval()
648 cancel_delayed_work_sync(&devfreq->work); in devfreq_update_interval()
649 mutex_lock(&devfreq->lock); in devfreq_update_interval()
650 if (!devfreq->stop_polling) in devfreq_update_interval()
651 queue_delayed_work(devfreq_wq, &devfreq->work, in devfreq_update_interval()
652 msecs_to_jiffies(devfreq->profile->polling_ms)); in devfreq_update_interval()
655 mutex_unlock(&devfreq->lock); in devfreq_update_interval()
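
devfreq_monitor() re-queues itself on devfreq_wq every polling_ms, and the monitor_start/stop/suspend/resume and update_interval helpers above exist for governors to drive that machinery. A sketch of how a polling governor's event handler typically wires them up (the simple_ondemand governor follows this shape; the function name is hypothetical):

#include <linux/devfreq.h>
#include "governor.h"   /* in-tree governors get the monitor helpers here */

static int example_governor_event_handler(struct devfreq *devfreq,
                                          unsigned int event, void *data)
{
        switch (event) {
        case DEVFREQ_GOV_START:
                devfreq_monitor_start(devfreq);         /* arm the delayed work */
                break;
        case DEVFREQ_GOV_STOP:
                devfreq_monitor_stop(devfreq);          /* cancel it synchronously */
                break;
        case DEVFREQ_GOV_UPDATE_INTERVAL:
                devfreq_update_interval(devfreq, (unsigned int *)data);
                break;
        case DEVFREQ_GOV_SUSPEND:
                devfreq_monitor_suspend(devfreq);       /* stop polling, keep stats */
                break;
        case DEVFREQ_GOV_RESUME:
                devfreq_monitor_resume(devfreq);        /* restart polling */
                break;
        default:
                break;
        }

        return 0;
}
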
671 struct devfreq *devfreq = container_of(nb, struct devfreq, nb); in devfreq_notifier_call() local
674 mutex_lock(&devfreq->lock); in devfreq_notifier_call()
676 devfreq->scaling_min_freq = find_available_min_freq(devfreq); in devfreq_notifier_call()
677 if (!devfreq->scaling_min_freq) in devfreq_notifier_call()
680 devfreq->scaling_max_freq = find_available_max_freq(devfreq); in devfreq_notifier_call()
681 if (!devfreq->scaling_max_freq) { in devfreq_notifier_call()
682 devfreq->scaling_max_freq = ULONG_MAX; in devfreq_notifier_call()
686 err = update_devfreq(devfreq); in devfreq_notifier_call()
689 mutex_unlock(&devfreq->lock); in devfreq_notifier_call()
691 dev_err(devfreq->dev.parent, in devfreq_notifier_call()
702 static int qos_notifier_call(struct devfreq *devfreq) in qos_notifier_call() argument
706 mutex_lock(&devfreq->lock); in qos_notifier_call()
707 err = update_devfreq(devfreq); in qos_notifier_call()
708 mutex_unlock(&devfreq->lock); in qos_notifier_call()
710 dev_err(devfreq->dev.parent, in qos_notifier_call()
726 return qos_notifier_call(container_of(nb, struct devfreq, nb_min)); in qos_min_notifier_call()
738 return qos_notifier_call(container_of(nb, struct devfreq, nb_max)); in qos_max_notifier_call()
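
The nb_min/nb_max notifiers above mean the core re-evaluates its frequency range whenever a PM QoS request on the parent device changes. A sketch of the consumer side, assuming the generic dev_pm_qos API (the request object and value are illustrative; the frequency QoS classes are expressed in kHz):

#include <linux/pm_qos.h>

static struct dev_pm_qos_request example_min_req;

/* Pin a frequency floor on a devfreq device by constraining its parent
 * device's DEV_PM_QOS_MIN_FREQUENCY class; qos_min_notifier_call() then
 * re-runs update_devfreq() with the new range. */
static int example_request_floor(struct device *parent, s32 khz)
{
        return dev_pm_qos_add_request(parent, &example_min_req,
                                      DEV_PM_QOS_MIN_FREQUENCY, khz);
}
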
749 struct devfreq *devfreq = to_devfreq(dev); in devfreq_dev_release() local
753 list_del(&devfreq->node); in devfreq_dev_release()
756 err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max, in devfreq_dev_release()
761 err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min, in devfreq_dev_release()
767 if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) { in devfreq_dev_release()
768 err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req); in devfreq_dev_release()
773 if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) { in devfreq_dev_release()
774 err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req); in devfreq_dev_release()
780 if (devfreq->profile->exit) in devfreq_dev_release()
781 devfreq->profile->exit(devfreq->dev.parent); in devfreq_dev_release()
783 if (devfreq->opp_table) in devfreq_dev_release()
784 dev_pm_opp_put_opp_table(devfreq->opp_table); in devfreq_dev_release()
786 mutex_destroy(&devfreq->lock); in devfreq_dev_release()
787 srcu_cleanup_notifier_head(&devfreq->transition_notifier_list); in devfreq_dev_release()
788 kfree(devfreq); in devfreq_dev_release()
791 static void create_sysfs_files(struct devfreq *devfreq,
793 static void remove_sysfs_files(struct devfreq *devfreq,
803 struct devfreq *devfreq_add_device(struct device *dev, in devfreq_add_device()
808 struct devfreq *devfreq; in devfreq_add_device() local
819 devfreq = find_device_devfreq(dev); in devfreq_add_device()
821 if (!IS_ERR(devfreq)) { in devfreq_add_device()
828 devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); in devfreq_add_device()
829 if (!devfreq) { in devfreq_add_device()
834 mutex_init(&devfreq->lock); in devfreq_add_device()
835 mutex_lock(&devfreq->lock); in devfreq_add_device()
836 devfreq->dev.parent = dev; in devfreq_add_device()
837 devfreq->dev.class = devfreq_class; in devfreq_add_device()
838 devfreq->dev.release = devfreq_dev_release; in devfreq_add_device()
839 INIT_LIST_HEAD(&devfreq->node); in devfreq_add_device()
840 devfreq->profile = profile; in devfreq_add_device()
841 devfreq->previous_freq = profile->initial_freq; in devfreq_add_device()
842 devfreq->last_status.current_frequency = profile->initial_freq; in devfreq_add_device()
843 devfreq->data = data; in devfreq_add_device()
844 devfreq->nb.notifier_call = devfreq_notifier_call; in devfreq_add_device()
846 if (devfreq->profile->timer < 0 in devfreq_add_device()
847 || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) { in devfreq_add_device()
848 mutex_unlock(&devfreq->lock); in devfreq_add_device()
853 if (!devfreq->profile->max_state || !devfreq->profile->freq_table) { in devfreq_add_device()
854 mutex_unlock(&devfreq->lock); in devfreq_add_device()
855 err = set_freq_table(devfreq); in devfreq_add_device()
858 mutex_lock(&devfreq->lock); in devfreq_add_device()
860 devfreq->freq_table = devfreq->profile->freq_table; in devfreq_add_device()
861 devfreq->max_state = devfreq->profile->max_state; in devfreq_add_device()
864 devfreq->scaling_min_freq = find_available_min_freq(devfreq); in devfreq_add_device()
865 if (!devfreq->scaling_min_freq) { in devfreq_add_device()
866 mutex_unlock(&devfreq->lock); in devfreq_add_device()
871 devfreq->scaling_max_freq = find_available_max_freq(devfreq); in devfreq_add_device()
872 if (!devfreq->scaling_max_freq) { in devfreq_add_device()
873 mutex_unlock(&devfreq->lock); in devfreq_add_device()
878 devfreq_get_freq_range(devfreq, &min_freq, &max_freq); in devfreq_add_device()
880 devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); in devfreq_add_device()
881 devfreq->opp_table = dev_pm_opp_get_opp_table(dev); in devfreq_add_device()
882 if (IS_ERR(devfreq->opp_table)) in devfreq_add_device()
883 devfreq->opp_table = NULL; in devfreq_add_device()
885 atomic_set(&devfreq->suspend_count, 0); in devfreq_add_device()
887 dev_set_name(&devfreq->dev, "%s", dev_name(dev)); in devfreq_add_device()
888 err = device_register(&devfreq->dev); in devfreq_add_device()
890 mutex_unlock(&devfreq->lock); in devfreq_add_device()
891 put_device(&devfreq->dev); in devfreq_add_device()
895 devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev, in devfreq_add_device()
897 devfreq->max_state, in devfreq_add_device()
898 devfreq->max_state), in devfreq_add_device()
900 if (!devfreq->stats.trans_table) { in devfreq_add_device()
901 mutex_unlock(&devfreq->lock); in devfreq_add_device()
906 devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev, in devfreq_add_device()
907 devfreq->max_state, in devfreq_add_device()
908 sizeof(*devfreq->stats.time_in_state), in devfreq_add_device()
910 if (!devfreq->stats.time_in_state) { in devfreq_add_device()
911 mutex_unlock(&devfreq->lock); in devfreq_add_device()
916 devfreq->stats.total_trans = 0; in devfreq_add_device()
917 devfreq->stats.last_update = get_jiffies_64(); in devfreq_add_device()
919 srcu_init_notifier_head(&devfreq->transition_notifier_list); in devfreq_add_device()
921 mutex_unlock(&devfreq->lock); in devfreq_add_device()
923 err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req, in devfreq_add_device()
927 err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req, in devfreq_add_device()
933 devfreq->nb_min.notifier_call = qos_min_notifier_call; in devfreq_add_device()
934 err = dev_pm_qos_add_notifier(dev, &devfreq->nb_min, in devfreq_add_device()
939 devfreq->nb_max.notifier_call = qos_max_notifier_call; in devfreq_add_device()
940 err = dev_pm_qos_add_notifier(dev, &devfreq->nb_max, in devfreq_add_device()
955 devfreq->governor = governor; in devfreq_add_device()
956 err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, in devfreq_add_device()
964 create_sysfs_files(devfreq, devfreq->governor); in devfreq_add_device()
966 list_add(&devfreq->node, &devfreq_list); in devfreq_add_device()
970 if (devfreq->profile->is_cooling_device) { in devfreq_add_device()
971 devfreq->cdev = devfreq_cooling_em_register(devfreq, NULL); in devfreq_add_device()
972 if (IS_ERR(devfreq->cdev)) in devfreq_add_device()
973 devfreq->cdev = NULL; in devfreq_add_device()
976 return devfreq; in devfreq_add_device()
981 devfreq_remove_device(devfreq); in devfreq_add_device()
982 devfreq = NULL; in devfreq_add_device()
984 kfree(devfreq); in devfreq_add_device()
996 int devfreq_remove_device(struct devfreq *devfreq) in devfreq_remove_device() argument
998 if (!devfreq) in devfreq_remove_device()
1001 devfreq_cooling_unregister(devfreq->cdev); in devfreq_remove_device()
1003 if (devfreq->governor) { in devfreq_remove_device()
1004 devfreq->governor->event_handler(devfreq, in devfreq_remove_device()
1006 remove_sysfs_files(devfreq, devfreq->governor); in devfreq_remove_device()
1009 device_unregister(&devfreq->dev); in devfreq_remove_device()
1017 struct devfreq **r = res; in devm_devfreq_dev_match()
1027 devfreq_remove_device(*(struct devfreq **)res); in devm_devfreq_dev_release()
1041 struct devfreq *devm_devfreq_add_device(struct device *dev, in devm_devfreq_add_device()
1046 struct devfreq **ptr, *devfreq; in devm_devfreq_add_device() local
1052 devfreq = devfreq_add_device(dev, profile, governor_name, data); in devm_devfreq_add_device()
1053 if (IS_ERR(devfreq)) { in devm_devfreq_add_device()
1055 return devfreq; in devm_devfreq_add_device()
1058 *ptr = devfreq; in devm_devfreq_add_device()
1061 return devfreq; in devm_devfreq_add_device()
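
Registration from the driver side, against the devfreq_add_device()/devm_devfreq_add_device() paths listed above, as a sketch. Field names follow struct devfreq_dev_profile and struct devfreq_dev_status; the example_* callbacks (including example_target from the earlier sketch), the initial frequency, and the counter values are all assumptions for the example:

#include <linux/devfreq.h>
#include <linux/platform_device.h>

static int example_get_dev_status(struct device *dev,
                                  struct devfreq_dev_status *stat)
{
        /* A real driver reads utilization counters here; polling governors
         * use busy_time/total_time to pick the next target frequency. */
        stat->busy_time = 50;
        stat->total_time = 100;
        stat->current_frequency = 600000000;    /* Hz, assumed */
        return 0;
}

static struct devfreq_dev_profile example_profile = {
        .initial_freq   = 600000000,            /* Hz, assumed */
        .polling_ms     = 100,                  /* used by polling governors */
        .timer          = DEVFREQ_TIMER_DEFERRABLE,
        .target         = example_target,       /* see the .target() sketch above */
        .get_dev_status = example_get_dev_status,
        /* .freq_table/.max_state left unset: set_freq_table() builds the
         * table from the device's OPPs, as the entries above show. */
};

static int example_probe(struct platform_device *pdev)
{
        struct devfreq *df;

        /* devm_ variant: devfreq_remove_device() runs automatically on
         * driver detach through the devres release listed above. */
        df = devm_devfreq_add_device(&pdev->dev, &example_profile,
                                     DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
        return IS_ERR(df) ? PTR_ERR(df) : 0;
}
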
1072 struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node) in devfreq_get_devfreq_by_node()
1074 struct devfreq *devfreq; in devfreq_get_devfreq_by_node() local
1080 list_for_each_entry(devfreq, &devfreq_list, node) { in devfreq_get_devfreq_by_node()
1081 if (devfreq->dev.parent in devfreq_get_devfreq_by_node()
1082 && device_match_of_node(devfreq->dev.parent, node)) { in devfreq_get_devfreq_by_node()
1084 return devfreq; in devfreq_get_devfreq_by_node()
1100 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, in devfreq_get_devfreq_by_phandle()
1104 struct devfreq *devfreq; in devfreq_get_devfreq_by_phandle() local
1116 devfreq = devfreq_get_devfreq_by_node(node); in devfreq_get_devfreq_by_phandle()
1119 return devfreq; in devfreq_get_devfreq_by_phandle()
1123 struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node) in devfreq_get_devfreq_by_node()
1128 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, in devfreq_get_devfreq_by_phandle()
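
devfreq_get_devfreq_by_node()/by_phandle() let one driver resolve another device's devfreq instance, for example to follow it with the passive governor. A sketch of the lookup, where the "devfreq" property name is an assumption for the example:

#include <linux/devfreq.h>

static struct devfreq *example_find_parent_devfreq(struct device *dev)
{
        struct devfreq *parent;

        /* Index 0 of the "devfreq" phandle list in this device's DT node. */
        parent = devfreq_get_devfreq_by_phandle(dev, "devfreq", 0);
        if (IS_ERR(parent))
                return parent;  /* -ENODEV, or -EPROBE_DEFER if not ready yet */

        return parent;
}
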
1142 void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq) in devm_devfreq_remove_device() argument
1145 devm_devfreq_dev_match, devfreq)); in devm_devfreq_remove_device()
1157 int devfreq_suspend_device(struct devfreq *devfreq) in devfreq_suspend_device() argument
1161 if (!devfreq) in devfreq_suspend_device()
1164 if (atomic_inc_return(&devfreq->suspend_count) > 1) in devfreq_suspend_device()
1167 if (devfreq->governor) { in devfreq_suspend_device()
1168 ret = devfreq->governor->event_handler(devfreq, in devfreq_suspend_device()
1174 if (devfreq->suspend_freq) { in devfreq_suspend_device()
1175 mutex_lock(&devfreq->lock); in devfreq_suspend_device()
1176 ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0); in devfreq_suspend_device()
1177 mutex_unlock(&devfreq->lock); in devfreq_suspend_device()
1194 int devfreq_resume_device(struct devfreq *devfreq) in devfreq_resume_device() argument
1198 if (!devfreq) in devfreq_resume_device()
1201 if (atomic_dec_return(&devfreq->suspend_count) >= 1) in devfreq_resume_device()
1204 if (devfreq->resume_freq) { in devfreq_resume_device()
1205 mutex_lock(&devfreq->lock); in devfreq_resume_device()
1206 ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0); in devfreq_resume_device()
1207 mutex_unlock(&devfreq->lock); in devfreq_resume_device()
1212 if (devfreq->governor) { in devfreq_resume_device()
1213 ret = devfreq->governor->event_handler(devfreq, in devfreq_resume_device()
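
devfreq_suspend_device()/devfreq_resume_device() are reference-counted via suspend_count and optionally park the device at suspend_freq. A sketch of driver PM callbacks using them (the drvdata layout and all example_* names are hypothetical):

#include <linux/device.h>
#include <linux/devfreq.h>

struct example_drvdata {
        struct devfreq *devfreq;
};

static int example_suspend(struct device *dev)
{
        struct example_drvdata *priv = dev_get_drvdata(dev);

        /* Stops the governor; if a suspend OPP is defined, switches to it. */
        return devfreq_suspend_device(priv->devfreq);
}

static int example_resume(struct device *dev)
{
        struct example_drvdata *priv = dev_get_drvdata(dev);

        /* Restores resume_freq and restarts the governor. */
        return devfreq_resume_device(priv->devfreq);
}

/* Wire example_suspend/example_resume into the driver's dev_pm_ops. */
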
1233 struct devfreq *devfreq; in devfreq_suspend() local
1237 list_for_each_entry(devfreq, &devfreq_list, node) { in devfreq_suspend()
1238 ret = devfreq_suspend_device(devfreq); in devfreq_suspend()
1240 dev_err(&devfreq->dev, in devfreq_suspend()
1254 struct devfreq *devfreq; in devfreq_resume() local
1258 list_for_each_entry(devfreq, &devfreq_list, node) { in devfreq_resume()
1259 ret = devfreq_resume_device(devfreq); in devfreq_resume()
1261 dev_warn(&devfreq->dev, in devfreq_resume()
1274 struct devfreq *devfreq; in devfreq_add_governor() local
1293 list_for_each_entry(devfreq, &devfreq_list, node) { in devfreq_add_governor()
1295 struct device *dev = devfreq->dev.parent; in devfreq_add_governor()
1297 if (!strncmp(devfreq->governor->name, governor->name, in devfreq_add_governor()
1300 if (devfreq->governor) { in devfreq_add_governor()
1303 __func__, devfreq->governor->name); in devfreq_add_governor()
1304 ret = devfreq->governor->event_handler(devfreq, in devfreq_add_governor()
1310 devfreq->governor->name, ret); in devfreq_add_governor()
1314 devfreq->governor = governor; in devfreq_add_governor()
1315 ret = devfreq->governor->event_handler(devfreq, in devfreq_add_governor()
1319 __func__, devfreq->governor->name, in devfreq_add_governor()
1365 struct devfreq *devfreq; in devfreq_remove_governor() local
1381 list_for_each_entry(devfreq, &devfreq_list, node) { in devfreq_remove_governor()
1383 struct device *dev = devfreq->dev.parent; in devfreq_remove_governor()
1385 if (!strncmp(devfreq->governor->name, governor->name, in devfreq_remove_governor()
1388 if (!devfreq->governor) { in devfreq_remove_governor()
1394 ret = devfreq->governor->event_handler(devfreq, in devfreq_remove_governor()
1398 __func__, devfreq->governor->name, in devfreq_remove_governor()
1401 devfreq->governor = NULL; in devfreq_remove_governor()
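
devfreq_add_governor()/devfreq_remove_governor() also walk devfreq_list and restart or stop any device whose governor matches the one being (un)registered. A sketch of the module side of that handshake (the governor name and policy are made up; example_governor_event_handler is the sketch shown earlier):

#include <linux/devfreq.h>
#include <linux/module.h>
#include "governor.h"

/* Trivial policy: always request the highest available frequency. */
static int example_get_target_freq(struct devfreq *df, unsigned long *freq)
{
        *freq = DEVFREQ_MAX_FREQ;
        return 0;
}

static struct devfreq_governor example_governor = {
        .name            = "example_performance",
        .get_target_freq = example_get_target_freq,
        .event_handler   = example_governor_event_handler,
};

static int __init example_governor_init(void)
{
        return devfreq_add_governor(&example_governor);
}
module_init(example_governor_init);

static void __exit example_governor_exit(void)
{
        int ret = devfreq_remove_governor(&example_governor);

        if (ret)
                pr_err("%s: failed to remove governor (%d)\n", __func__, ret);
}
module_exit(example_governor_exit);
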
1416 struct devfreq *df = to_devfreq(dev); in name_show()
1424 struct devfreq *df = to_devfreq(dev); in governor_show()
1435 struct devfreq *df = to_devfreq(dev); in governor_store()
1516 struct devfreq *df = to_devfreq(d); in available_governors_show()
1562 struct devfreq *df = to_devfreq(dev); in cur_freq_show()
1578 struct devfreq *df = to_devfreq(dev); in target_freq_show()
1587 struct devfreq *df = to_devfreq(dev); in min_freq_store()
1614 struct devfreq *df = to_devfreq(dev); in min_freq_show()
1628 struct devfreq *df = to_devfreq(dev); in max_freq_store()
1668 struct devfreq *df = to_devfreq(dev); in max_freq_show()
1683 struct devfreq *df = to_devfreq(d); in available_frequencies_show()
1710 struct devfreq *df = to_devfreq(dev); in trans_stat_show()
1783 struct devfreq *df = to_devfreq(dev); in trans_stat_store()
1822 ATTRIBUTE_GROUPS(devfreq);
1827 struct devfreq *df = to_devfreq(dev); in polling_interval_show()
1839 struct devfreq *df = to_devfreq(dev); in polling_interval_store()
1860 struct devfreq *df = to_devfreq(dev); in timer_show()
1871 struct devfreq *df = to_devfreq(dev); in timer_store()
1931 static void create_sysfs_files(struct devfreq *devfreq, in create_sysfs_files() argument
1935 CREATE_SYSFS_FILE(devfreq, polling_interval); in create_sysfs_files()
1937 CREATE_SYSFS_FILE(devfreq, timer); in create_sysfs_files()
1941 static void remove_sysfs_files(struct devfreq *devfreq, in remove_sysfs_files() argument
1945 sysfs_remove_file(&devfreq->dev.kobj, in remove_sysfs_files()
1948 sysfs_remove_file(&devfreq->dev.kobj, &dev_attr_timer.attr); in remove_sysfs_files()
1963 struct devfreq *devfreq; in devfreq_summary_show() local
1964 struct devfreq *p_devfreq = NULL; in devfreq_summary_show()
1990 list_for_each_entry_reverse(devfreq, &devfreq_list, node) { in devfreq_summary_show()
1992 if (!strncmp(devfreq->governor->name, DEVFREQ_GOV_PASSIVE, in devfreq_summary_show()
1994 struct devfreq_passive_data *data = devfreq->data; in devfreq_summary_show()
2003 mutex_lock(&devfreq->lock); in devfreq_summary_show()
2004 cur_freq = devfreq->previous_freq; in devfreq_summary_show()
2005 devfreq_get_freq_range(devfreq, &min_freq, &max_freq); in devfreq_summary_show()
2006 timer = devfreq->profile->timer; in devfreq_summary_show()
2008 if (IS_SUPPORTED_ATTR(devfreq->governor->attrs, POLLING_INTERVAL)) in devfreq_summary_show()
2009 polling_ms = devfreq->profile->polling_ms; in devfreq_summary_show()
2012 mutex_unlock(&devfreq->lock); in devfreq_summary_show()
2016 dev_name(&devfreq->dev), in devfreq_summary_show()
2018 devfreq->governor->name, in devfreq_summary_show()
2105 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) in devfreq_register_opp_notifier() argument
2107 return dev_pm_opp_register_notifier(dev, &devfreq->nb); in devfreq_register_opp_notifier()
2121 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) in devfreq_unregister_opp_notifier() argument
2123 return dev_pm_opp_unregister_notifier(dev, &devfreq->nb); in devfreq_unregister_opp_notifier()
2129 devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res); in devm_devfreq_opp_release()
2139 struct devfreq *devfreq) in devm_devfreq_register_opp_notifier() argument
2141 struct devfreq **ptr; in devm_devfreq_register_opp_notifier()
2148 ret = devfreq_register_opp_notifier(dev, devfreq); in devm_devfreq_register_opp_notifier()
2154 *ptr = devfreq; in devm_devfreq_register_opp_notifier()
2168 struct devfreq *devfreq) in devm_devfreq_unregister_opp_notifier() argument
2171 devm_devfreq_dev_match, devfreq)); in devm_devfreq_unregister_opp_notifier()
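
devfreq_register_opp_notifier() hooks devfreq->nb into the OPP library so that OPP availability changes reach devfreq_notifier_call() and refresh scaling_min_freq/scaling_max_freq. Driver usage is essentially a one-liner, sketched here with the managed variant (the wrapper name is hypothetical):

#include <linux/devfreq.h>

/* 'parent' owns the OPP table, 'df' is its devfreq instance; the devres
 * release unregisters the notifier on driver detach. */
static int example_track_opp_changes(struct device *parent, struct devfreq *df)
{
        return devm_devfreq_register_opp_notifier(parent, df);
}
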
2181 int devfreq_register_notifier(struct devfreq *devfreq, in devfreq_register_notifier() argument
2187 if (!devfreq) in devfreq_register_notifier()
2193 &devfreq->transition_notifier_list, nb); in devfreq_register_notifier()
2209 int devfreq_unregister_notifier(struct devfreq *devfreq, in devfreq_unregister_notifier() argument
2215 if (!devfreq) in devfreq_unregister_notifier()
2221 &devfreq->transition_notifier_list, nb); in devfreq_unregister_notifier()
2232 struct devfreq *devfreq; member
2241 devfreq_unregister_notifier(this->devfreq, this->nb, this->list); in devm_devfreq_notifier_release()
2253 struct devfreq *devfreq, in devm_devfreq_register_notifier() argument
2265 ret = devfreq_register_notifier(devfreq, nb, list); in devm_devfreq_register_notifier()
2271 ptr->devfreq = devfreq; in devm_devfreq_register_notifier()
2289 struct devfreq *devfreq, in devm_devfreq_unregister_notifier() argument
2294 devm_devfreq_dev_match, devfreq)); in devm_devfreq_unregister_notifier()
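
Finally, the transition notifier list above is what devfreq_notify_transition() fires around each frequency change. A sketch of a DEVFREQ_TRANSITION_NOTIFIER consumer (the callback name and the messages are illustrative):

#include <linux/devfreq.h>
#include <linux/notifier.h>

static int example_transition_notifier(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
{
        struct devfreq_freqs *freqs = ptr;

        switch (event) {
        case DEVFREQ_PRECHANGE:
                pr_debug("about to switch %lu -> %lu Hz\n",
                         freqs->old, freqs->new);
                break;
        case DEVFREQ_POSTCHANGE:
                pr_debug("now running at %lu Hz\n", freqs->new);
                break;
        }

        return NOTIFY_OK;
}

/* Registration, typically from a cooperating driver:
 *
 *      nb.notifier_call = example_transition_notifier;
 *      devm_devfreq_register_notifier(dev, devfreq, &nb,
 *                                     DEVFREQ_TRANSITION_NOTIFIER);
 */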