Lines matching refs:cs in kernel/time/clocksource.c

151 static void __clocksource_change_rating(struct clocksource *cs, int rating);
171 static void __clocksource_unstable(struct clocksource *cs) in __clocksource_unstable() argument
173 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); in __clocksource_unstable()
174 cs->flags |= CLOCK_SOURCE_UNSTABLE; in __clocksource_unstable()
180 if (list_empty(&cs->list)) { in __clocksource_unstable()
181 cs->rating = 0; in __clocksource_unstable()
185 if (cs->mark_unstable) in __clocksource_unstable()
186 cs->mark_unstable(cs); in __clocksource_unstable()
200 void clocksource_mark_unstable(struct clocksource *cs) in clocksource_mark_unstable() argument
205 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { in clocksource_mark_unstable()
206 if (!list_empty(&cs->list) && list_empty(&cs->wd_list)) in clocksource_mark_unstable()
207 list_add(&cs->wd_list, &watchdog_list); in clocksource_mark_unstable()
208 __clocksource_unstable(cs); in clocksource_mark_unstable()
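
A minimal caller-side sketch of this API (hypothetical driver code; a real in-tree caller is the x86 TSC code, whose mark_tsc_unstable() hands the TSC clocksources to this path):

    static struct clocksource my_cs;    /* hypothetical, registered elsewhere */

    static void my_hw_error_handler(void)
    {
            /* Queue the clocksource for the watchdog thread, which will
             * rate it down to 0 and trigger reselection. */
            clocksource_mark_unstable(&my_cs);
    }
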
225 static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) in cs_watchdog_read() argument
234 *csnow = cs->read(cs); in cs_watchdog_read()
266 smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name); in cs_watchdog_read()
273 cs->name, wd_delay); in cs_watchdog_read()
332 struct clocksource *cs = (struct clocksource *)csin; in clocksource_verify_one_cpu() local
334 csnow_mid = cs->read(cs); in clocksource_verify_one_cpu()
337 void clocksource_verify_percpu(struct clocksource *cs) in clocksource_verify_percpu() argument
354 pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name); in clocksource_verify_percpu()
358 pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen)); in clocksource_verify_percpu()
362 csnow_begin = cs->read(cs); in clocksource_verify_percpu()
363 smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1); in clocksource_verify_percpu()
364 csnow_end = cs->read(cs); in clocksource_verify_percpu()
365 delta = (s64)((csnow_mid - csnow_begin) & cs->mask); in clocksource_verify_percpu()
368 delta = (csnow_end - csnow_mid) & cs->mask; in clocksource_verify_percpu()
371 delta = clocksource_delta(csnow_end, csnow_begin, cs->mask); in clocksource_verify_percpu()
372 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); in clocksource_verify_percpu()
382 cpumask_pr_args(&cpus_ahead), testcpu, cs->name); in clocksource_verify_percpu()
385 cpumask_pr_args(&cpus_behind), testcpu, cs->name); in clocksource_verify_percpu()
388 testcpu, cs_nsec_min, cs_nsec_max, cs->name); in clocksource_verify_percpu()
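
The masked subtractions in clocksource_verify_percpu() are the wraparound-safe delta idiom for a counter that is only mask+1 cycles wide. A standalone userspace sketch of the same arithmetic (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the "(csnow_end - csnow_begin) & cs->mask" pattern:
     * unsigned subtraction followed by the mask yields the forward
     * distance even if the counter wrapped between the two reads. */
    static uint64_t masked_delta(uint64_t end, uint64_t begin, uint64_t mask)
    {
            return (end - begin) & mask;
    }

    int main(void)
    {
            uint64_t mask = 0xffffffffULL;  /* a 32-bit-wide counter */

            /* Counter wrapped: begin near the top, end just past zero. */
            printf("%llu\n",
                   (unsigned long long)masked_delta(0x10, 0xfffffff0ULL, mask));
            /* prints 32 */
            return 0;
    }
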
394 struct clocksource *cs; in clocksource_reset_watchdog() local
396 list_for_each_entry(cs, &watchdog_list, wd_list) in clocksource_reset_watchdog()
397 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; in clocksource_reset_watchdog()
406 struct clocksource *cs; in clocksource_watchdog() local
417 list_for_each_entry(cs, &watchdog_list, wd_list) { in clocksource_watchdog()
420 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in clocksource_watchdog()
426 read_ret = cs_watchdog_read(cs, &csnow, &wdnow); in clocksource_watchdog()
430 __clocksource_unstable(cs); in clocksource_watchdog()
453 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || in clocksource_watchdog()
455 cs->flags |= CLOCK_SOURCE_WATCHDOG; in clocksource_watchdog()
456 cs->wd_last = wdnow; in clocksource_watchdog()
457 cs->cs_last = csnow; in clocksource_watchdog()
461 delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask); in clocksource_watchdog()
465 delta = clocksource_delta(csnow, cs->cs_last, cs->mask); in clocksource_watchdog()
466 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); in clocksource_watchdog()
467 wdlast = cs->wd_last; /* save these in case we print them */ in clocksource_watchdog()
468 cslast = cs->cs_last; in clocksource_watchdog()
469 cs->cs_last = csnow; in clocksource_watchdog()
470 cs->wd_last = wdnow; in clocksource_watchdog()
497 md = cs->uncertainty_margin + watchdog->uncertainty_margin; in clocksource_watchdog()
504 smp_processor_id(), cs->name); in clocksource_watchdog()
508 cs->name, cs_nsec, csnow, cslast, cs->mask); in clocksource_watchdog()
512 cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec); in clocksource_watchdog()
513 if (curr_clocksource == cs) in clocksource_watchdog()
514 pr_warn(" '%s' is current clocksource.\n", cs->name); in clocksource_watchdog()
516 pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name); in clocksource_watchdog()
519 __clocksource_unstable(cs); in clocksource_watchdog()
523 if (cs == curr_clocksource && cs->tick_stable) in clocksource_watchdog()
524 cs->tick_stable(cs); in clocksource_watchdog()
526 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && in clocksource_watchdog()
527 (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && in clocksource_watchdog()
530 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_watchdog()
547 if (cs != curr_clocksource) { in clocksource_watchdog()
548 cs->flags |= CLOCK_SOURCE_RESELECT; in clocksource_watchdog()
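
The cs_nsec and wd_nsec values compared against the combined margin md come from clocksource_cyc2ns(), the fixed-point conversion defined in include/linux/clocksource.h:

    static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
    {
            return ((u64) cycles * mult) >> shift;
    }

A worked example, assuming a hypothetical 1 MHz counter with shift = 20: mult = (NSEC_PER_SEC << 20) / 1000000 = 1048576000, so 5 cycles convert to (5 * 1048576000) >> 20 = 5000 ns, the expected 5 us. The watchdog marks the clocksource unstable when |cs_nsec - wd_nsec| exceeds md.
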
606 static void clocksource_enqueue_watchdog(struct clocksource *cs) in clocksource_enqueue_watchdog() argument
608 INIT_LIST_HEAD(&cs->wd_list); in clocksource_enqueue_watchdog()
610 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { in clocksource_enqueue_watchdog()
612 list_add(&cs->wd_list, &watchdog_list); in clocksource_enqueue_watchdog()
613 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; in clocksource_enqueue_watchdog()
616 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) in clocksource_enqueue_watchdog()
617 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_enqueue_watchdog()
623 struct clocksource *cs, *old_wd; in clocksource_select_watchdog() local
632 list_for_each_entry(cs, &clocksource_list, list) { in clocksource_select_watchdog()
634 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) in clocksource_select_watchdog()
638 if (fallback && cs == old_wd) in clocksource_select_watchdog()
642 if (!watchdog || cs->rating > watchdog->rating) in clocksource_select_watchdog()
643 watchdog = cs; in clocksource_select_watchdog()
658 static void clocksource_dequeue_watchdog(struct clocksource *cs) in clocksource_dequeue_watchdog() argument
660 if (cs != watchdog) { in clocksource_dequeue_watchdog()
661 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { in clocksource_dequeue_watchdog()
663 list_del_init(&cs->wd_list); in clocksource_dequeue_watchdog()
672 struct clocksource *cs, *tmp; in __clocksource_watchdog_kthread() local
683 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { in __clocksource_watchdog_kthread()
684 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in __clocksource_watchdog_kthread()
685 list_del_init(&cs->wd_list); in __clocksource_watchdog_kthread()
686 __clocksource_change_rating(cs, 0); in __clocksource_watchdog_kthread()
689 if (cs->flags & CLOCK_SOURCE_RESELECT) { in __clocksource_watchdog_kthread()
690 cs->flags &= ~CLOCK_SOURCE_RESELECT; in __clocksource_watchdog_kthread()
710 static bool clocksource_is_watchdog(struct clocksource *cs) in clocksource_is_watchdog() argument
712 return cs == watchdog; in clocksource_is_watchdog()
717 static void clocksource_enqueue_watchdog(struct clocksource *cs) in clocksource_enqueue_watchdog() argument
719 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) in clocksource_enqueue_watchdog()
720 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_enqueue_watchdog()
724 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } in clocksource_dequeue_watchdog() argument
727 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } in clocksource_is_watchdog() argument
728 void clocksource_mark_unstable(struct clocksource *cs) { } in clocksource_mark_unstable() argument
735 static bool clocksource_is_suspend(struct clocksource *cs) in clocksource_is_suspend() argument
737 return cs == suspend_clocksource; in clocksource_is_suspend()
740 static void __clocksource_suspend_select(struct clocksource *cs) in __clocksource_suspend_select() argument
745 if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP)) in __clocksource_suspend_select()
753 if (cs->suspend || cs->resume) { in __clocksource_suspend_select()
755 cs->name); in __clocksource_suspend_select()
759 if (!suspend_clocksource || cs->rating > suspend_clocksource->rating) in __clocksource_suspend_select()
760 suspend_clocksource = cs; in __clocksource_suspend_select()
769 struct clocksource *cs, *old_suspend; in clocksource_suspend_select() local
775 list_for_each_entry(cs, &clocksource_list, list) { in clocksource_suspend_select()
777 if (fallback && cs == old_suspend) in clocksource_suspend_select()
780 __clocksource_suspend_select(cs); in clocksource_suspend_select()
797 void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles) in clocksource_start_suspend_timing() argument
807 if (clocksource_is_suspend(cs)) { in clocksource_start_suspend_timing()
835 u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now) in clocksource_stop_suspend_timing() argument
847 if (clocksource_is_suspend(cs)) in clocksource_stop_suspend_timing()
863 if (!clocksource_is_suspend(cs) && suspend_clocksource->disable) in clocksource_stop_suspend_timing()
874 struct clocksource *cs; in clocksource_suspend() local
876 list_for_each_entry_reverse(cs, &clocksource_list, list) in clocksource_suspend()
877 if (cs->suspend) in clocksource_suspend()
878 cs->suspend(cs); in clocksource_suspend()
886 struct clocksource *cs; in clocksource_resume() local
888 list_for_each_entry(cs, &clocksource_list, list) in clocksource_resume()
889 if (cs->resume) in clocksource_resume()
890 cs->resume(cs); in clocksource_resume()
912 static u32 clocksource_max_adjustment(struct clocksource *cs) in clocksource_max_adjustment() argument
918 ret = (u64)cs->mult * 11; in clocksource_max_adjustment()
973 static inline void clocksource_update_max_deferment(struct clocksource *cs) in clocksource_update_max_deferment() argument
975 cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift, in clocksource_update_max_deferment()
976 cs->maxadj, cs->mask, in clocksource_update_max_deferment()
977 &cs->max_cycles); in clocksource_update_max_deferment()
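
A simplified sketch of the clocks_calc_max_nsecs() logic behind max_idle_ns (the real helper lives in kernel/time/clocksource.c and uses do_div(); this version only illustrates the bounds it applies):

    #include <stdint.h>

    static uint64_t max_nsecs_sketch(uint32_t mult, uint32_t shift,
                                     uint32_t maxadj, uint64_t mask,
                                     uint64_t *max_cyc)
    {
            /* Largest cycle count whose conversion cannot overflow
             * 64 bits, even with the NTP adjustment maxadj added... */
            uint64_t max_cycles = UINT64_MAX / (mult + maxadj);

            /* ...clamped to what the counter itself can represent. */
            if (max_cycles > mask)
                    max_cycles = mask;
            if (max_cyc)
                    *max_cyc = max_cycles;

            /* Convert with the worst-case (smallest) multiplier and
             * keep only 50% as a margin against late timer expiry. */
            return ((max_cycles * (mult - maxadj)) >> shift) / 2;
    }
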
982 struct clocksource *cs; in clocksource_find_best() local
992 list_for_each_entry(cs, &clocksource_list, list) { in clocksource_find_best()
993 if (skipcur && cs == curr_clocksource) in clocksource_find_best()
995 if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES)) in clocksource_find_best()
997 return cs; in clocksource_find_best()
1005 struct clocksource *best, *cs; in __clocksource_select() local
1016 list_for_each_entry(cs, &clocksource_list, list) { in __clocksource_select()
1017 if (skipcur && cs == curr_clocksource) in __clocksource_select()
1019 if (strcmp(cs->name, override_name) != 0) in __clocksource_select()
1026 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) { in __clocksource_select()
1028 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in __clocksource_select()
1030 cs->name); in __clocksource_select()
1038 cs->name); in __clocksource_select()
1042 best = cs; in __clocksource_select()
1096 static void clocksource_enqueue(struct clocksource *cs) in clocksource_enqueue() argument
1103 if (tmp->rating < cs->rating) in clocksource_enqueue()
1107 list_add(&cs->list, entry); in clocksource_enqueue()
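
clocksource_enqueue() keeps clocksource_list sorted by descending rating, so clocksource_find_best() can return the first acceptable entry. A reconstruction consistent with the fragments above:

    static void clocksource_enqueue(struct clocksource *cs)
    {
            struct list_head *entry = &clocksource_list;
            struct clocksource *tmp;

            list_for_each_entry(tmp, &clocksource_list, list) {
                    /* Walk past every entry rated higher than cs and
                     * insert behind the last of them. */
                    if (tmp->rating < cs->rating)
                            break;
                    entry = &tmp->list;
            }
            list_add(&cs->list, entry);
    }
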
1122 void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq) in __clocksource_update_freq_scale() argument
1140 sec = cs->mask; in __clocksource_update_freq_scale()
1145 else if (sec > 600 && cs->mask > UINT_MAX) in __clocksource_update_freq_scale()
1148 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, in __clocksource_update_freq_scale()
1162 if (scale && freq && !cs->uncertainty_margin) { in __clocksource_update_freq_scale()
1163 cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq); in __clocksource_update_freq_scale()
1164 if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW) in __clocksource_update_freq_scale()
1165 cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW; in __clocksource_update_freq_scale()
1166 } else if (!cs->uncertainty_margin) { in __clocksource_update_freq_scale()
1167 cs->uncertainty_margin = WATCHDOG_THRESHOLD; in __clocksource_update_freq_scale()
1169 WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW); in __clocksource_update_freq_scale()
1175 cs->maxadj = clocksource_max_adjustment(cs); in __clocksource_update_freq_scale()
1176 while (freq && ((cs->mult + cs->maxadj < cs->mult) in __clocksource_update_freq_scale()
1177 || (cs->mult - cs->maxadj > cs->mult))) { in __clocksource_update_freq_scale()
1178 cs->mult >>= 1; in __clocksource_update_freq_scale()
1179 cs->shift--; in __clocksource_update_freq_scale()
1180 cs->maxadj = clocksource_max_adjustment(cs); in __clocksource_update_freq_scale()
1187 WARN_ONCE(cs->mult + cs->maxadj < cs->mult, in __clocksource_update_freq_scale()
1189 cs->name); in __clocksource_update_freq_scale()
1191 clocksource_update_max_deferment(cs); in __clocksource_update_freq_scale()
1194 cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns); in __clocksource_update_freq_scale()
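
clocks_calc_mult_shift() chooses the mult/shift pair so that ns = (cycles * mult) >> shift stays precise without overflowing across the conversion range computed from sec; the loop above then halves mult until mult +/- maxadj can no longer wrap a u32. For illustration only, with a fixed shift the multiplier for a freq-Hz counter would be (mult_for() is hypothetical, and valid only while NSEC_PER_SEC << shift fits in 64 bits):

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    static uint32_t mult_for(uint32_t shift, uint64_t freq_hz)
    {
            return (uint32_t)((NSEC_PER_SEC << shift) / freq_hz);
    }
    /* e.g. mult_for(20, 1000000) == 1048576000, matching the earlier
     * 1 MHz example. */
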
1209 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) in __clocksource_register_scale() argument
1213 clocksource_arch_init(cs); in __clocksource_register_scale()
1215 if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX)) in __clocksource_register_scale()
1216 cs->id = CSID_GENERIC; in __clocksource_register_scale()
1217 if (cs->vdso_clock_mode < 0 || in __clocksource_register_scale()
1218 cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) { in __clocksource_register_scale()
1220 cs->name, cs->vdso_clock_mode); in __clocksource_register_scale()
1221 cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE; in __clocksource_register_scale()
1225 __clocksource_update_freq_scale(cs, scale, freq); in __clocksource_register_scale()
1231 clocksource_enqueue(cs); in __clocksource_register_scale()
1232 clocksource_enqueue_watchdog(cs); in __clocksource_register_scale()
1237 __clocksource_suspend_select(cs); in __clocksource_register_scale()
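
Drivers normally reach __clocksource_register_scale() through the clocksource_register_hz() and clocksource_register_khz() wrappers (scale 1 and 1000, respectively). A hypothetical driver-side sketch; "my-timer", my_cs_read(), my_counter_base and the 24 MHz rate are all made up:

    static void __iomem *my_counter_base;   /* hypothetical MMIO mapping */

    static u64 my_cs_read(struct clocksource *cs)
    {
            return readl(my_counter_base);
    }

    static struct clocksource my_cs = {
            .name   = "my-timer",
            .rating = 200,
            .read   = my_cs_read,
            .mask   = CLOCKSOURCE_MASK(32),
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static int __init my_timer_init(void)
    {
            return clocksource_register_hz(&my_cs, 24000000);
    }
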
1243 static void __clocksource_change_rating(struct clocksource *cs, int rating) in __clocksource_change_rating() argument
1245 list_del(&cs->list); in __clocksource_change_rating()
1246 cs->rating = rating; in __clocksource_change_rating()
1247 clocksource_enqueue(cs); in __clocksource_change_rating()
1255 void clocksource_change_rating(struct clocksource *cs, int rating) in clocksource_change_rating() argument
1261 __clocksource_change_rating(cs, rating); in clocksource_change_rating()
1274 static int clocksource_unbind(struct clocksource *cs) in clocksource_unbind() argument
1278 if (clocksource_is_watchdog(cs)) { in clocksource_unbind()
1281 if (clocksource_is_watchdog(cs)) in clocksource_unbind()
1285 if (cs == curr_clocksource) { in clocksource_unbind()
1288 if (curr_clocksource == cs) in clocksource_unbind()
1292 if (clocksource_is_suspend(cs)) { in clocksource_unbind()
1302 clocksource_dequeue_watchdog(cs); in clocksource_unbind()
1303 list_del_init(&cs->list); in clocksource_unbind()
1313 int clocksource_unregister(struct clocksource *cs) in clocksource_unregister() argument
1318 if (!list_empty(&cs->list)) in clocksource_unregister()
1319 ret = clocksource_unbind(cs); in clocksource_unregister()
1405 struct clocksource *cs; in unbind_clocksource_store() local
1415 list_for_each_entry(cs, &clocksource_list, list) { in unbind_clocksource_store()
1416 if (strcmp(cs->name, name)) in unbind_clocksource_store()
1418 ret = clocksource_unbind(cs); in unbind_clocksource_store()