/*
 * include/asm-s390/timex.h
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *
 * Derived from "include/asm-i386/timex.h"
 * Copyright (C) 1992, Linus Torvalds
 */

#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H

#include <asm/lowcore.h>

/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
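/*
 * Assumed derivation of the constant above: bit 51 of the TOD clock
 * represents one microsecond, so the low-order bit 63 ticks 4096 times
 * per microsecond. The TOD epoch begins at 1900-01-01, which is
 * 2,208,988,800 seconds before the Unix epoch, and
 * 2208988800 * 1000000 * 4096 = 0x7d91048bca000000.
 */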

/* Inline functions for clock register access. */
static inline int set_clock(__u64 time)
{
	int cc;

	asm volatile(
		"	sck	%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "Q" (time) : "cc");
	return cc;
}

static inline int store_clock(__u64 *time)
{
	int cc;

	asm volatile(
		"	stck	%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "=Q" (*time) : : "cc");
	return cc;
}

static inline void set_clock_comparator(__u64 time)
{
	asm volatile("sckc %0" : : "Q" (time));
}

static inline void store_clock_comparator(__u64 *time)
{
	asm volatile("stckc %0" : "=Q" (*time));
}

void clock_comparator_work(void);

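/*
 * Note: -1ULL is the maximum TOD value, so programming it into the
 * clock comparator is assumed to push the next comparator interrupt
 * as far into the future as possible, i.e. it stops the local tick.
 */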
static inline unsigned long long local_tick_disable(void)
{
	unsigned long long old;

	old = S390_lowcore.clock_comparator;
	S390_lowcore.clock_comparator = -1ULL;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return old;
}

static inline void local_tick_enable(unsigned long long comp)
{
	S390_lowcore.clock_comparator = comp;
	set_clock_comparator(S390_lowcore.clock_comparator);
}

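/*
 * CLOCK_TICK_RATE is presumably carried over from the i386 header
 * (1193180 Hz is the i8253 PIT frequency); it has no hardware meaning
 * on s390 and is only defined because common code expects it.
 */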
#define CLOCK_TICK_RATE	1193180 /* Underlying HZ */

typedef unsigned long long cycles_t;

static inline unsigned long long get_clock(void)
{
	unsigned long long clk;

	asm volatile("stck %0" : "=Q" (clk) : : "cc");
	return clk;
}

static inline void get_clock_ext(char *clk)
{
	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
}

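/*
 * get_clock_xt relies on the STCKE result layout: byte 0 is assumed to
 * be the epoch index and bytes 1-8 to hold the TOD clock in the same
 * format as STCK, hence the 64-bit read at offset 1 below.
 */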
static inline unsigned long long get_clock_xt(void)
{
	unsigned char clk[16];
	get_clock_ext(clk);
	return *((unsigned long long *)&clk[1]);
}

static inline cycles_t get_cycles(void)
{
	return (cycles_t) get_clock() >> 2;
}

int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);

void tod_to_timeval(__u64, struct timespec *);

static inline
void stck_to_timespec(unsigned long long stck, struct timespec *ts)
{
	tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
}

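/*
 * Assumed to be the TOD clock value taken at boot; it serves as the
 * base for get_clock_monotonic() and is only changed via stop_machine
 * (see the comment on get_clock_monotonic() below).
 */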
extern u64 sched_clock_base_cc;

/**
 * get_clock_monotonic - returns current time in clock rate units
 *
 * The clock and sched_clock_base_cc get changed via stop_machine.
 * Therefore preemption must be disabled when calling this
 * function, otherwise the returned value is not guaranteed to
 * be monotonic.
 */
static inline unsigned long long get_clock_monotonic(void)
{
	return get_clock_xt() - sched_clock_base_cc;
}

#endif