1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/time.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 *
10 * Original TMU/RTC code taken from sh version.
11 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
12 * Some code taken from i386 version.
13 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
14 */
15
16 #include <linux/config.h>
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/param.h>
21 #include <linux/string.h>
22 #include <linux/mm.h>
23 #include <linux/interrupt.h>
24 #include <linux/time.h>
25 #include <linux/delay.h>
26 #include <linux/init.h>
27 #include <linux/smp.h>
28
29 #include <asm/registers.h> /* required by inline __asm__ stmt. */
30
31 #include <asm/processor.h>
32 #include <asm/uaccess.h>
33 #include <asm/io.h>
34 #include <asm/irq.h>
35 #include <asm/delay.h>
36
37 #include <linux/timex.h>
38 #include <linux/irq.h>
39 #include <asm/hardware.h>
40
41 #define TMU_TOCR_INIT 0x00
42 #define TMU0_TCR_INIT 0x0020
43 #define TMU_TSTR_INIT 1
44
45 /* RCR1 Bits */
46 #define RCR1_CF 0x80 /* Carry Flag */
47 #define RCR1_CIE 0x10 /* Carry Interrupt Enable */
48 #define RCR1_AIE 0x08 /* Alarm Interrupt Enable */
49 #define RCR1_AF 0x01 /* Alarm Flag */
50
51 /* RCR2 Bits */
52 #define RCR2_PEF 0x80 /* PEriodic interrupt Flag */
53 #define RCR2_PESMASK 0x70 /* Periodic interrupt Set */
54 #define RCR2_RTCEN 0x08 /* ENable RTC */
55 #define RCR2_ADJ 0x04 /* ADJustment (30-second) */
56 #define RCR2_RESET 0x02 /* Reset bit */
57 #define RCR2_START 0x01 /* Start bit */
58
59 /* Clock, Power and Reset Controller */
60 #define CPRC_BLOCK_OFF 0x01010000
61 #define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
62
63 #define FRQCR (cprc_base+0x0)
64 #define WTCSR (cprc_base+0x0018)
65 #define STBCR (cprc_base+0x0030)
66
67 /* Time Management Unit */
68 #define TMU_BLOCK_OFF 0x01020000
69 #define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
70 #define TMU0_BASE tmu_base + 0x8 + (0xc * 0x0)
71 #define TMU1_BASE tmu_base + 0x8 + (0xc * 0x1)
72 #define TMU2_BASE tmu_base + 0x8 + (0xc * 0x2)
73
74 #define TMU_TOCR tmu_base+0x0 /* Byte access */
75 #define TMU_TSTR tmu_base+0x4 /* Byte access */
76
77 #define TMU0_TCOR TMU0_BASE+0x0 /* Long access */
78 #define TMU0_TCNT TMU0_BASE+0x4 /* Long access */
79 #define TMU0_TCR TMU0_BASE+0x8 /* Word access */
80
81 /* Real Time Clock */
82 #define RTC_BLOCK_OFF 0x01040000
83 #define RTC_BASE PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
84
85 #define R64CNT rtc_base+0x00
86 #define RSECCNT rtc_base+0x04
87 #define RMINCNT rtc_base+0x08
88 #define RHRCNT rtc_base+0x0c
89 #define RWKCNT rtc_base+0x10
90 #define RDAYCNT rtc_base+0x14
91 #define RMONCNT rtc_base+0x18
92 #define RYRCNT rtc_base+0x1c /* 16bit */
93 #define RSECAR rtc_base+0x20
94 #define RMINAR rtc_base+0x24
95 #define RHRAR rtc_base+0x28
96 #define RWKAR rtc_base+0x2c
97 #define RDAYAR rtc_base+0x30
98 #define RMONAR rtc_base+0x34
99 #define RCR1 rtc_base+0x38
100 #define RCR2 rtc_base+0x3c
101
102 #ifndef BCD_TO_BIN
103 #define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
104 #endif
105
106 #ifndef BIN_TO_BCD
107 #define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
108 #endif
109
/* xtime_lock protects xtime and the tick-interpolation state below. */
extern rwlock_t xtime_lock;
#define TICK_SIZE tick

extern unsigned long wall_jiffies;
extern unsigned long volatile jiffies;

/* Remapped (virtual) base addresses of the TMU, RTC and CPRC register
   blocks, filled in by time_init() via onchip_remap(). */
static unsigned long tmu_base, rtc_base;
unsigned long cprc_base;

/* Variables to allow interpolation of time of day to resolution better than a
 * jiffy. */

/* CTC value captured at the last timer tick; effectively protected by
   xtime_lock (written in do_timer_interrupt, read in usecs_since_tick). */
static unsigned long ctc_last_interrupt;
static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */

/* Fixed-point scale used for the precomputed reciprocal below. */
#define CTC_JIFFY_SCALE_SHIFT 40

/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
130
131 /* Estimate number of microseconds that have elapsed since the last timer tick,
   by scaling the delta that has occurred in the CTC register.
133
134 WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
135 the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this
136 in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm
137 probably needs to use TMU.TCNT0 instead. This will work even if the CPU is
138 sleeping, though will be coarser.
139
140 FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
141 is running or if the freq or tick arguments of adjtimex are modified after
142 we have calibrated the scaling factor? This will result in either a jump at
143 the end of a tick period, or a wrap backwards at the start of the next one,
144 if the application is reading the time of day often enough. I think we
145 ought to do better than this. For this reason, usecs_per_jiffy is left
146 separated out in the calculation below. This allows some future hook into
147 the adjtime-related stuff in kernel/timer.c to remove this hazard.
148
149 */
150
/*
 * Estimate microseconds elapsed since the last timer tick:
 *   usecs = ctc_delta * usecs_per_jiffy / ctc_ticks_per_jiffy
 * computed as a fixed-point multiply by the 2^CTC_JIFFY_SCALE_SHIFT
 * scaled reciprocal prepared in time_init().
 */
static unsigned long usecs_since_tick(void)
{
	unsigned long long current_ctc;
	long ctc_ticks_since_interrupt;
	unsigned long long ull_ctc_ticks_since_interrupt;
	unsigned long result;

	unsigned long long mul1_out;
	unsigned long long mul1_out_high;
	unsigned long long mul2_out_low, mul2_out_high;

	/* Read CTC register */
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	/* Note, the CTC counts down on each CPU clock, not up.
	   Note(2), use long type to get correct wraparound arithmetic when
	   the counter crosses zero. */
	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;

	/* Inline assembly to do 32x32x32->64 multiplier */
	/* Stage 1: ticks * usecs_per_jiffy (64-bit product of the 32-bit
	   low halves via mulu.l). */
	asm volatile ("mulu.l %1, %2, %0" :
		      "=r" (mul1_out) :
		      "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));

	mul1_out_high = mul1_out >> 32;

	/* Stage 2: multiply low and high halves of the stage-1 product by
	   the scaled reciprocal separately, then recombine and shift the
	   fixed point back out. */
	asm volatile ("mulu.l %1, %2, %0" :
		      "=r" (mul2_out_low) :
		      "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));

#if 1
	asm volatile ("mulu.l %1, %2, %0" :
		      "=r" (mul2_out_high) :
		      "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
#endif

	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);

	return result;
}
191
do_gettimeofday(struct timeval * tv)192 void do_gettimeofday(struct timeval *tv)
193 {
194 unsigned long flags;
195 unsigned long usec, sec;
196
197 read_lock_irqsave(&xtime_lock, flags);
198 usec = usecs_since_tick();
199 {
200 unsigned long lost = jiffies - wall_jiffies;
201 if (lost)
202 usec += lost * (1000000 / HZ);
203 }
204 sec = xtime.tv_sec;
205 usec += xtime.tv_usec;
206 read_unlock_irqrestore(&xtime_lock, flags);
207
208 while (usec >= 1000000) {
209 usec -= 1000000;
210 sec++;
211 }
212
213 tv->tv_sec = sec;
214 tv->tv_usec = usec;
215 }
216
/*
 * Set the wall-clock time.  Also resets the NTP state: cancels any
 * running adjtime() adjustment and marks the clock unsynchronized with
 * maximal error bounds.
 */
void do_settimeofday(struct timeval *tv)
{
	write_lock_irq(&xtime_lock);
	xtime = *tv;
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_unlock_irq(&xtime_lock);
}
227
/*
 * Write the kernel's wall-clock seconds/minutes back into the RTC,
 * i386 set_rtc_mmss() style.  Returns 0 on success, -1 when the RTC
 * minutes differ too much to adjust safely (hours are never touched).
 */
static int set_rtc_time(unsigned long nowtime)
{
	int retval = 0;
	int real_seconds, real_minutes, cmos_minutes;

	ctrl_outb(RCR2_RESET, RCR2);  /* Reset pre-scaler & stop RTC */

	cmos_minutes = ctrl_inb(RMINCNT);
	BCD_TO_BIN(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		/* RTC count registers hold BCD, so convert before writing. */
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		ctrl_outb(real_seconds, RSECCNT);
		ctrl_outb(real_minutes, RMINCNT);
	} else {
		printk(KERN_WARNING
		       "set_rtc_time: can't update from %d to %d\n",
		       cmos_minutes, real_minutes);
		retval = -1;
	}

	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start RTC */

	return retval;
}
266
/* xtime.tv_sec value at the last RTC write-back attempt; used by
   do_timer_interrupt() to rate-limit RTC updates to ~11 minutes. */
static long last_rtc_update = 0;
269
sh64_do_profile(unsigned long pc)270 static inline void sh64_do_profile(unsigned long pc)
271 {
272 extern int _stext;
273
274 /* Don't profile cpu_idle.. */
275 if (!prof_buffer || !current->pid)
276 return;
277
278 pc -= (unsigned long) &_stext;
279 pc >>= prof_shift;
280
281 /*
282 * Don't ignore out-of-bounds PC values silently, put them into the
283 * last histogram slot, so if present, they will show up as a sharp
284 * peak.
285 */
286 if (pc > prof_len - 1)
287 pc = prof_len - 1;
288
289 /* We could just be sloppy and not lock against a re-entry on this
290 increment, but the profiling code won't always be linked in anyway. */
291 atomic_inc((atomic_t *)&prof_buffer[pc]);
292 }
293
294 /*
295 * timer_interrupt() needs to keep up the real-time clock,
296 * as well as call the "do_timer()" routine every clocktick
297 */
/*
 * Per-tick work, called from timer_interrupt() with xtime_lock held:
 * snapshot the CTC for sub-jiffy gettimeofday interpolation, advance
 * system time, profile kernel-mode PCs, and periodically write the
 * time back to the RTC when NTP has the clock synchronized.
 */
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long long current_ctc;

	/* Record the CTC value at this tick; usecs_since_tick() measures
	   elapsed time relative to this snapshot. */
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	ctc_last_interrupt = (unsigned long) current_ctc;

	do_timer(regs);

	/* Only kernel-mode PCs go into the profile histogram. */
	if (!user_mode(regs))
		sh64_do_profile(regs->pc);

#ifdef CONFIG_HEARTBEAT
	extern void heartbeat(void);

	heartbeat();
#endif

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 &&
	    xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) {
		if (set_rtc_time(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
	}
}
330
331 /*
332 * This is the same as the above, except we _also_ save the current
333 * Time Stamp Counter value at the time of the timer interrupt, so that
334 * we later on can estimate the time of day more exactly.
335 */
/*
 * TMU0 underflow interrupt handler: acknowledge the timer by clearing
 * the underflow flag, then run the tick bookkeeping under xtime_lock
 * so the CTC snapshot and xtime update appear atomic to readers.
 */
static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long timer_status;

	/* Clear UNF bit */
	timer_status = ctrl_inw(TMU0_TCR);
	timer_status &= ~0x100;
	ctrl_outw(timer_status, TMU0_TCR);

	/*
	 * Here we are in the timer irq handler. We just have irqs locally
	 * disabled but we don't know if the timer_bh is running on the other
	 * CPU. We need to avoid the SMP race with it. NOTE: we don't need
	 * the irq version of write_lock because as just said we have irq
	 * locally disabled. -arca
	 */
	write_lock(&xtime_lock);
	do_timer_interrupt(irq, NULL, regs);
	write_unlock(&xtime_lock);
}
356
357
/*
 * Read the wall-clock time from the RTC and convert it to seconds
 * since the Unix epoch.  The count registers hold BCD; the read loop
 * retries whenever the carry flag (RCR1.CF) shows a counter rolled
 * over mid-read.  Implausible values cause the RTC to be reset to
 * 1 Jan 2000 and the read to be retried.
 */
static unsigned long get_rtc_time(void)
{
	unsigned int sec, min, hr, wk, day, mon, yr, yr100;

 again:
	do {
		ctrl_outb(0, RCR1);  /* Clear CF-bit */
		sec = ctrl_inb(RSECCNT);
		min = ctrl_inb(RMINCNT);
		hr  = ctrl_inb(RHRCNT);
		wk  = ctrl_inb(RWKCNT);
		day = ctrl_inb(RDAYCNT);
		mon = ctrl_inb(RMONCNT);
		yr  = ctrl_inw(RYRCNT);	/* 16 bits: BCD century in the high byte */
		yr100 = (yr >> 8);
		yr &= 0xff;
	} while ((ctrl_inb(RCR1) & RCR1_CF) != 0);

	BCD_TO_BIN(yr100);
	BCD_TO_BIN(yr);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(day);
	BCD_TO_BIN(hr);
	BCD_TO_BIN(min);
	BCD_TO_BIN(sec);

	/* Sanity-check every field; a corrupt/unset RTC gets reset. */
	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
	    hr > 23 || min > 59 || sec > 59) {
		printk(KERN_ERR
		       "SH RTC: invalid value, resetting to 1 Jan 2000\n");
		ctrl_outb(RCR2_RESET, RCR2);  /* Reset & Stop */
		ctrl_outb(0, RSECCNT);
		ctrl_outb(0, RMINCNT);
		ctrl_outb(0, RHRCNT);
		ctrl_outb(6, RWKCNT);		/* 1 Jan 2000 was a Saturday */
		ctrl_outb(1, RDAYCNT);
		ctrl_outb(1, RMONCNT);
		ctrl_outw(0x2000, RYRCNT);	/* BCD year 2000 */
		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start */
		goto again;
	}

	return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
}
402
/*
 * Calibrate the CPU clock frequency (returned in Hz, despite the name)
 * by letting the CTC - which decrements once per CPU clock - run while
 * waiting for an RTC carry interrupt, i.e. over a known real-time
 * interval.  rtc_interrupt() signals completion by setting r3.
 */
static __init unsigned int get_cpu_mhz(void)
{
	unsigned int count;
	unsigned long __dummy;
	unsigned long ctc_val_init, ctc_val;

	/*
	** Regardless of the toolchain, force the compiler to use the
	** arbitrary register r3 as a clock tick counter.
	** NOTE: r3 must be in accordance with rtc_interrupt()
	*/
	register unsigned long long __rtc_irq_flag __asm__ ("r3");

	sti();
	/* Align the start of the measurement with a 64Hz-counter wrap. */
	do {} while (ctrl_inb(R64CNT) != 0);
	ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */

	/*
	 * r3 is arbitrary. CDC does not support "=z".
	 */
	ctc_val_init = 0xffffffff;
	ctc_val = ctc_val_init;

	/* Load the CTC (cr62) and spin until rtc_interrupt() sets r3
	   non-zero, then read the CTC back.  NOTE(review): __t0 is a
	   branch-target register name, presumably defined in
	   asm/registers.h (included above) - confirm against that header. */
	asm volatile("gettr " __t0 ", %1\n\t"
		     "putcon %0, cr62\n\t"
		     "and %2, r63, %2\n\t"
		     "_pta 4, " __t0 "\n\t"
		     "beq/l %2, r63, " __t0 "\n\t"
		     "ptabs %1, " __t0 "\n\t"
		     "getcon cr62, %0\n\t"
		     : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
		     : "0" (0));
	cli();
	/*
	 * SH-3:
	 * CPU clock = 4 stages * loop
	 * tst    rm,rm      if id ex
	 * bt/s   1b            if id ex
	 * add    #1,rd        if id ex
	 * (if) pipe line stole
	 * tst    rm,rm          if id ex
	 * ....
	 *
	 *
	 * SH-4:
	 * CPU clock = 6 stages * loop
	 * I don't know why.
	 * ....
	 *
	 * SH-5:
	 * Use CTC register to count.  This approach returns the right value
	 * even if the I-cache is disabled (e.g. whilst debugging.)
	 *
	 */

	count = ctc_val_init - ctc_val; /* CTC counts down */

#if defined (CONFIG_SH_SIMULATOR)
	/*
	 * Let's pretend we are a 5MHz SH-5 to avoid a too
	 * little timer interval. Also to keep delay
	 * calibration within a reasonable time.
	 */
	return 5000000;
#else
	/*
	 * This really is count by the number of clock cycles
	 * by the ratio between a complete R64CNT
	 * wrap-around (128) and CUI interrupt being raised (64).
	 */
	return count*2;
#endif
}
476
/*
 * RTC carry-interrupt handler, used only during get_cpu_mhz()
 * calibration: disables further carry interrupts and sets the
 * interrupted context's r3 to 1 - the flag the calibration spin
 * loop polls (see get_cpu_mhz()).
 */
static void rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	ctrl_outb(0, RCR1);	/* Disable Carry Interrupts */
	regs->regs[3] = 1;	/* Using r3 */
}
482
/* Timer tick (TMU0) and RTC carry interrupt actions, registered in
   time_init().  Old-style positional struct irqaction initializers:
   { handler, flags, mask, name, dev_id, next }. */
static struct irqaction irq0  = { timer_interrupt, SA_INTERRUPT, 0, "timer", NULL, NULL};
static struct irqaction irq1  = { rtc_interrupt, SA_INTERRUPT, 0, "rtc", NULL, NULL};
485
time_init(void)486 void __init time_init(void)
487 {
488 unsigned int cpu_clock, master_clock, bus_clock, module_clock;
489 unsigned long interval;
490 unsigned long frqcr, ifc, pfc;
491 static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
492 #define bfc_table ifc_table /* Same */
493 #define pfc_table ifc_table /* Same */
494
495 tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
496 if (tmu_base == 0UL) {
497 panic("Unable to remap TMU\n");
498 }
499
500 rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
501 if (rtc_base == 0UL) {
502 panic("Unable to remap RTC\n");
503 }
504
505 cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
506 if (cprc_base == 0UL) {
507 panic("Unable to remap CPRC\n");
508 }
509
510 xtime.tv_sec = get_rtc_time();
511 xtime.tv_usec = 0;
512
513 setup_irq(TIMER_IRQ, &irq0);
514 setup_irq(RTC_IRQ, &irq1);
515
516 /* Check how fast it is.. */
517 cpu_clock = get_cpu_mhz();
518
519 /* FIXME : Are these divides OK? Note careful order of operations to
520 * maintain reasonable precision and avoid overflow. */
521 scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
522
523 disable_irq(RTC_IRQ);
524
525 printk("CPU clock: %d.%02dMHz\n",
526 (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
527 {
528 unsigned short bfc;
529 frqcr = ctrl_inl(FRQCR);
530 ifc = ifc_table[(frqcr>> 6) & 0x0007];
531 bfc = bfc_table[(frqcr>> 3) & 0x0007];
532 pfc = pfc_table[(frqcr>> 12) & 0x0007];
533 master_clock = cpu_clock * ifc;
534 bus_clock = master_clock/bfc;
535 }
536
537 printk("Bus clock: %d.%02dMHz\n",
538 (bus_clock/1000000), (bus_clock % 1000000)/10000);
539 module_clock = master_clock/pfc;
540 printk("Module clock: %d.%02dMHz\n",
541 (module_clock/1000000), (module_clock % 1000000)/10000);
542 interval = (module_clock/(HZ*4));
543
544 printk("Interval = %ld\n", interval);
545
546 current_cpu_data.cpu_clock = cpu_clock;
547 current_cpu_data.master_clock = master_clock;
548 current_cpu_data.bus_clock = bus_clock;
549 current_cpu_data.module_clock = module_clock;
550
551 /* Start TMU0 */
552 ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
553 ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
554 ctrl_outl(interval, TMU0_TCOR);
555 ctrl_outl(interval, TMU0_TCNT);
556 ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
557 }
558
/*
 * Put the CPU into deep standby: disable the watchdog, program STBCR
 * for deep standby on `sleep', (on Cayman) show "Halted. " on the
 * front-panel alphanumeric display, then sleep.  Never returns;
 * waking up here is treated as a fatal error.
 */
void enter_deep_standby(void)
{
	/* Disable watchdog timer */
	ctrl_outl(0xa5000000, WTCSR);
	/* Configure deep standby on sleep */
	ctrl_outl(0x03, STBCR);

#ifdef CONFIG_SH_CAYMAN
	{
		extern void mach_alphanum(int position, unsigned char value);
		extern void mach_alphanum_brightness(int setting);
		char halted[] = "Halted. ";
		int i;
		mach_alphanum_brightness(6); /* dimmest setting above off */
		for (i=0; i<8; i++) {
			mach_alphanum(i, halted[i]);
		}
		/* Ensure the display writes complete before sleeping. */
		asm __volatile__ ("synco");
	}
#endif

	asm __volatile__ ("sleep");
	asm __volatile__ ("synci");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	panic("Unexpected wakeup!\n");
}
588
589