1 /*
2  *
3  * Common time routines among all ppc machines.
4  *
5  * Written by Cort Dougan (cort@cs.nmt.edu) to merge
6  * Paul Mackerras' version and mine for PReP and Pmac.
7  * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
8  * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
9  *
10  * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
11  * to make clock more stable (2.4.0-test5). The only thing
12  * that this code assumes is that the timebases have been synchronized
13  * by firmware on SMP and are never stopped (never do sleep
14  * on SMP then, nap and doze are OK).
15  *
16  * Speeded up do_gettimeofday by getting rid of references to
17  * xtime (which required locks for consistency). (mikejc@us.ibm.com)
18  *
19  * TODO (not necessarily in this file):
20  * - improve precision and reproducibility of timebase frequency
21  * measurement at boot time. (for iSeries, we calibrate the timebase
22  * against the Titan chip's clock.)
23  * - for astronomical applications: add a new function to get
24  * non ambiguous timestamps even around leap seconds. This needs
25  * a new timestamp format and a good name.
26  *
27  * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
28  *             "A Kernel Model for Precision Timekeeping" by Dave Mills
29  *
30  *      This program is free software; you can redistribute it and/or
31  *      modify it under the terms of the GNU General Public License
32  *      as published by the Free Software Foundation; either version
33  *      2 of the License, or (at your option) any later version.
34  */
35 
36 #include <linux/config.h>
37 #include <linux/errno.h>
38 #include <linux/sched.h>
39 #include <linux/kernel.h>
40 #include <linux/param.h>
41 #include <linux/string.h>
42 #include <linux/mm.h>
43 #include <linux/interrupt.h>
44 #include <linux/timex.h>
45 #include <linux/kernel_stat.h>
46 #include <linux/mc146818rtc.h>
47 #include <linux/time.h>
48 #include <linux/init.h>
49 
50 #include <asm/naca.h>
51 #include <asm/segment.h>
52 #include <asm/io.h>
53 #include <asm/processor.h>
54 #include <asm/nvram.h>
55 #include <asm/cache.h>
56 #include <asm/machdep.h>
57 #include <asm/init.h>
58 #ifdef CONFIG_PPC_ISERIES
59 #include <asm/iSeries/HvCallXm.h>
60 #endif
61 #include <asm/uaccess.h>
62 
63 #include <asm/time.h>
64 #include <asm/ppcdebug.h>
65 
66 void smp_local_timer_interrupt(struct pt_regs *);
67 
68 extern void setup_before_console_init();
69 
70 /* keep track of when we need to update the rtc */
71 time_t last_rtc_update;
72 extern rwlock_t xtime_lock;
73 extern int piranha_simulator;
74 #ifdef CONFIG_PPC_ISERIES
75 unsigned long iSeries_recal_titan = 0;
76 unsigned long iSeries_recal_tb = 0;
77 static unsigned long first_settimeofday = 1;
78 #endif
79 
/* An "xsec" is 1/2^20 of a second: a power-of-two sub-second unit chosen
 * so conversions to/from seconds can use shifts rather than divides. */
#define XSEC_PER_SEC (1024*1024)
#define USEC_PER_SEC (1000000)

unsigned long tb_ticks_per_jiffy;	/* timebase ticks per HZ tick */
unsigned long tb_ticks_per_usec;
unsigned long tb_ticks_per_sec;
unsigned long next_xtime_sync_tb;	/* tb value at which timer_sync_xtime() next resyncs xtime */
unsigned long xtime_sync_interval;	/* xtime resync period, in tb ticks */
unsigned long tb_to_xs;			/* xsec per tb tick as a 0.64 fixed-point fraction
					 * (2^64 * XSEC_PER_SEC / tb_ticks_per_sec, see time_init) */
unsigned long processor_freq;
spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
91 
92 extern unsigned long wall_jiffies;
93 extern unsigned long lpEvent_count;
94 extern int smp_tb_synchronized;
95 
96 extern unsigned long prof_cpu_mask;
97 extern unsigned int * prof_buffer;
98 extern unsigned long prof_len;
99 extern unsigned long prof_shift;
100 extern char _stext;
101 
102 extern struct timezone sys_tz;
103 
104 void ppc_adjtimex(void);
105 
106 static unsigned adjusting_time = 0;
107 
ppc_do_profile(unsigned long nip)108 static void ppc_do_profile (unsigned long nip)
109 {
110 	/*
111 	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
112 	 * (default is all CPUs.)
113 	 */
114 	if (!((1<<smp_processor_id()) & prof_cpu_mask))
115 		return;
116 
117 	nip -= (unsigned long) &_stext;
118 	nip >>= prof_shift;
119 	/*
120 	 * Don't ignore out-of-bounds EIP values silently,
121 	 * put them into the last histogram slot, so if
122 	 * present, they will show up as a sharp peak.
123 	 */
124 	if (nip > prof_len-1)
125 		nip = prof_len-1;
126 	atomic_inc((atomic_t *)&prof_buffer[nip]);
127 }
128 
129 
timer_check_rtc(void)130 static __inline__ void timer_check_rtc(void)
131 {
132         /*
133          * update the rtc when needed, this should be performed on the
134          * right fraction of a second. Half or full second ?
135          * Full second works on mk48t59 clocks, others need testing.
136          * Note that this update is basically only used through
137          * the adjtimex system calls. Setting the HW clock in
138          * any other way is a /dev/rtc and userland business.
139          * This is still wrong by -0.5/+1.5 jiffies because of the
140          * timer interrupt resolution and possible delay, but here we
141          * hit a quantization limit which can only be solved by higher
142          * resolution timers and decoupling time management from timer
143          * interrupts. This is also wrong on the clocks
144          * which require being written at the half second boundary.
145          * We should have an rtc call that only sets the minutes and
146          * seconds like on Intel to avoid problems with non UTC clocks.
147          */
148         if ( (time_status & STA_UNSYNC) == 0 &&
149              xtime.tv_sec - last_rtc_update >= 659 &&
150              abs(xtime.tv_usec - (1000000-1000000/HZ)) < 500000/HZ &&
151              jiffies - wall_jiffies == 1) {
152 	    struct rtc_time tm;
153 	    to_tm(xtime.tv_sec+1, &tm);
154 	    tm.tm_year -= 1900;
155 	    tm.tm_mon -= 1;
156             if (ppc_md.set_rtc_time(&tm) == 0)
157                 last_rtc_update = xtime.tv_sec+1;
158             else
159                 /* Try again one minute later */
160                 last_rtc_update += 60;
161         }
162 }
163 
164 /* Synchronize xtime with do_gettimeofday */
165 
timer_sync_xtime(unsigned long cur_tb)166 static __inline__ void timer_sync_xtime( unsigned long cur_tb )
167 {
168 	struct timeval my_tv;
169 
170 	if ( cur_tb > next_xtime_sync_tb ) {
171 		next_xtime_sync_tb = cur_tb + xtime_sync_interval;
172 		do_gettimeofday( &my_tv );
173 		if ( xtime.tv_sec <= my_tv.tv_sec ) {
174 			xtime.tv_sec = my_tv.tv_sec;
175 			xtime.tv_usec = my_tv.tv_usec;
176 		}
177 	}
178 }
179 
180 #ifdef CONFIG_PPC_ISERIES
181 
182 /*
183  * This function recalibrates the timebase based on the 49-bit time-of-day
184  * value in the Titan chip.  The Titan is much more accurate than the value
185  * returned by the service processor for the timebase frequency.
186  */
187 
iSeries_tb_recal(void)188 static void iSeries_tb_recal(void)
189 {
190 	struct div_result divres;
191 	unsigned long titan, tb;
192 	tb = get_tb();
193 	titan = HvCallXm_loadTod();
194 	if ( iSeries_recal_titan ) {
195 		unsigned long tb_ticks = tb - iSeries_recal_tb;
196 		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
197 		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
198 		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
199 		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
200 		char sign = '+';
201 		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
202 		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
203 
204 		if ( tick_diff < 0 ) {
205 			tick_diff = -tick_diff;
206 			sign = '-';
207 		}
208 		if ( tick_diff ) {
209 			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
210 				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
211 						new_tb_ticks_per_jiffy, sign, tick_diff );
212 				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
213 				tb_ticks_per_sec   = new_tb_ticks_per_sec;
214 				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
215 				systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
216 				tb_to_xs = divres.result_low;
217 				systemcfg->tb_to_xs = tb_to_xs;
218 			}
219 			else {
220 				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
221 					"                   new tb_ticks_per_jiffy = %lu\n"
222 					"                   old tb_ticks_per_jiffy = %lu\n",
223 					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
224 			}
225 		}
226 	}
227 	iSeries_recal_titan = titan;
228 	iSeries_recal_tb = tb;
229 }
230 #endif
231 
232 /*
233  * For iSeries shared processors, we have to let the hypervisor
234  * set the hardware decrementer.  We set a virtual decrementer
235  * in the ItLpPaca and call the hypervisor if the virtual
236  * decrementer is less than the current value in the hardware
237  * decrementer. (almost always the new decrementer value will
238  * be greater than the current hardware decementer so the hypervisor
239  * call will not be needed)
240  */
241 
242 unsigned long tb_last_stamp=0;
243 
244 /*
245  * timer_interrupt - gets called when the decrementer overflows,
246  * with interrupts disabled.
247  */
timer_interrupt(struct pt_regs * regs)248 int timer_interrupt(struct pt_regs * regs)
249 {
250 	int next_dec;
251 	unsigned long cur_tb;
252 	struct paca_struct *lpaca = get_paca();
253 	unsigned long cpu = lpaca->xPacaIndex;
254 	struct ItLpQueue * lpq;
255 
256 	irq_enter(cpu);
257 
258 	if ((!user_mode(regs)) && (prof_buffer))
259 		ppc_do_profile(instruction_pointer(regs));
260 
261 	pmc_timeslice_tick(); /* Hack this in for now */
262 
263 	lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
264 
265 	while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {
266 
267 #ifdef CONFIG_SMP
268 		smp_local_timer_interrupt(regs);
269 #endif
270 		if (cpu == 0) {
271 			write_lock(&xtime_lock);
272 			tb_last_stamp = lpaca->next_jiffy_update_tb;
273 			do_timer(regs);
274 			timer_sync_xtime( cur_tb );
275 			timer_check_rtc();
276 			write_unlock(&xtime_lock);
277 			if ( adjusting_time && (time_adjust == 0) )
278 				ppc_adjtimex();
279 		}
280 		lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy;
281 	}
282 
283 	next_dec = lpaca->next_jiffy_update_tb - cur_tb;
284 	if (next_dec > lpaca->default_decr)
285         	next_dec = lpaca->default_decr;
286 	set_dec(next_dec);
287 
288 	lpq = lpaca->lpQueuePtr;
289 	if (lpq && ItLpQueue_isLpIntPending(lpq))
290 		lpEvent_count += ItLpQueue_process(lpq, regs);
291 
292 	irq_exit(cpu);
293 
294 	if (softirq_pending(cpu))
295 		do_softirq();
296 
297 	return 1;
298 }
299 
300 
301 /*
302  * This version of gettimeofday has microsecond resolution.
303  */
do_gettimeofday(struct timeval * tv)304 void do_gettimeofday(struct timeval *tv)
305 {
306         unsigned long sec, usec, tb_ticks;
307 	unsigned long xsec, tb_xsec;
308 	unsigned long temp_tb_to_xs, temp_stamp_xsec;
309 	unsigned long tb_count_1, tb_count_2;
310 	unsigned long always_zero;
311 	struct systemcfg *gtdp;
312 
313 	gtdp = systemcfg;
314 	/*
315 	 * The following loop guarantees that we see a consistent view of the
316 	 * tb_to_xs and stamp_xsec variables.  These two variables can change
317 	 * (eg. when xntpd adjusts the clock frequency) and an inconsistent
318 	 * view (one variable changed, the other not) could result in a wildly
319 	 * wrong result for do_gettimeofday.
320 	 *
321 	 * The code which updates these variables (ppc_adjtimex below)
322 	 * increments tb_update_count, then updates the two variables and then
323 	 * increments tb_update_count again.  This code reads tb_update_count,
324 	 * reads the two variables and then reads tb_update_count again.  It
325 	 * loops doing this until the two reads of tb_update_count yield the
326 	 * same value and that value is even.  This ensures a consistent view
327 	 * of the two variables.
328 	 *
329 	 * The strange looking assembler code below causes the hardware to
330 	 * think that reading the two variables is dependent on the first read
331 	 * of tb_update_count and that the second reading of tb_update_count is
332 	 * dependent on reading the two variables.  This assures ordering
333 	 * without the need for a lwsync, which is much more expensive.
334 	 */
335 	do {
336 		tb_ticks = get_tb() - gtdp->tb_orig_stamp;
337 
338 		tb_count_1 = gtdp->tb_update_count;
339 
340 		__asm__ __volatile__ (
341 "		andc 	%0,%2,%2\n\
342 		add	%1,%3,%0\n\
343 "		: "=&r"(always_zero), "=r"(gtdp)
344 		: "r"(tb_count_1), "r"(gtdp) );
345 
346 		temp_tb_to_xs = gtdp->tb_to_xs;
347 		temp_stamp_xsec = gtdp->stamp_xsec;
348 
349 		__asm__ __volatile__ (
350 "		add	%0,%2,%3\n\
351 		andc	%0,%0,%0\n\
352 		add	%1,%4,%0\n\
353 "		: "=&r"(always_zero), "=r"(gtdp)
354 		: "r"(temp_stamp_xsec), "r"(temp_tb_to_xs), "r"(gtdp) );
355 
356 		tb_count_2 = gtdp->tb_update_count;
357 
358 	} while ( tb_count_2 - ( tb_count_1 & 0xfffffffffffffffe ) );
359 
360 	/* These calculations are faster (gets rid of divides)
361 	 * if done in units of 1/2^20 rather than microseconds.
362 	 * The conversion to microseconds at the end is done
363 	 * without a divide (and in fact, without a multiply) */
364 	tb_xsec = mulhdu( tb_ticks, temp_tb_to_xs );
365 	xsec = temp_stamp_xsec + tb_xsec;
366 	sec = xsec / XSEC_PER_SEC;
367 	xsec -= sec * XSEC_PER_SEC;
368 	usec = (xsec * USEC_PER_SEC)/XSEC_PER_SEC;
369 
370         tv->tv_sec = sec;
371         tv->tv_usec = usec;
372 }
373 
do_settimeofday(struct timeval * tv)374 void do_settimeofday(struct timeval *tv)
375 {
376 	unsigned long flags;
377 	unsigned long delta_xsec;
378 	long int tb_delta, new_usec, new_sec;
379 	unsigned long new_xsec;
380 
381 	write_lock_irqsave(&xtime_lock, flags);
382 	/* Updating the RTC is not the job of this code. If the time is
383 	 * stepped under NTP, the RTC will be update after STA_UNSYNC
384 	 * is cleared. Tool like clock/hwclock either copy the RTC
385 	 * to the system time, in which case there is no point in writing
386 	 * to the RTC again, or write to the RTC but then they don't call
387 	 * settimeofday to perform this operation.
388 	 */
389 #ifdef CONFIG_PPC_ISERIES
390 	if ( first_settimeofday ) {
391 		iSeries_tb_recal();
392 		first_settimeofday = 0;
393 	}
394 #endif
395 	tb_delta = tb_ticks_since(tb_last_stamp);
396 	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
397 
398 	new_sec = tv->tv_sec;
399 	new_usec = tv->tv_usec - tb_delta / tb_ticks_per_usec;
400 	while (new_usec <0) {
401 		new_sec--;
402 		new_usec += USEC_PER_SEC;
403 	}
404 	xtime.tv_usec = new_usec;
405 	xtime.tv_sec = new_sec;
406 
407 	/* In case of a large backwards jump in time with NTP, we want the
408 	 * clock to be updated as soon as the PLL is again in lock.
409 	 */
410 	last_rtc_update = new_sec - 658;
411 
412 	time_adjust = 0;                /* stop active adjtime() */
413 	time_status |= STA_UNSYNC;
414 	time_maxerror = NTP_PHASE_LIMIT;
415 	time_esterror = NTP_PHASE_LIMIT;
416 
417 	delta_xsec = mulhdu( (tb_last_stamp-systemcfg->tb_orig_stamp), systemcfg->tb_to_xs );
418 	new_xsec = (tv->tv_usec * XSEC_PER_SEC) / USEC_PER_SEC;
419 	new_xsec += tv->tv_sec * XSEC_PER_SEC;
420 	if ( new_xsec > delta_xsec ) {
421 		systemcfg->stamp_xsec = new_xsec - delta_xsec;
422 	}
423 	else {
424 		/* This is only for the case where the user is setting the time
425 		 * way back to a time such that the boot time would have been
426 		 * before 1970 ... eg. we booted ten days ago, and we are
427 		 * setting the time to Jan 5, 1970 */
428 		systemcfg->stamp_xsec = new_xsec;
429 		systemcfg->tb_orig_stamp = tb_last_stamp;
430 	}
431 
432 	systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
433 	systemcfg->tz_dsttime = sys_tz.tz_dsttime;
434 
435 	write_unlock_irqrestore(&xtime_lock, flags);
436 }
437 
438 /*
439  * This function is a copy of the architecture independent function
440  * but which calls do_settimeofday rather than setting the xtime
441  * fields itself.  This way, the fields which are used for
442  * do_settimeofday get updated too.
443  */
ppc64_sys32_stime(int * tptr)444 long ppc64_sys32_stime(int* tptr)
445 {
446 	int value;
447 	struct timeval myTimeval;
448 
449 	if (!capable(CAP_SYS_TIME))
450 		return -EPERM;
451 
452 	if (get_user(value, tptr))
453 		return -EFAULT;
454 
455 	myTimeval.tv_sec = value;
456 	myTimeval.tv_usec = 0;
457 
458 	do_settimeofday(&myTimeval);
459 
460 	return 0;
461 }
462 
463 /*
464  * This function is a copy of the architecture independent function
465  * but which calls do_settimeofday rather than setting the xtime
466  * fields itself.  This way, the fields which are used for
467  * do_settimeofday get updated too.
468  */
ppc64_sys_stime(long * tptr)469 long ppc64_sys_stime(long* tptr)
470 {
471 	long value;
472 	struct timeval myTimeval;
473 
474 	if (!capable(CAP_SYS_TIME))
475 		return -EPERM;
476 
477 	if (get_user(value, tptr))
478 		return -EFAULT;
479 
480 	myTimeval.tv_sec = value;
481 	myTimeval.tv_usec = 0;
482 
483 	do_settimeofday(&myTimeval);
484 
485 	return 0;
486 }
487 
time_init(void)488 void __init time_init(void)
489 {
490 	/* This function is only called on the boot processor */
491 	unsigned long flags;
492 	struct rtc_time tm;
493 
494 	ppc_md.calibrate_decr();
495 
496 	if ( ! piranha_simulator ) {
497 		ppc_md.get_boot_time(&tm);
498 	}
499 	write_lock_irqsave(&xtime_lock, flags);
500 	xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
501 			      tm.tm_hour, tm.tm_min, tm.tm_sec);
502 	tb_last_stamp = get_tb();
503 	systemcfg->tb_orig_stamp = tb_last_stamp;
504 	systemcfg->tb_update_count = 0;
505 	systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
506 	systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
507 	systemcfg->tb_to_xs = tb_to_xs;
508 
509 	xtime_sync_interval = tb_ticks_per_sec - (tb_ticks_per_sec/8);
510 	next_xtime_sync_tb = tb_last_stamp + xtime_sync_interval;
511 
512 	time_freq = 0;
513 
514 	xtime.tv_usec = 0;
515 	last_rtc_update = xtime.tv_sec;
516 	write_unlock_irqrestore(&xtime_lock, flags);
517 
518 	/* Not exact, but the timer interrupt takes care of this */
519 	set_dec(tb_ticks_per_jiffy);
520 
521 	/* This horrible hack gives setup a hook just before console_init */
522 	setup_before_console_init();
523 }
524 
525 /*
526  * After adjtimex is called, adjust the conversion of tb ticks
527  * to microseconds to keep do_gettimeofday synchronized
528  * with ntpd.
529  *
530  * Use the time_adjust, time_freq and time_offset computed by adjtimex to
531  * adjust the frequency.
532  */
533 
534 /* #define DEBUG_PPC_ADJTIMEX 1 */
535 
ppc_adjtimex(void)536 void ppc_adjtimex(void)
537 {
538 	unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, new_tb_to_xs, new_xsec, new_stamp_xsec;
539 	unsigned long tb_ticks_per_sec_delta;
540 	long delta_freq, ltemp;
541 	struct div_result divres;
542 	unsigned long flags;
543 	long singleshot_ppm = 0;
544 
545 	/* Compute parts per million frequency adjustment to accomplish the time adjustment
546 	   implied by time_offset to be applied over the elapsed time indicated by time_constant.
547 	   Use SHIFT_USEC to get it into the same units as time_freq. */
548 	if ( time_offset < 0 ) {
549 		ltemp = -time_offset;
550 		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
551 		ltemp >>= SHIFT_KG + time_constant;
552 		ltemp = -ltemp;
553 	}
554 	else {
555 		ltemp = time_offset;
556 		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
557 		ltemp >>= SHIFT_KG + time_constant;
558 	}
559 
560 	/* If there is a single shot time adjustment in progress */
561 	if ( time_adjust ) {
562 #ifdef DEBUG_PPC_ADJTIMEX
563 		printk("ppc_adjtimex: ");
564 		if ( adjusting_time == 0 )
565 			printk("starting ");
566 		printk("single shot time_adjust = %ld\n", time_adjust);
567 #endif
568 
569 		adjusting_time = 1;
570 
571 		/* Compute parts per million frequency adjustment to match time_adjust */
572 		singleshot_ppm = tickadj * HZ;
573 		/*
574 		 * The adjustment should be tickadj*HZ to match the code in
575 		 * linux/kernel/timer.c, but experiments show that this is too
576 		 * large. 3/4 of tickadj*HZ seems about right
577 		 */
578 		singleshot_ppm -= singleshot_ppm / 4;
579 		/* Use SHIFT_USEC to get it into the same units as time_freq */
580 		singleshot_ppm <<= SHIFT_USEC;
581 		if ( time_adjust < 0 )
582 			singleshot_ppm = -singleshot_ppm;
583 	}
584 	else {
585 #ifdef DEBUG_PPC_ADJTIMEX
586 		if ( adjusting_time )
587 			printk("ppc_adjtimex: ending single shot time_adjust\n");
588 #endif
589 		adjusting_time = 0;
590 	}
591 
592 	/* Add up all of the frequency adjustments */
593 	delta_freq = time_freq + ltemp + singleshot_ppm;
594 
595 	/* Compute a new value for tb_ticks_per_sec based on the frequency adjustment */
596 	den = 1000000 * (1 << (SHIFT_USEC - 8));
597 	if ( delta_freq < 0 ) {
598 		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
599 		new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
600 	}
601 	else {
602 		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
603 		new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
604 	}
605 
606 #ifdef DEBUG_PPC_ADJTIMEX
607 	printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
608 	printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld  new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
609 #endif
610 
611 	/*
612 	 * Compute a new value of tb_to_xs (used to convert tb to microseconds
613 	 * and a new value of stamp_xsec which is the time (in 1/2^20 second
614 	 * units) corresponding to tb_orig_stamp.  This new value of stamp_xsec
615 	 * compensates for the change in frequency (implied by the new
616 	 * tb_to_xs) and so guarantees that the current time remains the same
617 	 *
618 	 */
619 	tb_ticks = get_tb() - systemcfg->tb_orig_stamp;
620 	div128_by_32( 1024*1024, 0, new_tb_ticks_per_sec, &divres );
621 	new_tb_to_xs = divres.result_low;
622 	new_xsec = mulhdu( tb_ticks, new_tb_to_xs );
623 
624 	write_lock_irqsave( &xtime_lock, flags );
625 	old_xsec = mulhdu( tb_ticks, systemcfg->tb_to_xs );
626 	new_stamp_xsec = systemcfg->stamp_xsec + old_xsec - new_xsec;
627 
628 	/*
629 	 * tb_update_count is used to allow the problem state gettimeofday code
630 	 * to assure itself that it sees a consistent view of the tb_to_xs and
631 	 * stamp_xsec variables.  It reads the tb_update_count, then reads
632 	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
633 	 * the two values of tb_update_count match and are even then the
634 	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
635 	 * loops back and reads them again until this criteria is met.
636 	 */
637 	++(systemcfg->tb_update_count);
638 	wmb();
639 	systemcfg->tb_to_xs = new_tb_to_xs;
640 	systemcfg->stamp_xsec = new_stamp_xsec;
641 	wmb();
642 	++(systemcfg->tb_update_count);
643 
644 	write_unlock_irqrestore( &xtime_lock, flags );
645 
646 }
647 
648 
#define TICK_SIZE tick
#define FEBRUARY	2
#define	STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
/* NOTE(review): simplified leap rule without the century correction —
 * only valid for years 1901-2099, which covers the 32-bit Unix epoch
 * range handled by to_tm(). */
#define	leapyear(year)		((year) % 4 == 0)
#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
#define	days_in_month(a) 	(month_days[(a) - 1])

/* Days per month (1-based via days_in_month); February is temporarily
 * patched to 29 in place by to_tm() for leap years. */
static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
661 
662 /*
663  * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
664  */
GregorianDay(struct rtc_time * tm)665 void GregorianDay(struct rtc_time * tm)
666 {
667 	int leapsToDate;
668 	int lastYear;
669 	int day;
670 	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
671 
672 	lastYear=tm->tm_year-1;
673 
674 	/*
675 	 * Number of leap corrections to apply up to end of last year
676 	 */
677 	leapsToDate = lastYear/4 - lastYear/100 + lastYear/400;
678 
679 	/*
680 	 * This year is a leap year if it is divisible by 4 except when it is
681 	 * divisible by 100 unless it is divisible by 400
682 	 *
683 	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be
684 	 */
685 	if((tm->tm_year%4==0) &&
686 	   ((tm->tm_year%100!=0) || (tm->tm_year%400==0)) &&
687 	   (tm->tm_mon>2))
688 	{
689 		/*
690 		 * We are past Feb. 29 in a leap year
691 		 */
692 		day=1;
693 	}
694 	else
695 	{
696 		day=0;
697 	}
698 
699 	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
700 		   tm->tm_mday;
701 
702 	tm->tm_wday=day%7;
703 }
704 
to_tm(int tim,struct rtc_time * tm)705 void to_tm(int tim, struct rtc_time * tm)
706 {
707 	register int    i;
708 	register long   hms, day;
709 
710 	day = tim / SECDAY;
711 	hms = tim % SECDAY;
712 
713 	/* Hours, minutes, seconds are easy */
714 	tm->tm_hour = hms / 3600;
715 	tm->tm_min = (hms % 3600) / 60;
716 	tm->tm_sec = (hms % 3600) % 60;
717 
718 	/* Number of years in days */
719 	for (i = STARTOFTIME; day >= days_in_year(i); i++)
720 		day -= days_in_year(i);
721 	tm->tm_year = i;
722 
723 	/* Number of months in days left */
724 	if (leapyear(tm->tm_year))
725 		days_in_month(FEBRUARY) = 29;
726 	for (i = 1; day >= days_in_month(i); i++)
727 		day -= days_in_month(i);
728 	days_in_month(FEBRUARY) = 28;
729 	tm->tm_mon = i;
730 
731 	/* Days are what is left over (+1) from all that. */
732 	tm->tm_mday = day + 1;
733 
734 	/*
735 	 * Determine the day of week
736 	 */
737 	GregorianDay(tm);
738 }
739 
740 #if 0
741 /* Auxiliary function to compute scaling factors */
742 /* Actually the choice of a timebase running at 1/4 the of the bus
743  * frequency giving resolution of a few tens of nanoseconds is quite nice.
744  * It makes this computation very precise (27-28 bits typically) which
745  * is optimistic considering the stability of most processor clock
746  * oscillators and the precision with which the timebase frequency
747  * is measured but does not harm.
748  */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
        unsigned mlt=0, tmp, err;
        /* No concern for performance, it's done once: use a stupid
         * but safe and compact method to find the multiplier.
         */

        /* Build mlt one bit at a time from the top: keep each bit for
         * which mulhwu(inscale, mlt|bit) still stays below outscale. */
        for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
                if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp;
        }

        /* We might still be off by 1 for the best approximation.
         * A side effect of this is that if outscale is too large
         * the returned value will be zero.
         * Many corner cases have been checked and seem to work,
         * some might have been forgotten in the test however.
         */

        /* err is inscale*(mlt+1) mod 2^32, i.e. the low-order residual of
         * the product; round mlt up when that residual is within half a
         * unit of inscale. */
        err = inscale*(mlt+1);
        if (err <= inscale/2) mlt++;
        return mlt;
  }
770 #endif
771 
772 /*
773  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
774  * result.
775  */
776 
div128_by_32(unsigned long dividend_high,unsigned long dividend_low,unsigned divisor,struct div_result * dr)777 void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
778 		   unsigned divisor, struct div_result *dr )
779 {
780 	unsigned long a,b,c,d, w,x,y,z, ra,rb,rc;
781 
782 	a = dividend_high >> 32;
783 	b = dividend_high & 0xffffffff;
784 	c = dividend_low >> 32;
785 	d = dividend_low & 0xffffffff;
786 
787 	w = a/divisor;
788 	ra = (a - (w * divisor)) << 32;
789 
790 	x = (ra + b)/divisor;
791 	rb = ((ra + b) - (x * divisor)) << 32;
792 
793 	y = (rb + c)/divisor;
794 	rc = ((rb + b) - (y * divisor)) << 32;
795 
796 	z = (rc + d)/divisor;
797 
798 	dr->result_high = (w << 32) + x;
799 	dr->result_low  = (y << 32) + z;
800 
801 }
802 
803