// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/arch/arm/plat-omap/dmtimer.c
 *
 * OMAP Dual-Mode Timers
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
 * Tarun Kanti DebBarma <tarun.kanti@ti.com>
 * Thara Gopinath <thara@ti.com>
 *
 * dmtimer adaptation to platform_driver.
 *
 * Copyright (C) 2005 Nokia Corporation
 * OMAP2 support by Juha Yrjola
 * API improvements and OMAP2 clock framework support by Timo Teras
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dmtimer-omap.h>

#include <clocksource/timer-ti-dm.h>

/*
 * timer errata flags
 *
 * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
 * errata prevents us from using posted mode on these devices, unless the
 * timer counter register is never read. For more details please refer to
 * the OMAP3/4/5 errata documents.
 */
#define OMAP_TIMER_ERRATA_I103_I767     0x80000000

/* posted mode types */
#define OMAP_TIMER_NONPOSTED            0x00
#define OMAP_TIMER_POSTED               0x01

/* register offsets with the write pending bit encoded */
#define WPSHIFT                         16

#define OMAP_TIMER_WAKEUP_EN_REG        (_OMAP_TIMER_WAKEUP_EN_OFFSET \
                                         | (WP_NONE << WPSHIFT))

#define OMAP_TIMER_CTRL_REG             (_OMAP_TIMER_CTRL_OFFSET \
                                         | (WP_TCLR << WPSHIFT))

#define OMAP_TIMER_COUNTER_REG          (_OMAP_TIMER_COUNTER_OFFSET \
                                         | (WP_TCRR << WPSHIFT))

#define OMAP_TIMER_LOAD_REG             (_OMAP_TIMER_LOAD_OFFSET \
                                         | (WP_TLDR << WPSHIFT))

#define OMAP_TIMER_TRIGGER_REG          (_OMAP_TIMER_TRIGGER_OFFSET \
                                         | (WP_TTGR << WPSHIFT))

#define OMAP_TIMER_WRITE_PEND_REG       (_OMAP_TIMER_WRITE_PEND_OFFSET \
                                         | (WP_NONE << WPSHIFT))

#define OMAP_TIMER_MATCH_REG            (_OMAP_TIMER_MATCH_OFFSET \
                                         | (WP_TMAR << WPSHIFT))

#define OMAP_TIMER_CAPTURE_REG          (_OMAP_TIMER_CAPTURE_OFFSET \
                                         | (WP_NONE << WPSHIFT))

#define OMAP_TIMER_IF_CTRL_REG          (_OMAP_TIMER_IF_CTRL_OFFSET \
                                         | (WP_NONE << WPSHIFT))

#define OMAP_TIMER_CAPTURE2_REG         (_OMAP_TIMER_CAPTURE2_OFFSET \
                                         | (WP_NONE << WPSHIFT))

#define OMAP_TIMER_TICK_POS_REG         (_OMAP_TIMER_TICK_POS_OFFSET \
                                         | (WP_TPIR << WPSHIFT))

#define OMAP_TIMER_TICK_NEG_REG         (_OMAP_TIMER_TICK_NEG_OFFSET \
                                         | (WP_TNIR << WPSHIFT))

#define OMAP_TIMER_TICK_COUNT_REG       (_OMAP_TIMER_TICK_COUNT_OFFSET \
                                         | (WP_TCVR << WPSHIFT))

#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
        (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))

#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
        (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))

struct timer_regs {
        u32 ocp_cfg;
        u32 tidr;
        u32 tier;
        u32 twer;
        u32 tclr;
        u32 tcrr;
        u32 tldr;
        u32 ttrg;
        u32 twps;
        u32 tmar;
        u32 tcar1;
        u32 tsicr;
        u32 tcar2;
        u32 tpir;
        u32 tnir;
        u32 tcvr;
        u32 tocr;
        u32 towr;
};

struct dmtimer {
        struct omap_dm_timer cookie;
        int id;
        int irq;
        struct clk *fclk;

        void __iomem *io_base;
        int irq_stat;                   /* TISR/IRQSTATUS interrupt status */
        int irq_ena;                    /* irq enable */
        int irq_dis;                    /* irq disable, only on v2 ip */
        void __iomem *pend;             /* write pending */
        void __iomem *func_base;        /* function register base */

        atomic_t enabled;
        unsigned long rate;
        unsigned reserved:1;
        unsigned posted:1;
        unsigned omap1:1;
        struct timer_regs context;
        int revision;
        u32 capability;
        u32 errata;
        struct platform_device *pdev;
        struct list_head node;
        struct notifier_block nb;
};

static u32 omap_reserved_systimers;
static LIST_HEAD(omap_timer_list);
static DEFINE_SPINLOCK(dm_timer_lock);

enum {
        REQUEST_ANY = 0,
        REQUEST_BY_ID,
        REQUEST_BY_CAP,
        REQUEST_BY_NODE,
};

/**
 * dmtimer_read - read timer registers in posted and non-posted mode
 * @timer: timer pointer over which the read operation is to be performed
 * @reg: lowest byte holds the register offset
 *
 * The posted mode bit is encoded in reg. Note that in posted mode the write
 * pending bit must be checked. Otherwise a read of a non-completed write
 * will produce an error.
 */
static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
{
        u16 wp, offset;

        wp = reg >> WPSHIFT;
        offset = reg & 0xff;

        /* Wait for a possible write pending bit in posted mode */
        if (wp && timer->posted)
                while (readl_relaxed(timer->pend) & wp)
                        cpu_relax();

        return readl_relaxed(timer->func_base + offset);
}

/**
 * dmtimer_write - write timer registers in posted and non-posted mode
 * @timer: timer pointer over which the write operation is to be performed
 * @reg: lowest byte holds the register offset
 * @val: data to write into the register
 *
 * The posted mode bit is encoded in reg. Note that in posted mode the write
 * pending bit must be checked. Otherwise a write to a register which still
 * has a pending write will be lost.
 */
static inline void dmtimer_write(struct dmtimer *timer, u32 reg, u32 val)
{
        u16 wp, offset;

        wp = reg >> WPSHIFT;
        offset = reg & 0xff;

        /* Wait for a possible write pending bit in posted mode */
        if (wp && timer->posted)
                while (readl_relaxed(timer->pend) & wp)
                        cpu_relax();

        writel_relaxed(val, timer->func_base + offset);
}
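
/*
 * Example (illustrative only): the OMAP_TIMER_*_REG constants above encode
 * both the register offset and its write-pending bit, so a single accessor
 * can serve posted and non-posted mode. For OMAP_TIMER_CTRL_REG the
 * accessors effectively compute:
 *
 *      wp     = OMAP_TIMER_CTRL_REG >> WPSHIFT;  (WP_TCLR)
 *      offset = OMAP_TIMER_CTRL_REG & 0xff;      (_OMAP_TIMER_CTRL_OFFSET)
 *
 * and, in posted mode only, poll timer->pend until WP_TCLR clears before
 * touching TCLR.
 */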

static inline void __omap_dm_timer_init_regs(struct dmtimer *timer)
{
        u32 tidr;

        /* Assume v1 ip if bits [31:16] are zero */
        tidr = readl_relaxed(timer->io_base);
        if (!(tidr >> 16)) {
                timer->revision = 1;
                timer->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
                timer->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
                timer->irq_dis = OMAP_TIMER_V1_INT_EN_OFFSET;
                timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
                timer->func_base = timer->io_base;
        } else {
                timer->revision = 2;
                timer->irq_stat = OMAP_TIMER_V2_IRQSTATUS - OMAP_TIMER_V2_FUNC_OFFSET;
                timer->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET - OMAP_TIMER_V2_FUNC_OFFSET;
                timer->irq_dis = OMAP_TIMER_V2_IRQENABLE_CLR - OMAP_TIMER_V2_FUNC_OFFSET;
                timer->pend = timer->io_base +
                        _OMAP_TIMER_WRITE_PEND_OFFSET +
                        OMAP_TIMER_V2_FUNC_OFFSET;
                timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
        }
}

/*
 * __omap_dm_timer_enable_posted - enables write posted mode
 * @timer: pointer to timer instance handle
 *
 * Enables the write posted mode for the timer. When posted mode is enabled,
 * writes to certain timer registers are immediately acknowledged by the
 * internal bus, which prevents the CPU from stalling until the write has
 * completed. Enabling this feature can improve performance when writing to
 * the timer registers.
 */
static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
{
        if (timer->posted)
                return;

        if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
                timer->posted = OMAP_TIMER_NONPOSTED;
                dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0);
                return;
        }

        dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, OMAP_TIMER_CTRL_POSTED);
        timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
        timer->posted = OMAP_TIMER_POSTED;
}

static inline void __omap_dm_timer_stop(struct dmtimer *timer,
                                        unsigned long rate)
{
        u32 l;

        l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
        if (l & OMAP_TIMER_CTRL_ST) {
                l &= ~0x1;
                dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
#ifdef CONFIG_ARCH_OMAP2PLUS
                /* Readback to make sure write has completed */
                dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
                /*
                 * Wait for functional clock period x 3.5 to make sure that
                 * timer is stopped
                 */
                udelay(3500000 / rate + 1);
#endif
        }

        /* Ack possibly pending interrupt */
        dmtimer_write(timer, timer->irq_stat, OMAP_TIMER_INT_OVERFLOW);
}
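
/*
 * Example (illustrative only): with rate = 32768 (the 32 kHz source) the
 * wait above is udelay(3500000 / 32768 + 1) == udelay(107), i.e. just over
 * 3.5 functional clock periods of roughly 30.5 us each.
 */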

static inline void __omap_dm_timer_int_enable(struct dmtimer *timer,
                                              unsigned int value)
{
        dmtimer_write(timer, timer->irq_ena, value);
        dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
}

static inline unsigned int
__omap_dm_timer_read_counter(struct dmtimer *timer)
{
        return dmtimer_read(timer, OMAP_TIMER_COUNTER_REG);
}

static inline void __omap_dm_timer_write_status(struct dmtimer *timer,
                                                unsigned int value)
{
        dmtimer_write(timer, timer->irq_stat, value);
}

static void omap_timer_restore_context(struct dmtimer *timer)
{
        dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, timer->context.ocp_cfg);

        dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, timer->context.twer);
        dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, timer->context.tcrr);
        dmtimer_write(timer, OMAP_TIMER_LOAD_REG, timer->context.tldr);
        dmtimer_write(timer, OMAP_TIMER_MATCH_REG, timer->context.tmar);
        dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, timer->context.tsicr);
        dmtimer_write(timer, timer->irq_ena, timer->context.tier);
        dmtimer_write(timer, OMAP_TIMER_CTRL_REG, timer->context.tclr);
}

static void omap_timer_save_context(struct dmtimer *timer)
{
        timer->context.ocp_cfg = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);

        timer->context.tclr = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
        timer->context.twer = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG);
        timer->context.tldr = dmtimer_read(timer, OMAP_TIMER_LOAD_REG);
        timer->context.tmar = dmtimer_read(timer, OMAP_TIMER_MATCH_REG);
        timer->context.tier = dmtimer_read(timer, timer->irq_ena);
        timer->context.tsicr = dmtimer_read(timer, OMAP_TIMER_IF_CTRL_REG);
}

static int omap_timer_context_notifier(struct notifier_block *nb,
                                       unsigned long cmd, void *v)
{
        struct dmtimer *timer;

        timer = container_of(nb, struct dmtimer, nb);

        switch (cmd) {
        case CPU_CLUSTER_PM_ENTER:
                if ((timer->capability & OMAP_TIMER_ALWON) ||
                    !atomic_read(&timer->enabled))
                        break;
                omap_timer_save_context(timer);
                break;
        case CPU_CLUSTER_PM_ENTER_FAILED:       /* No need to restore context */
                break;
        case CPU_CLUSTER_PM_EXIT:
                if ((timer->capability & OMAP_TIMER_ALWON) ||
                    !atomic_read(&timer->enabled))
                        break;
                omap_timer_restore_context(timer);
                break;
        }

        return NOTIFY_OK;
}

static int omap_dm_timer_reset(struct dmtimer *timer)
{
        u32 l, timeout = 100000;

        if (timer->revision != 1)
                return -EINVAL;

        dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);

        do {
                l = dmtimer_read(timer, OMAP_TIMER_V1_SYS_STAT_OFFSET);
        } while (!l && timeout--);

        if (!timeout) {
                dev_err(&timer->pdev->dev, "Timer failed to reset\n");
                return -ETIMEDOUT;
        }

        /* Configure timer for smart-idle mode */
        l = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
        l |= 0x2 << 0x3;
        dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l);

        timer->posted = 0;

        return 0;
}

/*
 * Functions exposed to PWM and remoteproc drivers via platform_data.
 * Do not use these within this driver; they will be deprecated and
 * replaced by generic Linux framework functionality such as chained
 * interrupts and the clock framework.
 */
static struct dmtimer *to_dmtimer(struct omap_dm_timer *cookie)
{
        if (!cookie)
                return NULL;

        return container_of(cookie, struct dmtimer, cookie);
}

static int omap_dm_timer_set_source(struct omap_dm_timer *cookie, int source)
{
        int ret;
        const char *parent_name;
        struct clk *parent;
        struct dmtimer_platform_data *pdata;
        struct dmtimer *timer;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer) || IS_ERR(timer->fclk))
                return -EINVAL;

        switch (source) {
        case OMAP_TIMER_SRC_SYS_CLK:
                parent_name = "timer_sys_ck";
                break;
        case OMAP_TIMER_SRC_32_KHZ:
                parent_name = "timer_32k_ck";
                break;
        case OMAP_TIMER_SRC_EXT_CLK:
                parent_name = "timer_ext_ck";
                break;
        default:
                return -EINVAL;
        }

        pdata = timer->pdev->dev.platform_data;

        /*
         * FIXME: Used for OMAP1 devices only because they do not currently
         * use the clock framework to set the parent clock. To be removed
         * once OMAP1 is migrated to using the clock framework for dmtimers.
         */
        if (timer->omap1 && pdata && pdata->set_timer_src)
                return pdata->set_timer_src(timer->pdev, source);

#if defined(CONFIG_COMMON_CLK)
        /* Check if the clock has configurable parents */
        if (clk_hw_get_num_parents(__clk_get_hw(timer->fclk)) < 2)
                return 0;
#endif

        parent = clk_get(&timer->pdev->dev, parent_name);
        if (IS_ERR(parent)) {
                pr_err("%s: %s not found\n", __func__, parent_name);
                return -EINVAL;
        }

        ret = clk_set_parent(timer->fclk, parent);
        if (ret < 0)
                pr_err("%s: failed to set %s as parent\n", __func__,
                       parent_name);

        clk_put(parent);

        return ret;
}
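
/*
 * Usage sketch (illustrative, not part of this driver): clients reach the
 * function above through the ops table exported via platform data, and would
 * typically re-read the functional clock rate after switching sources:
 *
 *      ops->set_source(t, OMAP_TIMER_SRC_SYS_CLK);
 *      rate = clk_get_rate(ops->get_fclk(t));
 *
 * Here "ops" and "t" are assumed to come from the client's own setup; see
 * the sketch after dmtimer_ops near the end of this file.
 */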

static void omap_dm_timer_enable(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer = to_dmtimer(cookie);
        struct device *dev = &timer->pdev->dev;
        int rc;

        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                dev_err(dev, "could not enable timer\n");
}

static void omap_dm_timer_disable(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer = to_dmtimer(cookie);
        struct device *dev = &timer->pdev->dev;

        pm_runtime_put_sync(dev);
}

static int omap_dm_timer_prepare(struct dmtimer *timer)
{
        struct device *dev = &timer->pdev->dev;
        int rc;

        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                return rc;

        if (timer->capability & OMAP_TIMER_NEEDS_RESET) {
                rc = omap_dm_timer_reset(timer);
                if (rc) {
                        pm_runtime_put_sync(dev);
                        return rc;
                }
        }

        __omap_dm_timer_enable_posted(timer);
        pm_runtime_put_sync(dev);

        return 0;
}

static inline u32 omap_dm_timer_reserved_systimer(int id)
{
        return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
}

static struct dmtimer *_omap_dm_timer_request(int req_type, void *data)
{
        struct dmtimer *timer = NULL, *t;
        struct device_node *np = NULL;
        unsigned long flags;
        u32 cap = 0;
        int id = 0;

        switch (req_type) {
        case REQUEST_BY_ID:
                id = *(int *)data;
                break;
        case REQUEST_BY_CAP:
                cap = *(u32 *)data;
                break;
        case REQUEST_BY_NODE:
                np = (struct device_node *)data;
                break;
        default:
                /* REQUEST_ANY */
                break;
        }

        spin_lock_irqsave(&dm_timer_lock, flags);
        list_for_each_entry(t, &omap_timer_list, node) {
                if (t->reserved)
                        continue;

                switch (req_type) {
                case REQUEST_BY_ID:
                        if (id == t->pdev->id) {
                                timer = t;
                                timer->reserved = 1;
                                goto found;
                        }
                        break;
                case REQUEST_BY_CAP:
                        if (cap == (t->capability & cap)) {
                                /*
                                 * If timer is not NULL, we have already found
                                 * one timer. But it was not an exact match
                                 * because it had more capabilities than what
                                 * was required. Therefore, unreserve the last
                                 * timer found and see if this one is a better
                                 * match.
                                 */
                                if (timer)
                                        timer->reserved = 0;
                                timer = t;
                                timer->reserved = 1;

                                /* Exit loop early if we find an exact match */
                                if (t->capability == cap)
                                        goto found;
                        }
                        break;
                case REQUEST_BY_NODE:
                        if (np == t->pdev->dev.of_node) {
                                timer = t;
                                timer->reserved = 1;
                                goto found;
                        }
                        break;
                default:
                        /* REQUEST_ANY */
                        timer = t;
                        timer->reserved = 1;
                        goto found;
                }
        }
found:
        spin_unlock_irqrestore(&dm_timer_lock, flags);

        if (timer && omap_dm_timer_prepare(timer)) {
                timer->reserved = 0;
                timer = NULL;
        }

        if (!timer)
                pr_debug("%s: timer request failed!\n", __func__);

        return timer;
}
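
/*
 * Example (illustrative only): a capability request such as
 *
 *      u32 cap = OMAP_TIMER_HAS_PWM;
 *      timer = _omap_dm_timer_request(REQUEST_BY_CAP, &cap);
 *
 * reserves any free timer whose capability mask includes OMAP_TIMER_HAS_PWM,
 * but keeps scanning for an exact capability match and, if one is found,
 * releases the earlier over-qualified candidate in its favour.
 */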

static struct omap_dm_timer *omap_dm_timer_request(void)
{
        struct dmtimer *timer;

        timer = _omap_dm_timer_request(REQUEST_ANY, NULL);
        if (!timer)
                return NULL;

        return &timer->cookie;
}

static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
        struct dmtimer *timer;

        /* Requesting timer by ID is not supported when device tree is used */
        if (of_have_populated_dt()) {
                pr_warn("%s: Please use omap_dm_timer_request_by_node()\n",
                        __func__);
                return NULL;
        }

        timer = _omap_dm_timer_request(REQUEST_BY_ID, &id);
        if (!timer)
                return NULL;

        return &timer->cookie;
}

/**
 * omap_dm_timer_request_by_node - Request a timer by device-tree node
 * @np: Pointer to device-tree timer node
 *
 * Request a timer based upon a device node pointer. Returns pointer to
 * timer handle on success and a NULL pointer on failure.
 */
static struct omap_dm_timer *omap_dm_timer_request_by_node(struct device_node *np)
{
        struct dmtimer *timer;

        if (!np)
                return NULL;

        timer = _omap_dm_timer_request(REQUEST_BY_NODE, np);
        if (!timer)
                return NULL;

        return &timer->cookie;
}
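
/*
 * Usage sketch (illustrative, not part of this driver): a client driver that
 * references a timer through a phandle in its own device-tree node might do:
 *
 *      struct device_node *timer_np;
 *      struct omap_dm_timer *t;
 *
 *      timer_np = of_parse_phandle(dev->of_node, "ti,timers", 0);
 *      t = timer_np ? ops->request_by_node(timer_np) : NULL;
 *
 * The "ti,timers" property name and the "ops" pointer are assumptions about
 * the client binding, not something defined by this driver.
 */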

static int omap_dm_timer_free(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer))
                return -EINVAL;

        WARN_ON(!timer->reserved);
        timer->reserved = 0;
        return 0;
}

int omap_dm_timer_get_irq(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer = to_dmtimer(cookie);

        if (timer)
                return timer->irq;
        return -EINVAL;
}

#if defined(CONFIG_ARCH_OMAP1)
#include <linux/soc/ti/omap1-io.h>

static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
        return NULL;
}

/**
 * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
 * @inputmask: current value of idlect mask
 */
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
        int i = 0;
        struct dmtimer *timer = NULL;
        unsigned long flags;

        /* If ARMXOR cannot be idled this function call is unnecessary */
        if (!(inputmask & (1 << 1)))
                return inputmask;

        /* If any active timer is using ARMXOR return modified mask */
        spin_lock_irqsave(&dm_timer_lock, flags);
        list_for_each_entry(timer, &omap_timer_list, node) {
                u32 l;

                l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
                if (l & OMAP_TIMER_CTRL_ST) {
                        if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
                                inputmask &= ~(1 << 1);
                        else
                                inputmask &= ~(1 << 2);
                }
                i++;
        }
        spin_unlock_irqrestore(&dm_timer_lock, flags);

        return inputmask;
}

#else

static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer = to_dmtimer(cookie);

        if (timer && !IS_ERR(timer->fclk))
                return timer->fclk;
        return NULL;
}

__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
        BUG();

        return 0;
}

#endif

static int omap_dm_timer_start(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer;
        struct device *dev;
        int rc;
        u32 l;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer))
                return -EINVAL;

        dev = &timer->pdev->dev;

        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                return rc;

        l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
        if (!(l & OMAP_TIMER_CTRL_ST)) {
                l |= OMAP_TIMER_CTRL_ST;
                dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
        }

        return 0;
}

static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer;
        struct device *dev;
        unsigned long rate = 0;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer))
                return -EINVAL;

        dev = &timer->pdev->dev;

        if (!timer->omap1)
                rate = clk_get_rate(timer->fclk);

        __omap_dm_timer_stop(timer, rate);

        pm_runtime_put_sync(dev);

        return 0;
}

static int omap_dm_timer_set_load(struct omap_dm_timer *cookie,
                                  unsigned int load)
{
        struct dmtimer *timer;
        struct device *dev;
        int rc;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer))
                return -EINVAL;

        dev = &timer->pdev->dev;
        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                return rc;

        dmtimer_write(timer, OMAP_TIMER_LOAD_REG, load);

        pm_runtime_put_sync(dev);

        return 0;
}

static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
                                   unsigned int match)
{
        struct dmtimer *timer;
        struct device *dev;
        int rc;
        u32 l;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer))
                return -EINVAL;

        dev = &timer->pdev->dev;
        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                return rc;

        l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
        if (enable)
                l |= OMAP_TIMER_CTRL_CE;
        else
                l &= ~OMAP_TIMER_CTRL_CE;
        dmtimer_write(timer, OMAP_TIMER_MATCH_REG, match);
        dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);

        pm_runtime_put_sync(dev);

        return 0;
}

static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
                                 int toggle, int trigger, int autoreload)
{
        struct dmtimer *timer;
        struct device *dev;
        int rc;
        u32 l;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer))
                return -EINVAL;

        dev = &timer->pdev->dev;
        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                return rc;

        l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
        l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
               OMAP_TIMER_CTRL_PT | (0x03 << 10) | OMAP_TIMER_CTRL_AR);
        if (def_on)
                l |= OMAP_TIMER_CTRL_SCPWM;
        if (toggle)
                l |= OMAP_TIMER_CTRL_PT;
        l |= trigger << 10;
        if (autoreload)
                l |= OMAP_TIMER_CTRL_AR;
        dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);

        pm_runtime_put_sync(dev);

        return 0;
}
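
/*
 * Usage sketch (illustrative, not part of this driver): configuring a timer
 * for auto-reloaded PWM output toggling on overflow and compare might look
 * like:
 *
 *      ops->set_pwm(t, 0, 1, OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE, 1);
 *      ops->set_load(t, load_value);
 *      ops->set_match(t, 1, match_value);
 *      ops->start(t);
 *
 * i.e. def_on = 0, toggle = 1, autoreload = 1, with load_value and
 * match_value chosen by the client for the desired period and duty cycle.
 */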

static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer;
        struct device *dev;
        int rc;
        u32 l;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer))
                return -EINVAL;

        dev = &timer->pdev->dev;
        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                return rc;

        l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);

        pm_runtime_put_sync(dev);

        return l;
}

static int omap_dm_timer_set_prescaler(struct omap_dm_timer *cookie,
                                       int prescaler)
{
        struct dmtimer *timer;
        struct device *dev;
        int rc;
        u32 l;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer) || prescaler < -1 || prescaler > 7)
                return -EINVAL;

        dev = &timer->pdev->dev;
        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                return rc;

        l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
        l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
        if (prescaler >= 0) {
                l |= OMAP_TIMER_CTRL_PRE;
                l |= prescaler << 2;
        }
        dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);

        pm_runtime_put_sync(dev);

        return 0;
}
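
/*
 * Example (illustrative only): with the prescaler enabled the timer counts
 * at fclk / 2^(prescaler + 1), so
 *
 *      ops->set_prescaler(t, 7);
 *
 * divides a 32768 Hz functional clock down to 32768 / 256 = 128 Hz, while
 * ops->set_prescaler(t, -1) disables prescaling altogether.
 */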

static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
                                        unsigned int value)
{
        struct dmtimer *timer;
        struct device *dev;
        int rc;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer))
                return -EINVAL;

        dev = &timer->pdev->dev;
        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                return rc;

        __omap_dm_timer_int_enable(timer, value);

        pm_runtime_put_sync(dev);

        return 0;
}

/**
 * omap_dm_timer_set_int_disable - disable timer interrupts
 * @cookie: pointer to timer cookie
 * @mask: bit mask of interrupts to be disabled
 *
 * Disables the specified timer interrupts for a timer.
 */
static int omap_dm_timer_set_int_disable(struct omap_dm_timer *cookie, u32 mask)
{
        struct dmtimer *timer;
        struct device *dev;
        u32 l = mask;
        int rc;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer))
                return -EINVAL;

        dev = &timer->pdev->dev;
        rc = pm_runtime_resume_and_get(dev);
        if (rc)
                return rc;

        if (timer->revision == 1)
                l = dmtimer_read(timer, timer->irq_ena) & ~mask;

        dmtimer_write(timer, timer->irq_dis, l);
        l = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
        dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, l);

        pm_runtime_put_sync(dev);

        return 0;
}

static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer;
        unsigned int l;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer || !atomic_read(&timer->enabled))) {
                pr_err("%s: timer not available or not enabled.\n", __func__);
                return 0;
        }

        l = dmtimer_read(timer, timer->irq_stat);

        return l;
}

static int omap_dm_timer_write_status(struct omap_dm_timer *cookie, unsigned int value)
{
        struct dmtimer *timer;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer || !atomic_read(&timer->enabled)))
                return -EINVAL;

        __omap_dm_timer_write_status(timer, value);

        return 0;
}

static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
{
        struct dmtimer *timer;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer || !atomic_read(&timer->enabled))) {
                pr_err("%s: timer not available or not enabled.\n", __func__);
                return 0;
        }

        return __omap_dm_timer_read_counter(timer);
}

static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
        struct dmtimer *timer;

        timer = to_dmtimer(cookie);
        if (unlikely(!timer || !atomic_read(&timer->enabled))) {
                pr_err("%s: timer not available or not enabled.\n", __func__);
                return -EINVAL;
        }

        dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);

        /* Save the context */
        timer->context.tcrr = value;
        return 0;
}

static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
        struct dmtimer *timer = dev_get_drvdata(dev);

        atomic_set(&timer->enabled, 0);

        if (timer->capability & OMAP_TIMER_ALWON || !timer->func_base)
                return 0;

        omap_timer_save_context(timer);

        return 0;
}

static int __maybe_unused omap_dm_timer_runtime_resume(struct device *dev)
{
        struct dmtimer *timer = dev_get_drvdata(dev);

        if (!(timer->capability & OMAP_TIMER_ALWON) && timer->func_base)
                omap_timer_restore_context(timer);

        atomic_set(&timer->enabled, 1);

        return 0;
}

static const struct dev_pm_ops omap_dm_timer_pm_ops = {
        SET_RUNTIME_PM_OPS(omap_dm_timer_runtime_suspend,
                           omap_dm_timer_runtime_resume, NULL)
};

static const struct of_device_id omap_timer_match[];

/**
 * omap_dm_timer_probe - probe function called for every registered device
 * @pdev: pointer to current timer platform device
 *
 * Called by driver framework at the end of device registration for all
 * timer devices.
 */
static int omap_dm_timer_probe(struct platform_device *pdev)
{
        unsigned long flags;
        struct dmtimer *timer;
        struct device *dev = &pdev->dev;
        const struct dmtimer_platform_data *pdata;
        int ret;

        pdata = of_device_get_match_data(dev);
        if (!pdata)
                pdata = dev_get_platdata(dev);
        else
                dev->platform_data = (void *)pdata;

        if (!pdata) {
                dev_err(dev, "%s: no platform data.\n", __func__);
                return -ENODEV;
        }

        timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
        if (!timer)
                return -ENOMEM;

        timer->irq = platform_get_irq(pdev, 0);
        if (timer->irq < 0)
                return timer->irq;

        timer->io_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(timer->io_base))
                return PTR_ERR(timer->io_base);

        platform_set_drvdata(pdev, timer);

        if (dev->of_node) {
                if (of_find_property(dev->of_node, "ti,timer-alwon", NULL))
                        timer->capability |= OMAP_TIMER_ALWON;
                if (of_find_property(dev->of_node, "ti,timer-dsp", NULL))
                        timer->capability |= OMAP_TIMER_HAS_DSP_IRQ;
                if (of_find_property(dev->of_node, "ti,timer-pwm", NULL))
                        timer->capability |= OMAP_TIMER_HAS_PWM;
                if (of_find_property(dev->of_node, "ti,timer-secure", NULL))
                        timer->capability |= OMAP_TIMER_SECURE;
        } else {
                timer->id = pdev->id;
                timer->capability = pdata->timer_capability;
                timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
        }

        timer->omap1 = timer->capability & OMAP_TIMER_NEEDS_RESET;

        /* OMAP1 devices do not yet use the clock framework for dmtimers */
        if (!timer->omap1) {
                timer->fclk = devm_clk_get(dev, "fck");
                if (IS_ERR(timer->fclk))
                        return PTR_ERR(timer->fclk);
        } else {
                timer->fclk = ERR_PTR(-ENODEV);
        }

        if (!(timer->capability & OMAP_TIMER_ALWON)) {
                timer->nb.notifier_call = omap_timer_context_notifier;
                cpu_pm_register_notifier(&timer->nb);
        }

        timer->errata = pdata->timer_errata;

        timer->pdev = pdev;

        pm_runtime_enable(dev);

        if (!timer->reserved) {
                ret = pm_runtime_resume_and_get(dev);
                if (ret) {
                        dev_err(dev, "%s: pm_runtime_resume_and_get failed!\n",
                                __func__);
                        goto err_disable;
                }
                __omap_dm_timer_init_regs(timer);
                pm_runtime_put(dev);
        }

        /* add the timer element to the list */
        spin_lock_irqsave(&dm_timer_lock, flags);
        list_add_tail(&timer->node, &omap_timer_list);
        spin_unlock_irqrestore(&dm_timer_lock, flags);

        dev_dbg(dev, "Device Probed.\n");

        return 0;

err_disable:
        pm_runtime_disable(dev);
        return ret;
}

/**
 * omap_dm_timer_remove - cleanup a registered timer device
 * @pdev: pointer to current timer platform device
 *
 * Called by driver framework whenever a timer device is unregistered.
 * In addition to freeing platform resources it also deletes the timer
 * entry from the local list.
 */
static int omap_dm_timer_remove(struct platform_device *pdev)
{
        struct dmtimer *timer;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&dm_timer_lock, flags);
        list_for_each_entry(timer, &omap_timer_list, node)
                if (!strcmp(dev_name(&timer->pdev->dev),
                            dev_name(&pdev->dev))) {
                        if (!(timer->capability & OMAP_TIMER_ALWON))
                                cpu_pm_unregister_notifier(&timer->nb);
                        list_del(&timer->node);
                        ret = 0;
                        break;
                }
        spin_unlock_irqrestore(&dm_timer_lock, flags);

        pm_runtime_disable(&pdev->dev);

        return ret;
}

static const struct omap_dm_timer_ops dmtimer_ops = {
        .request_by_node = omap_dm_timer_request_by_node,
        .request_specific = omap_dm_timer_request_specific,
        .request = omap_dm_timer_request,
        .set_source = omap_dm_timer_set_source,
        .get_irq = omap_dm_timer_get_irq,
        .set_int_enable = omap_dm_timer_set_int_enable,
        .set_int_disable = omap_dm_timer_set_int_disable,
        .free = omap_dm_timer_free,
        .enable = omap_dm_timer_enable,
        .disable = omap_dm_timer_disable,
        .get_fclk = omap_dm_timer_get_fclk,
        .start = omap_dm_timer_start,
        .stop = omap_dm_timer_stop,
        .set_load = omap_dm_timer_set_load,
        .set_match = omap_dm_timer_set_match,
        .set_pwm = omap_dm_timer_set_pwm,
        .get_pwm_status = omap_dm_timer_get_pwm_status,
        .set_prescaler = omap_dm_timer_set_prescaler,
        .read_counter = omap_dm_timer_read_counter,
        .write_counter = omap_dm_timer_write_counter,
        .read_status = omap_dm_timer_read_status,
        .write_status = omap_dm_timer_write_status,
};
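
/*
 * Usage sketch (illustrative, not part of this driver): client drivers do not
 * call the functions above directly; they look up the timer's platform device
 * from its device-tree node and use the ops table attached to it:
 *
 *      struct platform_device *timer_pdev = of_find_device_by_node(timer_np);
 *      const struct dmtimer_platform_data *pdata =
 *              dev_get_platdata(&timer_pdev->dev);
 *      const struct omap_dm_timer_ops *ops = pdata ? pdata->timer_ops : NULL;
 *
 * The usual NULL and error checking is omitted here for brevity.
 */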

static const struct dmtimer_platform_data omap3plus_pdata = {
        .timer_errata = OMAP_TIMER_ERRATA_I103_I767,
        .timer_ops = &dmtimer_ops,
};

static const struct dmtimer_platform_data am6_pdata = {
        .timer_ops = &dmtimer_ops,
};

static const struct of_device_id omap_timer_match[] = {
        {
                .compatible = "ti,omap2420-timer",
        },
        {
                .compatible = "ti,omap3430-timer",
                .data = &omap3plus_pdata,
        },
        {
                .compatible = "ti,omap4430-timer",
                .data = &omap3plus_pdata,
        },
        {
                .compatible = "ti,omap5430-timer",
                .data = &omap3plus_pdata,
        },
        {
                .compatible = "ti,am335x-timer",
                .data = &omap3plus_pdata,
        },
        {
                .compatible = "ti,am335x-timer-1ms",
                .data = &omap3plus_pdata,
        },
        {
                .compatible = "ti,dm816-timer",
                .data = &omap3plus_pdata,
        },
        {
                .compatible = "ti,am654-timer",
                .data = &am6_pdata,
        },
        {},
};
MODULE_DEVICE_TABLE(of, omap_timer_match);

static struct platform_driver omap_dm_timer_driver = {
        .probe = omap_dm_timer_probe,
        .remove = omap_dm_timer_remove,
        .driver = {
                .name = "omap_timer",
                .of_match_table = omap_timer_match,
                .pm = &omap_dm_timer_pm_ops,
        },
};

module_platform_driver(omap_dm_timer_driver);

MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");