/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/pm_qos_params.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clkdev.h>

#include "clock.h"

static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clocks_lock);
static LIST_HEAD(clocks);

/*
 * Standard clock functions defined in include/linux/clk.h
 */
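
/*
 * A minimal driver-side usage sketch (illustrative only; "uart_clk" and the
 * rate below are hypothetical, not names defined in this file):
 *
 *	struct clk *c = clk_get(dev, "uart_clk");
 *	if (!IS_ERR(c)) {
 *		clk_set_rate(c, 19200000);
 *		clk_enable(c);
 *	}
 */

/* Enable @clk, powering on the hardware when the first reference is taken. */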
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	spin_lock_irqsave(&clocks_lock, flags);
	clk->count++;
	if (clk->count == 1)
		clk->ops->enable(clk->id);
	spin_unlock_irqrestore(&clocks_lock, flags);
	return 0;
}
EXPORT_SYMBOL(clk_enable);

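/* Drop a reference to @clk; the hardware is disabled when the count hits zero. */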
void clk_disable(struct clk *clk)
{
	unsigned long flags;
	spin_lock_irqsave(&clocks_lock, flags);
	BUG_ON(clk->count == 0);
	clk->count--;
	if (clk->count == 0)
		clk->ops->disable(clk->id);
	spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

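/* Assert or deassert the reset for @clk, keyed by the clock's remote_id. */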
int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	return clk->ops->reset(clk->remote_id, action);
}
EXPORT_SYMBOL(clk_reset);

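/* Return the current rate of @clk in Hz. */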
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->ops->get_rate(clk->id);
}
EXPORT_SYMBOL(clk_get_rate);

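/*
 * Set the rate of @clk.  Clocks flagged CLKFLAG_MAX and/or CLKFLAG_MIN are
 * voted clocks: the rate is applied through set_max_rate()/set_min_rate(),
 * and the plain set_rate op is never called for them.
 */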
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;
	if (clk->flags & CLKFLAG_MAX) {
		ret = clk->ops->set_max_rate(clk->id, rate);
		if (ret)
			return ret;
	}
	if (clk->flags & CLKFLAG_MIN) {
		ret = clk->ops->set_min_rate(clk->id, rate);
		if (ret)
			return ret;
	}

	if (clk->flags & CLKFLAG_MAX || clk->flags & CLKFLAG_MIN)
		return ret;

	return clk->ops->set_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_rate);

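/* Return the rate @clk would actually run at if @rate were requested. */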
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return clk->ops->round_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_round_rate);

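/* Request a minimum (floor) rate of @rate for @clk. */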
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return clk->ops->set_min_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_min_rate);

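/* Request a maximum (ceiling) rate of @rate for @clk. */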
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return clk->ops->set_max_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

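/* Reparenting is not supported on these clocks. */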
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return -ENOSYS;
}
EXPORT_SYMBOL(clk_set_parent);

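/* Parent lookup is likewise unsupported. */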
struct clk *clk_get_parent(struct clk *clk)
{
	return ERR_PTR(-ENOSYS);
}
EXPORT_SYMBOL(clk_get_parent);

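/* Pass implementation-specific flags through to the clock's set_flags op. */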
int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;
	return clk->ops->set_flags(clk->id, flags);
}
EXPORT_SYMBOL(clk_set_flags);

/* EBI1 is the only shared clock that several clients want to vote on as of
 * this commit. If this changes in the future, then it might be better to
 * make clk_set_min_rate handle the voting or make ebi1_clk_set_min_rate more
 * generic to support different clocks.
 */
static struct clk *ebi1_clk;

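/*
 * Register the board clock table: add each entry to clkdev so clk_get() can
 * find it, track every clock on the global list, and look up a handle to the
 * shared EBI1 clock.
 */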
void __init msm_clock_init(struct clk_lookup *clock_tbl, unsigned num_clocks)
{
	unsigned n;

	mutex_lock(&clocks_mutex);
	for (n = 0; n < num_clocks; n++) {
		clkdev_add(&clock_tbl[n]);
		list_add_tail(&clock_tbl[n].clk->list, &clocks);
	}
	mutex_unlock(&clocks_mutex);

	ebi1_clk = clk_get(NULL, "ebi1_clk");
	BUG_ON(IS_ERR_OR_NULL(ebi1_clk));
}

/* The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that belong to us (CLKFLAG_AUTO_OFF) but have
 * not been explicitly enabled by a clk_enable() call.
 */
static int __init clock_late_init(void)
{
	unsigned long flags;
	struct clk *clk;
	unsigned count = 0;

	clock_debug_init();
	mutex_lock(&clocks_mutex);
	list_for_each_entry(clk, &clocks, list) {
		clock_debug_add(clk);
		if (clk->flags & CLKFLAG_AUTO_OFF) {
			spin_lock_irqsave(&clocks_lock, flags);
			if (!clk->count) {
				count++;
				clk->ops->auto_off(clk->id);
			}
			spin_unlock_irqrestore(&clocks_lock, flags);
		}
	}
	mutex_unlock(&clocks_mutex);
	pr_info("clock_late_init() disabled %u unused clocks\n", count);
	return 0;
}

late_initcall(clock_late_init);