// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Aspeed 24XX/25XX I2C Controller.
 *
 *  Copyright (C) 2012-2017 ASPEED Technology Inc.
 *  Copyright 2017 IBM Corporation
 *  Copyright 2017 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

/* I2C Register */
#define ASPEED_I2C_FUN_CTRL_REG				0x00
#define ASPEED_I2C_AC_TIMING_REG1			0x04
#define ASPEED_I2C_AC_TIMING_REG2			0x08
#define ASPEED_I2C_INTR_CTRL_REG			0x0c
#define ASPEED_I2C_INTR_STS_REG				0x10
#define ASPEED_I2C_CMD_REG				0x14
#define ASPEED_I2C_DEV_ADDR_REG				0x18
#define ASPEED_I2C_BYTE_BUF_REG				0x20

/* Global Register Definition */
/* 0x00 : I2C Interrupt Status Register  */
/* 0x08 : I2C Interrupt Target Assignment  */

/* Device Register Definition */
/* 0x00 : I2CD Function Control Register  */
#define ASPEED_I2CD_MULTI_MASTER_DIS			BIT(15)
#define ASPEED_I2CD_SDA_DRIVE_1T_EN			BIT(8)
#define ASPEED_I2CD_M_SDA_DRIVE_1T_EN			BIT(7)
#define ASPEED_I2CD_M_HIGH_SPEED_EN			BIT(6)
#define ASPEED_I2CD_SLAVE_EN				BIT(1)
#define ASPEED_I2CD_MASTER_EN				BIT(0)

/* 0x04 : I2CD Clock and AC Timing Control Register #1 */
#define ASPEED_I2CD_TIME_TBUF_MASK			GENMASK(31, 28)
#define ASPEED_I2CD_TIME_THDSTA_MASK			GENMASK(27, 24)
#define ASPEED_I2CD_TIME_TACST_MASK			GENMASK(23, 20)
#define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT			16
#define ASPEED_I2CD_TIME_SCL_HIGH_MASK			GENMASK(19, 16)
#define ASPEED_I2CD_TIME_SCL_LOW_SHIFT			12
#define ASPEED_I2CD_TIME_SCL_LOW_MASK			GENMASK(15, 12)
#define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK		GENMASK(3, 0)
#define ASPEED_I2CD_TIME_SCL_REG_MAX			GENMASK(3, 0)
/* 0x08 : I2CD Clock and AC Timing Control Register #2 */
#define ASPEED_NO_TIMEOUT_CTRL				0

/* 0x0c : I2CD Interrupt Control Register &
 * 0x10 : I2CD Interrupt Status Register
 *
 * These share bit definitions, so use the same values for the enable &
 * status bits.
 */
#define ASPEED_I2CD_INTR_RECV_MASK			0xf000ffff
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT			BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE		BIT(13)
#define ASPEED_I2CD_INTR_SLAVE_MATCH			BIT(7)
#define ASPEED_I2CD_INTR_SCL_TIMEOUT			BIT(6)
#define ASPEED_I2CD_INTR_ABNORMAL			BIT(5)
#define ASPEED_I2CD_INTR_NORMAL_STOP			BIT(4)
#define ASPEED_I2CD_INTR_ARBIT_LOSS			BIT(3)
#define ASPEED_I2CD_INTR_RX_DONE			BIT(2)
#define ASPEED_I2CD_INTR_TX_NAK				BIT(1)
#define ASPEED_I2CD_INTR_TX_ACK				BIT(0)
#define ASPEED_I2CD_INTR_MASTER_ERRORS					       \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |			       \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT |				       \
		 ASPEED_I2CD_INTR_ABNORMAL |				       \
		 ASPEED_I2CD_INTR_ARBIT_LOSS)
#define ASPEED_I2CD_INTR_ALL						       \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |			       \
		 ASPEED_I2CD_INTR_BUS_RECOVER_DONE |			       \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT |				       \
		 ASPEED_I2CD_INTR_ABNORMAL |				       \
		 ASPEED_I2CD_INTR_NORMAL_STOP |				       \
		 ASPEED_I2CD_INTR_ARBIT_LOSS |				       \
		 ASPEED_I2CD_INTR_RX_DONE |				       \
		 ASPEED_I2CD_INTR_TX_NAK |				       \
		 ASPEED_I2CD_INTR_TX_ACK)

/* 0x14 : I2CD Command/Status Register   */
#define ASPEED_I2CD_SCL_LINE_STS			BIT(18)
#define ASPEED_I2CD_SDA_LINE_STS			BIT(17)
#define ASPEED_I2CD_BUS_BUSY_STS			BIT(16)
#define ASPEED_I2CD_BUS_RECOVER_CMD			BIT(11)

/* Command Bit */
#define ASPEED_I2CD_M_STOP_CMD				BIT(5)
#define ASPEED_I2CD_M_S_RX_CMD_LAST			BIT(4)
#define ASPEED_I2CD_M_RX_CMD				BIT(3)
#define ASPEED_I2CD_S_TX_CMD				BIT(2)
#define ASPEED_I2CD_M_TX_CMD				BIT(1)
#define ASPEED_I2CD_M_START_CMD				BIT(0)
#define ASPEED_I2CD_MASTER_CMDS_MASK					       \
		(ASPEED_I2CD_M_STOP_CMD |				       \
		 ASPEED_I2CD_M_S_RX_CMD_LAST |				       \
		 ASPEED_I2CD_M_RX_CMD |					       \
		 ASPEED_I2CD_M_TX_CMD |					       \
		 ASPEED_I2CD_M_START_CMD)

/* 0x18 : I2CD Slave Device Address Register   */
#define ASPEED_I2CD_DEV_ADDR_MASK			GENMASK(6, 0)

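/*
 * Master transfer state machine, as implemented by the handlers below: a
 * transfer normally progresses INACTIVE -> START -> TX_FIRST/RX_FIRST ->
 * TX/RX -> STOP -> INACTIVE. PENDING is entered when a transfer is requested
 * while the slave state machine is active; the queued command is (re)started
 * from aspeed_i2c_bus_irq() once the slave side returns to INACTIVE (see
 * aspeed_i2c_do_start()).
 */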
enum aspeed_i2c_master_state {
	ASPEED_I2C_MASTER_INACTIVE,
	ASPEED_I2C_MASTER_PENDING,
	ASPEED_I2C_MASTER_START,
	ASPEED_I2C_MASTER_TX_FIRST,
	ASPEED_I2C_MASTER_TX,
	ASPEED_I2C_MASTER_RX_FIRST,
	ASPEED_I2C_MASTER_RX,
	ASPEED_I2C_MASTER_STOP,
};

enum aspeed_i2c_slave_state {
	ASPEED_I2C_SLAVE_INACTIVE,
	ASPEED_I2C_SLAVE_START,
	ASPEED_I2C_SLAVE_READ_REQUESTED,
	ASPEED_I2C_SLAVE_READ_PROCESSED,
	ASPEED_I2C_SLAVE_WRITE_REQUESTED,
	ASPEED_I2C_SLAVE_WRITE_RECEIVED,
	ASPEED_I2C_SLAVE_STOP,
};

struct aspeed_i2c_bus {
	struct i2c_adapter		adap;
	struct device			*dev;
	void __iomem			*base;
	struct reset_control		*rst;
	/* Synchronizes I/O mem access to base. */
	spinlock_t			lock;
	struct completion		cmd_complete;
	u32				(*get_clk_reg_val)(struct device *dev,
							   u32 divisor);
	unsigned long			parent_clk_frequency;
	u32				bus_frequency;
	/* Transaction state. */
	enum aspeed_i2c_master_state	master_state;
	struct i2c_msg			*msgs;
	size_t				buf_index;
	size_t				msgs_index;
	size_t				msgs_count;
	bool				send_stop;
	int				cmd_err;
	/* Protected only by i2c_lock_bus */
	int				master_xfer_result;
	/* Multi-master */
	bool				multi_master;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	struct i2c_client		*slave;
	enum aspeed_i2c_slave_state	slave_state;
#endif /* CONFIG_I2C_SLAVE */
};

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);

static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
{
	unsigned long time_left, flags;
	int ret = 0;
	u32 command;

	spin_lock_irqsave(&bus->lock, flags);
	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	if (command & ASPEED_I2CD_SDA_LINE_STS) {
		/* Bus is idle: no recovery needed. */
		if (command & ASPEED_I2CD_SCL_LINE_STS)
			goto out;
		dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SCL_LINE_STS))
			goto reset_out;
	/* Bus error. */
	} else {
		dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		/* Issues 1 to 8 SCL clock cycles until SDA is released. */
		writel(ASPEED_I2CD_BUS_RECOVER_CMD,
		       bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SDA_LINE_STS))
			goto reset_out;
	}

out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;

reset_out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return aspeed_i2c_reset(bus);
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 command, irq_handled = 0;
	struct i2c_client *slave = bus->slave;
	u8 value;
	int ret;

	if (!slave)
		return 0;

	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	/* Slave was requested, restart state machine. */
	if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
		irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
		bus->slave_state = ASPEED_I2C_SLAVE_START;
	}

	/* Slave is not currently active, irq was for someone else. */
	if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		return irq_handled;

	dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
		irq_status, command);

	/* Slave was sent something. */
	if (irq_status & ASPEED_I2CD_INTR_RX_DONE) {
		value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		/* Handle address frame. */
		if (bus->slave_state == ASPEED_I2C_SLAVE_START) {
			if (value & 0x1)
				bus->slave_state =
						ASPEED_I2C_SLAVE_READ_REQUESTED;
			else
				bus->slave_state =
						ASPEED_I2C_SLAVE_WRITE_REQUESTED;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
	}

	/* Slave was asked to stop. */
	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}
	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}

	switch (bus->slave_state) {
	case ASPEED_I2C_SLAVE_READ_REQUESTED:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
			dev_err(bus->dev, "Unexpected ACK on read request.\n");
		bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED;
		i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_READ_PROCESSED:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev,
				"Expected ACK after processed read.\n");
			break;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
		bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
		ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
		/*
		 * The hardware has already ACKed the address phase, but since
		 * the backend driver returned an error, NACK the next incoming
		 * byte.
		 */
		if (ret < 0)
			writel(ASPEED_I2CD_M_S_RX_CMD_LAST, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
		i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
		break;
	case ASPEED_I2C_SLAVE_STOP:
		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	case ASPEED_I2C_SLAVE_START:
		/* Slave was just started. Waiting for the next event. */;
		break;
	default:
		dev_err(bus->dev, "unknown slave_state: %d\n",
			bus->slave_state);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	}

	return irq_handled;
}
#endif /* CONFIG_I2C_SLAVE */

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
{
	u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD;
	struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
	u8 slave_addr = i2c_8bit_addr_from_msg(msg);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * If a master command is requested in the middle of a slave session,
	 * set the master state to 'pending'; the command will be issued once
	 * the bus returns to the idle state.
	 */
	if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
		bus->master_state = ASPEED_I2C_MASTER_PENDING;
		return;
	}
#endif /* CONFIG_I2C_SLAVE */

	bus->master_state = ASPEED_I2C_MASTER_START;
	bus->buf_index = 0;

	if (msg->flags & I2C_M_RD) {
		command |= ASPEED_I2CD_M_RX_CMD;
		/* Need to let the hardware know to NACK after RX. */
		if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
			command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
	}

	writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG);
	writel(command, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
{
	bus->master_state = ASPEED_I2C_MASTER_STOP;
	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
{
	if (bus->msgs_index + 1 < bus->msgs_count) {
		bus->msgs_index++;
		aspeed_i2c_do_start(bus);
	} else {
		aspeed_i2c_do_stop(bus);
	}
}

static int aspeed_i2c_is_irq_error(u32 irq_status)
{
	if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS)
		return -EAGAIN;
	if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |
			  ASPEED_I2CD_INTR_SCL_TIMEOUT))
		return -EBUSY;
	if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL))
		return -EPROTO;

	return 0;
}

static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 irq_handled = 0, command = 0;
	struct i2c_msg *msg;
	u8 recv_byte;
	int ret;

	if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) {
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE;
		goto out_complete;
	}

	/*
	 * We encountered an interrupt that reports an error: the hardware
	 * should clear the command queue, effectively taking us back to the
	 * INACTIVE state.
	 */
	ret = aspeed_i2c_is_irq_error(irq_status);
	if (ret) {
		dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
			irq_status);
		irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
		if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
			bus->cmd_err = ret;
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
			goto out_complete;
		}
	}

	/* Master is not currently active, irq was for someone else. */
	if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
	    bus->master_state == ASPEED_I2C_MASTER_PENDING)
		goto out_no_complete;

	/* We are in an invalid state; reset bus to a known state. */
	if (!bus->msgs) {
		dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n",
			irq_status);
		bus->cmd_err = -EIO;
		if (bus->master_state != ASPEED_I2C_MASTER_STOP &&
		    bus->master_state != ASPEED_I2C_MASTER_INACTIVE)
			aspeed_i2c_do_stop(bus);
		goto out_no_complete;
	}
	msg = &bus->msgs[bus->msgs_index];

	/*
	 * START is a special case because we still have to handle a subsequent
	 * TX or RX immediately after we handle it, so we handle it here and
	 * then update the state and handle the new state below.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_START) {
#if IS_ENABLED(CONFIG_I2C_SLAVE)
		/*
		 * If a peer master starts a transfer immediately after this
		 * master queues a command, clear the queued command and change
		 * the state to 'pending'. To simplify handling of the pending
		 * cases, a S/W solution is used instead of the H/W command
		 * queue handling.
		 */
		if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
			writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
				~ASPEED_I2CD_MASTER_CMDS_MASK,
			       bus->base + ASPEED_I2C_CMD_REG);
			bus->master_state = ASPEED_I2C_MASTER_PENDING;
			dev_dbg(bus->dev,
				"master goes pending due to a slave start\n");
			goto out_no_complete;
		}
#endif /* CONFIG_I2C_SLAVE */
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) {
				bus->cmd_err = -ENXIO;
				bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
				goto out_complete;
			}
			pr_devel("no slave present at %02x\n", msg->addr);
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			bus->cmd_err = -ENXIO;
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		if (msg->len == 0) { /* SMBUS_QUICK */
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		if (msg->flags & I2C_M_RD)
			bus->master_state = ASPEED_I2C_MASTER_RX_FIRST;
		else
			bus->master_state = ASPEED_I2C_MASTER_TX_FIRST;
	}

	switch (bus->master_state) {
	case ASPEED_I2C_MASTER_TX:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
			dev_dbg(bus->dev, "slave NACKed TX\n");
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			goto error_and_stop;
		} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev, "slave failed to ACK TX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		fallthrough;
	case ASPEED_I2C_MASTER_TX_FIRST:
		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_TX;
			writel(msg->buf[bus->buf_index++],
			       bus->base + ASPEED_I2C_BYTE_BUF_REG);
			writel(ASPEED_I2CD_M_TX_CMD,
			       bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_RX_FIRST:
		/* RX may not have completed yet (only address cycle) */
		if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
			goto out_no_complete;
		fallthrough;
	case ASPEED_I2C_MASTER_RX:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
			dev_err(bus->dev, "master failed to RX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;

		recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		msg->buf[bus->buf_index++] = recv_byte;

		if (msg->flags & I2C_M_RECV_LEN) {
			if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
				bus->cmd_err = -EPROTO;
				aspeed_i2c_do_stop(bus);
				goto out_no_complete;
			}
			msg->len = recv_byte +
					((msg->flags & I2C_CLIENT_PEC) ? 2 : 1);
			msg->flags &= ~I2C_M_RECV_LEN;
		}

		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_RX;
			command = ASPEED_I2CD_M_RX_CMD;
			if (bus->buf_index + 1 == msg->len)
				command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
			writel(command, bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_STOP:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
			dev_err(bus->dev,
				"master failed to STOP. irq_status:0x%x\n",
				irq_status);
			bus->cmd_err = -EIO;
			/* Do not STOP as we have already tried. */
		} else {
			irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		}

		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		goto out_complete;
	case ASPEED_I2C_MASTER_INACTIVE:
		dev_err(bus->dev,
			"master received interrupt 0x%08x, but is inactive\n",
			irq_status);
		bus->cmd_err = -EIO;
		/* Do not STOP as we should be inactive. */
		goto out_complete;
	default:
		WARN(1, "unknown master state\n");
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		bus->cmd_err = -EINVAL;
		goto out_complete;
	}
error_and_stop:
	bus->cmd_err = -EIO;
	aspeed_i2c_do_stop(bus);
	goto out_no_complete;
out_complete:
	bus->msgs = NULL;
	if (bus->cmd_err)
		bus->master_xfer_result = bus->cmd_err;
	else
		bus->master_xfer_result = bus->msgs_index + 1;
	complete(&bus->cmd_complete);
out_no_complete:
	return irq_handled;
}

static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
{
	struct aspeed_i2c_bus *bus = dev_id;
	u32 irq_received, irq_remaining, irq_handled;

	spin_lock(&bus->lock);
	irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	/* Ack all interrupts except for Rx done */
	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
	       bus->base + ASPEED_I2C_INTR_STS_REG);
	readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	irq_received &= ASPEED_I2CD_INTR_RECV_MASK;
	irq_remaining = irq_received;

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * In most cases, interrupt bits will be set one by one, although
	 * multiple interrupt bits could be set at the same time. It's also
	 * possible that master interrupt bits could be set along with slave
	 * interrupt bits. Each case needs to be handled using corresponding
	 * handlers depending on the current state.
	 */
	if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE &&
	    bus->master_state != ASPEED_I2C_MASTER_PENDING) {
		irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining);
	} else {
		irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_master_irq(bus,
							     irq_remaining);
	}

	/*
	 * Start a pending master command here if a slave operation has
	 * completed.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
	    bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		aspeed_i2c_do_start(bus);
#else
	irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */

	irq_remaining &= ~irq_handled;
	if (irq_remaining)
		dev_err(bus->dev,
			"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
			irq_received, irq_handled);

	/* Ack Rx done */
	if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
		writel(ASPEED_I2CD_INTR_RX_DONE,
		       bus->base + ASPEED_I2C_INTR_STS_REG);
		readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	}
	spin_unlock(&bus->lock);
	return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
}

static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
				  struct i2c_msg *msgs, int num)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap);
	unsigned long time_left, flags;

	spin_lock_irqsave(&bus->lock, flags);
	bus->cmd_err = 0;

	/* If bus is busy in a single master environment, attempt recovery. */
	if (!bus->multi_master &&
	    (readl(bus->base + ASPEED_I2C_CMD_REG) &
	     ASPEED_I2CD_BUS_BUSY_STS)) {
		int ret;

		spin_unlock_irqrestore(&bus->lock, flags);
		ret = aspeed_i2c_recover_bus(bus);
		if (ret)
			return ret;
		spin_lock_irqsave(&bus->lock, flags);
	}

	bus->cmd_err = 0;
	bus->msgs = msgs;
	bus->msgs_index = 0;
	bus->msgs_count = num;

	reinit_completion(&bus->cmd_complete);
	aspeed_i2c_do_start(bus);
	spin_unlock_irqrestore(&bus->lock, flags);

	time_left = wait_for_completion_timeout(&bus->cmd_complete,
						bus->adap.timeout);

	if (time_left == 0) {
		/*
		 * If we timed out and the bus is still busy in a multi-master
		 * environment, attempt recovery here.
		 */
		if (bus->multi_master &&
		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
		     ASPEED_I2CD_BUS_BUSY_STS))
			aspeed_i2c_recover_bus(bus);

		/*
		 * If we timed out and the state is still pending, drop the
		 * pending master command.
		 */
		spin_lock_irqsave(&bus->lock, flags);
		if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		spin_unlock_irqrestore(&bus->lock, flags);

		return -ETIMEDOUT;
	}

	return bus->master_xfer_result;
}

static u32 aspeed_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* precondition: bus.lock has been acquired. */
static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
{
	u32 addr_reg_val, func_ctrl_reg_val;

	/*
	 * Set slave addr.  Reserved bits can all safely be written with zeros
	 * on all of ast2[456]00, so zero everything else to ensure we only
	 * enable a single slave address (ast2500 has two, ast2600 has three,
	 * the enable bits for which are also in this register) so that we don't
	 * end up with additional phantom devices responding on the bus.
	 */
	addr_reg_val = slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
	writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);

	/* Turn on slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
}

static int aspeed_i2c_reg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	__aspeed_i2c_reg_slave(bus, client->addr);

	bus->slave = client;
	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}

static int aspeed_i2c_unreg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	u32 func_ctrl_reg_val;
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (!bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	/* Turn off slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave = NULL;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}
#endif /* CONFIG_I2C_SLAVE */

static const struct i2c_algorithm aspeed_i2c_algo = {
	.master_xfer	= aspeed_i2c_master_xfer,
	.functionality	= aspeed_i2c_functionality,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	.reg_slave	= aspeed_i2c_reg_slave,
	.unreg_slave	= aspeed_i2c_unreg_slave,
#endif /* CONFIG_I2C_SLAVE */
};

static u32 aspeed_i2c_get_clk_reg_val(struct device *dev,
				      u32 clk_high_low_mask,
				      u32 divisor)
{
	u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp;

	/*
	 * SCL_high and SCL_low represent a value 1 greater than what is stored
	 * since a zero divider is meaningless. Thus, the max value each can
	 * store is every bit set + 1. Since SCL_high and SCL_low are added
	 * together (see below), the max value of both is the max value of one
	 * of them times two.
	 */
	clk_high_low_max = (clk_high_low_mask + 1) * 2;

	/*
	 * The actual clock frequency of SCL is:
	 *	SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low))
	 *		 = APB_freq / divisor
	 * where base_freq is a programmable clock divider; its value is
	 *	base_freq = 1 << base_clk_divisor
	 * SCL_high is the number of base_freq clock cycles that SCL stays high
	 * and SCL_low is the number of base_freq clock cycles that SCL stays
	 * low for a period of SCL.
	 * The actual register fields have a minimum value of 1 for SCL_high
	 * and SCL_low; thus, they start counting at zero. So
	 *	SCL_high = clk_high + 1
	 *	SCL_low	 = clk_low + 1
	 * Thus,
	 *	SCL_freq = APB_freq /
	 *		((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1))
	 * The documentation recommends clk_high >= clk_high_max / 2 and
	 * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint
	 * gives us the following solution:
	 */
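	/*
	 * Illustrative example (numbers chosen for clarity, not taken from
	 * any datasheet): with a 24 MHz APB clock and a 100 kHz bus on a part
	 * where clk_high_low_mask is 0xf, divisor = DIV_ROUND_UP(24000000,
	 * 100000) = 240 and clk_high_low_max = 32. Since 240 > 32,
	 * base_clk_divisor = ilog2(239 / 32) + 1 = 3, tmp = (240 + 7) >> 3 =
	 * 30, and clk_high = clk_low = 14 after the final decrements. The
	 * resulting SCL rate is 24 MHz / ((1 << 3) * (15 + 15)) = 100 kHz.
	 */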
	base_clk_divisor = divisor > clk_high_low_max ?
			ilog2((divisor - 1) / clk_high_low_max) + 1 : 0;

	if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) {
		base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK;
		clk_low = clk_high_low_mask;
		clk_high = clk_high_low_mask;
		dev_err(dev,
			"clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n",
			divisor, (1 << base_clk_divisor) * clk_high_low_max);
	} else {
		tmp = (divisor + (1 << base_clk_divisor) - 1)
				>> base_clk_divisor;
		clk_low = tmp / 2;
		clk_high = tmp - clk_low;

		if (clk_high)
			clk_high--;

		if (clk_low)
			clk_low--;
	}


	return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT)
		& ASPEED_I2CD_TIME_SCL_HIGH_MASK)
			| ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT)
			   & ASPEED_I2CD_TIME_SCL_LOW_MASK)
			| (base_clk_divisor
			   & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK);
}

static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 3 bits wide, so each can hold a max
	 * value of 8 giving a clk_high_low_max of 16.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor);
}

static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 4 bits wide, so each can hold a max
	 * value of 16 giving a clk_high_low_max of 32.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor);
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
{
	u32 divisor, clk_reg_val;

	divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency);
	clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1);
	clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK |
			ASPEED_I2CD_TIME_THDSTA_MASK |
			ASPEED_I2CD_TIME_TACST_MASK);
	clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor);
	writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1);
	writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2);

	return 0;
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
			     struct platform_device *pdev)
{
	u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN;
	int ret;

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	ret = aspeed_i2c_init_clk(bus);
	if (ret < 0)
		return ret;

	if (of_property_read_bool(pdev->dev.of_node, "multi-master"))
		bus->multi_master = true;
	else
		fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS;

	/* Enable Master Mode */
	writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg,
	       bus->base + ASPEED_I2C_FUN_CTRL_REG);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/* If slave has already been registered, re-enable it. */
	if (bus->slave)
		__aspeed_i2c_reg_slave(bus, bus->slave->addr);
#endif /* CONFIG_I2C_SLAVE */

	/* Set interrupt generation of I2C controller */
	writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	return 0;
}

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus)
{
	struct platform_device *pdev = to_platform_device(bus->dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable and ack all interrupts. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);

	ret = aspeed_i2c_init(bus, pdev);

	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;
}

static const struct of_device_id aspeed_i2c_bus_of_table[] = {
	{
		.compatible = "aspeed,ast2400-i2c-bus",
		.data = aspeed_i2c_24xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2500-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2600-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);
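/*
 * Illustrative devicetree node that would bind to this driver. The unit
 * address, interrupt specifier and clock/reset phandles below are
 * placeholders; the real values come from the SoC dtsi:
 *
 *	i2c0: i2c-bus@40 {
 *		compatible = "aspeed,ast2500-i2c-bus";
 *		reg = <0x40 0x40>;
 *		interrupts = <0>;
 *		clocks = <&syscon ASPEED_CLK_APB>;
 *		resets = <&syscon ASPEED_RESET_I2C>;
 *		bus-frequency = <100000>;
 *		multi-master;
 *	};
 */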

static int aspeed_i2c_probe_bus(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct aspeed_i2c_bus *bus;
	struct clk *parent_clk;
	struct resource *res;
	int irq, ret;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bus->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(bus->base))
		return PTR_ERR(bus->base);

	parent_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(parent_clk))
		return PTR_ERR(parent_clk);
	bus->parent_clk_frequency = clk_get_rate(parent_clk);
	/* We just need the clock rate; we don't actually use the clk object. */
	devm_clk_put(&pdev->dev, parent_clk);

	bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
	if (IS_ERR(bus->rst)) {
		dev_err(&pdev->dev,
			"missing or invalid reset controller device tree entry\n");
		return PTR_ERR(bus->rst);
	}
	reset_control_deassert(bus->rst);

	ret = of_property_read_u32(pdev->dev.of_node,
				   "bus-frequency", &bus->bus_frequency);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not read bus-frequency property\n");
		bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
	}

	match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
	if (!match)
		bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
	else
		bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
				match->data;

	/* Initialize the I2C adapter */
	spin_lock_init(&bus->lock);
	init_completion(&bus->cmd_complete);
	bus->adap.owner = THIS_MODULE;
	bus->adap.retries = 0;
	bus->adap.algo = &aspeed_i2c_algo;
	bus->adap.dev.parent = &pdev->dev;
	bus->adap.dev.of_node = pdev->dev.of_node;
	strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
	i2c_set_adapdata(&bus->adap, bus);

	bus->dev = &pdev->dev;

	/* Clean up any left over interrupt state. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
	/*
	 * bus.lock does not need to be held because the interrupt handler has
	 * not been enabled yet.
	 */
	ret = aspeed_i2c_init(bus, pdev);
	if (ret < 0)
		return ret;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
			       0, dev_name(&pdev->dev), bus);
	if (ret < 0)
		return ret;

	ret = i2c_add_adapter(&bus->adap);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, bus);

	dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
		 bus->adap.nr, irq);

	return 0;
}

static int aspeed_i2c_remove_bus(struct platform_device *pdev)
{
	struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	spin_unlock_irqrestore(&bus->lock, flags);

	reset_control_assert(bus->rst);

	i2c_del_adapter(&bus->adap);

	return 0;
}

static struct platform_driver aspeed_i2c_bus_driver = {
	.probe		= aspeed_i2c_probe_bus,
	.remove		= aspeed_i2c_remove_bus,
	.driver		= {
		.name		= "aspeed-i2c-bus",
		.of_match_table	= aspeed_i2c_bus_of_table,
	},
};
module_platform_driver(aspeed_i2c_bus_driver);

MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>");
MODULE_DESCRIPTION("Aspeed I2C Bus Driver");
MODULE_LICENSE("GPL v2");