// SPDX-License-Identifier: GPL-2.0
/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "nvec.h"

#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		BIT(10)
#define I2C_CNFG_NEW_MASTER_SFM		BIT(11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWSL		BIT(2)
#define I2C_SL_NACK		BIT(1)
#define I2C_SL_RESP		BIT(0)
#define I2C_SL_IRQ		BIT(3)
#define END_TRANS		BIT(4)
#define RCVD			BIT(2)
#define RNW			BIT(1)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c

/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category  {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};

enum nvec_sleep_subcmds {
	GLOBAL_EVENTS,
	AP_PWR_DOWN,
	AP_SUSPEND,
};

#define CNF_EVENT_REPORTING 0x01
#define GET_FIRMWARE_VERSION 0x15
#define LID_SWITCH BIT(1)
#define PWR_BUTTON BIT(15)

static struct nvec_chip *nvec_power_handle;

static const struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
	},
	{
		.name = "nvec-mouse",
	},
	{
		.name = "nvec-power",
		.id = 0,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		.name = "nvec-paz00",
	},
};

/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 * @events: Unused
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
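
/*
 * Usage sketch (not part of this driver): a sub-device driver would embed a
 * notifier_block and register it against the parent nvec instance.  The
 * callback and variable names below are hypothetical; NVEC_SYS is used only
 * as an example event type.
 *
 *	static int example_notifier_call(struct notifier_block *nb,
 *					 unsigned long event_type, void *data)
 *	{
 *		unsigned char *msg = data;
 *
 *		if (event_type != NVEC_SYS)
 *			return NOTIFY_DONE;
 *
 *		// msg[0] is the type byte, msg[1] the payload length
 *		return NOTIFY_STOP;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_notifier_call,
 *	};
 *
 *	nvec_register_notifier(nvec, &example_nb, 0);
 */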

/**
 * nvec_unregister_notifier - Unregister a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to unregister
 *
 * Unregisters a notifier with @nvec. The notifier will be removed from the
 * atomic notifier chain.
 */
int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_unregister_notifier);

/*
 * nvec_status_notifier - The final notifier
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
				unsigned long event_type, void *data)
{
	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
						nvec_status_notifier);
	unsigned char *msg = data;

	if (event_type != NVEC_CNTL)
		return NOTIFY_DONE;

	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
		       msg, msg[1] + 2, true);

	return NOTIFY_OK;
}

/**
 * nvec_msg_alloc - Allocate a message from the message pool
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
				       enum nvec_msg_category category)
{
	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

	for (; i < NVEC_POOL_SIZE; i++) {
		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
			return &nvec->msg_pool[i];
		}
	}

	dev_err(nvec->dev, "could not allocate %s buffer\n",
		(category == NVEC_MSG_TX) ? "TX" : "RX");

	return NULL;
}
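
/*
 * Illustration of the split above (pool size assumed for the example): with
 * a pool of 64 messages, an RX allocation may take any free slot 0..63,
 * while a TX allocation only scans slots 16..63, so a quarter of the pool
 * always remains reserved for incoming messages.
 */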

/**
 * nvec_msg_free - Free a message allocated from the pool
 * @nvec: A &struct nvec_chip
 * @msg:  A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message.
 */
void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);

/**
 * nvec_msg_is_event - Return %true if @msg is an event
 * @msg: A message
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
	return msg->data[0] >> 7;
}

/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
	bool is_event = nvec_msg_is_event(msg);
	int event_length = (msg->data[0] & 0x60) >> 5;

	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
	if (!is_event || event_length == NVEC_VAR_SIZE)
		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
	else if (event_length == NVEC_2BYTES)
		return 2;
	else if (event_length == NVEC_3BYTES)
		return 3;
	return 0;
}
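
/*
 * Example, derived from the logic above: for a non-event message such as a
 * command response, the size is always data[1] + 2, i.e. the payload length
 * stored in byte 1 plus the command and count bytes.  A response with
 * data[1] == 0x02 therefore occupies four bytes in total.
 */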

/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpiod_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpiod_get_value(nvec->gpiod), value);
	gpiod_set_value(nvec->gpiod, value);
}

/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
		     short size)
{
	struct nvec_msg *msg;
	unsigned long flags;

	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

	if (!msg)
		return -ENOMEM;

	msg->data[0] = size;
	memcpy(msg->data + 1, data, size);
	msg->size = size + 1;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	list_add_tail(&msg->node, &nvec->tx_data);
	spin_unlock_irqrestore(&nvec->tx_lock, flags);

	schedule_work(&nvec->tx_work);

	return 0;
}
EXPORT_SYMBOL(nvec_write_async);
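
/*
 * Usage sketch: queueing a request without waiting for the response.  The
 * payload mirrors nvec_toggle_global_events() below; any answer or resulting
 * event is delivered through the notifier chain instead of being returned.
 *
 *	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, true };
 *
 *	nvec_write_async(nvec, global_events, sizeof(global_events));
 */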

/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 * @msg:  The response message received
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: 0 on success, a negative error code on failure.
 * The response message is returned in @msg and must be freed
 * with nvec_msg_free() once it is no longer used.
 */
int nvec_write_sync(struct nvec_chip *nvec,
		    const unsigned char *data, short size,
		    struct nvec_msg **msg)
{
	mutex_lock(&nvec->sync_write_mutex);

	*msg = NULL;
	nvec->sync_write_pending = (data[1] << 8) + data[0];

	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
		return -ENOMEM;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
		nvec->sync_write_pending);
	if (!(wait_for_completion_timeout(&nvec->sync_write,
					  msecs_to_jiffies(2000)))) {
		dev_warn(nvec->dev,
			 "timeout waiting for sync write to complete\n");
		mutex_unlock(&nvec->sync_write_mutex);
		return -ETIMEDOUT;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

	*msg = nvec->last_sync_msg;

	mutex_unlock(&nvec->sync_write_mutex);

	return 0;
}
EXPORT_SYMBOL(nvec_write_sync);
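
/*
 * Usage sketch, mirroring the firmware version query in tegra_nvec_probe():
 * the caller owns the response and must release it with nvec_msg_free().
 *
 *	struct nvec_msg *msg;
 *	char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION };
 *
 *	if (!nvec_write_sync(nvec, get_firmware_version, 2, &msg)) {
 *		// msg->data[4..7] carry the version bytes
 *		nvec_msg_free(nvec, msg);
 *	}
 */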

/**
 * nvec_toggle_global_events - enables or disables global event reporting
 * @nvec: nvec handle
 * @state: true for enable, false for disable
 *
 * This switches on/off global event reports by the embedded controller.
 */
static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
{
	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };

	nvec_write_async(nvec, global_events, 3);
}

/**
 * nvec_event_mask - fill the command string with event bitfield
 * @ev: points to event command string
 * @mask: bit to insert into the event mask
 *
 * The configure event command expects a 32-bit bitfield which describes
 * which events to enable. The bitfield has the following structure
 * (from highest byte to lowest):
 *	system state bits 7-0
 *	system state bits 15-8
 *	oem system state bits 7-0
 *	oem system state bits 15-8
 */
static void nvec_event_mask(char *ev, u32 mask)
{
	ev[3] = mask >> 16 & 0xff;
	ev[4] = mask >> 24 & 0xff;
	ev[5] = mask >> 0  & 0xff;
	ev[6] = mask >> 8  & 0xff;
}
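
/*
 * Worked example: for mask == LID_SWITCH (bit 1, i.e. 0x00000002) the bytes
 * written above are ev[3] = 0x00, ev[4] = 0x00, ev[5] = 0x02 and ev[6] = 0x00;
 * for PWR_BUTTON (bit 15) they are 0x00, 0x00, 0x00 and 0x80.
 */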

/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(&nvec->ec_transfer,
								msecs_to_jiffies(5000));

		if (err == 0) {
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}

/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier().
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
		dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
		return -EINVAL;
	}

	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
		print_hex_dump(KERN_WARNING, "ec system event ",
			       DUMP_PREFIX_NONE, 16, 1, msg->data,
			       msg->data[1] + 2, true);

	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
				   msg->data);

	return 0;
}

/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		if (nvec->sync_write_pending ==
		      (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}

/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
	/* We got an END_TRANS, let's skip this, maybe there's an event */
	if (nvec->tx->pos != nvec->tx->size) {
		dev_err(nvec->dev, "premature END_TRANS, resending\n");
		nvec->tx->pos = 0;
		nvec_gpio_set_value(nvec, 0);
	} else {
		nvec->state = 0;
	}
}

/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on an RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
			(uint)nvec_msg_size(nvec->rx),
			(uint)nvec->rx->pos);

		nvec_msg_free(nvec, nvec->rx);
		nvec->state = 0;

		/* Battery quirk - Often incomplete, and likes to crash */
		if (nvec->rx->data[0] == NVEC_BAT)
			complete(&nvec->ec_transfer);

		return;
	}

	spin_lock(&nvec->rx_lock);

	/*
	 * Add the received data to the work list and move the ring buffer
	 * pointer to the next entry.
	 */
	list_add_tail(&nvec->rx->node, &nvec->rx_data);

	spin_unlock(&nvec->rx_lock);

	nvec->state = 0;

	if (!nvec_msg_is_event(nvec->rx))
		complete(&nvec->ec_transfer);

	schedule_work(&nvec->rx_work);
}

/**
 * nvec_invalid_flags - Send an error message about invalid flags and jump
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether we shall jump to state 0.
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}

/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}

/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something; read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

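	/*
	 * Slave state machine implemented by the switch below:
	 *   0: idle, expect a new transfer start (I2C_SL_IRQ | RCVD)
	 *   1: expect the command byte sent by the EC
	 *   2: first byte after the command; either the EC requests a read
	 *      (prepare a TX message, go to state 3) or it keeps writing
	 *      (store the byte, go to state 4)
	 *   3: EC block read, transmit TX bytes until END_TRANS
	 *   4: EC block write, store received bytes until END_TRANS
	 */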
	switch (nvec->state) {
	case 0:		/* Verify that it's a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(!nvec->rx)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev,
				"tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint)(nvec->tx ? nvec->tx->pos : 0),
				(uint)(nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx ? nvec->rx->pos : 0,
				NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
				"received address 0x%02x, expected 0x%02x\n",
				received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	/*
	 * TODO: A correct fix needs to be found for this.
	 *
	 * We experience fewer incomplete messages with this delay than
	 * without it, but we don't know why. Help is appreciated.
	 */
	udelay(100);

	return IRQ_HANDLED;
}

static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_prepare_enable(nvec->i2c_clk);

	reset_control_assert(nvec->rst);
	udelay(2);
	reset_control_deassert(nvec->rst);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);
}

#ifdef CONFIG_PM_SLEEP
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif

static void nvec_power_off(void)
{
	char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };

	nvec_toggle_global_events(nvec_power_handle, false);
	nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}

static int tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct device *dev = &pdev->dev;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	void __iomem *base;
	char	get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
		unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
		enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };

	if (!dev->of_node) {
		dev_err(dev, "must be instantiated using device tree\n");
		return -ENODEV;
	}

	nvec = devm_kzalloc(dev, sizeof(struct nvec_chip), GFP_KERNEL);
	if (!nvec)
		return -ENOMEM;

	platform_set_drvdata(pdev, nvec);
	nvec->dev = dev;

	if (of_property_read_u32(dev->of_node, "slave-addr", &nvec->i2c_addr)) {
		dev_err(dev, "no i2c address specified");
		return -ENODEV;
	}

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	nvec->irq = platform_get_irq(pdev, 0);
	if (nvec->irq < 0)
		return -ENODEV;

	i2c_clk = devm_clk_get(dev, "div-clk");
	if (IS_ERR(i2c_clk)) {
		dev_err(dev, "failed to get controller clock\n");
		return -ENODEV;
	}

	nvec->rst = devm_reset_control_get_exclusive(dev, "i2c");
	if (IS_ERR(nvec->rst)) {
		dev_err(dev, "failed to get controller reset\n");
		return PTR_ERR(nvec->rst);
	}

	nvec->base = base;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);

	nvec->gpiod = devm_gpiod_get(dev, "request", GPIOD_OUT_HIGH);
	if (IS_ERR(nvec->gpiod)) {
		dev_err(dev, "couldn't request gpio\n");
		return PTR_ERR(nvec->gpiod);
	}

	err = devm_request_irq(dev, nvec->irq, nvec_interrupt, 0,
			       "nvec", nvec);
	if (err) {
		dev_err(dev, "couldn't request irq\n");
		return -ENODEV;
	}
	disable_irq(nvec->irq);

	tegra_init_i2c_slave(nvec);

	/* enable event reporting */
	nvec_toggle_global_events(nvec, true);

	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);

	if (!err) {
		dev_warn(dev,
			 "ec firmware version %02x.%02x.%02x / %02x\n",
			 msg->data[4], msg->data[5],
			 msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	ret = mfd_add_devices(dev, 0, nvec_devices,
			      ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
	if (ret)
		dev_err(dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, unmute_speakers, 4);

	/* enable lid switch event */
	nvec_event_mask(enable_event, LID_SWITCH);
	nvec_write_async(nvec, enable_event, 7);

	/* enable power button event */
	nvec_event_mask(enable_event, PWR_BUTTON);
	nvec_write_async(nvec, enable_event, 7);

	return 0;
}

static int tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	nvec_toggle_global_events(nvec, false);
	mfd_remove_devices(nvec->dev);
	nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
	cancel_work_sync(&nvec->rx_work);
	cancel_work_sync(&nvec->tx_work);
	/* FIXME: needs check whether nvec is responsible for power off */
	pm_power_off = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
	int err;
	struct nvec_chip *nvec = dev_get_drvdata(dev);
	struct nvec_msg *msg;
	char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };

	dev_dbg(nvec->dev, "suspending\n");

	/* keep these sync or you'll break suspend */
	nvec_toggle_global_events(nvec, false);

	err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
	if (!err)
		nvec_msg_free(nvec, msg);

	nvec_disable_i2c_slave(nvec);

	return 0;
}

static int nvec_resume(struct device *dev)
{
	struct nvec_chip *nvec = dev_get_drvdata(dev);

	dev_dbg(nvec->dev, "resuming\n");
	tegra_init_i2c_slave(nvec);
	nvec_toggle_global_events(nvec, true);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);

/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);

static struct platform_driver nvec_device_driver = {
	.probe   = tegra_nvec_probe,
	.remove  = tegra_nvec_remove,
	.driver  = {
		.name = "nvec",
		.pm = &nvec_pm_ops,
		.of_match_table = nvidia_nvec_of_match,
	}
};

module_platform_driver(nvec_device_driver);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");