1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * UCSI driver for Cypress CCGx Type-C controller
4  *
5  * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
6  * Author: Ajay Gupta <ajayg@nvidia.com>
7  *
8  * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
9  */
10 #include <linux/acpi.h>
11 #include <linux/delay.h>
12 #include <linux/firmware.h>
13 #include <linux/i2c.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/usb/typec_dp.h>
20 
21 #include <asm/unaligned.h>
22 #include "ucsi.h"
23 
24 enum enum_fw_mode {
25 	BOOT,   /* bootloader */
26 	FW1,    /* FW partition-1 (contains secondary fw) */
27 	FW2,    /* FW partition-2 (contains primary fw) */
28 	FW_INVALID,
29 };
30 
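/*
 * The CCGX_RAB_* values below are 16-bit register offsets ("RAB"
 * presumably stands for register address block) in the CCGx HPI
 * register space; ccg_read()/ccg_write() prepend them to every I2C
 * transfer. CCGX_RAB_UCSI_DATA_BLOCK() maps an offset within the UCSI
 * data structures into the 0xf0xx window.
 */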
31 #define CCGX_RAB_DEVICE_MODE			0x0000
32 #define CCGX_RAB_INTR_REG			0x0006
33 #define  DEV_INT				BIT(0)
34 #define  PORT0_INT				BIT(1)
35 #define  PORT1_INT				BIT(2)
36 #define  UCSI_READ_INT				BIT(7)
37 #define CCGX_RAB_JUMP_TO_BOOT			0x0007
38 #define  TO_BOOT				'J'
39 #define  TO_ALT_FW				'A'
40 #define CCGX_RAB_RESET_REQ			0x0008
41 #define  RESET_SIG				'R'
42 #define  CMD_RESET_I2C				0x0
43 #define  CMD_RESET_DEV				0x1
44 #define CCGX_RAB_ENTER_FLASHING			0x000A
45 #define  FLASH_ENTER_SIG			'P'
46 #define CCGX_RAB_VALIDATE_FW			0x000B
47 #define CCGX_RAB_FLASH_ROW_RW			0x000C
48 #define  FLASH_SIG				'F'
49 #define  FLASH_RD_CMD				0x0
50 #define  FLASH_WR_CMD				0x1
51 #define  FLASH_FWCT1_WR_CMD			0x2
52 #define  FLASH_FWCT2_WR_CMD			0x3
53 #define  FLASH_FWCT_SIG_WR_CMD			0x4
54 #define CCGX_RAB_READ_ALL_VER			0x0010
55 #define CCGX_RAB_READ_FW2_VER			0x0020
56 #define CCGX_RAB_UCSI_CONTROL			0x0039
57 #define CCGX_RAB_UCSI_CONTROL_START		BIT(0)
58 #define CCGX_RAB_UCSI_CONTROL_STOP		BIT(1)
59 #define CCGX_RAB_UCSI_DATA_BLOCK(offset)	(0xf000 | ((offset) & 0xff))
60 #define REG_FLASH_RW_MEM        0x0200
61 #define DEV_REG_IDX				CCGX_RAB_DEVICE_MODE
62 #define CCGX_RAB_PDPORT_ENABLE			0x002C
63 #define  PDPORT_1		BIT(0)
64 #define  PDPORT_2		BIT(1)
65 #define CCGX_RAB_RESPONSE			0x007E
66 #define  ASYNC_EVENT				BIT(7)
67 
68 /* CCGx events & async msg codes */
69 #define RESET_COMPLETE		0x80
70 #define EVENT_INDEX		RESET_COMPLETE
71 #define PORT_CONNECT_DET	0x84
72 #define PORT_DISCONNECT_DET	0x85
#define ROLE_SWAP_COMPLETE	0x87
74 
75 /* ccg firmware */
76 #define CYACD_LINE_SIZE         527
77 #define CCG4_ROW_SIZE           256
78 #define FW1_METADATA_ROW        0x1FF
79 #define FW2_METADATA_ROW        0x1FE
80 #define FW_CFG_TABLE_SIG_SIZE	256
81 
82 static int secondary_fw_min_ver = 41;
83 
84 enum enum_flash_mode {
85 	SECONDARY_BL,	/* update secondary using bootloader */
86 	PRIMARY,	/* update primary using secondary */
87 	SECONDARY,	/* update secondary using primary */
88 	FLASH_NOT_NEEDED,	/* update not required */
89 	FLASH_INVALID,
90 };
91 
92 static const char * const ccg_fw_names[] = {
93 	"ccg_boot.cyacd",
94 	"ccg_primary.cyacd",
95 	"ccg_secondary.cyacd"
96 };
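/*
 * Note: the array above is indexed by enum enum_flash_mode, so
 * SECONDARY_BL selects ccg_boot.cyacd, PRIMARY selects ccg_primary.cyacd
 * and SECONDARY selects ccg_secondary.cyacd (see the ccg_fw_names[mode]
 * lookup in do_flash()).
 */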
97 
98 struct ccg_dev_info {
99 #define CCG_DEVINFO_FWMODE_SHIFT (0)
100 #define CCG_DEVINFO_FWMODE_MASK (0x3 << CCG_DEVINFO_FWMODE_SHIFT)
101 #define CCG_DEVINFO_PDPORTS_SHIFT (2)
102 #define CCG_DEVINFO_PDPORTS_MASK (0x3 << CCG_DEVINFO_PDPORTS_SHIFT)
103 	u8 mode;
104 	u8 bl_mode;
105 	__le16 silicon_id;
106 	__le16 bl_last_row;
107 } __packed;
108 
109 struct version_format {
110 	__le16 build;
111 	u8 patch;
112 	u8 ver;
113 #define CCG_VERSION_PATCH(x) ((x) << 16)
114 #define CCG_VERSION(x)	((x) << 24)
115 #define CCG_VERSION_MIN_SHIFT (0)
116 #define CCG_VERSION_MIN_MASK (0xf << CCG_VERSION_MIN_SHIFT)
117 #define CCG_VERSION_MAJ_SHIFT (4)
118 #define CCG_VERSION_MAJ_MASK (0xf << CCG_VERSION_MAJ_SHIFT)
119 } __packed;
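/*
 * CCG_VERSION() above places the version byte (major and minor nibbles)
 * in bits 31:24 and CCG_VERSION_PATCH() places the patch byte in bits
 * 23:16 of a 32-bit version value; get_fw_info() builds uc->fw_version
 * this way, and ccg_check_fw_version() additionally ORs the
 * little-endian build number into bits 15:0 before comparing versions.
 */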
120 
/*
 * Firmware versions 3.1.10 and earlier built for NVIDIA have a known
 * issue of missing the interrupt when a device is connected during
 * runtime resume.
 */
125 #define CCG_FW_BUILD_NVIDIA	(('n' << 8) | 'v')
126 #define CCG_OLD_FW_VERSION	(CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))
127 
128 /* Altmode offset for NVIDIA Function Test Board (FTB) */
129 #define NVIDIA_FTB_DP_OFFSET	(2)
130 #define NVIDIA_FTB_DBG_OFFSET	(3)
131 
132 struct version_info {
133 	struct version_format base;
134 	struct version_format app;
135 };
136 
137 struct fw_config_table {
138 	u32 identity;
139 	u16 table_size;
140 	u8 fwct_version;
141 	u8 is_key_change;
142 	u8 guid[16];
143 	struct version_format base;
144 	struct version_format app;
145 	u8 primary_fw_digest[32];
146 	u32 key_exp_length;
147 	u8 key_modulus[256];
148 	u8 key_exp[4];
149 };
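/*
 * For signed firmware images, a struct fw_config_table (recognized by
 * the "FWCT" marker in its identity field) followed by a
 * FW_CFG_TABLE_SIG_SIZE byte signature is appended at the end of the
 * .cyacd file; see ccg_check_fw_version() and do_flash() for how it is
 * located and parsed.
 */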
150 
151 /* CCGx response codes */
152 enum ccg_resp_code {
153 	CMD_NO_RESP             = 0x00,
154 	CMD_SUCCESS             = 0x02,
155 	FLASH_DATA_AVAILABLE    = 0x03,
156 	CMD_INVALID             = 0x05,
157 	FLASH_UPDATE_FAIL       = 0x07,
158 	INVALID_FW              = 0x08,
159 	INVALID_ARG             = 0x09,
160 	CMD_NOT_SUPPORT         = 0x0A,
161 	TRANSACTION_FAIL        = 0x0C,
162 	PD_CMD_FAIL             = 0x0D,
163 	UNDEF_ERROR             = 0x0F,
164 	INVALID_RESP		= 0x10,
165 };
166 
167 #define CCG_EVENT_MAX	(EVENT_INDEX + 43)
168 
169 struct ccg_cmd {
170 	u16 reg;
171 	u32 data;
172 	int len;
173 	u32 delay; /* ms delay for cmd timeout  */
174 };
175 
176 struct ccg_resp {
177 	u8 code;
178 	u8 length;
179 };
180 
181 struct ucsi_ccg_altmode {
182 	u16 svid;
183 	u32 mid;
184 	u8 linked_idx;
185 	u8 active_idx;
186 #define UCSI_MULTI_DP_INDEX	(0xff)
187 	bool checked;
188 } __packed;
189 
190 struct ucsi_ccg {
191 	struct device *dev;
192 	struct ucsi *ucsi;
193 	struct i2c_client *client;
194 
195 	struct ccg_dev_info info;
196 	/* version info for boot, primary and secondary */
197 	struct version_info version[FW2 + 1];
198 	u32 fw_version;
199 	/* CCG HPI communication flags */
200 	unsigned long flags;
201 #define RESET_PENDING	0
202 #define DEV_CMD_PENDING	1
203 	struct ccg_resp dev_resp;
204 	u8 cmd_resp;
205 	int port_num;
206 	int irq;
207 	struct work_struct work;
208 	struct mutex lock; /* to sync between user and driver thread */
209 
210 	/* fw build with vendor information */
211 	u16 fw_build;
212 	struct work_struct pm_work;
213 
214 	struct completion complete;
215 
216 	u64 last_cmd_sent;
217 	bool has_multiple_dp;
218 	struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
219 	struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
220 };
221 
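/*
 * Low-level register access: every transfer begins with a write of the
 * 16-bit register offset (rab) in little-endian order. Writes append
 * the payload to that message, while reads follow it with an I2C read
 * split into chunks no larger than the adapter's max_read_len quirk.
 * Both helpers hold a runtime PM reference for the duration of the
 * transfer.
 */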
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
223 {
224 	struct i2c_client *client = uc->client;
225 	const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
226 	unsigned char buf[2];
227 	struct i2c_msg msgs[] = {
228 		{
229 			.addr	= client->addr,
230 			.flags  = 0x0,
231 			.len	= sizeof(buf),
232 			.buf	= buf,
233 		},
234 		{
235 			.addr	= client->addr,
236 			.flags  = I2C_M_RD,
237 			.buf	= data,
238 		},
239 	};
240 	u32 rlen, rem_len = len, max_read_len = len;
241 	int status;
242 
243 	/* check any max_read_len limitation on i2c adapter */
244 	if (quirks && quirks->max_read_len)
245 		max_read_len = quirks->max_read_len;
246 
247 	pm_runtime_get_sync(uc->dev);
248 	while (rem_len > 0) {
249 		msgs[1].buf = &data[len - rem_len];
250 		rlen = min_t(u16, rem_len, max_read_len);
251 		msgs[1].len = rlen;
252 		put_unaligned_le16(rab, buf);
253 		status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
254 		if (status < 0) {
255 			dev_err(uc->dev, "i2c_transfer failed %d\n", status);
256 			pm_runtime_put_sync(uc->dev);
257 			return status;
258 		}
259 		rab += rlen;
260 		rem_len -= rlen;
261 	}
262 
263 	pm_runtime_put_sync(uc->dev);
264 	return 0;
265 }
266 
static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
268 {
269 	struct i2c_client *client = uc->client;
270 	unsigned char *buf;
271 	struct i2c_msg msgs[] = {
272 		{
273 			.addr	= client->addr,
274 			.flags  = 0x0,
275 		}
276 	};
277 	int status;
278 
279 	buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
280 	if (!buf)
281 		return -ENOMEM;
282 
283 	put_unaligned_le16(rab, buf);
284 	memcpy(buf + sizeof(rab), data, len);
285 
286 	msgs[0].len = len + sizeof(rab);
287 	msgs[0].buf = buf;
288 
289 	pm_runtime_get_sync(uc->dev);
290 	status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
291 	if (status < 0) {
292 		dev_err(uc->dev, "i2c_transfer failed %d\n", status);
293 		pm_runtime_put_sync(uc->dev);
294 		kfree(buf);
295 		return status;
296 	}
297 
298 	pm_runtime_put_sync(uc->dev);
299 	kfree(buf);
300 	return 0;
301 }
302 
static int ucsi_ccg_init(struct ucsi_ccg *uc)
304 {
305 	unsigned int count = 10;
306 	u8 data;
307 	int status;
308 
309 	data = CCGX_RAB_UCSI_CONTROL_STOP;
310 	status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
311 	if (status < 0)
312 		return status;
313 
314 	data = CCGX_RAB_UCSI_CONTROL_START;
315 	status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
316 	if (status < 0)
317 		return status;
318 
	/*
	 * Flush the CCGx RESPONSE queue by acking interrupts. The UCSI
	 * control register writes above push a response which must be
	 * cleared.
	 */
323 	do {
324 		status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
325 		if (status < 0)
326 			return status;
327 
328 		if (!(data & DEV_INT))
329 			return 0;
330 
331 		status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
332 		if (status < 0)
333 			return status;
334 
335 		usleep_range(10000, 11000);
336 	} while (--count);
337 
338 	return -ETIMEDOUT;
339 }
340 
static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
342 {
343 	u8 cam, new_cam;
344 
345 	cam = data[0];
346 	new_cam = uc->orig[cam].linked_idx;
347 	uc->updated[new_cam].active_idx = cam;
348 	data[0] = new_cam;
349 }
350 
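/*
 * CCGx can report the same DP SVID more than once (seemingly one entry
 * per pin-assignment variant), while the UCSI core expects a single DP
 * altmode per partner. The helpers below therefore merge duplicate DP
 * entries (OR-ing their MIDs) and keep index maps in uc->orig and
 * uc->updated so that the CAM values used by GET_CURRENT_CAM and
 * SET_NEW_CAM can be translated between the two numbering schemes.
 */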
static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
				     struct ucsi_altmode *orig,
				     struct ucsi_altmode *updated)
354 {
355 	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
356 	struct ucsi_ccg_altmode *alt, *new_alt;
357 	int i, j, k = 0;
358 	bool found = false;
359 
360 	alt = uc->orig;
361 	new_alt = uc->updated;
362 	memset(uc->updated, 0, sizeof(uc->updated));
363 
	/*
	 * Copy the original connector altmodes to the new structure.
	 * We need this before the second loop, since the second loop
	 * checks for duplicate altmodes.
	 */
369 	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
370 		alt[i].svid = orig[i].svid;
371 		alt[i].mid = orig[i].mid;
372 		if (!alt[i].svid)
373 			break;
374 	}
375 
376 	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
377 		if (!alt[i].svid)
378 			break;
379 
380 		/* already checked and considered */
381 		if (alt[i].checked)
382 			continue;
383 
384 		if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
385 			/* Found Non DP altmode */
386 			new_alt[k].svid = alt[i].svid;
387 			new_alt[k].mid |= alt[i].mid;
388 			new_alt[k].linked_idx = i;
389 			alt[i].linked_idx = k;
390 			updated[k].svid = new_alt[k].svid;
391 			updated[k].mid = new_alt[k].mid;
392 			k++;
393 			continue;
394 		}
395 
396 		for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
397 			if (alt[i].svid != alt[j].svid ||
398 			    !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
399 				continue;
400 			} else {
401 				/* Found duplicate DP mode */
402 				new_alt[k].svid = alt[i].svid;
403 				new_alt[k].mid |= alt[i].mid | alt[j].mid;
404 				new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
405 				alt[i].linked_idx = k;
406 				alt[j].linked_idx = k;
407 				alt[j].checked = true;
408 				found = true;
409 			}
410 		}
411 		if (found) {
412 			uc->has_multiple_dp = true;
413 		} else {
414 			/* Didn't find any duplicate DP altmode */
415 			new_alt[k].svid = alt[i].svid;
416 			new_alt[k].mid |= alt[i].mid;
417 			new_alt[k].linked_idx = i;
418 			alt[i].linked_idx = k;
419 		}
420 		updated[k].svid = new_alt[k].svid;
421 		updated[k].mid = new_alt[k].mid;
422 		k++;
423 	}
424 	return found;
425 }
426 
static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
					    struct ucsi_connector *con,
					    u64 *cmd)
430 {
431 	struct ucsi_ccg_altmode *new_port, *port;
432 	struct typec_altmode *alt = NULL;
433 	u8 new_cam, cam, pin;
434 	bool enter_new_mode;
435 	int i, j, k = 0xff;
436 
437 	port = uc->orig;
438 	new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
439 	new_port = &uc->updated[new_cam];
440 	cam = new_port->linked_idx;
441 	enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);
442 
	/*
	 * If the CAM is UCSI_MULTI_DP_INDEX, this is a DP altmode with
	 * multiple DP modes. Find the CAM with the best pin assignment
	 * among all DP modes. Prioritize pin E->D->C after making sure
	 * the partner supports that pin.
	 */
449 	if (cam == UCSI_MULTI_DP_INDEX) {
450 		if (enter_new_mode) {
451 			for (i = 0; con->partner_altmode[i]; i++) {
452 				alt = con->partner_altmode[i];
453 				if (alt->svid == new_port->svid)
454 					break;
455 			}
			/*
			 * alt will always be non-NULL since this is a
			 * UCSI_SET_NEW_CAM command, so there will be at
			 * least one con->partner_altmode[i] with an svid
			 * matching new_port->svid.
			 */
462 			for (j = 0; port[j].svid; j++) {
463 				pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
464 				if (alt && port[j].svid == alt->svid &&
465 				    (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
466 					/* prioritize pin E->D->C */
467 					if (k == 0xff || (k != 0xff && pin >
468 					    DP_CONF_GET_PIN_ASSIGN(port[k].mid))
469 					    ) {
470 						k = j;
471 					}
472 				}
473 			}
474 			cam = k;
475 			new_port->active_idx = cam;
476 		} else {
477 			cam = new_port->active_idx;
478 		}
479 	}
480 	*cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
481 	*cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
482 }
483 
/*
 * Change the order of vdo values of the NVIDIA test device FTB
 * (Function Test Board), which reports its altmode list with vdo=0x3
 * first and then vdo=0x1. The current logic assigns the mode value
 * based on the order in the altmode list, which causes a mismatch of
 * CON and SOP altmodes since the NVIDIA GPU connector reports vdo=0x1
 * first and then vdo=0x3.
 */
static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
				    struct ucsi_altmode *alt)
494 {
495 	switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) {
496 	case NVIDIA_FTB_DP_OFFSET:
497 		if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
498 			alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
499 				DP_CAP_DP_SIGNALING | DP_CAP_USB |
500 				DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
501 		break;
502 	case NVIDIA_FTB_DBG_OFFSET:
503 		if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO)
504 			alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO;
505 		break;
506 	default:
507 		break;
508 	}
509 }
510 
static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
			 void *val, size_t val_len)
513 {
514 	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
515 	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
516 	struct ucsi_altmode *alt;
517 	int ret;
518 
519 	ret = ccg_read(uc, reg, val, val_len);
520 	if (ret)
521 		return ret;
522 
523 	if (offset != UCSI_MESSAGE_IN)
524 		return ret;
525 
526 	switch (UCSI_COMMAND(uc->last_cmd_sent)) {
527 	case UCSI_GET_CURRENT_CAM:
528 		if (uc->has_multiple_dp)
529 			ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
530 		break;
531 	case UCSI_GET_ALTERNATE_MODES:
532 		if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) ==
533 		    UCSI_RECIPIENT_SOP) {
534 			alt = val;
535 			if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
536 				ucsi_ccg_nvidia_altmode(uc, alt);
537 		}
538 		break;
539 	default:
540 		break;
541 	}
542 	uc->last_cmd_sent = 0;
543 
544 	return ret;
545 }
546 
static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
				const void *val, size_t val_len)
549 {
550 	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
551 
552 	return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
553 }
554 
static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
			       const void *val, size_t val_len)
557 {
558 	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
559 	struct ucsi_connector *con;
560 	int con_index;
561 	int ret;
562 
563 	mutex_lock(&uc->lock);
564 	pm_runtime_get_sync(uc->dev);
565 	set_bit(DEV_CMD_PENDING, &uc->flags);
566 
567 	if (offset == UCSI_CONTROL && val_len == sizeof(uc->last_cmd_sent)) {
568 		uc->last_cmd_sent = *(u64 *)val;
569 
570 		if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
571 		    uc->has_multiple_dp) {
572 			con_index = (uc->last_cmd_sent >> 16) &
573 				    UCSI_CMD_CONNECTOR_MASK;
574 			con = &uc->ucsi->connector[con_index - 1];
575 			ucsi_ccg_update_set_new_cam_cmd(uc, con, (u64 *)val);
576 		}
577 	}
578 
579 	ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
580 	if (ret)
581 		goto err_clear_bit;
582 
583 	if (!wait_for_completion_timeout(&uc->complete, msecs_to_jiffies(5000)))
584 		ret = -ETIMEDOUT;
585 
586 err_clear_bit:
587 	clear_bit(DEV_CMD_PENDING, &uc->flags);
588 	pm_runtime_put_sync(uc->dev);
589 	mutex_unlock(&uc->lock);
590 
591 	return ret;
592 }
593 
594 static const struct ucsi_operations ucsi_ccg_ops = {
595 	.read = ucsi_ccg_read,
596 	.sync_write = ucsi_ccg_sync_write,
597 	.async_write = ucsi_ccg_async_write,
598 	.update_altmodes = ucsi_ccg_update_altmodes
599 };
600 
static irqreturn_t ccg_irq_handler(int irq, void *data)
602 {
603 	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
604 	struct ucsi_ccg *uc = data;
605 	u8 intr_reg;
606 	u32 cci;
607 	int ret;
608 
609 	ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
610 	if (ret)
		return IRQ_NONE;
612 
613 	ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
614 	if (ret)
615 		goto err_clear_irq;
616 
617 	if (UCSI_CCI_CONNECTOR(cci))
618 		ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
619 
620 	if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
621 	    cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
622 		complete(&uc->complete);
623 
624 err_clear_irq:
625 	ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
626 
627 	return IRQ_HANDLED;
628 }
629 
static void ccg_pm_workaround_work(struct work_struct *pm_work)
631 {
632 	ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
633 }
634 
static int get_fw_info(struct ucsi_ccg *uc)
636 {
637 	int err;
638 
639 	err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)(&uc->version),
640 		       sizeof(uc->version));
641 	if (err < 0)
642 		return err;
643 
644 	uc->fw_version = CCG_VERSION(uc->version[FW2].app.ver) |
645 			CCG_VERSION_PATCH(uc->version[FW2].app.patch);
646 
647 	err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
648 		       sizeof(uc->info));
649 	if (err < 0)
650 		return err;
651 
652 	return 0;
653 }
654 
static inline bool invalid_async_evt(int code)
656 {
657 	return (code >= CCG_EVENT_MAX) || (code < EVENT_INDEX);
658 }
659 
static void ccg_process_response(struct ucsi_ccg *uc)
661 {
662 	struct device *dev = uc->dev;
663 
664 	if (uc->dev_resp.code & ASYNC_EVENT) {
665 		if (uc->dev_resp.code == RESET_COMPLETE) {
666 			if (test_bit(RESET_PENDING, &uc->flags))
667 				uc->cmd_resp = uc->dev_resp.code;
668 			get_fw_info(uc);
669 		}
670 		if (invalid_async_evt(uc->dev_resp.code))
671 			dev_err(dev, "invalid async evt %d\n",
672 				uc->dev_resp.code);
673 	} else {
674 		if (test_bit(DEV_CMD_PENDING, &uc->flags)) {
675 			uc->cmd_resp = uc->dev_resp.code;
676 			clear_bit(DEV_CMD_PENDING, &uc->flags);
677 		} else {
678 			dev_err(dev, "dev resp 0x%04x but no cmd pending\n",
679 				uc->dev_resp.code);
680 		}
681 	}
682 }
683 
static int ccg_read_response(struct ucsi_ccg *uc)
685 {
686 	unsigned long target = jiffies + msecs_to_jiffies(1000);
687 	struct device *dev = uc->dev;
688 	u8 intval;
689 	int status;
690 
691 	/* wait for interrupt status to get updated */
692 	do {
693 		status = ccg_read(uc, CCGX_RAB_INTR_REG, &intval,
694 				  sizeof(intval));
695 		if (status < 0)
696 			return status;
697 
698 		if (intval & DEV_INT)
699 			break;
700 		usleep_range(500, 600);
701 	} while (time_is_after_jiffies(target));
702 
703 	if (time_is_before_jiffies(target)) {
704 		dev_err(dev, "response timeout error\n");
705 		return -ETIME;
706 	}
707 
708 	status = ccg_read(uc, CCGX_RAB_RESPONSE, (u8 *)&uc->dev_resp,
709 			  sizeof(uc->dev_resp));
710 	if (status < 0)
711 		return status;
712 
713 	status = ccg_write(uc, CCGX_RAB_INTR_REG, &intval, sizeof(intval));
714 	if (status < 0)
715 		return status;
716 
717 	return 0;
718 }
719 
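/*
 * HPI command flow used by the flashing helpers below: write the
 * command register, wait cmd->delay ms, then poll CCGX_RAB_INTR_REG for
 * DEV_INT, read the response code from CCGX_RAB_RESPONSE and ack the
 * interrupt. ccg_process_response() routes async events (ASYNC_EVENT
 * bit set) separately from command responses.
 */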
720 /* Caller must hold uc->lock */
static int ccg_send_command(struct ucsi_ccg *uc, struct ccg_cmd *cmd)
722 {
723 	struct device *dev = uc->dev;
724 	int ret;
725 
726 	switch (cmd->reg & 0xF000) {
727 	case DEV_REG_IDX:
728 		set_bit(DEV_CMD_PENDING, &uc->flags);
729 		break;
730 	default:
731 		dev_err(dev, "invalid cmd register\n");
732 		break;
733 	}
734 
735 	ret = ccg_write(uc, cmd->reg, (u8 *)&cmd->data, cmd->len);
736 	if (ret < 0)
737 		return ret;
738 
739 	msleep(cmd->delay);
740 
741 	ret = ccg_read_response(uc);
742 	if (ret < 0) {
743 		dev_err(dev, "response read error\n");
744 		switch (cmd->reg & 0xF000) {
745 		case DEV_REG_IDX:
746 			clear_bit(DEV_CMD_PENDING, &uc->flags);
747 			break;
748 		default:
749 			dev_err(dev, "invalid cmd register\n");
750 			break;
751 		}
752 		return -EIO;
753 	}
754 	ccg_process_response(uc);
755 
756 	return uc->cmd_resp;
757 }
758 
static int ccg_cmd_enter_flashing(struct ucsi_ccg *uc)
760 {
761 	struct ccg_cmd cmd;
762 	int ret;
763 
764 	cmd.reg = CCGX_RAB_ENTER_FLASHING;
765 	cmd.data = FLASH_ENTER_SIG;
766 	cmd.len = 1;
767 	cmd.delay = 50;
768 
769 	mutex_lock(&uc->lock);
770 
771 	ret = ccg_send_command(uc, &cmd);
772 
773 	mutex_unlock(&uc->lock);
774 
775 	if (ret != CMD_SUCCESS) {
776 		dev_err(uc->dev, "enter flashing failed ret=%d\n", ret);
777 		return ret;
778 	}
779 
780 	return 0;
781 }
782 
static int ccg_cmd_reset(struct ucsi_ccg *uc)
784 {
785 	struct ccg_cmd cmd;
786 	u8 *p;
787 	int ret;
788 
789 	p = (u8 *)&cmd.data;
790 	cmd.reg = CCGX_RAB_RESET_REQ;
791 	p[0] = RESET_SIG;
792 	p[1] = CMD_RESET_DEV;
793 	cmd.len = 2;
794 	cmd.delay = 5000;
795 
796 	mutex_lock(&uc->lock);
797 
798 	set_bit(RESET_PENDING, &uc->flags);
799 
800 	ret = ccg_send_command(uc, &cmd);
801 	if (ret != RESET_COMPLETE)
802 		goto err_clear_flag;
803 
804 	ret = 0;
805 
806 err_clear_flag:
807 	clear_bit(RESET_PENDING, &uc->flags);
808 
809 	mutex_unlock(&uc->lock);
810 
811 	return ret;
812 }
813 
static int ccg_cmd_port_control(struct ucsi_ccg *uc, bool enable)
815 {
816 	struct ccg_cmd cmd;
817 	int ret;
818 
819 	cmd.reg = CCGX_RAB_PDPORT_ENABLE;
820 	if (enable)
821 		cmd.data = (uc->port_num == 1) ?
822 			    PDPORT_1 : (PDPORT_1 | PDPORT_2);
823 	else
824 		cmd.data = 0x0;
825 	cmd.len = 1;
826 	cmd.delay = 10;
827 
828 	mutex_lock(&uc->lock);
829 
830 	ret = ccg_send_command(uc, &cmd);
831 
832 	mutex_unlock(&uc->lock);
833 
834 	if (ret != CMD_SUCCESS) {
835 		dev_err(uc->dev, "port control failed ret=%d\n", ret);
836 		return ret;
837 	}
838 	return 0;
839 }
840 
static int ccg_cmd_jump_boot_mode(struct ucsi_ccg *uc, int bl_mode)
842 {
843 	struct ccg_cmd cmd;
844 	int ret;
845 
846 	cmd.reg = CCGX_RAB_JUMP_TO_BOOT;
847 
848 	if (bl_mode)
849 		cmd.data = TO_BOOT;
850 	else
851 		cmd.data = TO_ALT_FW;
852 
853 	cmd.len = 1;
854 	cmd.delay = 100;
855 
856 	mutex_lock(&uc->lock);
857 
858 	set_bit(RESET_PENDING, &uc->flags);
859 
860 	ret = ccg_send_command(uc, &cmd);
861 	if (ret != RESET_COMPLETE)
862 		goto err_clear_flag;
863 
864 	ret = 0;
865 
866 err_clear_flag:
867 	clear_bit(RESET_PENDING, &uc->flags);
868 
869 	mutex_unlock(&uc->lock);
870 
871 	return ret;
872 }
873 
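/*
 * Writing a flash row is a two-step sequence: the 256-byte row is first
 * copied into the REG_FLASH_RW_MEM scratch area, then a
 * CCGX_RAB_FLASH_ROW_RW command triggers programming of the selected
 * row. The extra delays for the FWCT signature write and for row 510
 * are presumably to give the device additional programming time.
 */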
static int
ccg_cmd_write_flash_row(struct ucsi_ccg *uc, u16 row,
			const void *data, u8 fcmd)
877 {
878 	struct i2c_client *client = uc->client;
879 	struct ccg_cmd cmd;
880 	u8 buf[CCG4_ROW_SIZE + 2];
881 	u8 *p;
882 	int ret;
883 
884 	/* Copy the data into the flash read/write memory. */
885 	put_unaligned_le16(REG_FLASH_RW_MEM, buf);
886 
887 	memcpy(buf + 2, data, CCG4_ROW_SIZE);
888 
889 	mutex_lock(&uc->lock);
890 
891 	ret = i2c_master_send(client, buf, CCG4_ROW_SIZE + 2);
892 	if (ret != CCG4_ROW_SIZE + 2) {
893 		dev_err(uc->dev, "REG_FLASH_RW_MEM write fail %d\n", ret);
894 		mutex_unlock(&uc->lock);
895 		return ret < 0 ? ret : -EIO;
896 	}
897 
	/*
	 * Use the FLASH_ROW_READ_WRITE register to trigger writing of
	 * data to the desired flash row.
	 */
900 	p = (u8 *)&cmd.data;
901 	cmd.reg = CCGX_RAB_FLASH_ROW_RW;
902 	p[0] = FLASH_SIG;
903 	p[1] = fcmd;
904 	put_unaligned_le16(row, &p[2]);
905 	cmd.len = 4;
906 	cmd.delay = 50;
907 	if (fcmd == FLASH_FWCT_SIG_WR_CMD)
908 		cmd.delay += 400;
909 	if (row == 510)
910 		cmd.delay += 220;
911 	ret = ccg_send_command(uc, &cmd);
912 
913 	mutex_unlock(&uc->lock);
914 
915 	if (ret != CMD_SUCCESS) {
916 		dev_err(uc->dev, "write flash row failed ret=%d\n", ret);
917 		return ret;
918 	}
919 
920 	return 0;
921 }
922 
static int ccg_cmd_validate_fw(struct ucsi_ccg *uc, unsigned int fwid)
924 {
925 	struct ccg_cmd cmd;
926 	int ret;
927 
928 	cmd.reg = CCGX_RAB_VALIDATE_FW;
929 	cmd.data = fwid;
930 	cmd.len = 1;
931 	cmd.delay = 500;
932 
933 	mutex_lock(&uc->lock);
934 
935 	ret = ccg_send_command(uc, &cmd);
936 
937 	mutex_unlock(&uc->lock);
938 
939 	if (ret != CMD_SUCCESS)
940 		return ret;
941 
942 	return 0;
943 }
944 
static bool ccg_check_vendor_version(struct ucsi_ccg *uc,
				     struct version_format *app,
				     struct fw_config_table *fw_cfg)
948 {
949 	struct device *dev = uc->dev;
950 
951 	/* Check if the fw build is for supported vendors */
952 	if (le16_to_cpu(app->build) != uc->fw_build) {
953 		dev_info(dev, "current fw is not from supported vendor\n");
954 		return false;
955 	}
956 
957 	/* Check if the new fw build is for supported vendors */
958 	if (le16_to_cpu(fw_cfg->app.build) != uc->fw_build) {
959 		dev_info(dev, "new fw is not from supported vendor\n");
960 		return false;
961 	}
962 	return true;
963 }
964 
static bool ccg_check_fw_version(struct ucsi_ccg *uc, const char *fw_name,
				 struct version_format *app)
967 {
968 	const struct firmware *fw = NULL;
969 	struct device *dev = uc->dev;
970 	struct fw_config_table fw_cfg;
971 	u32 cur_version, new_version;
972 	bool is_later = false;
973 
974 	if (request_firmware(&fw, fw_name, dev) != 0) {
975 		dev_err(dev, "error: Failed to open cyacd file %s\n", fw_name);
976 		return false;
977 	}
978 
979 	/*
980 	 * check if signed fw
981 	 * last part of fw image is fw cfg table and signature
982 	 */
983 	if (fw->size < sizeof(fw_cfg) + FW_CFG_TABLE_SIG_SIZE)
984 		goto out_release_firmware;
985 
986 	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
987 	       sizeof(fw_cfg) - FW_CFG_TABLE_SIG_SIZE, sizeof(fw_cfg));
988 
989 	if (fw_cfg.identity != ('F' | 'W' << 8 | 'C' << 16 | 'T' << 24)) {
990 		dev_info(dev, "not a signed image\n");
991 		goto out_release_firmware;
992 	}
993 
994 	/* compare input version with FWCT version */
995 	cur_version = le16_to_cpu(app->build) | CCG_VERSION_PATCH(app->patch) |
996 			CCG_VERSION(app->ver);
997 
998 	new_version = le16_to_cpu(fw_cfg.app.build) |
999 			CCG_VERSION_PATCH(fw_cfg.app.patch) |
1000 			CCG_VERSION(fw_cfg.app.ver);
1001 
1002 	if (!ccg_check_vendor_version(uc, app, &fw_cfg))
1003 		goto out_release_firmware;
1004 
1005 	if (new_version > cur_version)
1006 		is_later = true;
1007 
1008 out_release_firmware:
1009 	release_firmware(fw);
1010 	return is_later;
1011 }
1012 
static int ccg_fw_update_needed(struct ucsi_ccg *uc,
				enum enum_flash_mode *mode)
1015 {
1016 	struct device *dev = uc->dev;
1017 	int err;
1018 	struct version_info version[3];
1019 
1020 	err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
1021 		       sizeof(uc->info));
1022 	if (err) {
1023 		dev_err(dev, "read device mode failed\n");
1024 		return err;
1025 	}
1026 
1027 	err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)version,
1028 		       sizeof(version));
1029 	if (err) {
		dev_err(dev, "read firmware version failed\n");
1031 		return err;
1032 	}
1033 
1034 	if (memcmp(&version[FW1], "\0\0\0\0\0\0\0\0",
1035 		   sizeof(struct version_info)) == 0) {
1036 		dev_info(dev, "secondary fw is not flashed\n");
1037 		*mode = SECONDARY_BL;
1038 	} else if (le16_to_cpu(version[FW1].base.build) <
1039 		secondary_fw_min_ver) {
1040 		dev_info(dev, "secondary fw version is too low (< %d)\n",
1041 			 secondary_fw_min_ver);
1042 		*mode = SECONDARY;
1043 	} else if (memcmp(&version[FW2], "\0\0\0\0\0\0\0\0",
1044 		   sizeof(struct version_info)) == 0) {
1045 		dev_info(dev, "primary fw is not flashed\n");
1046 		*mode = PRIMARY;
1047 	} else if (ccg_check_fw_version(uc, ccg_fw_names[PRIMARY],
1048 		   &version[FW2].app)) {
1049 		dev_info(dev, "found primary fw with later version\n");
1050 		*mode = PRIMARY;
1051 	} else {
1052 		dev_info(dev, "secondary and primary fw are the latest\n");
1053 		*mode = FLASH_NOT_NEEDED;
1054 	}
1055 	return 0;
1056 }
1057 
static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
1059 {
1060 	struct device *dev = uc->dev;
1061 	const struct firmware *fw = NULL;
1062 	const char *p, *s;
1063 	const char *eof;
1064 	int err, row, len, line_sz, line_cnt = 0;
1065 	unsigned long start_time = jiffies;
1066 	struct fw_config_table  fw_cfg;
1067 	u8 fw_cfg_sig[FW_CFG_TABLE_SIG_SIZE];
1068 	u8 *wr_buf;
1069 
1070 	err = request_firmware(&fw, ccg_fw_names[mode], dev);
1071 	if (err) {
1072 		dev_err(dev, "request %s failed err=%d\n",
1073 			ccg_fw_names[mode], err);
1074 		return err;
1075 	}
1076 
1077 	if (((uc->info.mode & CCG_DEVINFO_FWMODE_MASK) >>
1078 			CCG_DEVINFO_FWMODE_SHIFT) == FW2) {
1079 		err = ccg_cmd_port_control(uc, false);
1080 		if (err < 0)
1081 			goto release_fw;
1082 		err = ccg_cmd_jump_boot_mode(uc, 0);
1083 		if (err < 0)
1084 			goto release_fw;
1085 	}
1086 
1087 	eof = fw->data + fw->size;
1088 
1089 	/*
1090 	 * check if signed fw
1091 	 * last part of fw image is fw cfg table and signature
1092 	 */
1093 	if (fw->size < sizeof(fw_cfg) + sizeof(fw_cfg_sig))
1094 		goto not_signed_fw;
1095 
1096 	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
1097 	       sizeof(fw_cfg) - sizeof(fw_cfg_sig), sizeof(fw_cfg));
1098 
1099 	if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
1100 		dev_info(dev, "not a signed image\n");
1101 		goto not_signed_fw;
1102 	}
1103 	eof = fw->data + fw->size - sizeof(fw_cfg) - sizeof(fw_cfg_sig);
1104 
1105 	memcpy((uint8_t *)&fw_cfg_sig,
1106 	       fw->data + fw->size - sizeof(fw_cfg_sig), sizeof(fw_cfg_sig));
1107 
1108 	/* flash fw config table and signature first */
1109 	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg,
1110 				      FLASH_FWCT1_WR_CMD);
1111 	if (err)
1112 		goto release_fw;
1113 
1114 	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg + CCG4_ROW_SIZE,
1115 				      FLASH_FWCT2_WR_CMD);
1116 	if (err)
1117 		goto release_fw;
1118 
1119 	err = ccg_cmd_write_flash_row(uc, 0, &fw_cfg_sig,
1120 				      FLASH_FWCT_SIG_WR_CMD);
1121 	if (err)
1122 		goto release_fw;
1123 
1124 not_signed_fw:
1125 	wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
1126 	if (!wr_buf) {
1127 		err = -ENOMEM;
1128 		goto release_fw;
1129 	}
1130 
1131 	err = ccg_cmd_enter_flashing(uc);
1132 	if (err)
1133 		goto release_mem;
1134 
1135 	/*****************************************************************
1136 	 * CCG firmware image (.cyacd) file line format
1137 	 *
	 * :00rrrrllll[dd....]cc\r\n
1139 	 *
1140 	 * :00   header
1141 	 * rrrr is row number to flash				(4 char)
1142 	 * llll is data len to flash				(4 char)
1143 	 * dd   is a data field represents one byte of data	(512 char)
1144 	 * cc   is checksum					(2 char)
1145 	 * \r\n newline
1146 	 *
1147 	 * Total length: 3 + 4 + 4 + 512 + 2 + 2 = 527
1148 	 *
1149 	 *****************************************************************/
1150 
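	/*
	 * For example (illustrative only), a record for flash row 0x01FE
	 * carrying 256 data bytes starts with ":0001FE0100", i.e. the row
	 * and length fields are big-endian hex, matching the
	 * get_unaligned_be16() decoding below.
	 */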
1151 	p = strnchr(fw->data, fw->size, ':');
1152 	while (p < eof) {
1153 		s = strnchr(p + 1, eof - p - 1, ':');
1154 
1155 		if (!s)
1156 			s = eof;
1157 
1158 		line_sz = s - p;
1159 
1160 		if (line_sz != CYACD_LINE_SIZE) {
1161 			dev_err(dev, "Bad FW format line_sz=%d\n", line_sz);
1162 			err =  -EINVAL;
1163 			goto release_mem;
1164 		}
1165 
1166 		if (hex2bin(wr_buf, p + 3, CCG4_ROW_SIZE + 4)) {
1167 			err =  -EINVAL;
1168 			goto release_mem;
1169 		}
1170 
1171 		row = get_unaligned_be16(wr_buf);
1172 		len = get_unaligned_be16(&wr_buf[2]);
1173 
1174 		if (len != CCG4_ROW_SIZE) {
1175 			err =  -EINVAL;
1176 			goto release_mem;
1177 		}
1178 
1179 		err = ccg_cmd_write_flash_row(uc, row, wr_buf + 4,
1180 					      FLASH_WR_CMD);
1181 		if (err)
1182 			goto release_mem;
1183 
1184 		line_cnt++;
1185 		p = s;
1186 	}
1187 
	dev_info(dev, "total %d rows flashed. time: %dms\n",
		 line_cnt, jiffies_to_msecs(jiffies - start_time));
1190 
1191 	err = ccg_cmd_validate_fw(uc, (mode == PRIMARY) ? FW2 :  FW1);
1192 	if (err)
1193 		dev_err(dev, "%s validation failed err=%d\n",
1194 			(mode == PRIMARY) ? "FW2" :  "FW1", err);
1195 	else
1196 		dev_info(dev, "%s validated\n",
1197 			 (mode == PRIMARY) ? "FW2" :  "FW1");
1198 
1199 	err = ccg_cmd_port_control(uc, false);
1200 	if (err < 0)
1201 		goto release_mem;
1202 
1203 	err = ccg_cmd_reset(uc);
1204 	if (err < 0)
1205 		goto release_mem;
1206 
1207 	err = ccg_cmd_port_control(uc, true);
1208 	if (err < 0)
1209 		goto release_mem;
1210 
1211 release_mem:
1212 	kfree(wr_buf);
1213 
1214 release_fw:
1215 	release_firmware(fw);
1216 	return err;
1217 }
1218 
1219 /*******************************************************************************
1220  * CCG4 has two copies of the firmware in addition to the bootloader.
1221  * If the device is running FW1, FW2 can be updated with the new version.
1222  * Dual firmware mode allows the CCG device to stay in a PD contract and support
1223  * USB PD and Type-C functionality while a firmware update is in progress.
1224  ******************************************************************************/
static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
1226 {
1227 	int err = 0;
1228 
1229 	while (flash_mode != FLASH_NOT_NEEDED) {
1230 		err = do_flash(uc, flash_mode);
1231 		if (err < 0)
1232 			return err;
1233 		err = ccg_fw_update_needed(uc, &flash_mode);
1234 		if (err < 0)
1235 			return err;
1236 	}
1237 	dev_info(uc->dev, "CCG FW update successful\n");
1238 
1239 	return err;
1240 }
1241 
static int ccg_restart(struct ucsi_ccg *uc)
1243 {
1244 	struct device *dev = uc->dev;
1245 	int status;
1246 
1247 	status = ucsi_ccg_init(uc);
1248 	if (status < 0) {
		dev_err(dev, "ucsi_ccg_init failed - %d\n", status);
1250 		return status;
1251 	}
1252 
1253 	status = request_threaded_irq(uc->irq, NULL, ccg_irq_handler,
1254 				      IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
1255 				      dev_name(dev), uc);
1256 	if (status < 0) {
1257 		dev_err(dev, "request_threaded_irq failed - %d\n", status);
1258 		return status;
1259 	}
1260 
1261 	status = ucsi_register(uc->ucsi);
1262 	if (status) {
1263 		dev_err(uc->dev, "failed to register the interface\n");
1264 		return status;
1265 	}
1266 
1267 	pm_runtime_enable(uc->dev);
1268 	return 0;
1269 }
1270 
static void ccg_update_firmware(struct work_struct *work)
1272 {
1273 	struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work);
1274 	enum enum_flash_mode flash_mode;
1275 	int status;
1276 
1277 	status = ccg_fw_update_needed(uc, &flash_mode);
1278 	if (status < 0)
1279 		return;
1280 
1281 	if (flash_mode != FLASH_NOT_NEEDED) {
1282 		ucsi_unregister(uc->ucsi);
1283 		pm_runtime_disable(uc->dev);
1284 		free_irq(uc->irq, uc);
1285 
1286 		ccg_fw_update(uc, flash_mode);
1287 		ccg_restart(uc);
1288 	}
1289 }
1290 
static ssize_t do_flash_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t n)
1294 {
1295 	struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
1296 	bool flash;
1297 
1298 	if (kstrtobool(buf, &flash))
1299 		return -EINVAL;
1300 
1301 	if (!flash)
1302 		return n;
1303 
1304 	if (uc->fw_build == 0x0) {
1305 		dev_err(dev, "fail to flash FW due to missing FW build info\n");
1306 		return -EINVAL;
1307 	}
1308 
1309 	schedule_work(&uc->work);
1310 	return n;
1311 }
1312 
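/*
 * Exposes a write-only "do_flash" attribute on the I2C client device
 * (registered via the dev_groups pointer below); writing a truthy value
 * such as "1" schedules ccg_update_firmware(), provided the
 * "ccgx,firmware-build" property supplied a vendor build id.
 */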
1313 static DEVICE_ATTR_WO(do_flash);
1314 
1315 static struct attribute *ucsi_ccg_attrs[] = {
1316 	&dev_attr_do_flash.attr,
1317 	NULL,
1318 };
1319 ATTRIBUTE_GROUPS(ucsi_ccg);
1320 
static int ucsi_ccg_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
1323 {
1324 	struct device *dev = &client->dev;
1325 	struct ucsi_ccg *uc;
1326 	int status;
1327 
1328 	uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
1329 	if (!uc)
1330 		return -ENOMEM;
1331 
1332 	uc->dev = dev;
1333 	uc->client = client;
1334 	mutex_init(&uc->lock);
1335 	init_completion(&uc->complete);
1336 	INIT_WORK(&uc->work, ccg_update_firmware);
1337 	INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
1338 
1339 	/* Only fail FW flashing when FW build information is not provided */
1340 	status = device_property_read_u16(dev, "ccgx,firmware-build",
1341 					  &uc->fw_build);
1342 	if (status)
1343 		dev_err(uc->dev, "failed to get FW build information\n");
1344 
1345 	/* reset ccg device and initialize ucsi */
1346 	status = ucsi_ccg_init(uc);
1347 	if (status < 0) {
1348 		dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
1349 		return status;
1350 	}
1351 
1352 	status = get_fw_info(uc);
1353 	if (status < 0) {
1354 		dev_err(uc->dev, "get_fw_info failed - %d\n", status);
1355 		return status;
1356 	}
1357 
1358 	uc->port_num = 1;
1359 
1360 	if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
1361 		uc->port_num++;
1362 
1363 	uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
1364 	if (IS_ERR(uc->ucsi))
1365 		return PTR_ERR(uc->ucsi);
1366 
1367 	ucsi_set_drvdata(uc->ucsi, uc);
1368 
1369 	status = request_threaded_irq(client->irq, NULL, ccg_irq_handler,
1370 				      IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
1371 				      dev_name(dev), uc);
1372 	if (status < 0) {
1373 		dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
1374 		goto out_ucsi_destroy;
1375 	}
1376 
1377 	uc->irq = client->irq;
1378 
1379 	status = ucsi_register(uc->ucsi);
1380 	if (status)
1381 		goto out_free_irq;
1382 
1383 	i2c_set_clientdata(client, uc);
1384 
1385 	pm_runtime_set_active(uc->dev);
1386 	pm_runtime_enable(uc->dev);
1387 	pm_runtime_use_autosuspend(uc->dev);
1388 	pm_runtime_set_autosuspend_delay(uc->dev, 5000);
1389 	pm_runtime_idle(uc->dev);
1390 
1391 	return 0;
1392 
1393 out_free_irq:
1394 	free_irq(uc->irq, uc);
1395 out_ucsi_destroy:
1396 	ucsi_destroy(uc->ucsi);
1397 
1398 	return status;
1399 }
1400 
static int ucsi_ccg_remove(struct i2c_client *client)
1402 {
1403 	struct ucsi_ccg *uc = i2c_get_clientdata(client);
1404 
1405 	cancel_work_sync(&uc->pm_work);
1406 	cancel_work_sync(&uc->work);
1407 	pm_runtime_disable(uc->dev);
1408 	ucsi_unregister(uc->ucsi);
1409 	ucsi_destroy(uc->ucsi);
1410 	free_irq(uc->irq, uc);
1411 
1412 	return 0;
1413 }
1414 
1415 static const struct i2c_device_id ucsi_ccg_device_id[] = {
1416 	{"ccgx-ucsi", 0},
1417 	{}
1418 };
1419 MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
1420 
static int ucsi_ccg_resume(struct device *dev)
1422 {
1423 	struct i2c_client *client = to_i2c_client(dev);
1424 	struct ucsi_ccg *uc = i2c_get_clientdata(client);
1425 
1426 	return ucsi_resume(uc->ucsi);
1427 }
1428 
static int ucsi_ccg_runtime_suspend(struct device *dev)
1430 {
1431 	return 0;
1432 }
1433 
static int ucsi_ccg_runtime_resume(struct device *dev)
1435 {
1436 	struct i2c_client *client = to_i2c_client(dev);
1437 	struct ucsi_ccg *uc = i2c_get_clientdata(client);
1438 
1439 	/*
1440 	 * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
1441 	 * of missing interrupt when a device is connected for runtime resume.
1442 	 * Schedule a work to call ISR as a workaround.
1443 	 */
1444 	if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
1445 	    uc->fw_version <= CCG_OLD_FW_VERSION)
1446 		schedule_work(&uc->pm_work);
1447 
1448 	return 0;
1449 }
1450 
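/*
 * Runtime suspend is a no-op and runtime resume only schedules the
 * interrupt-replay workaround for old NVIDIA firmware builds; system
 * resume simply forwards to ucsi_resume().
 */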
1451 static const struct dev_pm_ops ucsi_ccg_pm = {
1452 	.resume = ucsi_ccg_resume,
1453 	.runtime_suspend = ucsi_ccg_runtime_suspend,
1454 	.runtime_resume = ucsi_ccg_runtime_resume,
1455 };
1456 
1457 static struct i2c_driver ucsi_ccg_driver = {
1458 	.driver = {
1459 		.name = "ucsi_ccg",
1460 		.pm = &ucsi_ccg_pm,
1461 		.dev_groups = ucsi_ccg_groups,
1462 	},
1463 	.probe = ucsi_ccg_probe,
1464 	.remove = ucsi_ccg_remove,
1465 	.id_table = ucsi_ccg_device_id,
1466 };
1467 
1468 module_i2c_driver(ucsi_ccg_driver);
1469 
1470 MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
1471 MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
1472 MODULE_LICENSE("GPL v2");
1473