// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2015-2017 Google, Inc
 *
 * USB Type-C Port Controller Interface.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>

#define PD_RETRY_COUNT_DEFAULT			3
#define PD_RETRY_COUNT_3_0_OR_HIGHER		2
#define AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV	3500
#define VSINKPD_MIN_IR_DROP_MV			750
#define VSRC_NEW_MIN_PERCENT			95
#define VSRC_VALID_MIN_MV			500
#define VPPS_NEW_MIN_PERCENT			95
#define VPPS_VALID_MIN_MV			100
#define VSINKDISCONNECT_PD_MIN_PERCENT		90

struct tcpci {
	struct device *dev;

	struct tcpm_port *port;

	struct regmap *regmap;
	unsigned int alert_mask;

	bool controls_vbus;

	struct tcpc_dev tcpc;
	struct tcpci_data *data;
};

struct tcpci_chip {
	struct tcpci *tcpci;
	struct tcpci_data data;
};

struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci)
{
	return tcpci->port;
}
EXPORT_SYMBOL_GPL(tcpci_get_tcpm_port);

static inline struct tcpci *tcpc_to_tcpci(struct tcpc_dev *tcpc)
{
	return container_of(tcpc, struct tcpci, tcpc);
}

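/* Helpers for the 16-bit TCPCI registers (ALERT, TX/RX header, etc.). */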
static int tcpci_read16(struct tcpci *tcpci, unsigned int reg, u16 *val)
{
	return regmap_raw_read(tcpci->regmap, reg, val, sizeof(u16));
}

static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val)
{
	return regmap_raw_write(tcpci->regmap, reg, &val, sizeof(u16));
}

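/*
 * Program the requested CC pull into ROLE_CONTROL. While VCONN is being
 * sourced, the CC line that carries VCONN is left open so only the
 * active CC line is changed.
 */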
static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	bool vconn_pres;
	enum typec_cc_polarity polarity = TYPEC_POLARITY_CC1;
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
	if (ret < 0)
		return ret;

	vconn_pres = !!(reg & TCPC_POWER_STATUS_VCONN_PRES);
	if (vconn_pres) {
		ret = regmap_read(tcpci->regmap, TCPC_TCPC_CTRL, &reg);
		if (ret < 0)
			return ret;

		if (reg & TCPC_TCPC_CTRL_ORIENTATION)
			polarity = TYPEC_POLARITY_CC2;
	}

	switch (cc) {
	case TYPEC_CC_RA:
		reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC2_SHIFT);
		break;
	case TYPEC_CC_RD:
		reg = (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
		break;
	case TYPEC_CC_RP_DEF:
		reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
			(TCPC_ROLE_CTRL_RP_VAL_DEF <<
			 TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_1_5:
		reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
			(TCPC_ROLE_CTRL_RP_VAL_1_5 <<
			 TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_3_0:
		reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
			(TCPC_ROLE_CTRL_RP_VAL_3_0 <<
			 TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_OPEN:
	default:
		reg = (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
		break;
	}

	if (vconn_pres) {
		if (polarity == TYPEC_POLARITY_CC2) {
			reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
			reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT);
		} else {
			reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
			reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
		}
	}

	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;

	return 0;
}

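/*
 * Transition to the Apply RC state: open the termination on the inactive
 * CC line, unless ROLE_CONTROL already has different terminations on CC1
 * and CC2.
 */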
static int tcpci_apply_rc(struct tcpc_dev *tcpc, enum typec_cc_status cc,
			  enum typec_cc_polarity polarity)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
	if (ret < 0)
		return ret;

	/*
	 * APPLY_RC state is when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2 and vbus autodischarge on
	 * disconnect is disabled. Bail out when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2.
	 */
	if (((reg & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) >>
	     TCPC_ROLE_CTRL_CC2_SHIFT) !=
	    ((reg & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) >>
	     TCPC_ROLE_CTRL_CC1_SHIFT))
		return 0;

	return regmap_update_bits(tcpci->regmap, TCPC_ROLE_CTRL, polarity == TYPEC_POLARITY_CC1 ?
				  TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT :
				  TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT,
				  TCPC_ROLE_CTRL_CC_OPEN);
}

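/*
 * Enable DRP toggling with the requested Rp value and issue the
 * LOOK4CONNECTION command so the TCPC toggles autonomously.
 */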
static int tcpci_start_toggling(struct tcpc_dev *tcpc,
				enum typec_port_type port_type,
				enum typec_cc_status cc)
{
	int ret;
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg = TCPC_ROLE_CTRL_DRP;

	if (port_type != TYPEC_PORT_DRP)
		return -EOPNOTSUPP;

	/* Handle vendor drp toggling */
	if (tcpci->data->start_drp_toggling) {
		ret = tcpci->data->start_drp_toggling(tcpci, tcpci->data, cc);
		if (ret < 0)
			return ret;
	}

	switch (cc) {
	default:
	case TYPEC_CC_RP_DEF:
		reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF <<
			TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_1_5:
		reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
			TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_3_0:
		reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
			TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	}

	if (cc == TYPEC_CC_RD)
		reg |= (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
	else
		reg |= (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT);
	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;
	return regmap_write(tcpci->regmap, TCPC_COMMAND,
			    TCPC_CMD_LOOK4CONNECTION);
}

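/* Decode CC_STATUS into typec_cc_status values for both CC lines. */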
static int tcpci_get_cc(struct tcpc_dev *tcpc,
			enum typec_cc_status *cc1, enum typec_cc_status *cc2)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg, role_control;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
	if (ret < 0)
		return ret;

	ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
	if (ret < 0)
		return ret;

	*cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
				 TCPC_CC_STATUS_CC1_MASK,
				 reg & TCPC_CC_STATUS_TERM ||
				 tcpc_presenting_rd(role_control, CC1));
	*cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
				 TCPC_CC_STATUS_CC2_MASK,
				 reg & TCPC_CC_STATUS_TERM ||
				 tcpc_presenting_rd(role_control, CC2));

	return 0;
}

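/*
 * Set the resolved termination on the active CC line, open the unused
 * line, and record the plug orientation in TCPC_CTRL.
 */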
static int tcpci_set_polarity(struct tcpc_dev *tcpc,
			      enum typec_cc_polarity polarity)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;
	enum typec_cc_status cc1, cc2;

	/* Obtain Rp setting from role control */
	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
	if (ret < 0)
		return ret;

	ret = tcpci_get_cc(tcpc, &cc1, &cc2);
	if (ret < 0)
		return ret;

	/*
	 * When the port has DRP toggling enabled, ROLE_CONTROL only holds the
	 * initial terminations used for toggling and does not reflect the
	 * final cc terminations once ConnectionResult is 0, i.e. DRP toggling
	 * has stopped and the connection is resolved. Infer the port role
	 * from TCPC_CC_STATUS based on the terminations seen; the port role
	 * is then used to set the cc terminations.
	 */
	if (reg & TCPC_ROLE_CTRL_DRP) {
		/* Disable DRP for the OPEN setting to take effect */
		reg = reg & ~TCPC_ROLE_CTRL_DRP;

		if (polarity == TYPEC_POLARITY_CC2) {
			reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
			/* Local port is source */
			if (cc2 == TYPEC_CC_RD)
				/* Role control would have the Rp setting when DRP was enabled */
				reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT;
			else
				reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
		} else {
			reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
			/* Local port is source */
			if (cc1 == TYPEC_CC_RD)
				/* Role control would have the Rp setting when DRP was enabled */
				reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT;
			else
				reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
		}
	}

	if (polarity == TYPEC_POLARITY_CC2)
		reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT;
	else
		reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT;
	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;

	return regmap_write(tcpci->regmap, TCPC_TCPC_CTRL,
			    (polarity == TYPEC_POLARITY_CC2) ?
			    TCPC_TCPC_CTRL_ORIENTATION : 0);
}

static void tcpci_set_partner_usb_comm_capable(struct tcpc_dev *tcpc, bool capable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);

	if (tcpci->data->set_partner_usb_comm_capable)
		tcpci->data->set_partner_usb_comm_capable(tcpci, tcpci->data, capable);
}

static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	int ret;

	/* Handle vendor set vconn */
	if (tcpci->data->set_vconn) {
		ret = tcpci->data->set_vconn(tcpci, tcpci->data, enable);
		if (ret < 0)
			return ret;
	}

	return regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL,
				  TCPC_POWER_CTRL_VCONN_ENABLE,
				  enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0);
}

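/* Enable or disable automatic VBUS discharge on disconnect via POWER_CTRL. */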
static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	int ret;

	ret = regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_POWER_CTRL_AUTO_DISCHARGE,
				 enable ? TCPC_POWER_CTRL_AUTO_DISCHARGE : 0);
	return ret;
}

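/*
 * Program VBUS_SINK_DISCONNECT_THRESH for the negotiated power contract
 * (PD, PPS or Type-C default), converted to the register's LSB units.
 */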
static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
						   bool pps_active, u32 requested_vbus_voltage_mv)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	unsigned int pwr_ctrl, threshold = 0;
	int ret;

	/*
	 * A requested voltage of 0 indicates that vbus is going to go away
	 * due to PR_SWAP, hard reset etc. Do not discharge vbus here.
	 */
	if (requested_vbus_voltage_mv == 0)
		goto write_thresh;

	ret = regmap_read(tcpci->regmap, TCPC_POWER_CTRL, &pwr_ctrl);
	if (ret < 0)
		return ret;

	if (pwr_ctrl & TCPC_FAST_ROLE_SWAP_EN) {
		/* To prevent disconnect when the source is fast role swap capable. */
		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
	} else if (mode == TYPEC_PWR_MODE_PD) {
		if (pps_active)
			threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
				     VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
		else
			threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
				     VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
	} else {
		/* 3.5V for non-pd sink */
		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
	}

	threshold = threshold / TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV;

	if (threshold > TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX)
		return -EINVAL;

write_thresh:
	return tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, threshold);
}

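/*
 * Arm or disarm fast role swap: clear the sink disconnect threshold while
 * FRS is armed (restore the 3.5 V default when disarming) and toggle
 * FAST_ROLE_SWAP_EN in POWER_CTRL.
 */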
static int tcpci_enable_frs(struct tcpc_dev *dev, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	int ret;

	/* To prevent disconnect during FRS, set disconnect threshold to 3.5V */
	ret = tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, enable ? 0 : 0x8c);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_FAST_ROLE_SWAP_EN, enable ?
				 TCPC_FAST_ROLE_SWAP_EN : 0);

	return ret;
}

static void tcpci_frs_sourcing_vbus(struct tcpc_dev *dev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);

	if (tcpci->data->frs_sourcing_vbus)
		tcpci->data->frs_sourcing_vbus(tcpci, tcpci->data);
}

static void tcpci_check_contaminant(struct tcpc_dev *dev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);

	if (tcpci->data->check_contaminant)
		tcpci->data->check_contaminant(tcpci, tcpci->data);
}

static int tcpci_set_bist_data(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);

	return regmap_update_bits(tcpci->regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_BIST_TM,
				  enable ? TCPC_TCPC_CTRL_BIST_TM : 0);
}

static int tcpci_set_roles(struct tcpc_dev *tcpc, bool attached,
			   enum typec_role role, enum typec_data_role data)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;

	reg = PD_REV20 << TCPC_MSG_HDR_INFO_REV_SHIFT;
	if (role == TYPEC_SOURCE)
		reg |= TCPC_MSG_HDR_INFO_PWR_ROLE;
	if (data == TYPEC_HOST)
		reg |= TCPC_MSG_HDR_INFO_DATA_ROLE;
	ret = regmap_write(tcpci->regmap, TCPC_MSG_HDR_INFO, reg);
	if (ret < 0)
		return ret;

	return 0;
}

static int tcpci_set_pd_rx(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg = 0;
	int ret;

	if (enable)
		reg = TCPC_RX_DETECT_SOP | TCPC_RX_DETECT_HARD_RESET;
	ret = regmap_write(tcpci->regmap, TCPC_RX_DETECT, reg);
	if (ret < 0)
		return ret;

	return 0;
}

static int tcpci_get_vbus(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
	if (ret < 0)
		return ret;

	return !!(reg & TCPC_POWER_STATUS_VBUS_PRES);
}

static bool tcpci_is_vbus_vsafe0v(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &reg);
	if (ret < 0)
		return false;

	return !!(reg & TCPC_EXTENDED_STATUS_VSAFE0V);
}

static int tcpci_set_vbus(struct tcpc_dev *tcpc, bool source, bool sink)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	int ret;

	if (tcpci->data->set_vbus) {
		ret = tcpci->data->set_vbus(tcpci, tcpci->data, source, sink);
		/* Bypass when ret > 0 */
		if (ret != 0)
			return ret < 0 ? ret : 0;
	}

	/* Disable both source and sink first before enabling anything */

	if (!source) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_DISABLE_SRC_VBUS);
		if (ret < 0)
			return ret;
	}

	if (!sink) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_DISABLE_SINK_VBUS);
		if (ret < 0)
			return ret;
	}

	if (source) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_SRC_VBUS_DEFAULT);
		if (ret < 0)
			return ret;
	}

	if (sink) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_SINK_VBUS);
		if (ret < 0)
			return ret;
	}

	return 0;
}

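/*
 * Load the message header and payload into the TCPC transmit buffer and
 * start transmission with the retry count for the negotiated PD revision.
 */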
static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type,
			     const struct pd_message *msg, unsigned int negotiated_rev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	u16 header = msg ? le16_to_cpu(msg->header) : 0;
	unsigned int reg, cnt;
	int ret;

	cnt = msg ? pd_header_cnt(header) * 4 : 0;
	/*
	 * The TCPCI spec forbids direct access of TCPC_TX_DATA. But since
	 * some chipsets offer this capability, it's fair to support both.
	 */
	if (tcpci->data->TX_BUF_BYTE_x_hidden) {
		u8 buf[TCPC_TRANSMIT_BUFFER_MAX_LEN] = {0,};
		u8 pos = 0;

		/* Payload + header + TCPC_TX_BYTE_CNT */
		buf[pos++] = cnt + 2;

		if (msg)
			memcpy(&buf[pos], &msg->header, sizeof(msg->header));

		pos += sizeof(header);

		if (cnt > 0)
			memcpy(&buf[pos], msg->payload, cnt);

		pos += cnt;
		ret = regmap_raw_write(tcpci->regmap, TCPC_TX_BYTE_CNT, buf, pos);
		if (ret < 0)
			return ret;
	} else {
		ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2);
		if (ret < 0)
			return ret;

		ret = tcpci_write16(tcpci, TCPC_TX_HDR, header);
		if (ret < 0)
			return ret;

		if (cnt > 0) {
			ret = regmap_raw_write(tcpci->regmap, TCPC_TX_DATA, &msg->payload, cnt);
			if (ret < 0)
				return ret;
		}
	}

	/* nRetryCount is 3 in the PD 2.0 spec but 2 in the PD 3.0 spec */
	reg = ((negotiated_rev > PD_REV20 ? PD_RETRY_COUNT_3_0_OR_HIGHER : PD_RETRY_COUNT_DEFAULT)
	       << TCPC_TRANSMIT_RETRY_SHIFT) | (type << TCPC_TRANSMIT_TYPE_SHIFT);
	ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg);
	if (ret < 0)
		return ret;

	return 0;
}

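/*
 * Bring the TCPC to a known state: wait for it to leave the UNINIT power
 * state, clear fault status, run the optional vendor init hook, clear
 * pending alerts, enable VBUS detection and unmask the alerts this driver
 * handles.
 */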
static int tcpci_init(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned long timeout = jiffies + msecs_to_jiffies(2000); /* XXX */
	unsigned int reg;
	int ret;

	while (time_before_eq(jiffies, timeout)) {
		ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
		if (ret < 0)
			return ret;
		if (!(reg & TCPC_POWER_STATUS_UNINIT))
			break;
		usleep_range(10000, 20000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
	if (ret < 0)
		return ret;

	/* Handle vendor init */
	if (tcpci->data->init) {
		ret = tcpci->data->init(tcpci, tcpci->data);
		if (ret < 0)
			return ret;
	}

	/* Clear all events */
	ret = tcpci_write16(tcpci, TCPC_ALERT, 0xffff);
	if (ret < 0)
		return ret;

	if (tcpci->controls_vbus)
		reg = TCPC_POWER_STATUS_VBUS_PRES;
	else
		reg = 0;
	ret = regmap_write(tcpci->regmap, TCPC_POWER_STATUS_MASK, reg);
	if (ret < 0)
		return ret;

	/* Enable Vbus detection */
	ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
			   TCPC_CMD_ENABLE_VBUS_DETECT);
	if (ret < 0)
		return ret;

	reg = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_FAILED |
		TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_RX_STATUS |
		TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_CC_STATUS;
	if (tcpci->controls_vbus)
		reg |= TCPC_ALERT_POWER_STATUS;
	/* Enable VSAFE0V status interrupt when detecting VSAFE0V is supported */
	if (tcpci->data->vbus_vsafe0v) {
		reg |= TCPC_ALERT_EXTENDED_STATUS;
		ret = regmap_write(tcpci->regmap, TCPC_EXTENDED_STATUS_MASK,
				   TCPC_EXTENDED_STATUS_VSAFE0V);
		if (ret < 0)
			return ret;
	}

	tcpci->alert_mask = reg;

	return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
}

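/*
 * Handle a TCPC alert: read ALERT, forward CC, power status, received
 * message, hard reset and transmit-completion events to TCPM, and
 * acknowledge the serviced alert bits.
 */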
irqreturn_t tcpci_irq(struct tcpci *tcpci)
{
	u16 status;
	int ret;
	unsigned int raw;

	tcpci_read16(tcpci, TCPC_ALERT, &status);

	/*
	 * Clear alert status for everything except RX_STATUS, which shouldn't
	 * be cleared until we have successfully retrieved the message.
	 */
	if (status & ~TCPC_ALERT_RX_STATUS)
		tcpci_write16(tcpci, TCPC_ALERT,
			      status & ~TCPC_ALERT_RX_STATUS);

	if (status & TCPC_ALERT_CC_STATUS)
		tcpm_cc_change(tcpci->port);

	if (status & TCPC_ALERT_POWER_STATUS) {
		regmap_read(tcpci->regmap, TCPC_POWER_STATUS_MASK, &raw);
		/*
		 * If the power status mask has been reset, then the TCPC
		 * has reset.
		 */
		if (raw == 0xff)
			tcpm_tcpc_reset(tcpci->port);
		else
			tcpm_vbus_change(tcpci->port);
	}

	if (status & TCPC_ALERT_RX_STATUS) {
		struct pd_message msg;
		unsigned int cnt, payload_cnt;
		u16 header;

		regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
		/*
		 * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
		 * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
		 * defined in table 4-36 as one greater than the number of
		 * bytes received, and that number includes the header. So:
		 */
		if (cnt > 3)
			payload_cnt = cnt - (1 + sizeof(msg.header));
		else
			payload_cnt = 0;

		tcpci_read16(tcpci, TCPC_RX_HDR, &header);
		msg.header = cpu_to_le16(header);

		if (WARN_ON(payload_cnt > sizeof(msg.payload)))
			payload_cnt = sizeof(msg.payload);

		if (payload_cnt > 0)
			regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
					&msg.payload, payload_cnt);

		/* Read complete, clear RX status alert bit */
		tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);

		tcpm_pd_receive(tcpci->port, &msg);
	}

	if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
		ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
		if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
			tcpm_vbus_change(tcpci->port);
	}

	if (status & TCPC_ALERT_RX_HARD_RST)
		tcpm_pd_hard_reset(tcpci->port);

	if (status & TCPC_ALERT_TX_SUCCESS)
		tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_SUCCESS);
	else if (status & TCPC_ALERT_TX_DISCARDED)
		tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_DISCARDED);
	else if (status & TCPC_ALERT_TX_FAILED)
		tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED);

	return IRQ_RETVAL(status & tcpci->alert_mask);
}
EXPORT_SYMBOL_GPL(tcpci_irq);

static irqreturn_t _tcpci_irq(int irq, void *dev_id)
{
	struct tcpci_chip *chip = dev_id;

	return tcpci_irq(chip->tcpci);
}

static const struct regmap_config tcpci_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,

	.max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */
};

static int tcpci_parse_config(struct tcpci *tcpci)
{
	tcpci->controls_vbus = true; /* XXX */

	tcpci->tcpc.fwnode = device_get_named_child_node(tcpci->dev,
							 "connector");
	if (!tcpci->tcpc.fwnode) {
		dev_err(tcpci->dev, "Can't find connector node.\n");
		return -EINVAL;
	}

	return 0;
}

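/*
 * Register a TCPCI port with TCPM on behalf of a vendor driver. The caller
 * provides register access and optional callbacks through struct
 * tcpci_data; the returned struct tcpci is passed back to the other
 * tcpci_* helpers.
 */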
struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
{
	struct tcpci *tcpci;
	int err;

	tcpci = devm_kzalloc(dev, sizeof(*tcpci), GFP_KERNEL);
	if (!tcpci)
		return ERR_PTR(-ENOMEM);

	tcpci->dev = dev;
	tcpci->data = data;
	tcpci->regmap = data->regmap;

	tcpci->tcpc.init = tcpci_init;
	tcpci->tcpc.get_vbus = tcpci_get_vbus;
	tcpci->tcpc.set_vbus = tcpci_set_vbus;
	tcpci->tcpc.set_cc = tcpci_set_cc;
	tcpci->tcpc.apply_rc = tcpci_apply_rc;
	tcpci->tcpc.get_cc = tcpci_get_cc;
	tcpci->tcpc.set_polarity = tcpci_set_polarity;
	tcpci->tcpc.set_vconn = tcpci_set_vconn;
	tcpci->tcpc.start_toggling = tcpci_start_toggling;

	tcpci->tcpc.set_pd_rx = tcpci_set_pd_rx;
	tcpci->tcpc.set_roles = tcpci_set_roles;
	tcpci->tcpc.pd_transmit = tcpci_pd_transmit;
	tcpci->tcpc.set_bist_data = tcpci_set_bist_data;
	tcpci->tcpc.enable_frs = tcpci_enable_frs;
	tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
	tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;

	if (tcpci->data->check_contaminant)
		tcpci->tcpc.check_contaminant = tcpci_check_contaminant;

	if (tcpci->data->auto_discharge_disconnect) {
		tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge;
		tcpci->tcpc.set_auto_vbus_discharge_threshold =
			tcpci_set_auto_vbus_discharge_threshold;
		regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_POWER_CTRL_BLEED_DISCHARGE,
				   TCPC_POWER_CTRL_BLEED_DISCHARGE);
	}

	if (tcpci->data->vbus_vsafe0v)
		tcpci->tcpc.is_vbus_vsafe0v = tcpci_is_vbus_vsafe0v;

	err = tcpci_parse_config(tcpci);
	if (err < 0)
		return ERR_PTR(err);

	tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
	if (IS_ERR(tcpci->port)) {
		fwnode_handle_put(tcpci->tcpc.fwnode);
		return ERR_CAST(tcpci->port);
	}

	return tcpci;
}
EXPORT_SYMBOL_GPL(tcpci_register_port);

void tcpci_unregister_port(struct tcpci *tcpci)
{
	tcpm_unregister_port(tcpci->port);
	fwnode_handle_put(tcpci->tcpc.fwnode);
}
EXPORT_SYMBOL_GPL(tcpci_unregister_port);

static int tcpci_probe(struct i2c_client *client)
{
	struct tcpci_chip *chip;
	int err;
	u16 val = 0;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->data.regmap = devm_regmap_init_i2c(client, &tcpci_regmap_config);
	if (IS_ERR(chip->data.regmap))
		return PTR_ERR(chip->data.regmap);

	i2c_set_clientdata(client, chip);

	/* Disable chip interrupts before requesting irq */
	err = regmap_raw_write(chip->data.regmap, TCPC_ALERT_MASK, &val,
			       sizeof(u16));
	if (err < 0)
		return err;

	chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
	if (IS_ERR(chip->tcpci))
		return PTR_ERR(chip->tcpci);

	err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
					_tcpci_irq,
					IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
					dev_name(&client->dev), chip);
	if (err < 0) {
		tcpci_unregister_port(chip->tcpci);
		return err;
	}

	return 0;
}

static void tcpci_remove(struct i2c_client *client)
{
	struct tcpci_chip *chip = i2c_get_clientdata(client);
	int err;

	/* Disable chip interrupts before unregistering port */
	err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
	if (err < 0)
		dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));

	tcpci_unregister_port(chip->tcpci);
}

static const struct i2c_device_id tcpci_id[] = {
	{ "tcpci", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tcpci_id);

#ifdef CONFIG_OF
static const struct of_device_id tcpci_of_match[] = {
	{ .compatible = "nxp,ptn5110", },
	{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
#endif

static struct i2c_driver tcpci_i2c_driver = {
	.driver = {
		.name = "tcpci",
		.of_match_table = of_match_ptr(tcpci_of_match),
	},
	.probe = tcpci_probe,
	.remove = tcpci_remove,
	.id_table = tcpci_id,
};
module_i2c_driver(tcpci_i2c_driver);

MODULE_DESCRIPTION("USB Type-C Port Controller Interface driver");
MODULE_LICENSE("GPL");