// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4
/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	int timeout_msec;
	event_cb callback;
	void *callback_data;
};

#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so when you are done with it call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}
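
/*
 * Illustrative sketch, not part of the driver: the intended lifecycle
 * of a request object. tb_cfg_request_alloc() hands back the object
 * with one reference held; whoever allocated it drops that reference
 * with tb_cfg_request_put() once the request is no longer needed (see
 * tb_cfg_reset() below for a real user):
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *
 *	if (req) {
 *		req->match = tb_cfg_match;
 *		req->copy = tb_cfg_copy;
 *		// ... fill in request/response buffers and types ...
 *		tb_cfg_request_put(req);
 *	}
 */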

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		/*
		 * Hold a reference across the ->match() callback so the
		 * request cannot be released while it runs; on a match
		 * the reference is transferred to the caller.
		 */
		tb_cfg_request_get(iter);
		if (iter->match(iter, pkg)) {
			req = iter;
			break;
		}
		tb_cfg_request_put(iter);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}

/* utility functions */


static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
			"wrong route (expected %llx, got %llx)\n",
			route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_ctl *ctl = response->ctl;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	if (pkg->zero1)
		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
	if (pkg->zero2)
		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
	if (pkg->zero3)
		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5, 6, 7, 9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			    res->response_route, res->response_port);
		return;
	}
}

static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}
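
/*
 * Editorial note, not from the hardware spec: tb_crc() is CRC-32C
 * (Castagnoli) with the conventional inverted initial and final
 * values, stored big-endian right after the payload, as done in
 * tb_ctl_tx() below:
 *
 *	*(__be32 *)(buf + len) = tb_crc(buf, len);
 */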

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;

	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}

/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}

/*
 * tb_ctl_handle_event() - pass an incoming event to ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
						* We ignore failures during stop.
						* All rx packets are referenced
						* from ctl->rx_packets, so we do
						* not lose them.
						*/
}

static bool tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}
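
/*
 * Illustrative sketch, not part of the driver: asynchronous use of
 * tb_cfg_request(). The hypothetical my_done() callback runs from the
 * request work item once a matching response arrives (canceled
 * requests skip the callback); it owns the extra reference taken
 * below:
 *
 *	static void my_done(void *data)
 *	{
 *		struct tb_cfg_request *req = data;
 *
 *		pr_info("request done, err %d\n", req->result.err);
 *		tb_cfg_request_put(req);
 *	}
 *
 *	tb_cfg_request_get(req);
 *	if (tb_cfg_request(ctl, req, my_done, req))
 *		tb_cfg_request_put(req);
 */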

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is no longer active.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in ms how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}
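
/*
 * Illustrative sketch, not part of the driver: synchronous use, as in
 * tb_cfg_reset() below. The completion lives on the caller's stack
 * inside tb_cfg_request_sync(), so the caller only prepares @req and
 * inspects the returned result:
 *
 *	struct tb_cfg_result res;
 *
 *	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);
 *	if (res.err == 1)
 *		pr_err("thunderbolt error %d\n", res.tb_error);
 *	else if (res.err)
 *		pr_err("transport error %d\n", res.err);
 */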

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
			    void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;

	ctl->nhi = nhi;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}
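
/*
 * Illustrative sketch, not part of the driver: the alloc/start/stop/
 * free lifecycle. my_event_cb, my_data and the 1000 ms timeout are
 * hypothetical; the real callers live in the NHI and domain code:
 *
 *	struct tb_ctl *ctl = tb_ctl_alloc(nhi, 1000, my_event_cb, my_data);
 *
 *	if (!ctl)
 *		return -ENOMEM;
 *	tb_ctl_start(ctl);
 *	// ... tb_cfg_read()/tb_cfg_write()/tb_cfg_reset() ...
 *	tb_ctl_stop(ctl);
 *	tb_ctl_free(ctl);
 */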

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this in response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
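
/*
 * Illustrative sketch, not part of the driver: acking a hot plug event
 * from inside an event_cb handler; the domain code does the equivalent
 * for real TB_CFG_PKG_EVENT packets:
 *
 *	const struct cfg_event_pkg *pkg = buf;
 *	u64 route = tb_cfg_get_route(&pkg->header);
 *
 *	tb_cfg_ack_plug(ctl, route, pkg->port, pkg->unplug);
 */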

static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}
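
/*
 * Illustrative sketch, not part of the driver: reading a single dword
 * of router config space with the raw variant, which leaves a
 * Thunderbolt error reply untranslated for the caller:
 *
 *	u32 val;
 *	struct tb_cfg_result res;
 *
 *	res = tb_cfg_read_raw(ctl, &val, route, 0, TB_CFG_SWITCH, 0, 1,
 *			      ctl->timeout_msec);
 *	if (res.err == 1)
 *		pr_err("thunderbolt error %d\n", res.tb_error);
 *	else if (!res.err)
 *		pr_info("first switch dword: %#x\n", val);
 */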

/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	else if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}
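
/*
 * Illustrative sketch, not part of the driver: the cooked helpers
 * above return 0 or a negative errno, translating Thunderbolt error
 * replies through tb_cfg_get_error(). Offsets and lengths here are
 * hypothetical:
 *
 *	u32 data[2];
 *	int ret;
 *
 *	ret = tb_cfg_read(ctl, data, route, port, TB_CFG_PORT, 0, 2);
 *	if (ret)	// e.g. -ENODEV for an unimplemented port
 *		return ret;
 *	data[0] |= BIT(0);
 *	ret = tb_cfg_write(ctl, data, route, port, TB_CFG_PORT, 0, 2);
 */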

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}