/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 *
 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};

/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *			 Thunderbolt dock (and Display Port). All PCIe
 *			 links downstream of the dock are removed.
 * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the
 *			PCIe tunneling is disabled from the BIOS.
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
	TB_SECURITY_USBONLY,
	TB_SECURITY_NOPCIE,
};

/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *	  tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
 */
struct tb {
	struct device dev;
	struct mutex lock;
	struct tb_nhi *nhi;
	struct tb_ctl *ctl;
	struct workqueue_struct *wq;
	struct tb_switch *root_switch;
	const struct tb_cm_ops *cm_ops;
	int index;
	enum tb_security_level security_level;
	size_t nboot_acl;
	unsigned long privdata[];
};

extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT	2

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	return (link - 1) / TB_LINKS_PER_PHY_PORT;
}
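
/*
 * Links are numbered starting from 1, so with two links per physical
 * port, links 1 and 2 map to physical port 0 and links 3 and 4 map to
 * physical port 1:
 *
 *	tb_phy_port_from_link(1) == 0
 *	tb_phy_port_from_link(4) == 1
 */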

/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * User needs to provide serialization if needed.
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};

enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,
	TB_PROPERTY_TYPE_DATA = 0x64,
	TB_PROPERTY_TYPE_TEXT = 0x74,
	TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE	8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always terminated).
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;
		u8 *data;
		char *text;
		u32 immediate;
	} value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
				     const char *key, enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);

#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
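
/*
 * A minimal sketch of how a service driver might build and publish its
 * own property directory; the directory key and property values below
 * are made up for illustration:
 *
 *	static int example_publish_properties(void)
 *	{
 *		struct tb_property_dir *dir;
 *		int ret;
 *
 *		dir = tb_property_create_dir(NULL);
 *		if (!dir)
 *			return -ENOMEM;
 *
 *		ret = tb_property_add_immediate(dir, "prtcid", 1);
 *		if (!ret)
 *			ret = tb_property_add_text(dir, "device", "Example");
 *		if (!ret)
 *			ret = tb_register_property_dir("example", dir);
 *		if (ret)
 *			tb_property_free_dir(dir);
 *		return ret;
 *	}
 */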

/**
 * enum tb_link_width - Thunderbolt/USB4 link width
 * @TB_LINK_WIDTH_SINGLE: Single lane link
 * @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
 * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
 * @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
 */
enum tb_link_width {
	TB_LINK_WIDTH_SINGLE = BIT(0),
	TB_LINK_WIDTH_DUAL = BIT(1),
	TB_LINK_WIDTH_ASYM_TX = BIT(2),
	TB_LINK_WIDTH_ASYM_RX = BIT(3),
};

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string through which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @local_max_hopid: Maximum input HopID of this host
 * @remote_max_hopid: Maximum input HopID of the remote host
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the downstream facing link
 * @link_usb4: Downstream link is USB4
 * @is_unplugged: The XDomain is unplugged
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *		queried first
 * @service_ids: Used to generate IDs for the services
 * @in_hopids: Input HopIDs for DMA tunneling
 * @out_hopids: Output HopIDs for DMA tunneling
 * @local_property_block: Local block of properties
 * @local_property_block_gen: Generation of @local_property_block
 * @local_property_block_len: Length of the @local_property_block in dwords
 * @remote_properties: Properties exported by the remote domain
 * @remote_property_block_gen: Generation of @remote_properties
 * @state: Next XDomain discovery state to run
 * @state_work: Work used to run the next state
 * @state_retries: Number of retries remaining for the state
 * @properties_changed_work: Work used to notify the remote domain that
 *			     our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *				changed notification
 * @bonding_possible: True if lane bonding is possible on the local side
 * @target_link_width: Target link width from the remote host
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain the remote domain is connected at (ICM only)
 *
 * This structure represents a connection across two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	unsigned int local_max_hopid;
	unsigned int remote_max_hopid;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	enum tb_link_width link_width;
	bool link_usb4;
	bool is_unplugged;
	bool needs_uuid;
	struct ida service_ids;
	struct ida in_hopids;
	struct ida out_hopids;
	u32 *local_property_block;
	u32 local_property_block_gen;
	u32 local_property_block_len;
	struct tb_property_dir *remote_properties;
	u32 remote_property_block_gen;
	int state;
	struct delayed_work state_work;
	int state_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	bool bonding_possible;
	u8 target_link_width;
	u8 link;
	u8 depth;
};

int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring);

static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
{
	return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
}
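
/*
 * A rough sketch of setting up DMA tunneling to the remote host:
 * allocate input/output HopIDs and enable the paths. The path and ring
 * arguments passed to tb_xdomain_enable_paths() are placeholders here;
 * a real driver gets the paths from its protocol handshake and the
 * ring numbers from the rings it has allocated:
 *
 *	static int example_enable_dma(struct tb_xdomain *xd)
 *	{
 *		int in_hopid, out_hopid, ret;
 *
 *		in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
 *		if (in_hopid < 0)
 *			return in_hopid;
 *		out_hopid = tb_xdomain_alloc_out_hopid(xd, -1);
 *		if (out_hopid < 0) {
 *			tb_xdomain_release_in_hopid(xd, in_hopid);
 *			return out_hopid;
 *		}
 *		ret = tb_xdomain_enable_paths(xd, out_hopid, 0, in_hopid, 0);
 *		if (ret) {
 *			tb_xdomain_release_out_hopid(xd, out_hopid);
 *			tb_xdomain_release_in_hopid(xd, in_hopid);
 *		}
 *		return ret;
 *	}
 */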

struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
	if (xd)
		get_device(&xd->dev);
	return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
	if (xd)
		put_device(&xd->dev);
}

static inline bool tb_is_xdomain(const struct device *dev)
{
	return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
	if (tb_is_xdomain(dev))
		return container_of(dev, struct tb_xdomain, dev);
	return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);
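
/*
 * A hedged sketch of a request/response exchange; struct example_req
 * and struct example_resp stand in for a real protocol specific
 * message layout:
 *
 *	struct example_req req = { 0 };
 *	struct example_resp resp;
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &resp, sizeof(resp),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *	if (ret)
 *		return ret;
 */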

/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *	      here tells the XDomain core that the message was handled
 *	      by this handler and should not be forwarded to other
 *	      handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the
 * XDomain discovery protocol UUID cannot be registered since it is
 * handled by the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
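
/*
 * A minimal sketch of hooking into incoming XDomain messages. The UUID
 * and the example_msg_is_for_us()/example_handle_msg() helpers are
 * hypothetical:
 *
 *	static int example_callback(const void *buf, size_t size, void *data)
 *	{
 *		if (!example_msg_is_for_us(buf, size))
 *			return 0;
 *		example_handle_msg(buf, size, data);
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler example_handler = {
 *		.uuid = &example_proto_uuid,
 *		.callback = example_callback,
 *	};
 *
 *	tb_register_protocol_handler(&example_handler);
 */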

/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 * @debugfs_dir: Pointer to the service debugfs directory. Always created
 *		 when debugfs is enabled. Can be used by service drivers to
 *		 add their own entries under the service.
 *
 * Each domain exposes the set of services it supports as a collection
 * of properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
	struct dentry *debugfs_dir;
};

static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
	if (svc)
		get_device(&svc->dev);
	return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
	if (svc)
		put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
	return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
	if (tb_is_service(dev))
		return container_of(dev, struct tb_service, dev);
	return NULL;
}

/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};

#define TB_SERVICE(key, id)				\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY |	\
		       TBSVC_MATCH_PROTOCOL_ID,		\
	.protocol_key = (key),				\
	.protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);
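
/*
 * A service driver skeleton, loosely following what existing service
 * drivers such as thunderbolt-net do (all names here are illustrative):
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{ TB_SERVICE("example", 1) },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver example_driver = {
 *		.driver = {
 *			.owner = THIS_MODULE,
 *			.name = "example",
 *		},
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.id_table = example_ids,
 *	};
 *
 *	tb_register_service_driver(&example_driver);
 */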

static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
	return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
	dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
	return tb_to_xdomain(svc->dev.parent);
}

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @ops: NHI specific optional ops
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @iommu_dma_protection: An IOMMU will isolate external-facing ports.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 * @quirks: NHI specific quirks if any
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	const struct tb_nhi_ops *ops;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	bool iommu_dma_protection;
	struct work_struct interrupt_work;
	u32 hop_count;
	unsigned long quirks;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of @descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
 *		RX ring. For TX ring this should be set to %0.
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *		polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	int e2e_tx_hop;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x4,
	RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};

/* Minimum size for ring_rx */
#define TB_FRAME_SIZE		0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, int e2e_tx_hop,
				 u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);
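
/*
 * A rough sketch of bringing up a TX ring in frame mode; the hop
 * number (1) and ring size (16) are placeholders:
 *
 *	struct tb_ring *ring;
 *
 *	ring = tb_ring_alloc_tx(nhi, 1, 16, RING_FLAG_FRAME);
 *	if (!ring)
 *		return -ENOMEM;
 *	tb_ring_start(ring);
 *
 *	...
 *
 *	tb_ring_stop(ring);
 *	tb_ring_free(ring);
 */
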
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy have to be set. The buffer must
 * contain at least %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy, @frame->size, @frame->eof and
 * @frame->sof have to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop() is called after the packet has been enqueued @frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);

/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
	return &ring->nhi->pdev->dev;
}
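
/*
 * A hedged sketch of mapping a buffer and queueing it for transmit;
 * example_callback, buf and len are illustrative, and the sof/eof
 * protocol defined fields are simply zeroed here:
 *
 *	static int example_xmit(struct tb_ring *ring, struct ring_frame *frame,
 *				void *buf, size_t len)
 *	{
 *		struct device *dma_dev = tb_ring_dma_device(ring);
 *
 *		frame->buffer_phy = dma_map_single(dma_dev, buf, len,
 *						   DMA_TO_DEVICE);
 *		if (dma_mapping_error(dma_dev, frame->buffer_phy))
 *			return -ENOMEM;
 *		frame->callback = example_callback;
 *		frame->size = len;
 *		frame->sof = 0;
 *		frame->eof = 0;
 *		return tb_ring_tx(ring, frame);
 *	}
 */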

#endif /* THUNDERBOLT_H_ */