/* SPDX-License-Identifier: GPL-2.0 */

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */
#ifndef _GSI_H_
#define _GSI_H_

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "ipa_version.h"

/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX	23
#define GSI_EVT_RING_COUNT_MAX	24

/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
#define GSI_TLV_MAX		64

struct device;
struct scatterlist;
struct platform_device;

struct gsi;
struct gsi_trans;
struct gsi_channel_data;
struct ipa_gsi_endpoint_data;

/* Execution environment IDs */
enum gsi_ee_id {
	GSI_EE_AP				= 0x0,
	GSI_EE_MODEM				= 0x1,
	GSI_EE_UC				= 0x2,
	GSI_EE_TZ				= 0x3,
};

struct gsi_ring {
	void *virt;			/* ring array base address */
	dma_addr_t addr;		/* primarily low 32 bits used */
	u32 count;			/* number of elements in ring */

	/* The ring index value indicates the next "open" entry in the ring.
	 *
	 * A channel ring consists of TRE entries filled by the AP and passed
	 * to the hardware for processing.  For a channel ring, the ring index
	 * identifies the next unused entry to be filled by the AP.
	 *
	 * An event ring consists of event structures filled by the hardware
	 * and passed to the AP.  For event rings, the ring index identifies
	 * the next ring entry that is not known to have been filled by the
	 * hardware.
	 */
	u32 index;
};
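
/* Illustrative sketch only (not part of this interface): code using a
 * gsi_ring treats @index as advancing modulo @count, so the slot a given
 * index refers to can be computed from the ring base.  The element size
 * and helper below are assumptions made purely for illustration.
 *
 *	#define RING_ELEMENT_SIZE	16	// assumed element size
 *
 *	static void *ring_virt(struct gsi_ring *ring, u32 index)
 *	{
 *		// Map a ring index to the address of its slot
 *		return ring->virt + (index % ring->count) * RING_ELEMENT_SIZE;
 *	}
 */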

/* Transactions use several resources that can be allocated dynamically
 * but taken from a fixed-size pool.  The number of elements required for
 * the pool is limited by the total number of TREs that can be outstanding.
 *
 * If sufficient TREs are available to reserve for a transaction,
 * allocation from these pools is guaranteed to succeed.  Furthermore,
 * these resources are implicitly freed whenever the TREs in the
 * transaction they're associated with are released.
 *
 * The result of a pool allocation of multiple elements is always
 * contiguous.
 */
struct gsi_trans_pool {
	void *base;			/* base address of element pool */
	u32 count;			/* # elements in the pool */
	u32 free;			/* next free element in pool (modulo) */
	u32 size;			/* size (bytes) of an element */
	u32 max_alloc;			/* max allocation request */
	dma_addr_t addr;		/* DMA address if DMA pool (or 0) */
};
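
/* Illustrative sketch only: one plausible way such a pool can hand out
 * contiguous runs of elements is to advance @free and wrap back to the
 * pool base whenever a request would not fit before the end.  The function
 * name and details below are assumptions; the real allocator lives in
 * gsi_trans.c.
 *
 *	static void *pool_alloc(struct gsi_trans_pool *pool, u32 count)
 *	{
 *		void *virt;
 *
 *		// Start over at the base if the run would wrap
 *		if (count > pool->count - pool->free)
 *			pool->free = 0;
 *
 *		virt = pool->base + pool->size * pool->free;
 *		pool->free += count;
 *
 *		return virt;
 *	}
 */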

struct gsi_trans_info {
	atomic_t tre_avail;		/* TREs available for allocation */
	struct gsi_trans_pool pool;	/* transaction pool */
	struct gsi_trans_pool sg_pool;	/* scatterlist pool */
	struct gsi_trans_pool cmd_pool;	/* command payload DMA pool */
	struct gsi_trans **map;		/* TRE -> transaction map */

	spinlock_t spinlock;		/* protects updates to the lists */
	struct list_head alloc;		/* allocated, not committed */
	struct list_head pending;	/* committed, awaiting completion */
	struct list_head complete;	/* completed, awaiting poll */
	struct list_head polled;	/* returned by gsi_channel_poll_one() */
};
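
/* Illustrative sketch only: a transaction is expected to move through the
 * lists above in order (alloc -> pending -> complete -> polled), with the
 * spinlock held for each move.  Assuming struct gsi_trans embeds a
 * list_head named @links, one such transition might look like:
 *
 *	spin_lock_bh(&trans_info->spinlock);
 *	list_move_tail(&trans->links, &trans_info->complete);
 *	spin_unlock_bh(&trans_info->spinlock);
 */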

/* Hardware values signifying the state of a channel */
enum gsi_channel_state {
	GSI_CHANNEL_STATE_NOT_ALLOCATED		= 0x0,
	GSI_CHANNEL_STATE_ALLOCATED		= 0x1,
	GSI_CHANNEL_STATE_STARTED		= 0x2,
	GSI_CHANNEL_STATE_STOPPED		= 0x3,
	GSI_CHANNEL_STATE_STOP_IN_PROC		= 0x4,
	GSI_CHANNEL_STATE_FLOW_CONTROLLED	= 0x5,	/* IPA v4.2-v4.9 */
	GSI_CHANNEL_STATE_ERROR			= 0xf,
};

/* We only care about channels between IPA and AP */
struct gsi_channel {
	struct gsi *gsi;
	bool toward_ipa;
	bool command;			/* AP command TX channel or not */

	u8 tlv_count;			/* # entries in TLV FIFO */
	u16 tre_count;
	u16 event_count;

	struct gsi_ring tre_ring;
	u32 evt_ring_id;

	u64 byte_count;			/* total # bytes transferred */
	u64 trans_count;		/* total # transactions */
	/* The following counts are used only for TX endpoints */
	u64 queued_byte_count;		/* last reported queued byte count */
	u64 queued_trans_count;		/* ...and queued trans count */
	u64 compl_byte_count;		/* last reported completed byte count */
	u64 compl_trans_count;		/* ...and completed trans count */

	struct gsi_trans_info trans_info;

	struct napi_struct napi;
};

/* Hardware values signifying the state of an event ring */
enum gsi_evt_ring_state {
	GSI_EVT_RING_STATE_NOT_ALLOCATED	= 0x0,
	GSI_EVT_RING_STATE_ALLOCATED		= 0x1,
	GSI_EVT_RING_STATE_ERROR		= 0xf,
};

struct gsi_evt_ring {
	struct gsi_channel *channel;
	struct gsi_ring ring;
};

struct gsi {
	struct device *dev;		/* Same as IPA device */
	enum ipa_version version;
	void __iomem *virt_raw;		/* I/O mapped address range */
	void __iomem *virt;		/* Adjusted for most registers */
	u32 irq;
	u32 channel_count;
	u32 evt_ring_count;
	u32 event_bitmap;		/* allocated event rings */
	u32 modem_channel_bitmap;	/* modem channels to allocate */
	u32 type_enabled_bitmap;	/* GSI IRQ types enabled */
	u32 ieob_enabled_bitmap;	/* IEOB IRQ enabled (event rings) */
	int result;			/* Negative errno (generic commands) */
	struct completion completion;	/* Signals GSI command completion */
	struct mutex mutex;		/* protects commands, programming */
	struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
	struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
	struct net_device dummy_dev;	/* needed for NAPI */
};
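
/* Illustrative sketch only: @event_bitmap records which event ring IDs are
 * in use, so a free ID can be found with a find-first-zero operation.  The
 * helper below is hypothetical and assumes the caller holds @mutex.
 *
 *	static u32 evt_ring_id_alloc(struct gsi *gsi)
 *	{
 *		u32 evt_ring_id = ffz(gsi->event_bitmap);
 *
 *		gsi->event_bitmap |= BIT(evt_ring_id);
 *
 *		return evt_ring_id;
 *	}
 */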

/**
 * gsi_setup() - Set up the GSI subsystem
 * @gsi:	Address of GSI structure embedded in an IPA structure
 *
 * Return:	0 if successful, or a negative error code
 *
 * Performs initialization that must wait until the GSI hardware is
 * ready (including firmware loaded).
 */
int gsi_setup(struct gsi *gsi);

/**
 * gsi_teardown() - Tear down GSI subsystem
 * @gsi:	GSI address previously passed to a successful gsi_setup() call
 */
void gsi_teardown(struct gsi *gsi);

/**
 * gsi_channel_tre_max() - Channel maximum number of in-flight TREs
 * @gsi:	GSI pointer
 * @channel_id:	Channel whose limit is to be returned
 *
 * Return:	 The maximum number of TREs outstanding on the channel
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
 * @gsi:	GSI pointer
 * @channel_id:	Channel whose limit is to be returned
 *
 * Return:	 The maximum TRE count per transaction on the channel
 */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_start() - Start an allocated GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to start
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_start(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_stop() - Stop a started GSI channel
 * @gsi:	GSI pointer returned by gsi_setup()
 * @channel_id:	Channel to stop
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
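
/* Example of a hypothetical caller (illustration only), showing the usual
 * error handling around channel start and stop:
 *
 *	ret = gsi_channel_start(gsi, channel_id);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = gsi_channel_stop(gsi, channel_id);
 *	if (ret)
 *		dev_err(gsi->dev, "error %d stopping channel %u\n",
 *			ret, channel_id);
 */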

/**
 * gsi_modem_channel_flow_control() - Set channel flow control state (IPA v4.2+)
 * @gsi:	GSI pointer returned by gsi_setup()
 * @channel_id:	Modem TX channel to control
 * @enable:	Whether to enable flow control (i.e., prevent flow)
 */
void gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id,
				    bool enable);

/**
 * gsi_channel_reset() - Reset an allocated GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to be reset
 * @doorbell:	Whether to (possibly) enable the doorbell engine
 *
 * Reset a channel and reconfigure it.  The @doorbell flag indicates
 * that the doorbell engine should be enabled if needed.
 *
 * GSI hardware relinquishes ownership of all pending receive buffer
 * transactions and they will complete with their cancelled flag set.
 */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);

/**
 * gsi_suspend() - Prepare the GSI subsystem for suspend
 * @gsi:	GSI pointer
 */
void gsi_suspend(struct gsi *gsi);

/**
 * gsi_resume() - Resume the GSI subsystem following suspend
 * @gsi:	GSI pointer
 */
void gsi_resume(struct gsi *gsi);

/**
 * gsi_channel_suspend() - Suspend a GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to suspend
 *
 * For IPA v4.0+, suspend is implemented by stopping the channel.
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_resume() - Resume a suspended GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to resume
 *
 * For IPA v4.0+, the stopped channel is started again.
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id);
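
/* Example of a hypothetical caller (illustration only), pairing the two
 * calls across a suspend/resume cycle; on IPA v4.0+ these are expected to
 * reduce, roughly, to the stop and start operations declared above:
 *
 *	ret = gsi_channel_suspend(gsi, channel_id);	// system suspend path
 *	if (ret)
 *		return ret;
 *	...
 *	ret = gsi_channel_resume(gsi, channel_id);	// system resume path
 */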

/**
 * gsi_init() - Initialize the GSI subsystem
 * @gsi:	Address of GSI structure embedded in an IPA structure
 * @pdev:	IPA platform device
 * @version:	IPA hardware version (implies GSI version)
 * @count:	Number of entries in the configuration data array
 * @data:	Endpoint and channel configuration data
 *
 * Return:	0 if successful, or a negative error code
 *
 * Early stage initialization of the GSI subsystem, performing tasks
 * that can be done before the GSI hardware is ready to use.
 */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data);
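
/* Example of the expected call ordering (hypothetical caller, for
 * illustration only):
 *
 *	ret = gsi_init(gsi, pdev, version, count, data);  // at probe time
 *	if (ret)
 *		return ret;
 *	...
 *	ret = gsi_setup(gsi);	// once the GSI hardware/firmware is ready
 *	...
 *	gsi_teardown(gsi);	// inverse of gsi_setup()
 *	gsi_exit(gsi);		// inverse of gsi_init()
 */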

/**
 * gsi_exit() - Exit the GSI subsystem
 * @gsi:	GSI address previously passed to a successful gsi_init() call
 */
void gsi_exit(struct gsi *gsi);

#endif /* _GSI_H_ */