// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core. The modem uses the GSI layer as well.
 *
 *      --------             ---------
 *      |      |             |       |
 *      |  AP  +<---.   .----+ Modem |
 *      |      +--. |   | .->+       |
 *      |      |  | |   | |  |       |
 *      --------  | |   | |  ---------
 *                v |   v |
 *              --+-+---+-+--
 *              |    GSI    |
 *              |-----------|
 *              |           |
 *              |    IPA    |
 *              |           |
 *              -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA. A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA. A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM. After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE)
 * writes a doorbell register to inform the receiving side how many elements
 * have been written.
 *
 * Each channel has a GSI "event ring" associated with it. An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted. Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags. One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring. Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order. Completion of one entry implies the completion of all preceding
 * entries. A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses. The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
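
/* To make the flow concrete, here is an illustrative walkthrough of a
 * single AP-to-IPA (TX) transfer under the scheme described above; the
 * details live in gsi_trans.c and the functions below:
 *
 *  1. The AP fills one or more TREs in the channel ring with the DMA
 *     address and length of each data block, chaining them into a single
 *     transaction and requesting an interrupt on the last one.
 *  2. The AP writes the channel doorbell (gsi_channel_doorbell()) to
 *     tell the hardware how far the ring has been filled.
 *  3. The IPA performs the transfer, appends a completion event (whose
 *     xfer_ptr names the completed TRE) to the channel's event ring,
 *     and raises an IEOB interrupt.
 *  4. gsi_isr_ieob() schedules NAPI, and gsi_channel_poll() retires
 *     the completed transaction(s).
 */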

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10
#define GSI_CHANNEL_MODEM_FLOW_RETRIES	5	/* disable flow control only */

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};
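
/* The fields above total 16 bytes (8 + 2 + 1 + 1 + 2 + 1 + 1), matching
 * GSI_RING_ELEMENT_SIZE; gsi_validate_build() below verifies this. The
 * xfer_ptr field holds the 64-bit address of the TRE the event completes,
 * and for RX completions len holds the number of bytes actually received
 * (see gsi_event_trans() and gsi_evt_ring_rx_update()).
 */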

/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel, expressed in bytes. This determines the amount of
 *	prefetch performed by the hardware. We configure this to equal
 *	the size of the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell. We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};
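
/* A worked example of the values programmed by gsi_channel_program()
 * below, assuming for illustration a channel whose TLV FIFO holds 8 TREs:
 * with 16-byte ring elements, max_outstanding_tre would be 8 * 16 = 128
 * bytes and outstanding_threshold 2 * 16 = 32 bytes.
 */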

/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the sizes of channel and event ring elements are
	 * the same (and fixed). Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size. We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
	return !!channel->gsi;
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Event ring commands are performed one at a time. Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command. Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time. Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command. Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~event_mask;

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports. Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset). A breakpoint interrupt
	 * also exists, but we don't support that. We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled. Returns true if the command completes
 * or false if it times out.
 */
static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
	struct completion *completion = &gsi->completion;

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}
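
/* All of the channel, event ring, and generic command functions below
 * funnel through gsi_command(); the matching complete() call arrives
 * from gsi_isr() via the per-command handlers (gsi_isr_chan_ctrl(),
 * gsi_isr_evt_ctrl(), or gsi_isr_gp_int1()).
 */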

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	/* Get initial event ring state */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void __iomem *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out. If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell. Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
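
/* A note on the arithmetic above: because index is a u32, ringing the
 * doorbell with index 0 computes (0 - 1) % count on the wrapped value
 * 0xffffffff, which for any power-of-2 count yields count - 1, i.e. the
 * last element in the ring, as intended.
 */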

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = ev_r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = lower_32_bits(evt_ring->ring.addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
	val = upper_32_bits(evt_ring->ring.addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and the MSI high and low addresses are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	const struct list_head *list;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	/* There is a small chance a TX transaction got allocated just
	 * before we disabled transmits, so check for that.
	 */
	if (channel->toward_ipa) {
		list = &trans_info->alloc;
		if (!list_empty(list))
			goto done;
		list = &trans_info->pending;
		if (!list_empty(list))
			goto done;
	}

	/* Otherwise (TX or RX) we want to wait for anything that
	 * has completed, or has been polled but not released yet.
	 */
	list = &trans_info->complete;
	if (!list_empty(list))
		goto done;
	list = &trans_info->polled;
	if (list_empty(list))
		list = NULL;
done:
	trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = lower_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
	val = upper_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* No need to use the doorbell engine starting at IPA v4.0 */
	if (gsi->version < IPA_VERSION_4_0 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch. We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}
	/* All channels set DB_IN_BYTES */
	if (gsi->version >= IPA_VERSION_4_9)
		val |= DB_IN_BYTES;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the upper 16 bits of the last scratch register.
	 * The next sequence assumes those bits remain unchanged between the
	 * read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (resume && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	/* Enable NAPI and the completion interrupt */
	napi_enable(&channel->napi);
	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

	ret = __gsi_channel_start(channel, false);
	if (ret) {
		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
		napi_disable(&channel->napi);
	}

	return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

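	/* Issue the stop command until it succeeds or retries are
	 * exhausted. With the post-decrement below the command is
	 * attempted up to GSI_CHANNEL_STOP_RETRIES + 1 times, sleeping
	 * a few milliseconds between attempts while a stop is still in
	 * progress.
	 */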
	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}

static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Wait for any underway transactions to complete before stopping. */
	gsi_channel_trans_quiesce(channel);

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (suspend && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_stop_retry(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, false);
	if (ret)
		return ret;

	/* Disable the completion interrupt and NAPI if successful */
	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
	napi_disable(&channel->napi);

	return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a started channel for suspend */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, true);
	if (ret)
		return ret;

	/* Ensure NAPI polling has finished. */
	napi_synchronize(&channel->napi);

	return 0;
}

/* Resume a suspended channel (starting if stopped) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return __gsi_channel_start(channel, true);
}

/* Prevent all GSI interrupts while suspended */
void gsi_suspend(struct gsi *gsi)
{
	disable_irq(gsi->irq);
}

/* Allow all GSI interrupts again when resuming */
void gsi_resume(struct gsi *gsi)
{
	enable_irq(gsi->irq);
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call. This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent. We also track what those values are
 * each time this function is called. Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}
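
/* Illustrative numbers for the computation above: if the channel has
 * byte_count = 5000 and trans_count = 8, and the previous call recorded
 * queued_byte_count = 3000 and queued_trans_count = 5, then this call
 * reports 2000 bytes in 3 transactions newly queued, and records the
 * new totals for next time.
 */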

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed. Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}
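
/* Illustrative numbers for the computation above: a transaction committed
 * when the channel had already committed 4 transactions totaling 2600
 * bytes carries trans->trans_count = 4 and trans->byte_count = 2600. If
 * its own length is 400 bytes, its completion brings the totals to 5 and
 * 3000; subtracting the totals recorded at the last completion (say 3 and
 * 2000) yields 2 transactions and 1000 bytes newly completed.
 */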

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);

		channel_mask ^= BIT(channel_id);

		complete(&gsi->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		complete(&gsi->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&gsi->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of GENERIC GSI
	 * commands. We use these to allocate and halt channels on the
	 * modem's behalf due to a hardware quirk on IPA v4.2. The modem
	 * "owns" channels even when the AP allocates them, and we have
	 * no way of knowing whether a modem channel's state has been
	 * changed.
	 *
	 * We also use GENERIC commands to enable/disable channel flow
	 * control for IPA v4.2+.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command. We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 * Similarly, we could get an error back when updating flow control
	 * on a channel because it's not in the proper state.
	 *
	 * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
	 * error if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_INCORRECT_CHANNEL_STATE:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	gsi_irq_ieob_disable(gsi, event_mask);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}
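
/* Note the interplay with NAPI above: each IEOB interrupt is masked here
 * before its channel's NAPI instance is scheduled, and is unmasked again
 * by gsi_channel_poll() once polling completes with budget to spare.
 */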

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0)
		return ret ? : -EINVAL;

	gsi->irq = ret;

	return 0;
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer. Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked. The ring's index field tells
 * the first entry in need of processing. The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 trans_count = 0;
	u32 byte_count = 0;
	u32 event_avail;
	u32 old_index;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event. RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction. Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;
		trans_count++;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count += trans_count;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	u32 size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size.
	 * The DMA address returned by dma_alloc_coherent() is guaranteed to
	 * be a power-of-2 number of pages, which satisfies the requirement.
	 */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;

	ring->addr = addr;
	ring->count = count;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done. Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return NULL;

	/* Get the transaction for the latest completed event. */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received. For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, index);

	return gsi_channel_trans_complete(channel);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list. If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed. If so, they're moved to the
 * completed list and the new first entry is returned. If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans)	/* List is empty; see if there's more to do */
		trans = gsi_channel_update(channel);

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more. Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count;

	channel = container_of(napi, struct gsi_channel, napi);
	for (count = 0; count < budget; count++) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget && napi_complete(napi))
		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

	return count;
}
1576 /* The event bitmap represents which event ids are available for allocation.
1577 * Set bits are not available, clear bits can be used. This function
1578 * initializes the map so all events supported by the hardware are available,
1579 * then precludes any reserved events from being allocated.
1580 */
gsi_event_bitmap_init(u32 evt_ring_max)1581 static u32 gsi_event_bitmap_init(u32 evt_ring_max)
1582 {
1583 u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
1584
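	/* Event ring ids reserved for MHI are precluded from use as well */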
	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!gsi_channel_initialized(channel))
		return 0;

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

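	/* Register the channel's NAPI instance; TX channels use the TX variant */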
	if (channel->toward_ipa)
		netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!gsi_channel_initialized(channel))
		return;

	netif_napi_del(&channel->napi);

	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

/* We use generic commands only to operate on modem channels. We don't have
 * the ability to determine channel state for a modem channel, so we simply
 * issue the command and wait for it to complete.
 */
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode,
			       u8 params)
{
	bool timeout;
	u32 val;

	/* The error global interrupt type is always enabled (until we tear
	 * down), so we will keep it enabled.
	 *
	 * A generic EE command completes with a GSI global interrupt of
	 * type GP_INT1. We only perform one generic command at a time
	 * (to allocate, halt, or enable/disable flow control on a modem
	 * channel), and only from this function. So we enable the GP_INT1
	 * IRQ type here, and disable it again after the command completes.
	 */
	val = BIT(ERROR_INT) | BIT(GP_INT1);
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
	val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);

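	/* gsi_command() returns true if the command completes in time */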
	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);

	/* Disable the GP_INT1 IRQ type again */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	if (!timeout)
		return gsi->result;

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
	int ret;

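	/* The halt command can return -EAGAIN; retry a limited number of times */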
	do
		ret = gsi_generic_command(gsi, channel_id,
					  GSI_GENERIC_HALT_CHANNEL, 0);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
void
gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
{
	u32 retries = 0;
	u32 command;
	int ret;

	command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
			 : GSI_GENERIC_DISABLE_FLOW_CONTROL;
	/* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
	 * is underway. In this case we need to retry the command.
	 */
	if (!enable && gsi->version >= IPA_VERSION_4_11)
		retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;

	do
		ret = gsi_generic_command(gsi, channel_id, command, 0);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev,
			"error %d %sabling modem channel %u flow control\n",
			ret, enable ? "en" : "dis", channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!gsi_channel_initialized(channel))
			continue;

		ret = -EINVAL;
		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated. Bits still
	 * set in mask represent channels not yet allocated, so XOR with
	 * the full bitmap leaves only those that were.
	 */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);

	return ret;
}

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
}

/* Turn off all GSI interrupts initially */
static int gsi_irq_setup(struct gsi *gsi)
{
	int ret;

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
	if (gsi->version > IPA_VERSION_3_1) {
		u32 offset;

		/* These registers are in the non-adjusted address range */
		offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
		iowrite32(0, gsi->virt_raw + offset);
		offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
		iowrite32(0, gsi->virt_raw + offset);
	}

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);

	ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
	if (ret)
		dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);

	return ret;
}

static void gsi_irq_teardown(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
static int gsi_ring_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 count;
	u32 val;

	if (gsi->version < IPA_VERSION_3_5_1) {
		/* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;

		return 0;
	}

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, count);
		count = GSI_CHANNEL_COUNT_MAX;
	}
	gsi->channel_count = count;

	count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, count);
		count = GSI_EVT_RING_COUNT_MAX;
	}
	gsi->evt_ring_count = count;

	return 0;
}

/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(gsi->dev, "GSI has not been enabled\n");
		return -EIO;
	}

	ret = gsi_irq_setup(gsi);
	if (ret)
		return ret;

	ret = gsi_ring_setup(gsi);	/* No matching teardown required */
	if (ret)
		goto err_irq_teardown;

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	ret = gsi_channel_setup(gsi);
	if (ret)
		goto err_irq_teardown;

	return 0;

err_irq_teardown:
	gsi_irq_teardown(gsi);

	return ret;
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
	gsi_irq_teardown(gsi);
}

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs). Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
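	/* For example (illustrative values): a TLV FIFO of 16 entries
	 * requires a TRE ring of at least 2 * 16 - 1 = 31 entries (in
	 * practice 32, the next power of 2).
	 */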
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
			channel_id, data->channel.tlv_count,
			data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, data->channel.event_count);
		return false;
	}

	return true;
}

/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!gsi_channel_initialized(channel))
		return;

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

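	/* Tear channels down in reverse order; gsi_channel_exit_one()
	 * silently skips channels that were never initialized.
	 */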
	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;
}

/* Init function for GSI. GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	u32 adjust;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* GSI uses NAPI on all channels. Create a dummy network device
	 * for the channel NAPI contexts to be associated with.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	/* Make sure we can make our pointer adjustment if necessary */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	if (res->start < adjust) {
		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
			adjust);
		return -EINVAL;
	}

	gsi->virt_raw = ioremap(res->start, size);
	if (!gsi->virt_raw) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}
	/* Most registers are accessed using an adjusted register range */
	gsi->virt = gsi->virt_raw - adjust;

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);	/* No matching exit required */
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_iounmap;

	mutex_init(&gsi->mutex);

	return 0;

err_iounmap:
	iounmap(gsi->virt_raw);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	iounmap(gsi->virt_raw);
}

/* The maximum number of outstanding TREs on a channel. This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them. But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information. So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though. Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous. The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it. Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements. The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
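 *
 * For example (illustrative values): a channel with tre_count 256 and
 * tlv_count 16 would allow at most 256 - (16 - 1) = 241 outstanding TREs.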
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}

/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}
