// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"

/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of IPA commands in a single structure.
 * (A "command" in this sense is either a data transfer or an IPA immediate
 * command.) Most details of interaction with the GSI hardware are managed
 * by the GSI transaction core, allowing users to simply describe commands
 * to be performed. When a transaction has completed, a callback function
 * (dependent on the type of endpoint associated with the channel) allows
 * cleanup of resources associated with the transaction.
 *
 * To perform a command (or set of them), a user of the GSI transaction
 * interface allocates a transaction, indicating the number of TREs required
 * (one per command). If sufficient TREs are available, they are reserved
 * for use in the transaction and the allocation succeeds. This way,
 * exhaustion of the available TREs in a channel ring is detected as early
 * as possible. All resources required to complete a transaction are
 * allocated at transaction allocation time.
 *
 * Commands performed as part of a transaction are represented in an array
 * of Linux scatterlist structures. This array is allocated with the
 * transaction, and its entries are initialized using standard scatterlist
 * functions (such as sg_set_buf() or skb_to_sgvec()).
 *
 * Once a transaction's scatterlist structures have been initialized, the
 * transaction is committed. The caller is responsible for mapping buffers
 * for DMA if necessary, and this should be done *before* allocating
 * the transaction. Between a successful allocation and commit of a
 * transaction no errors should occur.
 *
 * Committing transfers ownership of the entire transaction to the GSI
 * transaction core. The GSI transaction code formats the content of
 * the scatterlist array into the channel ring buffer and informs the
 * hardware that new TREs are available to process.
 *
 * The last TRE in each transaction is marked to interrupt the AP when the
 * GSI hardware has completed it. Because transfers described by TREs are
 * performed strictly in order, signaling the completion of just the last
 * TRE in the transaction is sufficient to indicate the full transaction
 * is complete.
 *
 * When a transaction is complete, ipa_gsi_trans_complete() is called by the
 * GSI code into the IPA layer, allowing it to perform any final cleanup
 * required before the transaction is freed.
 */
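
/* Illustrative lifecycle sketch (an assumption-laden example, not code
 * used by this file): a caller sending a single outbound SKB might
 * allocate a one-TRE transaction, attach the SKB (which maps it for
 * DMA), then commit and ring the doorbell. A NULL return from the
 * allocator means too few TREs were available; on a later error the
 * caller still owns the transaction and must free it:
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
 *	if (!trans)
 *		return -EBUSY;
 *	ret = gsi_trans_skb_add(trans, skb);
 *	if (ret) {
 *		gsi_trans_free(trans);
 *		return ret;
 *	}
 *	gsi_trans_commit(trans, true);
 */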

/* Hardware values representing a transfer element type */
enum gsi_tre_type {
	GSI_RE_XFER	= 0x2,
	GSI_RE_IMMD_CMD	= 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
	__le64 addr;		/* DMA address */
	__le16 len_opcode;	/* length in bytes or enum IPA_CMD_* */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)

int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* By allocating a few extra entries in our pool (one less
	 * than the maximum number that will be requested in a
	 * single allocation), we can always satisfy requests without
	 * ever worrying about straddling the end of the pool array.
	 * If there aren't enough entries starting at the free index,
	 * we just allocate free entries from the beginning of the pool.
	 */
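	/* For example (illustrative numbers), with count = 8 and
	 * max_alloc = 3 we ask for 10 entries (ksize() below may round
	 * that up further). A request for 3 entries when the free index
	 * is 7 fits in slots 7..9 thanks to the spares; when the free
	 * index is 8, only 2 slots remain, so allocation restarts at
	 * slot 0.
	 */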
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}

void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}

/* Home-grown DMA pool. This way we can preallocate, and use the channel's
 * TRE count to guarantee allocations will succeed. Even though we specify
 * max_alloc (and it can be more than one), we only allow allocation of a
 * single element from a DMA pool.
 */
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;

	/* The allocator will give us a power-of-2 number of pages
	 * sufficient to satisfy our request. Round up our requested
	 * size to avoid any unused space in the allocation. This way
	 * gsi_trans_pool_exit_dma() can assume the total allocated
	 * size is exactly (count * size).
	 */
	total_size = PAGE_SIZE << get_order(total_size);
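	/* Worked example (illustrative numbers, 4KB pages assumed):
	 * size = 24 rounds up to 32; count = 100 and max_alloc = 1
	 * request 100 * 32 = 3200 bytes, which get_order() rounds up
	 * to a single page, giving a pool of 4096 / 32 = 128 entries.
	 */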

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}

void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	size_t total_size = pool->count * pool->size;

	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}

/* Return the byte offset of the next free entry in the pool */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	WARN_ON(!count);
	WARN_ON(count > pool->max_alloc);

	/* Allocate from beginning if wrap would occur */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}

/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}

/* Return the pool element that immediately follows the one given.
 * This works only if elements are allocated one at a time.
 */
void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
	void *end = pool->base + pool->count * pool->size;

	WARN_ON(element < pool->base);
	WARN_ON(element >= end);
	WARN_ON(pool->max_alloc != 1);

	element += pool->size;

	return element < end ? element : pool->base;
}

/* Map a given ring entry index to the transaction associated with it */
static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
				  struct gsi_trans *trans)
{
	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	return list_first_entry_or_null(&channel->trans_info.complete,
					struct gsi_trans, links);
}

/* Move a transaction from the allocated list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->pending);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction and all of its predecessors from the pending list
 * to the completed list.
 */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to completed list */
	list_cut_position(&list, &trans_info->pending, &trans->links);
	list_splice_tail(&list, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction from the completed list to the polled list */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->polled);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Reserve some number of TREs on a channel. Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

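	/* Lock-free reservation: on failure, atomic_try_cmpxchg() updates
	 * "avail" with the current value of tre_avail, so we recompute
	 * "new" and retry until the decrement succeeds or too few TREs
	 * remain.
	 */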
	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}

/* Return true if no transactions are allocated, false otherwise */
bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
{
	u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
	struct gsi_trans_info *trans_info;

	trans_info = &gsi->channel[channel_id].trans_info;

	return atomic_read(&trans_info->tre_avail) == tre_max;
}

/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;

	if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))
		return NULL;

	trans_info = &channel->trans_info;

	/* We reserve the TREs now, but consume them at commit time.
	 * If there aren't enough available, we're done.
	 */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	/* Allocate and initialize non-zero fields in the transaction */
	trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->tre_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;

	spin_lock_bh(&trans_info->spinlock);

	list_add_tail(&trans->links, &trans_info->alloc);

	spin_unlock_bh(&trans_info->spinlock);

	refcount_set(&trans->refcount, 1);

	return trans;
}

/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
	refcount_t *refcount = &trans->refcount;
	struct gsi_trans_info *trans_info;
	bool last;

	/* We must hold the lock to release the last reference */
	if (refcount_dec_not_one(refcount))
		return;

	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;

	spin_lock_bh(&trans_info->spinlock);

	/* Reference might have been added before we got the lock */
	last = refcount_dec_and_test(refcount);
	if (last)
		list_del(&trans->links);

	spin_unlock_bh(&trans_info->spinlock);

	if (!last)
		return;

	ipa_gsi_trans_release(trans);

	/* Releasing the reserved TREs implicitly frees the sgl[] array,
	 * plus the transaction itself.
	 */
	gsi_trans_tre_release(trans_info, trans->tre_count);
}

/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
	u32 which = trans->used++;
	struct scatterlist *sg;

	WARN_ON(which >= trans->tre_count);

	/* Commands are quite different from data transfer requests.
	 * Their payloads come from a pool whose memory is allocated
	 * using dma_alloc_coherent(). We therefore do *not* map them
	 * for DMA (unlike what we do for pages and skbs).
	 *
	 * When a transaction completes, the SGL is normally unmapped.
	 * A command transaction has direction DMA_NONE, which tells
	 * gsi_trans_complete() to skip the unmapping step.
	 *
	 * The only things we use directly in a command scatter/gather
	 * entry are the DMA address and length. We still need the SG
	 * table flags to be maintained though, so assign a NULL page
	 * pointer for that purpose.
	 */
	sg = &trans->sgl[which];
	sg_assign_page(sg, NULL);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = size;

	trans->cmd_opcode[which] = opcode;
}
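
/* Illustrative caller sketch (cmd_pool and cmd are hypothetical names):
 * the payload is assumed to come from a dma_alloc_coherent()-backed pool,
 * such as one set up with gsi_trans_pool_init_dma(), so its DMA address
 * is already known and no mapping is needed here:
 *
 *	cmd = gsi_trans_pool_alloc_dma(&cmd_pool, &addr);
 *	... fill in *cmd ...
 *	gsi_trans_cmd_add(trans, cmd, sizeof(*cmd), addr, opcode);
 */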

/* Add a page transfer to a transaction. It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	if (WARN_ON(trans->tre_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used))
		return -EINVAL;

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used++;	/* Transaction now owns the (DMA mapped) page */

	return 0;
}

/* Add an SKB transfer to a transaction. No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used;
	int ret;

	if (WARN_ON(trans->tre_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used))
		return -EINVAL;

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used += used;	/* Transaction now owns the (DMA mapped) skb */

	return 0;
}

/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	/* Last TRE contains interrupt flags */
	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}

static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* ARM64 can write 16 bytes as a unit with a single instruction.
	 * Doing the assignment this way is an attempt to make that happen.
	 */
	*dest_tre = tre;
}

/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans:	Transaction to commit
 * @ring_db:	Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes. Moves the transaction to the
 * pending list. Finally, updates the channel ring pointer and optionally
 * rings the doorbell.
 */
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u8 *cmd_opcode;
	u32 avail;
	u32 i;

	WARN_ON(!trans->used);

	/* Consume the entries. If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * If this isn't a command channel we're doing a simple data
	 * transfer request, whose opcode is IPA_CMD_NONE.
	 */
	cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
	avail = ring->count - ring->index % ring->count;
	dest_tre = gsi_ring_virt(ring, ring->index);
	for_each_sg(trans->sgl, sg, trans->used, i) {
		bool last_tre = i == trans->used - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(ring, 0);
		if (cmd_opcode)
			opcode = *cmd_opcode++;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}
	ring->index += trans->used;

	if (channel->toward_ipa) {
		/* We record TX bytes when they are sent */
		trans->len = byte_count;
		trans->trans_count = channel->trans_count;
		trans->byte_count = channel->byte_count;
		channel->trans_count++;
		channel->byte_count += byte_count;
	}

	/* Associate the last TRE with the transaction */
	gsi_channel_trans_map(channel, ring->index - 1, trans);

	gsi_trans_move_pending(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what we're handing off to hardware for TX channels */
		if (channel->toward_ipa)
			gsi_channel_tx_queued(channel);
		gsi_channel_doorbell(channel);
	}
}

/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}

/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}

/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;
	bool cancelled;

	/* channel->gsi->mutex is held by caller */
	spin_lock_bh(&trans_info->spinlock);

	cancelled = !list_empty(&trans_info->pending);
	list_for_each_entry(trans, &trans_info->pending, links)
		trans->cancelled = true;

	list_splice_tail_init(&trans_info->pending, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);

	/* Schedule NAPI polling to complete the cancelled transactions */
	if (cancelled)
		napi_schedule(&channel->napi);
}

/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */

	dest_tre = gsi_ring_virt(ring, ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}

/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	/* The map array is used to determine what transaction is associated
	 * with a TRE that the hardware reports has completed. We need one
	 * map entry per TRE.
	 */
	trans_info = &channel->trans_info;
	trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map)
		return -ENOMEM;

	/* We can't use more TREs than there are available in the ring.
	 * This limits the number of transactions that can be outstanding.
	 * Worst case is one TRE per transaction (but we actually limit
	 * it to something a little less than that). We allocate resources
	 * for transactions (including transaction structures) based on
	 * this maximum number.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);

	/* Transactions are allocated one at a time. */
	ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
				  tre_max, 1);
	if (ret)
		goto err_kfree;

	/* A transaction uses a scatterlist array to represent the data
	 * transfers implemented by the transaction. Each scatterlist
	 * element is used to fill a single TRE when the transaction is
	 * committed. So we need as many scatterlist elements as the
	 * maximum number of TREs that can be outstanding.
	 *
	 * All TREs in a transaction must fit within the channel's TLV FIFO,
	 * so a single transaction can use at most tlv_count TREs.
	 */
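	/* For example (illustrative numbers): tre_max = 256 and
	 * tlv_count = 16 give a pool of 256 + 16 - 1 scatterlist
	 * entries, per the over-allocation scheme in
	 * gsi_trans_pool_init().
	 */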
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->tlv_count);
	if (ret)
		goto err_trans_pool_exit;

	/* Finally, the tre_avail field is what ultimately limits the number
	 * of outstanding transactions and their resources. A transaction
	 * allocation succeeds only if the TREs available are sufficient for
	 * what the transaction might need. Transaction resource pools are
	 * sized based on the maximum number of outstanding TREs, so there
	 * will always be resources available if there are TREs available.
	 */
	atomic_set(&trans_info->tre_avail, tre_max);

	spin_lock_init(&trans_info->spinlock);
	INIT_LIST_HEAD(&trans_info->alloc);
	INIT_LIST_HEAD(&trans_info->pending);
	INIT_LIST_HEAD(&trans_info->complete);
	INIT_LIST_HEAD(&trans_info->polled);

	return 0;

err_trans_pool_exit:
	gsi_trans_pool_exit(&trans_info->pool);
err_kfree:
	kfree(trans_info->map);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}

/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	gsi_trans_pool_exit(&trans_info->pool);
	kfree(trans_info->map);
}