// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2022 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"

/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)

/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
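/* Return the descriptor for the memory region with the given ID, or NULL
 * if the current configuration does not define that region.
 */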
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->id == mem_id)
			return mem;
	}

	return NULL;
}

/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t addr = ipa->zero_addr;

	if (!mem->size)
		return;

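	/* Add a DMA_SHARED_MEM command that writes the pre-zeroed buffer
	 * at addr over the whole region; the final argument requests a
	 * transfer toward IPA local memory (i.e. a write).
	 */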
	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}

/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa: IPA pointer
 *
 * Set up the shared memory regions in IPA local memory. This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory. If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * There is no need for a matching ipa_mem_teardown() function.
 *
 * Return: 0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	const struct ipa_reg *reg;
	const struct ipa_mem *mem;
	struct gsi_trans *trans;
	u32 offset;
	u16 size;
	u32 val;

	/* Get a transaction to define the header memory region and to zero
	 * the processing context and modem memory regions.
	 */
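	/* (One header-init command plus up to three zero commands: four) */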
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory. The AP header region, if
	 * present, is contiguous with and follows the modem header region,
	 * and they are initialized together.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
	offset = ipa->mem_offset + mem->offset;

	reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
	val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset);
	iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));

	return 0;
}

/* Is the given memory region ID valid for the current IPA version? */
static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	enum ipa_version version = ipa->version;

	switch (mem_id) {
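	/* These regions are accepted as valid for any IPA version */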
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_AP_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
	case IPA_MEM_UC_EVENT_RING:
	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_QUOTA_AP:
	case IPA_MEM_END_MARKER:	/* pseudo region */
		break;

	case IPA_MEM_STATS_TETHERING:
	case IPA_MEM_STATS_DROP:
		if (version < IPA_VERSION_4_0)
			return false;
		break;

	case IPA_MEM_STATS_V4_FILTER:
	case IPA_MEM_STATS_V6_FILTER:
	case IPA_MEM_STATS_V4_ROUTE:
	case IPA_MEM_STATS_V6_ROUTE:
		if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
			return false;
		break;

	case IPA_MEM_NAT_TABLE:
	case IPA_MEM_STATS_FILTER_ROUTE:
		if (version < IPA_VERSION_4_5)
			return false;
		break;

	default:
		return false;
	}

	return true;
}

/* Must the given memory region be present in the configuration? */
static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
		return true;

	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_TETHERING:
		return ipa->version >= IPA_VERSION_4_0;

	default:
		return false;	/* Anything else is optional */
	}
}

static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
	struct device *dev = &ipa->pdev->dev;
	enum ipa_mem_id mem_id = mem->id;
	u16 size_multiple;

	/* Make sure the memory region is valid for this version of IPA */
	if (!ipa_mem_id_valid(ipa, mem_id)) {
		dev_err(dev, "region id %u not valid\n", mem_id);
		return false;
	}

	if (!mem->size && !mem->canary_count) {
		dev_err(dev, "empty memory region %u\n", mem_id);
		return false;
	}

	/* The modem region size must be a multiple of 4 bytes; all other
	 * region sizes must be a multiple of 8.
	 */
	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
	if (mem->size % size_multiple)
		dev_err(dev, "region %u size not a multiple of %u bytes\n",
			mem_id, size_multiple);
	else if (mem->offset % 8)
		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
	else if (mem->offset < mem->canary_count * sizeof(__le32))
		dev_err(dev, "region %u offset too small for %hu canaries\n",
			mem_id, mem->canary_count);
	else if (mem_id == IPA_MEM_END_MARKER && mem->size)
		dev_err(dev, "non-zero end marker region size\n");
	else
		return true;

	return false;
}

/* Verify each defined memory region is valid. */
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
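	/* Bitmap of region IDs seen so far; used to catch duplicate
	 * definitions here and missing required regions below.
	 */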
	DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
	struct device *dev = &ipa->pdev->dev;
	enum ipa_mem_id mem_id;
	u32 i;

	if (mem_data->local_count > IPA_MEM_COUNT) {
		dev_err(dev, "too many memory regions (%u > %u)\n",
			mem_data->local_count, IPA_MEM_COUNT);
		return false;
	}

	for (i = 0; i < mem_data->local_count; i++) {
		const struct ipa_mem *mem = &mem_data->local[i];

		if (__test_and_set_bit(mem->id, regions)) {
			dev_err(dev, "duplicate memory region %u\n", mem->id);
			return false;
		}

		/* Defined regions have non-zero size and/or canary count */
		if (!ipa_mem_valid_one(ipa, mem))
			return false;
	}

	/* Now see if any required regions are not defined */
	for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
		if (ipa_mem_id_required(ipa, mem_id))
			dev_err(dev, "required memory region %u missing\n",
				mem_id);
	}

	return true;
}

/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 limit = ipa->mem_size;
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->offset + mem->size <= limit)
			continue;

		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
			mem->id, limit);

		return false;
	}

	return true;
}

/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa: IPA pointer
 *
 * Return: 0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_reg *reg;
	const struct ipa_mem *mem;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;
	u32 i;

	/* Check the advertised location and size of the shared memory area */
	reg = ipa_reg(ipa, SHARED_MEM_SIZE);
	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));

	/* The fields in the register are in 8 byte units */
	ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);

	/* Make sure the end is within the region's mapped space */
	mem_size = 8 * ipa_reg_decode(reg, MEM_SIZE, val);

	/* If the sizes don't match, issue a warning */
	if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	} else if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
			mem_size);
	}

	/* We know our memory size; make sure regions are all in range */
	if (!ipa_mem_size_valid(ipa))
		return -EINVAL;

	/* Prealloc DMA memory for zeroing regions */
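	/* IPA_MEM_MAX is presumably at least as large as any region that
	 * gets zeroed via this buffer (see ipa_mem_zero_region_add()).
	 */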
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* For each defined region, write "canary" values in the
	 * space prior to the region's base address if indicated.
	 */
	for (i = 0; i < ipa->mem_count; i++) {
		u16 canary_count = ipa->mem[i].canary_count;
		__le32 *canary;

		if (!canary_count)
			continue;

		/* Write canary values in the space before the region */
		canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
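		/* canary points at the region base; the loop below fills
		 * canary_count 32-bit slots working downward from there.
		 */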
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}

	/* Make sure filter and route table memory regions are valid */
	if (!ipa_table_valid(ipa))
		goto err_dma_free;

	/* Validate memory-related properties relevant to immediate commands */
	if (!ipa_cmd_data_valid(ipa))
		goto err_dma_free;

	/* Verify the microcontroller ring alignment (if defined) */
	mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
	if (mem && mem->offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}

/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;

	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
	ipa->zero_size = 0;
	ipa->zero_virt = NULL;
	ipa->zero_addr = 0;
}

/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa: IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem. These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them. A QMI message tells the
 * modem where to find regions of IPA local memory it needs to know about
 * (including these).
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	return 0;
}

/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa: IPA pointer
 * @addr: Physical address of the IPA region in IMEM
 * @size: Size (bytes) of the IPA region in IMEM
 *
 * IMEM is a block of shared memory separate from system DRAM, and
 * a portion of this memory is available for the IPA to use. The
 * modem accesses this memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If this region exists (size > 0) we map it for read/write access
 * through the IOMMU using the IPA device.
 *
 * Note: @addr and @size are not guaranteed to be page-aligned.
 */
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	int ret;

	if (!size)
		return 0;	/* IMEM memory not used */

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for IMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to page boundaries */
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
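	/* For example (assuming 4 KiB pages), addr 0x1003 and size 0x10
	 * yield phys 0x1000 and size 0x1000: the one page covering the
	 * original range.
	 */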
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->imem_iova = iova;
	ipa->imem_size = size;

	return 0;
}

static void ipa_imem_exit(struct ipa *ipa)
{
	struct iommu_domain *domain;
	struct device *dev;

	if (!ipa->imem_size)
		return;

	dev = &ipa->pdev->dev;
	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
		if (size != ipa->imem_size)
			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
				 size, ipa->imem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
	}

	ipa->imem_size = 0;
	ipa->imem_iova = 0;
}

/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa: IPA pointer
 * @item: Item ID of SMEM memory
 * @size: Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated. One item is designated for use by the IPA.
 *
 * The modem accesses SMEM memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If the size provided is non-zero, we allocate the SMEM item and map
 * it for read/write access through the IOMMU.
 *
 * Note: @size and the item address are not guaranteed to be page-aligned.
 */
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	phys_addr_t addr;
	size_t actual;
	void *virt;
	int ret;

	if (!size)
		return 0;	/* SMEM memory not used */

	/* SMEM is memory shared between the AP and another system entity
	 * (in this case, the modem). An allocation from SMEM is persistent
	 * until the AP reboots; there is no way to free an allocated SMEM
	 * region. Allocation only reserves the space; to use it you need
	 * to "get" a pointer to it (this does not imply reference counting).
	 * The item might have already been allocated, in which case we
	 * use it unless the size isn't what we expect.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
			ret, size, item);
		return ret;
	}

	/* Now get the address of the SMEM memory region */
	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
	if (IS_ERR(virt)) {
		ret = PTR_ERR(virt);
		dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
		return ret;
	}

	/* If the item already existed (ret is -EEXIST), verify its size */
	if (ret && actual != size) {
		dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
			item, actual, size);
		return -EINVAL;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for SMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to a page boundary */
	addr = qcom_smem_virt_to_phys(virt);
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->smem_iova = iova;
	ipa->smem_size = size;

	return 0;
}

static void ipa_smem_exit(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
		if (size != ipa->smem_size)
			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
				 size, ipa->smem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
	}

	ipa->smem_size = 0;
	ipa->smem_iova = 0;
}

/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	struct device *dev = &ipa->pdev->dev;
	struct resource *res;
	int ret;

	/* Make sure the set of defined memory regions is valid */
	if (!ipa_mem_valid(ipa, mem_data))
		return -EINVAL;

	ipa->mem_count = mem_data->local_count;
	ipa->mem = mem_data->local;

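	/* Allow streaming and coherent DMA (including the zero buffer
	 * allocated in ipa_mem_config()) to use full 64-bit addresses.
	 */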
	ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "error %d setting DMA mask\n", ret);
		return ret;
	}

	res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
					   "ipa-shared");
	if (!res) {
		dev_err(dev,
			"DT error getting \"ipa-shared\" memory property\n");
		return -ENODEV;
	}

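	/* The shared area behaves like RAM rather than device I/O, so it
	 * is mapped with memremap() rather than ioremap(); write-combined
	 * access is presumably sufficient for how it is used here.
	 */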
	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
	if (!ipa->mem_virt) {
		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
		return -ENOMEM;
	}

	ipa->mem_addr = res->start;
	ipa->mem_size = resource_size(res);

	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
	if (ret)
		goto err_unmap;

	ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
	if (ret)
		goto err_imem_exit;

	return 0;

err_imem_exit:
	ipa_imem_exit(ipa);
err_unmap:
	memunmap(ipa->mem_virt);

	return ret;
}

/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}