// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_power.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)

static u32 aggr_byte_limit_max(enum ipa_version version)
{
	if (version < IPA_VERSION_4_5)
		return field_max(aggr_byte_limit_fmask(true));

	return field_max(aggr_byte_limit_fmask(false));
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
	/* A hard aggregation limit will not be crossed; aggregation closes
	 * if saving incoming data would cross the hard byte limit boundary.
	 *
	 * With a soft limit, aggregation closes *after* the size boundary
	 * has been crossed.  In that case the limit must leave enough space
	 * after that limit to receive a full MTU of data plus overhead.
	 */
	if (!aggr_hard_limit)
		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	/* The byte limit is encoded as a number of kilobytes */

	return rx_buffer_size / SZ_1K;
}
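
/* Worked example (illustrative only): with a hard limit and an 8192
 * byte buffer, the encoded value is 8192 / SZ_1K = 8.  With a soft
 * limit, IPA_MTU + IPA_RX_BUFFER_OVERHEAD is subtracted first, so the
 * encoded limit is smaller, guaranteeing room for one more full MTU
 * of data (plus overhead) after the limit has been crossed.
 */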

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
					const struct ipa_gsi_endpoint_data *all_data,
					const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		const struct ipa_endpoint_rx *rx_config;
		u32 buffer_size;
		u32 aggr_size;
		u32 limit;

		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
					data->endpoint_id);
			return false;
		}

		/* Nothing more to check for non-AP RX */
		if (data->ee_id != GSI_EE_AP)
			return true;

		rx_config = &data->endpoint.config.rx;

		/* The buffer size must hold an MTU plus overhead */
		buffer_size = rx_config->buffer_size;
		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
		if (buffer_size < limit) {
			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
				data->endpoint_id, buffer_size, limit);
			return false;
		}

		if (!data->endpoint.config.aggregation) {
			bool result = true;

			/* No aggregation; check for bogus aggregation data */
			if (rx_config->aggr_time_limit) {
				dev_err(dev,
					"time limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_hard_limit) {
				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_close_eof) {
				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			return result;	/* Nothing more to check */
		}

		/* For an endpoint supporting receive aggregation, the byte
		 * limit defines the point at which aggregation closes.  This
		 * check ensures the receive buffer size doesn't result in a
		 * limit that exceeds what's representable in the aggregation
		 * byte limit field.
		 */
		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
					     rx_config->aggr_hard_limit);
		limit = aggr_byte_limit_max(ipa->version);
		if (aggr_size > limit) {
			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
				data->endpoint_id, aggr_size, limit);

			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
					other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
197 dev_err(dev, "DMA endpoint name %u undefined "
198 "for endpoint %u\n",
199 other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
					other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
					other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0, and
 * delay mode should not be used starting with IPA v4.2.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	if (endpoint->toward_ipa)
		WARN_ON(ipa->version >= IPA_VERSION_4_2);
	else
		WARN_ON(ipa->version >= IPA_VERSION_4_0);

	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	state = !!(val & mask);

	/* Don't bother if it's already in the requested state */
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* Delay mode should not be used for IPA v4.2+ */
	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
	WARN_ON(!endpoint->toward_ipa);

	(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	WARN_ON(!(mask & ipa->available));

	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	WARN_ON(!(mask & ipa->available));

	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->config.aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version >= IPA_VERSION_4_0)
		return enable;	/* For IPA v4.0+, no change made */

	WARN_ON(endpoint->toward_ipa);

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Put all modem RX endpoints into suspend mode, and stop transmission
 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 * control instead.
 */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		if (!endpoint->toward_ipa)
			(void)ipa_endpoint_program_suspend(endpoint, enable);
		else if (ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			gsi_modem_channel_flow_control(&ipa->gsi,
						       endpoint->channel_id,
						       enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint, plus the commands
	 * that clear the pipeline.
	 */
	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_pipeline_clear_add(trans);

	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	enum ipa_cs_offload_en enabled;
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->config.checksum) {
		enum ipa_version version = endpoint->ipa->version;

		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);

			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_UL
					: IPA_CS_OFFLOAD_INLINE;
		} else {
			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_DL
					: IPA_CS_OFFLOAD_INLINE;
		}
	} else {
		enabled = IPA_CS_OFFLOAD_NONE;
	}
	val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
	u32 offset;
	u32 val;

	if (!endpoint->toward_ipa)
		return;

	offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
	val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
	u32 header_size = sizeof(struct rmnet_map_header);

	/* Without checksum offload, we just have the MAP header */
	if (!endpoint->config.checksum)
		return header_size;

	if (version < IPA_VERSION_4_5) {
		/* Checksum header inserted for AP TX endpoints only */
		if (endpoint->toward_ipa)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
	} else {
		/* Checksum header is used in both directions */
		header_size += sizeof(struct rmnet_map_v5_csum_header);
	}

	return header_size;
}
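
/* Illustrative sizes (an assumption for illustration, based on the
 * usual 4 byte rmnet structures): the MAP header alone is 4 bytes,
 * and each checksum header type adds another 4 bytes, so a QMAP
 * endpoint using checksum offload has an 8 byte header wherever a
 * checksum header is inserted.
 */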

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->config.qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encoded(version, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 offset;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			offset = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encoded(version, offset);

			/* Where IPA will write the length */
			offset = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->config.rx.pad_align;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->config.qmap) {
		/* We have a header, so we must specify its endianness */
		val |= HDR_ENDIANNESS_FMASK;	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding.  The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
		}
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->config.qmap && !endpoint->toward_ipa) {
			u32 offset;

			offset = offsetof(struct rmnet_map_header, pkt_len);
			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
			val |= u32_encode_bits(offset,
					       HDR_OFST_PKT_SIZE_MSB_FMASK);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->config.qmap)
		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->config.dma_mode) {
		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));

	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
}

/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
{
	u32 gran_sel;
	u32 fmask;
	u32 val;

	if (version < IPA_VERSION_4_5) {
		/* We set aggregation granularity in ipa_hardware_config() */
		fmask = aggr_time_limit_fmask(true);
		val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
		WARN(val > field_max(fmask),
		     "aggr_time_limit too large (%u > %u usec)\n",
		     val, field_max(fmask) * IPA_AGGR_GRANULARITY);

		return u32_encode_bits(val, fmask);
	}

	/* IPA v4.5 expresses the time limit using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	fmask = aggr_time_limit_fmask(false);
	val = DIV_ROUND_CLOSEST(limit, 100);
	if (val > field_max(fmask)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = AGGR_GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(limit, 1000);
		WARN(val > field_max(fmask),
		     "aggr_time_limit too large (%u > %u usec)\n",
		     limit, field_max(fmask) * 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, fmask);
}
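
/* Worked example (illustrative only): on IPA v4.5, a 500 usec limit
 * encodes as 5 ticks of pulse generator 0 (100 usec granularity).  If
 * the resulting tick count were too large for the field, the limit
 * would instead be encoded in 1 msec ticks of pulse generator 1, with
 * the AGGR_GRAN_SEL bit set to select that generator.
 */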

static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
{
	u32 val = enabled ? 1 : 0;

	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));

	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	enum ipa_version version = endpoint->ipa->version;
	u32 val = 0;

	if (endpoint->config.aggregation) {
		if (!endpoint->toward_ipa) {
			const struct ipa_endpoint_rx *rx_config;
			u32 buffer_size;
			bool close_eof;
			u32 limit;

			rx_config = &endpoint->config.rx;
			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			buffer_size = rx_config->buffer_size;
			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
						 rx_config->aggr_hard_limit);
			val |= aggr_byte_limit_encoded(version, limit);

			limit = rx_config->aggr_time_limit;
			val |= aggr_time_limit_encoded(version, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			close_eof = rx_config->aggr_close_eof;
			val |= aggr_sw_eof_active_encoded(version, close_eof);
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Return the Qtime-based head-of-line blocking timer value that
 * represents the given number of microseconds.  The result
 * includes both the timer value and the selected timer granularity.
 */
static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
{
	u32 gran_sel;
	u32 val;

	/* IPA v4.5 expresses time limits using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val > field_max(TIME_LIMIT_FMASK)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(microseconds, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
}

/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value representing the given timeout period,
 * suitable for writing to the timer register.  For IPA v4.2 this
 * encodes a base and scale value, while for earlier versions the
 * value is a simple tick count.
 */
static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version >= IPA_VERSION_4_5)
		return hol_block_timer_qtime_val(ipa, microseconds);

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_core_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that the high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}
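
/* Worked example of the IPA v4.2 base/scale encoding (illustrative
 * only, assuming a 5 bit wide base field): for ticks = 1000 the
 * highest set bit is bit 9 (fls() == 10), so scale = 10 - 5 = 5;
 * after rounding, base = (1000 + 16) >> 5 = 31, which represents
 * 31 << 5 = 992 ticks.
 */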

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* This should only be changed when HOL_BLOCK_EN is disabled */
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
		iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{
	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
	ipa_endpoint_init_hol_block_en(endpoint, true);
}

static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
	ipa_endpoint_init_hol_block_en(endpoint, false);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_disable(endpoint);
		ipa_endpoint_init_hol_block_enable(endpoint, 0);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Low-order byte configures primary packet processing */
	val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);

	/* Second byte configures replicated packet processing */
	val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
			       SEQ_REP_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}
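
/* Illustrative example: an skb with a linear area plus three page
 * fragments needs 1 + 3 = 4 TRE entries.  If the endpoint's TLV FIFO
 * (trans_tre_max) held only 2 entries, the skb would be linearized
 * above so the whole payload fits in a single TRE.
 */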

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->config.status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->config.tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
				      struct gsi_trans *trans)
{
	struct page *page;
	u32 buffer_size;
	u32 offset;
	u32 len;
	int ret;

	buffer_size = endpoint->config.rx.buffer_size;
	page = dev_alloc_pages(get_order(buffer_size));
	if (!page)
		return -ENOMEM;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = buffer_size - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		put_page(page);
	else
		trans->data = page;	/* transaction owns page now */

	return ret;
}

/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Skip it if it's already active */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;

		/* Ring the doorbell if we've got a full batch */
		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
		gsi_trans_commit(trans, doorbell);
	}

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	return;

try_again_later:
	gsi_trans_free(trans);
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * If the hardware has no receive buffers queued, schedule work to
	 * try replenishing again.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
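
/* Usage note: with IPA_REPLENISH_BATCH == 16, the loop above rings the
 * channel doorbell on every 16th queued buffer (replenish_count 16,
 * 32, ...); buffers committed in between become visible to hardware
 * only when the next doorbell is rung.
 */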

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);

	/* Start replenishing if hardware currently has no buffers */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	if (!endpoint->netdev)
		return;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	ipa_modem_skb_rx(endpoint->netdev, skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));

	skb = build_skb(page_address(page), buffer_size);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u8_get_bits(status->endp_dst_idx,
				  IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
				    const struct ipa_status *status)
{
	struct ipa_endpoint *command_endpoint;
	struct ipa *ipa = endpoint->ipa;
	u32 endpoint_id;

	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
		return false;	/* No valid tag */

	/* The status contains a valid tag.  We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
	 */
	endpoint_id = u8_get_bits(status->endp_src_idx,
				  IPA_STATUS_SRC_IDX_FMASK);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(&ipa->pdev->dev,
			"unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}

	return true;
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 val;

	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag(endpoint, status))
		return true;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = buffer_size - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status element.  If the hardware is configured
		 * to pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->config.rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->config.checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, status)) {
			void *data2;
			u32 extra;
			u32 len2;

			/* Client receives only packet data (no status) */
			data2 = data + sizeof(*status);
			len2 = le16_to_cpu(status->pkt_len);

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}
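
/* Illustrative buffer layout: each packet in an aggregated receive
 * buffer is preceded by a status element (32 bytes, given the
 * structure definition above).  With no pad alignment and no checksum
 * offload, a packet with pkt_len 1400 therefore consumes
 * 32 + 1400 = 1432 bytes of the buffer.
 */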
1370
1371 /* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
ipa_endpoint_tx_complete(struct ipa_endpoint * endpoint,struct gsi_trans * trans)1372 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1373 struct gsi_trans *trans)
1374 {
1375 }
1376
1377 /* Complete transaction initiated in ipa_endpoint_replenish_one() */
ipa_endpoint_rx_complete(struct ipa_endpoint * endpoint,struct gsi_trans * trans)1378 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1379 struct gsi_trans *trans)
1380 {
1381 struct page *page;
1382
1383 if (trans->cancelled)
1384 goto done;
1385
1386 /* Parse or build a socket buffer using the actual received length */
1387 page = trans->data;
1388 if (endpoint->config.status_enable)
1389 ipa_endpoint_status_parse(endpoint, page, trans->len);
1390 else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1391 trans->data = NULL; /* Pages have been consumed */
1392 done:
1393 ipa_endpoint_replenish(endpoint);
1394 }
1395
ipa_endpoint_trans_complete(struct ipa_endpoint * endpoint,struct gsi_trans * trans)1396 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1397 struct gsi_trans *trans)
1398 {
1399 if (endpoint->toward_ipa)
1400 ipa_endpoint_tx_complete(endpoint, trans);
1401 else
1402 ipa_endpoint_rx_complete(endpoint, trans);
1403 }
1404
ipa_endpoint_trans_release(struct ipa_endpoint * endpoint,struct gsi_trans * trans)1405 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1406 struct gsi_trans *trans)
1407 {
1408 if (endpoint->toward_ipa) {
1409 struct ipa *ipa = endpoint->ipa;
1410
1411 /* Nothing to do for command transactions */
1412 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1413 struct sk_buff *skb = trans->data;
1414
1415 if (skb)
1416 dev_kfree_skb_any(skb);
1417 }
1418 } else {
1419 struct page *page = trans->data;
1420
1421 if (page)
1422 put_page(page);
1423 }
1424 }
1425
ipa_endpoint_default_route_set(struct ipa * ipa,u32 endpoint_id)1426 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1427 {
1428 u32 val;
1429
1430 /* ROUTE_DIS is 0 */
1431 val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1432 val |= ROUTE_DEF_HDR_TABLE_FMASK;
1433 val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1434 val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1435 val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1436
1437 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1438 }
1439
ipa_endpoint_default_route_clear(struct ipa * ipa)1440 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1441 {
1442 ipa_endpoint_default_route_set(ipa, 0);
1443 }
1444
1445 /**
1446 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1447 * @endpoint: Endpoint to be reset
1448 *
1449 * If aggregation is active on an RX endpoint when a reset is performed
1450 * on its underlying GSI channel, a special sequence of actions must be
1451 * taken to ensure the IPA pipeline is properly cleared.
1452 *
1453 * Return: 0 if successful, or a negative error code
1454 */
ipa_endpoint_reset_rx_aggr(struct ipa_endpoint * endpoint)1455 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1456 {
1457 struct device *dev = &endpoint->ipa->pdev->dev;
1458 struct ipa *ipa = endpoint->ipa;
1459 struct gsi *gsi = &ipa->gsi;
1460 bool suspended = false;
1461 dma_addr_t addr;
1462 u32 retries;
1463 u32 len = 1;
1464 void *virt;
1465 int ret;
1466
1467 virt = kzalloc(len, GFP_KERNEL);
1468 if (!virt)
1469 return -ENOMEM;
1470
1471 addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1472 if (dma_mapping_error(dev, addr)) {
1473 ret = -ENOMEM;
1474 goto out_kfree;
1475 }
1476
1477 /* Force close aggregation before issuing the reset */
1478 ipa_endpoint_force_close(endpoint);
1479
1480 /* Reset and reconfigure the channel with the doorbell engine
1481 * disabled. Then poll until we know aggregation is no longer
1482 * active. We'll re-enable the doorbell (if appropriate) when
1483 * we reset again below.
1484 */
1485 gsi_channel_reset(gsi, endpoint->channel_id, false);
1486
1487 /* Make sure the channel isn't suspended */
1488 suspended = ipa_endpoint_program_suspend(endpoint, false);
1489
1490 /* Start channel and do a 1 byte read */
1491 ret = gsi_channel_start(gsi, endpoint->channel_id);
1492 if (ret)
1493 goto out_suspend_again;
1494
1495 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1496 if (ret)
1497 goto err_endpoint_stop;
1498
1499 /* Wait for aggregation to be closed on the channel */
1500 retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1501 do {
1502 if (!ipa_endpoint_aggr_active(endpoint))
1503 break;
1504 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1505 } while (retries--);
1506
1507 /* Check one last time */
1508 if (ipa_endpoint_aggr_active(endpoint))
1509 dev_err(dev, "endpoint %u still active during reset\n",
1510 endpoint->endpoint_id);
1511
1512 gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1513
1514 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1515 if (ret)
1516 goto out_suspend_again;
1517
1518 /* Finally, reset and reconfigure the channel again (re-enabling
1519 * the doorbell engine if appropriate). Sleep for 1 millisecond to
1520 * complete the channel reset sequence. Finish by suspending the
1521 * channel again (if necessary).
1522 */
1523 gsi_channel_reset(gsi, endpoint->channel_id, true);
1524
1525 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1526
1527 goto out_suspend_again;
1528
1529 err_endpoint_stop:
1530 (void)gsi_channel_stop(gsi, endpoint->channel_id);
1531 out_suspend_again:
1532 if (suspended)
1533 (void)ipa_endpoint_program_suspend(endpoint, true);
1534 dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1535 out_kfree:
1536 kfree(virt);
1537
1538 return ret;
1539 }
1540
ipa_endpoint_reset(struct ipa_endpoint * endpoint)1541 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1542 {
1543 u32 channel_id = endpoint->channel_id;
1544 struct ipa *ipa = endpoint->ipa;
1545 bool special;
1546 int ret = 0;
1547
1548 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1549 * is active, we need to handle things specially to recover.
1550 * All other cases just need to reset the underlying GSI channel.
1551 */
1552 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1553 endpoint->config.aggregation;
1554 if (special && ipa_endpoint_aggr_active(endpoint))
1555 ret = ipa_endpoint_reset_rx_aggr(endpoint);
1556 else
1557 gsi_channel_reset(&ipa->gsi, channel_id, true);
1558
1559 if (ret)
1560 dev_err(&ipa->pdev->dev,
1561 "error %d resetting channel %u for endpoint %u\n",
1562 ret, endpoint->channel_id, endpoint->endpoint_id);
1563 }
1564
ipa_endpoint_program(struct ipa_endpoint * endpoint)1565 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1566 {
1567 if (endpoint->toward_ipa) {
1568 /* Newer versions of IPA use GSI channel flow control
1569 * instead of endpoint DELAY mode to prevent sending data.
1570 * Flow control is disabled for newly-allocated channels,
1571 * and we can assume flow control is not (ever) enabled
1572 * for AP TX channels.
1573 */
1574 if (endpoint->ipa->version < IPA_VERSION_4_2)
1575 ipa_endpoint_program_delay(endpoint, false);
1576 } else {
1577 /* Ensure suspend mode is off on all AP RX endpoints */
1578 (void)ipa_endpoint_program_suspend(endpoint, false);
1579 }
1580 ipa_endpoint_init_cfg(endpoint);
1581 ipa_endpoint_init_nat(endpoint);
1582 ipa_endpoint_init_hdr(endpoint);
1583 ipa_endpoint_init_hdr_ext(endpoint);
1584 ipa_endpoint_init_hdr_metadata_mask(endpoint);
1585 ipa_endpoint_init_mode(endpoint);
1586 ipa_endpoint_init_aggr(endpoint);
1587 if (!endpoint->toward_ipa) {
1588 if (endpoint->config.rx.holb_drop)
1589 ipa_endpoint_init_hol_block_enable(endpoint, 0);
1590 else
1591 ipa_endpoint_init_hol_block_disable(endpoint);
1592 }
1593 ipa_endpoint_init_deaggr(endpoint);
1594 ipa_endpoint_init_rsrc_grp(endpoint);
1595 ipa_endpoint_init_seq(endpoint);
1596 ipa_endpoint_status(endpoint);
1597 }
1598
ipa_endpoint_enable_one(struct ipa_endpoint * endpoint)1599 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1600 {
1601 struct ipa *ipa = endpoint->ipa;
1602 struct gsi *gsi = &ipa->gsi;
1603 int ret;
1604
1605 ret = gsi_channel_start(gsi, endpoint->channel_id);
1606 if (ret) {
1607 dev_err(&ipa->pdev->dev,
1608 "error %d starting %cX channel %u for endpoint %u\n",
1609 ret, endpoint->toward_ipa ? 'T' : 'R',
1610 endpoint->channel_id, endpoint->endpoint_id);
1611 return ret;
1612 }
1613
1614 if (!endpoint->toward_ipa) {
1615 ipa_interrupt_suspend_enable(ipa->interrupt,
1616 endpoint->endpoint_id);
1617 ipa_endpoint_replenish_enable(endpoint);
1618 }
1619
1620 ipa->enabled |= BIT(endpoint->endpoint_id);
1621
1622 return 0;
1623 }
1624
ipa_endpoint_disable_one(struct ipa_endpoint * endpoint)1625 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1626 {
1627 u32 mask = BIT(endpoint->endpoint_id);
1628 struct ipa *ipa = endpoint->ipa;
1629 struct gsi *gsi = &ipa->gsi;
1630 int ret;
1631
1632 if (!(ipa->enabled & mask))
1633 return;
1634
1635 ipa->enabled ^= mask;
1636
1637 if (!endpoint->toward_ipa) {
1638 ipa_endpoint_replenish_disable(endpoint);
1639 ipa_interrupt_suspend_disable(ipa->interrupt,
1640 endpoint->endpoint_id);
1641 }
1642
1643 /* Note that if stop fails, the channel's state is not well-defined */
1644 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1645 if (ret)
1646 dev_err(&ipa->pdev->dev,
1647 "error %d attempting to stop endpoint %u\n", ret,
1648 endpoint->endpoint_id);
1649 }
1650
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

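/* Resume a suspended endpoint, reversing ipa_endpoint_suspend_one():
 * clear SUSPEND mode, resume the GSI channel, and (on success, for RX
 * endpoints) restart buffer replenishing.
 */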
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

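/* Suspend all set-up AP endpoints.  The modem network device is
 * quiesced first, then the AP LAN RX and command TX endpoints.
 */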
void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

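/* Resume all set-up AP endpoints, in the reverse of the order used by
 * ipa_endpoint_suspend().
 */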
void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

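/* Set up a single AP endpoint: record how many TREs a transaction may
 * use, reset RX replenish state, and program the endpoint's registers.
 */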
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

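/* The setup, teardown and exit functions below all walk a bitmask of
 * endpoint IDs, clearing one bit per iteration.  Setup uses __ffs() to
 * visit endpoints in increasing ID order; teardown and exit use __fls()
 * to unwind in decreasing order.  For example, given the (hypothetical)
 * mask 0x1400, with bits 10 and 12 set, setup would visit endpoint 10
 * then 12, while teardown would visit 12 then 10.
 */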
void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}

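/* Validate the initialized endpoints against what the hardware offers.
 * On IPA v3.5+ the FLAVOR_0 register reports the number of TX
 * (consumer) and RX (producer) pipes, and the lowest RX pipe number.
 * As a hypothetical example, if the register reported 8 consumer pipes
 * and 8 producer pipes starting at pipe 8, then tx_mask would be
 * GENMASK(7, 0), rx_mask would be GENMASK(15, 8), and endpoints 0-15
 * would be available.
 */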
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = ~0;
		return 0;
	}

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

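/* Record one endpoint's configuration data.  AP channels are entered
 * into the channel_id->endpoint map, every endpoint is entered into
 * the name->endpoint map, and its bit is set in the initialized mask.
 */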
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->config = data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
			ipa->modem_tx_count++;
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}

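/* Sketch of the expected call sequence (assumed from how the rest of
 * the driver typically uses these entry points; that code is not shown
 * here): ipa_endpoint_init() runs at probe time and returns the filter
 * map; ipa_endpoint_config() runs during the config stage; and
 * ipa_endpoint_setup() runs during the setup stage.  Each stage is
 * unwound in reverse by ipa_endpoint_teardown(), ipa_endpoint_deconfig()
 * and ipa_endpoint_exit().
 */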