1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2019-2022 Linaro Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/slab.h>
10 #include <linux/bitfield.h>
11 #include <linux/if_rmnet.h>
12 #include <linux/dma-direction.h>
13 
14 #include "gsi.h"
15 #include "gsi_trans.h"
16 #include "ipa.h"
17 #include "ipa_data.h"
18 #include "ipa_endpoint.h"
19 #include "ipa_cmd.h"
20 #include "ipa_mem.h"
21 #include "ipa_modem.h"
22 #include "ipa_table.h"
23 #include "ipa_gsi.h"
24 #include "ipa_power.h"
25 
26 /* Hardware is told about receive buffers once a "batch" has been queued */
27 #define IPA_REPLENISH_BATCH	16		/* Must be non-zero */
28 
29 /* The amount of RX buffer space consumed by standard skb overhead */
30 #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
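/* (That overhead is NET_SKB_PAD plus the aligned size of struct
 * skb_shared_info, given how SKB_MAX_ORDER() is defined.)
 */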
31 
32 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
33 #define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
34 
35 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
36 
37 /** enum ipa_status_opcode - status element opcode hardware values */
38 enum ipa_status_opcode {
39 	IPA_STATUS_OPCODE_PACKET		= 0x01,
40 	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
41 	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
42 	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
43 };
44 
45 /** enum ipa_status_exception - status element exception type */
46 enum ipa_status_exception {
47 	/* 0 means no exception */
48 	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
49 };
50 
51 /* Status element provided by hardware */
52 struct ipa_status {
53 	u8 opcode;		/* enum ipa_status_opcode */
54 	u8 exception;		/* enum ipa_status_exception */
55 	__le16 mask;
56 	__le16 pkt_len;
57 	u8 endp_src_idx;
58 	u8 endp_dst_idx;
59 	__le32 metadata;
60 	__le32 flags1;
61 	__le64 flags2;
62 	__le32 flags3;
63 	__le32 flags4;
64 };
65 
66 /* Field masks for struct ipa_status structure fields */
67 #define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
68 #define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
69 #define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
70 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
71 #define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
72 
73 /* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
75 {
76 	/* A hard aggregation limit will not be crossed; aggregation closes
77 	 * if saving incoming data would cross the hard byte limit boundary.
78 	 *
79 	 * With a soft limit, aggregation closes *after* the size boundary
80 	 * has been crossed.  In that case the limit must leave enough space
81 	 * after that limit to receive a full MTU of data plus overhead.
82 	 */
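	/* Illustrative example (assumed values): with IPA_MTU of 1500 bytes
	 * and roughly 400 bytes of IPA_RX_BUFFER_OVERHEAD, an 8192-byte
	 * buffer under a soft limit is reduced to about 6292 bytes, which
	 * encodes as 6 (KB) below.
	 */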
83 	if (!aggr_hard_limit)
84 		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
85 
86 	/* The byte limit is encoded as a number of kilobytes */
87 
88 	return rx_buffer_size / SZ_1K;
89 }
90 
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
94 {
95 	const struct ipa_gsi_endpoint_data *other_data;
96 	struct device *dev = &ipa->pdev->dev;
97 	enum ipa_endpoint_name other_name;
98 
99 	if (ipa_gsi_endpoint_data_empty(data))
100 		return true;
101 
102 	if (!data->toward_ipa) {
103 		const struct ipa_endpoint_rx *rx_config;
104 		const struct ipa_reg *reg;
105 		u32 buffer_size;
106 		u32 aggr_size;
107 		u32 limit;
108 
109 		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for RX endpoint %u\n",
				data->endpoint_id);
113 			return false;
114 		}
115 
116 		/* Nothing more to check for non-AP RX */
117 		if (data->ee_id != GSI_EE_AP)
118 			return true;
119 
120 		rx_config = &data->endpoint.config.rx;
121 
122 		/* The buffer size must hold an MTU plus overhead */
123 		buffer_size = rx_config->buffer_size;
124 		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
125 		if (buffer_size < limit) {
126 			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
127 				data->endpoint_id, buffer_size, limit);
128 			return false;
129 		}
130 
131 		if (!data->endpoint.config.aggregation) {
132 			bool result = true;
133 
134 			/* No aggregation; check for bogus aggregation data */
135 			if (rx_config->aggr_time_limit) {
136 				dev_err(dev,
137 					"time limit with no aggregation for RX endpoint %u\n",
138 					data->endpoint_id);
139 				result = false;
140 			}
141 
142 			if (rx_config->aggr_hard_limit) {
143 				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
144 					data->endpoint_id);
145 				result = false;
146 			}
147 
148 			if (rx_config->aggr_close_eof) {
149 				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
150 					data->endpoint_id);
151 				result = false;
152 			}
153 
154 			return result;	/* Nothing more to check */
155 		}
156 
157 		/* For an endpoint supporting receive aggregation, the byte
158 		 * limit defines the point at which aggregation closes.  This
159 		 * check ensures the receive buffer size doesn't result in a
160 		 * limit that exceeds what's representable in the aggregation
161 		 * byte limit field.
162 		 */
163 		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
164 					     rx_config->aggr_hard_limit);
165 		reg = ipa_reg(ipa, ENDP_INIT_AGGR);
166 
167 		limit = ipa_reg_field_max(reg, BYTE_LIMIT);
168 		if (aggr_size > limit) {
169 			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
170 				data->endpoint_id, aggr_size, limit);
171 
172 			return false;
173 		}
174 
175 		return true;	/* Nothing more to check for RX */
176 	}
177 
178 	/* Starting with IPA v4.5 sequencer replication is obsolete */
179 	if (ipa->version >= IPA_VERSION_4_5) {
180 		if (data->endpoint.config.tx.seq_rep_type) {
			dev_err(dev, "non-zero seq_rep_type for TX endpoint %u\n",
182 				data->endpoint_id);
183 			return false;
184 		}
185 	}
186 
187 	if (data->endpoint.config.status_enable) {
188 		other_name = data->endpoint.config.tx.status_endpoint;
189 		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
193 			return false;
194 		}
195 
196 		/* Status endpoint must be defined... */
197 		other_data = &all_data[other_name];
198 		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
202 			return false;
203 		}
204 
205 		/* ...and has to be an RX endpoint... */
206 		if (other_data->toward_ipa) {
207 			dev_err(dev,
208 				"status endpoint for endpoint %u not RX\n",
209 				data->endpoint_id);
210 			return false;
211 		}
212 
213 		/* ...and if it's to be an AP endpoint... */
214 		if (other_data->ee_id == GSI_EE_AP) {
215 			/* ...make sure it has status enabled. */
216 			if (!other_data->endpoint.config.status_enable) {
217 				dev_err(dev,
218 					"status not enabled for endpoint %u\n",
219 					other_data->endpoint_id);
220 				return false;
221 			}
222 		}
223 	}
224 
225 	if (data->endpoint.config.dma_mode) {
226 		other_name = data->endpoint.config.dma_endpoint;
227 		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
231 			return false;
232 		}
233 
234 		other_data = &all_data[other_name];
235 		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
239 			return false;
240 		}
241 	}
242 
243 	return true;
244 }
245 
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
248 {
249 	const struct ipa_gsi_endpoint_data *dp = data;
250 	struct device *dev = &ipa->pdev->dev;
251 	enum ipa_endpoint_name name;
252 
253 	if (count > IPA_ENDPOINT_COUNT) {
254 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
255 			count, IPA_ENDPOINT_COUNT);
256 		return false;
257 	}
258 
259 	/* Make sure needed endpoints have defined data */
260 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
261 		dev_err(dev, "command TX endpoint not defined\n");
262 		return false;
263 	}
264 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
265 		dev_err(dev, "LAN RX endpoint not defined\n");
266 		return false;
267 	}
268 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
269 		dev_err(dev, "AP->modem TX endpoint not defined\n");
270 		return false;
271 	}
272 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
273 		dev_err(dev, "AP<-modem RX endpoint not defined\n");
274 		return false;
275 	}
276 
277 	for (name = 0; name < count; name++, dp++)
278 		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
279 			return false;
280 
281 	return true;
282 }
283 
284 /* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
287 {
288 	struct gsi *gsi = &endpoint->ipa->gsi;
289 	u32 channel_id = endpoint->channel_id;
290 	enum dma_data_direction direction;
291 
292 	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
293 
294 	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
295 }
296 
297 /* suspend_delay represents suspend for RX, delay for TX endpoints.
298  * Note that suspend is not supported starting with IPA v4.0, and
299  * delay mode should not be used starting with IPA v4.2.
300  */
301 static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
303 {
304 	struct ipa *ipa = endpoint->ipa;
305 	const struct ipa_reg *reg;
306 	u32 field_id;
307 	u32 offset;
308 	bool state;
309 	u32 mask;
310 	u32 val;
311 
312 	if (endpoint->toward_ipa)
313 		WARN_ON(ipa->version >= IPA_VERSION_4_2);
314 	else
315 		WARN_ON(ipa->version >= IPA_VERSION_4_0);
316 
317 	reg = ipa_reg(ipa, ENDP_INIT_CTRL);
318 	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
319 	val = ioread32(ipa->reg_virt + offset);
320 
321 	field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
322 	mask = ipa_reg_bit(reg, field_id);
323 
324 	state = !!(val & mask);
325 
326 	/* Don't bother if it's already in the requested state */
327 	if (suspend_delay != state) {
328 		val ^= mask;
329 		iowrite32(val, ipa->reg_virt + offset);
330 	}
331 
332 	return state;
333 }
334 
335 /* We don't care what the previous state was for delay mode */
336 static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
338 {
339 	/* Delay mode should not be used for IPA v4.2+ */
340 	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
341 	WARN_ON(!endpoint->toward_ipa);
342 
343 	(void)ipa_endpoint_init_ctrl(endpoint, enable);
344 }
345 
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
347 {
348 	u32 mask = BIT(endpoint->endpoint_id);
349 	struct ipa *ipa = endpoint->ipa;
350 	const struct ipa_reg *reg;
351 	u32 val;
352 
353 	WARN_ON(!(mask & ipa->available));
354 
355 	reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
356 	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
357 
358 	return !!(val & mask);
359 }
360 
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
362 {
363 	u32 mask = BIT(endpoint->endpoint_id);
364 	struct ipa *ipa = endpoint->ipa;
365 	const struct ipa_reg *reg;
366 
367 	WARN_ON(!(mask & ipa->available));
368 
369 	reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
370 	iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
371 }
372 
373 /**
374  * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
375  * @endpoint:	Endpoint on which to emulate a suspend
376  *
377  *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
378  *  with an open aggregation frame.  This is to work around a hardware
379  *  issue in IPA version 3.5.1 where the suspend interrupt will not be
380  *  generated when it should be.
381  */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
383 {
384 	struct ipa *ipa = endpoint->ipa;
385 
386 	if (!endpoint->config.aggregation)
387 		return;
388 
389 	/* Nothing to do if the endpoint doesn't have aggregation open */
390 	if (!ipa_endpoint_aggr_active(endpoint))
391 		return;
392 
393 	/* Force close aggregation */
394 	ipa_endpoint_force_close(endpoint);
395 
396 	ipa_interrupt_simulate_suspend(ipa->interrupt);
397 }
398 
399 /* Returns previous suspend state (true means suspend was enabled) */
400 static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
402 {
403 	bool suspended;
404 
405 	if (endpoint->ipa->version >= IPA_VERSION_4_0)
406 		return enable;	/* For IPA v4.0+, no change made */
407 
408 	WARN_ON(endpoint->toward_ipa);
409 
410 	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
411 
412 	/* A client suspended with an open aggregation frame will not
413 	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
414 	 * ipa_endpoint_suspend_aggr() handle this.
415 	 */
416 	if (enable && !suspended)
417 		ipa_endpoint_suspend_aggr(endpoint);
418 
419 	return suspended;
420 }
421 
422 /* Put all modem RX endpoints into suspend mode, and stop transmission
423  * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
424  * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
425  * control instead.
426  */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
428 {
429 	u32 endpoint_id;
430 
431 	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
432 		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
433 
434 		if (endpoint->ee_id != GSI_EE_MODEM)
435 			continue;
436 
437 		if (!endpoint->toward_ipa)
438 			(void)ipa_endpoint_program_suspend(endpoint, enable);
439 		else if (ipa->version < IPA_VERSION_4_2)
440 			ipa_endpoint_program_delay(endpoint, enable);
441 		else
442 			gsi_modem_channel_flow_control(&ipa->gsi,
443 						       endpoint->channel_id,
444 						       enable);
445 	}
446 }
447 
448 /* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
450 {
451 	u32 initialized = ipa->initialized;
452 	struct gsi_trans *trans;
453 	u32 count;
454 
455 	/* We need one command per modem TX endpoint, plus the commands
456 	 * that clear the pipeline.
457 	 */
458 	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
459 	trans = ipa_cmd_trans_alloc(ipa, count);
460 	if (!trans) {
461 		dev_err(&ipa->pdev->dev,
462 			"no transaction to reset modem exception endpoints\n");
463 		return -EBUSY;
464 	}
465 
466 	while (initialized) {
467 		u32 endpoint_id = __ffs(initialized);
468 		struct ipa_endpoint *endpoint;
469 		const struct ipa_reg *reg;
470 		u32 offset;
471 
472 		initialized ^= BIT(endpoint_id);
473 
474 		/* We only reset modem TX endpoints */
475 		endpoint = &ipa->endpoint[endpoint_id];
476 		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
477 			continue;
478 
479 		reg = ipa_reg(ipa, ENDP_STATUS);
480 		offset = ipa_reg_n_offset(reg, endpoint_id);
481 
482 		/* Value written is 0, and all bits are updated.  That
483 		 * means status is disabled on the endpoint, and as a
484 		 * result all other fields in the register are ignored.
485 		 */
486 		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
487 	}
488 
489 	ipa_cmd_pipeline_clear_add(trans);
490 
491 	gsi_trans_commit_wait(trans);
492 
493 	ipa_cmd_pipeline_clear_wait(ipa);
494 
495 	return 0;
496 }
497 
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
499 {
500 	u32 endpoint_id = endpoint->endpoint_id;
501 	struct ipa *ipa = endpoint->ipa;
502 	enum ipa_cs_offload_en enabled;
503 	const struct ipa_reg *reg;
504 	u32 val = 0;
505 
506 	reg = ipa_reg(ipa, ENDP_INIT_CFG);
507 	/* FRAG_OFFLOAD_EN is 0 */
508 	if (endpoint->config.checksum) {
509 		enum ipa_version version = ipa->version;
510 
511 		if (endpoint->toward_ipa) {
512 			u32 off;
513 
514 			/* Checksum header offset is in 4-byte units */
515 			off = sizeof(struct rmnet_map_header) / sizeof(u32);
516 			val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
517 
518 			enabled = version < IPA_VERSION_4_5
519 					? IPA_CS_OFFLOAD_UL
520 					: IPA_CS_OFFLOAD_INLINE;
521 		} else {
522 			enabled = version < IPA_VERSION_4_5
523 					? IPA_CS_OFFLOAD_DL
524 					: IPA_CS_OFFLOAD_INLINE;
525 		}
526 	} else {
527 		enabled = IPA_CS_OFFLOAD_NONE;
528 	}
529 	val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
530 	/* CS_GEN_QMB_MASTER_SEL is 0 */
531 
532 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
533 }
534 
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
536 {
537 	u32 endpoint_id = endpoint->endpoint_id;
538 	struct ipa *ipa = endpoint->ipa;
539 	const struct ipa_reg *reg;
540 	u32 val;
541 
542 	if (!endpoint->toward_ipa)
543 		return;
544 
545 	reg = ipa_reg(ipa, ENDP_INIT_NAT);
546 	val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
547 
548 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
549 }
550 
551 static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
553 {
554 	u32 header_size = sizeof(struct rmnet_map_header);
555 
556 	/* Without checksum offload, we just have the MAP header */
557 	if (!endpoint->config.checksum)
558 		return header_size;
559 
560 	if (version < IPA_VERSION_4_5) {
561 		/* Checksum header inserted for AP TX endpoints only */
562 		if (endpoint->toward_ipa)
563 			header_size += sizeof(struct rmnet_map_ul_csum_header);
564 	} else {
565 		/* Checksum header is used in both directions */
566 		header_size += sizeof(struct rmnet_map_v5_csum_header);
567 	}
568 
569 	return header_size;
570 }
571 
572 /* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
static u32 ipa_header_size_encode(enum ipa_version version,
				  const struct ipa_reg *reg, u32 header_size)
575 {
576 	u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
577 	u32 val;
578 
579 	/* We know field_max can be used as a mask (2^n - 1) */
580 	val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
581 	if (version < IPA_VERSION_4_5) {
582 		WARN_ON(header_size > field_max);
583 		return val;
584 	}
585 
586 	/* IPA v4.5 adds a few more most-significant bits */
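	/* For example, with a hypothetical 6-bit HDR_LEN field a 70-byte
	 * header encodes as HDR_LEN = 70 & 0x3f = 6 plus HDR_LEN_MSB =
	 * 70 >> 6 = 1.
	 */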
587 	header_size >>= hweight32(field_max);
588 	WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
589 	val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
590 
591 	return val;
592 }
593 
594 /* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
				      const struct ipa_reg *reg, u32 offset)
597 {
598 	u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
599 	u32 val;
600 
601 	/* We know field_max can be used as a mask (2^n - 1) */
602 	val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
603 	if (version < IPA_VERSION_4_5) {
604 		WARN_ON(offset > field_max);
605 		return val;
606 	}
607 
608 	/* IPA v4.5 adds a few more most-significant bits */
609 	offset >>= hweight32(field_max);
610 	WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
611 	val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
612 
613 	return val;
614 }
615 
616 /**
617  * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
618  * @endpoint:	Endpoint pointer
619  *
620  * We program QMAP endpoints so each packet received is preceded by a QMAP
621  * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
622  * packet size field, and we have the IPA hardware populate both for each
623  * received packet.  The header is configured (in the HDR_EXT register)
624  * to use big endian format.
625  *
626  * The packet size is written into the QMAP header's pkt_len field.  That
627  * location is defined here using the HDR_OFST_PKT_SIZE field.
628  *
629  * The mux_id comes from a 4-byte metadata value supplied with each packet
630  * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
631  * value that we want, in its low-order byte.  A bitmask defined in the
632  * endpoint's METADATA_MASK register defines which byte within the modem
633  * metadata contains the mux_id.  And the OFST_METADATA field programmed
634  * here indicates where the extracted byte should be placed within the QMAP
635  * header.
636  */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
638 {
639 	u32 endpoint_id = endpoint->endpoint_id;
640 	struct ipa *ipa = endpoint->ipa;
641 	const struct ipa_reg *reg;
642 	u32 val = 0;
643 
644 	reg = ipa_reg(ipa, ENDP_INIT_HDR);
645 	if (endpoint->config.qmap) {
646 		enum ipa_version version = ipa->version;
647 		size_t header_size;
648 
649 		header_size = ipa_qmap_header_size(version, endpoint);
650 		val = ipa_header_size_encode(version, reg, header_size);
651 
652 		/* Define how to fill fields in a received QMAP header */
653 		if (!endpoint->toward_ipa) {
654 			u32 off;     /* Field offset within header */
655 
656 			/* Where IPA will write the metadata value */
657 			off = offsetof(struct rmnet_map_header, mux_id);
658 			val |= ipa_metadata_offset_encode(version, reg, off);
659 
660 			/* Where IPA will write the length */
661 			off = offsetof(struct rmnet_map_header, pkt_len);
662 			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
663 			if (version >= IPA_VERSION_4_5)
664 				off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
665 
666 			val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
667 			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
668 		}
669 		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
670 		val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
671 
672 		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
673 		/* HDR_A5_MUX is 0 */
674 		/* HDR_LEN_INC_DEAGG_HDR is 0 */
675 		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
676 	}
677 
678 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
679 }
680 
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
682 {
683 	u32 pad_align = endpoint->config.rx.pad_align;
684 	u32 endpoint_id = endpoint->endpoint_id;
685 	struct ipa *ipa = endpoint->ipa;
686 	const struct ipa_reg *reg;
687 	u32 val = 0;
688 
689 	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
690 	if (endpoint->config.qmap) {
691 		/* We have a header, so we must specify its endianness */
692 		val |= ipa_reg_bit(reg, HDR_ENDIANNESS);	/* big endian */
693 
694 		/* A QMAP header contains a 6 bit pad field at offset 0.
695 		 * The RMNet driver assumes this field is meaningful in
696 		 * packets it receives, and assumes the header's payload
697 		 * length includes that padding.  The RMNet driver does
698 		 * *not* pad packets it sends, however, so the pad field
699 		 * (although 0) should be ignored.
700 		 */
701 		if (!endpoint->toward_ipa) {
702 			val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
703 			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
704 			val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
705 			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
706 		}
707 	}
708 
709 	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
710 	if (!endpoint->toward_ipa)
711 		val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
712 
713 	/* IPA v4.5 adds some most-significant bits to a few fields,
714 	 * two of which are defined in the HDR (not HDR_EXT) register.
715 	 */
716 	if (ipa->version >= IPA_VERSION_4_5) {
717 		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
718 		if (endpoint->config.qmap && !endpoint->toward_ipa) {
719 			u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
720 			u32 off;     /* Field offset within header */
721 
722 			off = offsetof(struct rmnet_map_header, pkt_len);
723 			/* Low bits are in the ENDP_INIT_HDR register */
724 			off >>= hweight32(mask);
725 			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
726 			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
727 		}
728 	}
729 
730 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
731 }
732 
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
734 {
735 	u32 endpoint_id = endpoint->endpoint_id;
736 	struct ipa *ipa = endpoint->ipa;
737 	const struct ipa_reg *reg;
738 	u32 val = 0;
739 	u32 offset;
740 
741 	if (endpoint->toward_ipa)
742 		return;		/* Register not valid for TX endpoints */
743 
	reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
745 	offset = ipa_reg_n_offset(reg, endpoint_id);
746 
747 	/* Note that HDR_ENDIANNESS indicates big endian header fields */
748 	if (endpoint->config.qmap)
749 		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
750 
751 	iowrite32(val, ipa->reg_virt + offset);
752 }
753 
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
755 {
756 	struct ipa *ipa = endpoint->ipa;
757 	const struct ipa_reg *reg;
758 	u32 offset;
759 	u32 val;
760 
761 	if (!endpoint->toward_ipa)
762 		return;		/* Register not valid for RX endpoints */
763 
764 	reg = ipa_reg(ipa, ENDP_INIT_MODE);
765 	if (endpoint->config.dma_mode) {
766 		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
767 		u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
768 
769 		val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
770 		val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
771 	} else {
772 		val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
773 	}
774 	/* All other bits unspecified (and 0) */
775 
776 	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
777 	iowrite32(val, ipa->reg_virt + offset);
778 }
779 
780 /* For IPA v4.5+, times are expressed using Qtime.  The AP uses one of two
781  * pulse generators (0 and 1) to measure elapsed time.  In ipa_qtime_config()
782  * they're configured to have granularity 100 usec and 1 msec, respectively.
783  *
784  * The return value is the positive or negative Qtime value to use to
785  * express the (microsecond) time provided.  A positive return value
786  * means pulse generator 0 can be used; otherwise use pulse generator 1.
787  */
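/* For example (assuming a 5-bit limit field, so max = 31): 500 usec
 * yields DIV_ROUND_CLOSEST(500, 100) = 5, which fits, so 5 is returned
 * and pulse generator 0 is used.  10000 usec yields 100 > 31, so the
 * millisecond path applies and -10 is returned (pulse generator 1).
 */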
static int ipa_qtime_val(u32 microseconds, u32 max)
789 {
790 	u32 val;
791 
792 	/* Use 100 microsecond granularity if possible */
793 	val = DIV_ROUND_CLOSEST(microseconds, 100);
794 	if (val <= max)
795 		return (int)val;
796 
797 	/* Have to use pulse generator 1 (millisecond granularity) */
798 	val = DIV_ROUND_CLOSEST(microseconds, 1000);
799 	WARN_ON(val > max);
800 
801 	return (int)-val;
802 }
803 
804 /* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
				  u32 microseconds)
807 {
808 	u32 max;
809 	u32 val;
810 
811 	if (!microseconds)
812 		return 0;	/* Nothing to compute if time limit is 0 */
813 
814 	max = ipa_reg_field_max(reg, TIME_LIMIT);
815 	if (ipa->version >= IPA_VERSION_4_5) {
816 		u32 gran_sel;
817 		int ret;
818 
819 		/* Compute the Qtime limit value to use */
820 		ret = ipa_qtime_val(microseconds, max);
821 		if (ret < 0) {
822 			val = -ret;
823 			gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
824 		} else {
825 			val = ret;
826 			gran_sel = 0;
827 		}
828 
829 		return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
830 	}
831 
832 	/* We program aggregation granularity in ipa_hardware_config() */
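	/* For example, assuming IPA_AGGR_GRANULARITY is 500 microseconds,
	 * a 1000 microsecond limit encodes as DIV_ROUND_CLOSEST(1000, 500),
	 * or 2.
	 */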
833 	val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
834 	WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
835 	     microseconds, max * IPA_AGGR_GRANULARITY);
836 
837 	return ipa_reg_encode(reg, TIME_LIMIT, val);
838 }
839 
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
841 {
842 	u32 endpoint_id = endpoint->endpoint_id;
843 	struct ipa *ipa = endpoint->ipa;
844 	const struct ipa_reg *reg;
845 	u32 val = 0;
846 
847 	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
848 	if (endpoint->config.aggregation) {
849 		if (!endpoint->toward_ipa) {
850 			const struct ipa_endpoint_rx *rx_config;
851 			u32 buffer_size;
852 			u32 limit;
853 
854 			rx_config = &endpoint->config.rx;
855 			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
856 			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
857 
858 			buffer_size = rx_config->buffer_size;
859 			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
860 						 rx_config->aggr_hard_limit);
861 			val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
862 
863 			limit = rx_config->aggr_time_limit;
864 			val |= aggr_time_limit_encode(ipa, reg, limit);
865 
866 			/* AGGR_PKT_LIMIT is 0 (unlimited) */
867 
868 			if (rx_config->aggr_close_eof)
869 				val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
870 		} else {
871 			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
872 			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
873 			/* other fields ignored */
874 		}
875 		/* AGGR_FORCE_CLOSE is 0 */
876 		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
877 	} else {
878 		val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
879 		/* other fields ignored */
880 	}
881 
882 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
883 }
884 
885 /* The head-of-line blocking timer is defined as a tick count.  For
886  * IPA version 4.5 the tick count is based on the Qtimer, which is
887  * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
888  * each tick represents 128 cycles of the IPA core clock.
889  *
890  * Return the encoded value representing the timeout period provided
891  * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
892  */
static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
				  u32 microseconds)
895 {
896 	u32 width;
897 	u32 scale;
898 	u64 ticks;
899 	u64 rate;
900 	u32 high;
901 	u32 val;
902 
903 	if (!microseconds)
904 		return 0;	/* Nothing to compute if timer period is 0 */
905 
906 	if (ipa->version >= IPA_VERSION_4_5) {
907 		u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
908 		u32 gran_sel;
909 		int ret;
910 
911 		/* Compute the Qtime limit value to use */
912 		ret = ipa_qtime_val(microseconds, max);
913 		if (ret < 0) {
914 			val = -ret;
915 			gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
916 		} else {
917 			val = ret;
918 			gran_sel = 0;
919 		}
920 
921 		return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
922 	}
923 
924 	/* Use 64 bit arithmetic to avoid overflow */
925 	rate = ipa_core_clock_rate(ipa);
926 	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
927 
928 	/* We still need the result to fit into the field */
929 	WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
930 
931 	/* IPA v3.5.1 through v4.1 just record the tick count */
932 	if (ipa->version < IPA_VERSION_4_2)
933 		return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
934 
935 	/* For IPA v4.2, the tick count is represented by base and
936 	 * scale fields within the 32-bit timer register, where:
937 	 *     ticks = base << scale;
938 	 * The best precision is achieved when the base value is as
939 	 * large as possible.  Find the highest set bit in the tick
940 	 * count, and extract the number of bits in the base field
941 	 * such that high bit is included.
942 	 */
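	/* Hypothetical illustration of the base/scale arithmetic below:
	 * with a 5-bit base field, ticks = 20480 gives high = 15 and
	 * width = 5, so scale = 10; rounding makes ticks 20992, and
	 * base = 20992 >> 10 = 20, so 20 << 10 = 20480 is what the
	 * hardware reconstructs.
	 */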
943 	high = fls(ticks);		/* 1..32 (or warning above) */
944 	width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
945 	scale = high > width ? high - width : 0;
946 	if (scale) {
947 		/* If we're scaling, round up to get a closer result */
948 		ticks += 1 << (scale - 1);
949 		/* High bit was set, so rounding might have affected it */
950 		if (fls(ticks) != high)
951 			scale++;
952 	}
953 
954 	val = ipa_reg_encode(reg, TIMER_SCALE, scale);
955 	val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
956 
957 	return val;
958 }
959 
960 /* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
963 {
964 	u32 endpoint_id = endpoint->endpoint_id;
965 	struct ipa *ipa = endpoint->ipa;
966 	const struct ipa_reg *reg;
967 	u32 val;
968 
969 	/* This should only be changed when HOL_BLOCK_EN is disabled */
970 	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
971 	val = hol_block_timer_encode(ipa, reg, microseconds);
972 
973 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
974 }
975 
976 static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
978 {
979 	u32 endpoint_id = endpoint->endpoint_id;
980 	struct ipa *ipa = endpoint->ipa;
981 	const struct ipa_reg *reg;
982 	u32 offset;
983 	u32 val;
984 
985 	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
986 	offset = ipa_reg_n_offset(reg, endpoint_id);
987 	val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
988 
989 	iowrite32(val, ipa->reg_virt + offset);
990 
991 	/* When enabling, the register must be written twice for IPA v4.5+ */
992 	if (enable && ipa->version >= IPA_VERSION_4_5)
993 		iowrite32(val, ipa->reg_virt + offset);
994 }
995 
996 /* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
999 {
1000 	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
1001 	ipa_endpoint_init_hol_block_en(endpoint, true);
1002 }
1003 
static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
1005 {
1006 	ipa_endpoint_init_hol_block_en(endpoint, false);
1007 }
1008 
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1010 {
1011 	u32 i;
1012 
1013 	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
1014 		struct ipa_endpoint *endpoint = &ipa->endpoint[i];
1015 
1016 		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
1017 			continue;
1018 
1019 		ipa_endpoint_init_hol_block_disable(endpoint);
1020 		ipa_endpoint_init_hol_block_enable(endpoint, 0);
1021 	}
1022 }
1023 
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
1025 {
1026 	u32 endpoint_id = endpoint->endpoint_id;
1027 	struct ipa *ipa = endpoint->ipa;
1028 	const struct ipa_reg *reg;
1029 	u32 val = 0;
1030 
1031 	if (!endpoint->toward_ipa)
1032 		return;		/* Register not valid for RX endpoints */
1033 
1034 	reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1035 	/* DEAGGR_HDR_LEN is 0 */
1036 	/* PACKET_OFFSET_VALID is 0 */
1037 	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
1038 	/* MAX_PACKET_LEN is 0 (not enforced) */
1039 
1040 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1041 }
1042 
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
1044 {
1045 	u32 resource_group = endpoint->config.resource_group;
1046 	u32 endpoint_id = endpoint->endpoint_id;
1047 	struct ipa *ipa = endpoint->ipa;
1048 	const struct ipa_reg *reg;
1049 	u32 val;
1050 
1051 	reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1052 	val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
1053 
1054 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1055 }
1056 
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1058 {
1059 	u32 endpoint_id = endpoint->endpoint_id;
1060 	struct ipa *ipa = endpoint->ipa;
1061 	const struct ipa_reg *reg;
1062 	u32 val;
1063 
1064 	if (!endpoint->toward_ipa)
1065 		return;		/* Register not valid for RX endpoints */
1066 
1067 	reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1068 
1069 	/* Low-order byte configures primary packet processing */
1070 	val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
1071 
1072 	/* Second byte (if supported) configures replicated packet processing */
1073 	if (ipa->version < IPA_VERSION_4_5)
1074 		val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
1075 				      endpoint->config.tx.seq_rep_type);
1076 
1077 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1078 }
1079 
1080 /**
1081  * ipa_endpoint_skb_tx() - Transmit a socket buffer
1082  * @endpoint:	Endpoint pointer
1083  * @skb:	Socket buffer to send
1084  *
1085  * Returns:	0 if successful, or a negative error code
1086  */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1088 {
1089 	struct gsi_trans *trans;
1090 	u32 nr_frags;
1091 	int ret;
1092 
1093 	/* Make sure source endpoint's TLV FIFO has enough entries to
1094 	 * hold the linear portion of the skb and all its fragments.
1095 	 * If not, see if we can linearize it before giving up.
1096 	 */
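	/* For example, an skb with three page fragments needs a transaction
	 * with 1 + 3 = 4 TREs (one for the linear data plus one per
	 * fragment), as allocated below.
	 */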
1097 	nr_frags = skb_shinfo(skb)->nr_frags;
1098 	if (nr_frags > endpoint->skb_frag_max) {
1099 		if (skb_linearize(skb))
1100 			return -E2BIG;
1101 		nr_frags = 0;
1102 	}
1103 
1104 	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1105 	if (!trans)
1106 		return -EBUSY;
1107 
1108 	ret = gsi_trans_skb_add(trans, skb);
1109 	if (ret)
1110 		goto err_trans_free;
1111 	trans->data = skb;	/* transaction owns skb now */
1112 
1113 	gsi_trans_commit(trans, !netdev_xmit_more());
1114 
1115 	return 0;
1116 
1117 err_trans_free:
1118 	gsi_trans_free(trans);
1119 
1120 	return -ENOMEM;
1121 }
1122 
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1124 {
1125 	u32 endpoint_id = endpoint->endpoint_id;
1126 	struct ipa *ipa = endpoint->ipa;
1127 	const struct ipa_reg *reg;
1128 	u32 val = 0;
1129 
1130 	reg = ipa_reg(ipa, ENDP_STATUS);
1131 	if (endpoint->config.status_enable) {
1132 		val |= ipa_reg_bit(reg, STATUS_EN);
1133 		if (endpoint->toward_ipa) {
1134 			enum ipa_endpoint_name name;
1135 			u32 status_endpoint_id;
1136 
1137 			name = endpoint->config.tx.status_endpoint;
1138 			status_endpoint_id = ipa->name_map[name]->endpoint_id;
1139 
1140 			val |= ipa_reg_encode(reg, STATUS_ENDP,
1141 					      status_endpoint_id);
1142 		}
1143 		/* STATUS_LOCATION is 0, meaning status element precedes
1144 		 * packet (not present for IPA v4.5+)
1145 		 */
1146 		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
1147 	}
1148 
1149 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1150 }
1151 
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
				      struct gsi_trans *trans)
1154 {
1155 	struct page *page;
1156 	u32 buffer_size;
1157 	u32 offset;
1158 	u32 len;
1159 	int ret;
1160 
1161 	buffer_size = endpoint->config.rx.buffer_size;
1162 	page = dev_alloc_pages(get_order(buffer_size));
1163 	if (!page)
1164 		return -ENOMEM;
1165 
1166 	/* Offset the buffer to make space for skb headroom */
1167 	offset = NET_SKB_PAD;
1168 	len = buffer_size - offset;
1169 
1170 	ret = gsi_trans_page_add(trans, page, len, offset);
1171 	if (ret)
1172 		put_page(page);
1173 	else
1174 		trans->data = page;	/* transaction owns page now */
1175 
1176 	return ret;
1177 }
1178 
1179 /**
1180  * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1181  * @endpoint:	Endpoint to be replenished
1182  *
1183  * The IPA hardware can hold a fixed number of receive buffers for an RX
1184  * endpoint, based on the number of entries in the underlying channel ring
1185  * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1186  * more receive buffers can be supplied to the hardware.  Replenishing for
1187  * an endpoint can be disabled, in which case buffers are not queued to
1188  * the hardware.
1189  */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1191 {
1192 	struct gsi_trans *trans;
1193 
1194 	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1195 		return;
1196 
1197 	/* Skip it if it's already active */
1198 	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1199 		return;
1200 
1201 	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1202 		bool doorbell;
1203 
1204 		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;

1208 		/* Ring the doorbell if we've got a full batch */
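		/* (With IPA_REPLENISH_BATCH at 16, that is every 16th buffer
		 * queued; the modulo test is why the batch size must be
		 * non-zero.)
		 */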
1209 		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1210 		gsi_trans_commit(trans, doorbell);
1211 	}
1212 
1213 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1214 
1215 	return;
1216 
1217 try_again_later:
1218 	gsi_trans_free(trans);
1219 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1220 
1221 	/* Whenever a receive buffer transaction completes we'll try to
1222 	 * replenish again.  It's unlikely, but if we fail to supply even
1223 	 * one buffer, nothing will trigger another replenish attempt.
1224 	 * If the hardware has no receive buffers queued, schedule work to
1225 	 * try replenishing again.
1226 	 */
1227 	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1228 		schedule_delayed_work(&endpoint->replenish_work,
1229 				      msecs_to_jiffies(1));
1230 }
1231 
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1233 {
1234 	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1235 
1236 	/* Start replenishing if hardware currently has no buffers */
1237 	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1238 		ipa_endpoint_replenish(endpoint);
1239 }
1240 
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1242 {
1243 	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1244 }
1245 
static void ipa_endpoint_replenish_work(struct work_struct *work)
1247 {
1248 	struct delayed_work *dwork = to_delayed_work(work);
1249 	struct ipa_endpoint *endpoint;
1250 
1251 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1252 
1253 	ipa_endpoint_replenish(endpoint);
1254 }
1255 
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
1258 {
1259 	struct sk_buff *skb;
1260 
1261 	if (!endpoint->netdev)
1262 		return;
1263 
1264 	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1265 	if (skb) {
1266 		/* Copy the data into the socket buffer and receive it */
1267 		skb_put(skb, len);
1268 		memcpy(skb->data, data, len);
1269 		skb->truesize += extra;
1270 	}
1271 
1272 	ipa_modem_skb_rx(endpoint->netdev, skb);
1273 }
1274 
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
1277 {
1278 	u32 buffer_size = endpoint->config.rx.buffer_size;
1279 	struct sk_buff *skb;
1280 
1281 	/* Nothing to do if there's no netdev */
1282 	if (!endpoint->netdev)
1283 		return false;
1284 
1285 	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1286 
1287 	skb = build_skb(page_address(page), buffer_size);
1288 	if (skb) {
1289 		/* Reserve the headroom and account for the data */
1290 		skb_reserve(skb, NET_SKB_PAD);
1291 		skb_put(skb, len);
1292 	}
1293 
1294 	/* Receive the buffer (or record drop if unable to build it) */
1295 	ipa_modem_skb_rx(endpoint->netdev, skb);
1296 
1297 	return skb != NULL;
1298 }
1299 
1300 /* The format of a packet status element is the same for several status
1301  * types (opcodes).  Other types aren't currently supported.
1302  */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1304 {
1305 	switch (opcode) {
1306 	case IPA_STATUS_OPCODE_PACKET:
1307 	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1308 	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1309 	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1310 		return true;
1311 	default:
1312 		return false;
1313 	}
1314 }
1315 
static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
1318 {
1319 	u32 endpoint_id;
1320 
1321 	if (!ipa_status_format_packet(status->opcode))
1322 		return true;
1323 	if (!status->pkt_len)
1324 		return true;
1325 	endpoint_id = u8_get_bits(status->endp_dst_idx,
1326 				  IPA_STATUS_DST_IDX_FMASK);
1327 	if (endpoint_id != endpoint->endpoint_id)
1328 		return true;
1329 
1330 	return false;	/* Don't skip this packet, process it */
1331 }
1332 
static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
				    const struct ipa_status *status)
1335 {
1336 	struct ipa_endpoint *command_endpoint;
1337 	struct ipa *ipa = endpoint->ipa;
1338 	u32 endpoint_id;
1339 
1340 	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
1341 		return false;	/* No valid tag */
1342 
1343 	/* The status contains a valid tag.  We know the packet was sent to
1344 	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1345 	 * If the packet came from the AP->command TX endpoint we know
1346 	 * this packet was sent as part of the pipeline clear process.
1347 	 */
1348 	endpoint_id = u8_get_bits(status->endp_src_idx,
1349 				  IPA_STATUS_SRC_IDX_FMASK);
1350 	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1351 	if (endpoint_id == command_endpoint->endpoint_id) {
1352 		complete(&ipa->completion);
1353 	} else {
1354 		dev_err(&ipa->pdev->dev,
1355 			"unexpected tagged packet from endpoint %u\n",
1356 			endpoint_id);
1357 	}
1358 
1359 	return true;
1360 }
1361 
1362 /* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
1365 {
1366 	u32 val;
1367 
1368 	/* If the status indicates a tagged transfer, we'll drop the packet */
1369 	if (ipa_endpoint_status_tag(endpoint, status))
1370 		return true;
1371 
1372 	/* Deaggregation exceptions we drop; all other types we consume */
1373 	if (status->exception)
1374 		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1375 
1376 	/* Drop the packet if it fails to match a routing rule; otherwise no */
1377 	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1378 
1379 	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1380 }
1381 
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
1384 {
1385 	u32 buffer_size = endpoint->config.rx.buffer_size;
1386 	void *data = page_address(page) + NET_SKB_PAD;
1387 	u32 unused = buffer_size - total_len;
1388 	u32 resid = total_len;
1389 
1390 	while (resid) {
1391 		const struct ipa_status *status = data;
1392 		u32 align;
1393 		u32 len;
1394 
1395 		if (resid < sizeof(*status)) {
1396 			dev_err(&endpoint->ipa->pdev->dev,
1397 				"short message (%u bytes < %zu byte status)\n",
1398 				resid, sizeof(*status));
1399 			break;
1400 		}
1401 
1402 		/* Skip over status packets that lack packet data */
1403 		if (ipa_endpoint_status_skip(endpoint, status)) {
1404 			data += sizeof(*status);
1405 			resid -= sizeof(*status);
1406 			continue;
1407 		}
1408 
1409 		/* Compute the amount of buffer space consumed by the packet,
1410 		 * including the status element.  If the hardware is configured
1411 		 * to pad packet data to an aligned boundary, account for that.
1412 		 * And if checksum offload is enabled a trailer containing
1413 		 * computed checksum information will be appended.
1414 		 */
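		/* Illustrative example: with a 2-byte pad alignment and
		 * checksum offload enabled, a 1501-byte packet consumes
		 * sizeof(*status) + ALIGN(1501, 2) + sizeof(struct
		 * rmnet_map_dl_csum_trailer) bytes of the buffer.
		 */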
1415 		align = endpoint->config.rx.pad_align ? : 1;
1416 		len = le16_to_cpu(status->pkt_len);
1417 		len = sizeof(*status) + ALIGN(len, align);
1418 		if (endpoint->config.checksum)
1419 			len += sizeof(struct rmnet_map_dl_csum_trailer);
1420 
1421 		if (!ipa_endpoint_status_drop(endpoint, status)) {
1422 			void *data2;
1423 			u32 extra;
1424 			u32 len2;
1425 
1426 			/* Client receives only packet data (no status) */
1427 			data2 = data + sizeof(*status);
1428 			len2 = le16_to_cpu(status->pkt_len);
1429 
1430 			/* Have the true size reflect the extra unused space in
1431 			 * the original receive buffer.  Distribute the "cost"
1432 			 * proportionately across all aggregated packets in the
1433 			 * buffer.
1434 			 */
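			/* Hypothetical numbers: 2048 unused bytes and a
			 * packet consuming 1000 of 8000 total bytes give
			 * extra = DIV_ROUND_CLOSEST(2048 * 1000, 8000),
			 * i.e. 256 bytes charged to this skb's truesize.
			 */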
1435 			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1436 			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1437 		}
1438 
1439 		/* Consume status and the full packet it describes */
1440 		data += len;
1441 		resid -= len;
1442 	}
1443 }
1444 
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
1447 {
1448 	struct page *page;
1449 
1450 	if (endpoint->toward_ipa)
1451 		return;
1452 
1453 	if (trans->cancelled)
1454 		goto done;
1455 
1456 	/* Parse or build a socket buffer using the actual received length */
1457 	page = trans->data;
1458 	if (endpoint->config.status_enable)
1459 		ipa_endpoint_status_parse(endpoint, page, trans->len);
1460 	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1461 		trans->data = NULL;	/* Pages have been consumed */
1462 done:
1463 	ipa_endpoint_replenish(endpoint);
1464 }
1465 
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
1468 {
1469 	if (endpoint->toward_ipa) {
1470 		struct ipa *ipa = endpoint->ipa;
1471 
1472 		/* Nothing to do for command transactions */
1473 		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1474 			struct sk_buff *skb = trans->data;
1475 
1476 			if (skb)
1477 				dev_kfree_skb_any(skb);
1478 		}
1479 	} else {
1480 		struct page *page = trans->data;
1481 
1482 		if (page)
1483 			put_page(page);
1484 	}
1485 }
1486 
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1488 {
1489 	const struct ipa_reg *reg;
1490 	u32 val;
1491 
1492 	reg = ipa_reg(ipa, ROUTE);
1493 	/* ROUTE_DIS is 0 */
1494 	val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
1495 	val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
1496 	/* ROUTE_DEF_HDR_OFST is 0 */
1497 	val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
1498 	val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
1499 
1500 	iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
1501 }
1502 
void ipa_endpoint_default_route_clear(struct ipa *ipa)
1504 {
1505 	ipa_endpoint_default_route_set(ipa, 0);
1506 }
1507 
1508 /**
1509  * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1510  * @endpoint:	Endpoint to be reset
1511  *
1512  * If aggregation is active on an RX endpoint when a reset is performed
1513  * on its underlying GSI channel, a special sequence of actions must be
1514  * taken to ensure the IPA pipeline is properly cleared.
1515  *
1516  * Return:	0 if successful, or a negative error code
1517  */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1519 {
1520 	struct device *dev = &endpoint->ipa->pdev->dev;
1521 	struct ipa *ipa = endpoint->ipa;
1522 	struct gsi *gsi = &ipa->gsi;
1523 	bool suspended = false;
1524 	dma_addr_t addr;
1525 	u32 retries;
1526 	u32 len = 1;
1527 	void *virt;
1528 	int ret;
1529 
1530 	virt = kzalloc(len, GFP_KERNEL);
1531 	if (!virt)
1532 		return -ENOMEM;
1533 
1534 	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1535 	if (dma_mapping_error(dev, addr)) {
1536 		ret = -ENOMEM;
1537 		goto out_kfree;
1538 	}
1539 
1540 	/* Force close aggregation before issuing the reset */
1541 	ipa_endpoint_force_close(endpoint);
1542 
1543 	/* Reset and reconfigure the channel with the doorbell engine
1544 	 * disabled.  Then poll until we know aggregation is no longer
1545 	 * active.  We'll re-enable the doorbell (if appropriate) when
1546 	 * we reset again below.
1547 	 */
1548 	gsi_channel_reset(gsi, endpoint->channel_id, false);
1549 
1550 	/* Make sure the channel isn't suspended */
1551 	suspended = ipa_endpoint_program_suspend(endpoint, false);
1552 
1553 	/* Start channel and do a 1 byte read */
1554 	ret = gsi_channel_start(gsi, endpoint->channel_id);
1555 	if (ret)
1556 		goto out_suspend_again;
1557 
1558 	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1559 	if (ret)
1560 		goto err_endpoint_stop;
1561 
1562 	/* Wait for aggregation to be closed on the channel */
1563 	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1564 	do {
1565 		if (!ipa_endpoint_aggr_active(endpoint))
1566 			break;
1567 		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1568 	} while (retries--);
1569 
1570 	/* Check one last time */
1571 	if (ipa_endpoint_aggr_active(endpoint))
1572 		dev_err(dev, "endpoint %u still active during reset\n",
1573 			endpoint->endpoint_id);
1574 
1575 	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1576 
1577 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1578 	if (ret)
1579 		goto out_suspend_again;
1580 
1581 	/* Finally, reset and reconfigure the channel again (re-enabling
1582 	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1583 	 * complete the channel reset sequence.  Finish by suspending the
1584 	 * channel again (if necessary).
1585 	 */
1586 	gsi_channel_reset(gsi, endpoint->channel_id, true);
1587 
1588 	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1589 
1590 	goto out_suspend_again;
1591 
1592 err_endpoint_stop:
1593 	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1594 out_suspend_again:
1595 	if (suspended)
1596 		(void)ipa_endpoint_program_suspend(endpoint, true);
1597 	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1598 out_kfree:
1599 	kfree(virt);
1600 
1601 	return ret;
1602 }
1603 
static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1605 {
1606 	u32 channel_id = endpoint->channel_id;
1607 	struct ipa *ipa = endpoint->ipa;
1608 	bool special;
1609 	int ret = 0;
1610 
	/* On IPA versions prior to v4.0 (e.g. v3.5.1), if an RX endpoint
	 * is reset while aggregation is active, we need to handle things
	 * specially to recover.  All other cases just need to reset the
	 * underlying GSI channel.
	 */
	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
			endpoint->config.aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		/* Newer versions of IPA use GSI channel flow control
		 * instead of endpoint DELAY mode to prevent sending data.
		 * Flow control is disabled for newly-allocated channels,
		 * and we can assume flow control is not (ever) enabled
		 * for AP TX channels.
		 */
		if (endpoint->ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
	} else {
		/* Ensure suspend mode is off on all AP RX endpoints */
		(void)ipa_endpoint_program_suspend(endpoint, false);
	}
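	/* Program all of the endpoint's configuration registers */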
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_nat(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	if (!endpoint->toward_ipa) {
		if (endpoint->config.rx.holb_drop)
			ipa_endpoint_init_hol_block_enable(endpoint, 0);
		else
			ipa_endpoint_init_hol_block_disable(endpoint);
	}
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

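	/* For RX endpoints, enable the SUSPEND interrupt and start
	 * keeping receive buffers supplied.
	 */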
	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

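	/* Quiesce RX processing before stopping the channel:  stop
	 * replenishing buffers and disable the SUSPEND interrupt.
	 */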
	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

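	/* An RX endpoint stops replenishing, and (where supported) has
	 * SUSPEND mode enabled, before its channel is suspended.
	 */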
	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

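	/* Take an RX endpoint out of suspend mode before resuming its
	 * channel; replenishing restarts once the resume succeeds.
	 */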
	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

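	/* A TX skb's linear data uses one TRE of a transaction; what
	 * remains bounds the number of page fragments it may carry.
	 */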
	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_reg *reg;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = ~0;
		return 0;
	}

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	reg = ipa_reg(ipa, FLAVOR_0);
	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));

	/* Our RX is an IPA producer */
	rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
	max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

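	/* TX endpoint IDs lie below rx_base, RX endpoint IDs at or above
	 * it; verify each initialized endpoint's direction matches its ID.
	 */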
	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

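	/* Record the endpoint in the lookup maps:  AP channels are mapped
	 * by channel ID, and every endpoint is mapped by name.
	 */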
	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->config = data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

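	/* Initialize an endpoint for each non-empty entry, building a
	 * bitmask of those that support filtering as we go.
	 */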
	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
			ipa->modem_tx_count++;
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}