// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include "iosm_ipc_mux_codec.h"

/* At the beginning of the runtime phase, the IP MUX channel shall be created. */
static int ipc_mux_channel_create(struct iosm_mux *ipc_mux)
{
	int channel_id;

	channel_id = ipc_imem_channel_alloc(ipc_mux->imem, ipc_mux->instance_id,
					    IPC_CTYPE_WWAN);

	if (channel_id < 0) {
		dev_err(ipc_mux->dev,
			"allocation of the MUX channel id failed");
		ipc_mux->state = MUX_S_ERROR;
		ipc_mux->event = MUX_E_NOT_APPLICABLE;
		goto no_channel;
	}

	/* Establish the MUX channel in blocking mode. */
	ipc_mux->channel = ipc_imem_channel_open(ipc_mux->imem, channel_id,
						 IPC_HP_NET_CHANNEL_INIT);

	if (!ipc_mux->channel) {
		dev_err(ipc_mux->dev, "ipc_imem_channel_open failed");
		ipc_mux->state = MUX_S_ERROR;
		ipc_mux->event = MUX_E_NOT_APPLICABLE;
		return -ENODEV; /* MUX channel is not available. */
	}

	/* Define the MUX active state properties. */
	ipc_mux->state = MUX_S_ACTIVE;
	ipc_mux->event = MUX_E_NO_ORDERS;

no_channel:
	return channel_id;
}

/* Reset the session/if id state. */
static void ipc_mux_session_free(struct iosm_mux *ipc_mux, int if_id)
{
	struct mux_session *if_entry;

	if_entry = &ipc_mux->session[if_id];
	/* Reset the session state. */
	if_entry->wwan = NULL;
}

/* Create and send the session open command. */
static struct mux_cmd_open_session_resp *
ipc_mux_session_open_send(struct iosm_mux *ipc_mux, int if_id)
{
	struct mux_cmd_open_session_resp *open_session_resp;
	struct mux_acb *acb = &ipc_mux->acb;
	union mux_cmd_param param;

	/* Add the open_session command to one ACB and start transmission. */
	param.open_session.flow_ctrl = 0;
	param.open_session.ipv4v6_hints = 0;
	param.open_session.reserved2 = 0;
	param.open_session.dl_head_pad_len = cpu_to_le32(IPC_MEM_DL_ETH_OFFSET);

	/* Finish and transfer ACB. The user thread is suspended.
	 * It is a blocking function call, until CP responds or timeout.
	 */
	acb->wanted_response = MUX_CMD_OPEN_SESSION_RESP;
	if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_OPEN_SESSION, if_id, 0,
				     &param, sizeof(param.open_session), true,
				     false) ||
	    acb->got_response != MUX_CMD_OPEN_SESSION_RESP) {
		dev_err(ipc_mux->dev, "if_id %d: OPEN_SESSION send failed",
			if_id);
		return NULL;
	}

	open_session_resp = &ipc_mux->acb.got_param.open_session_resp;
	if (open_session_resp->response != cpu_to_le32(MUX_CMD_RESP_SUCCESS)) {
		dev_err(ipc_mux->dev,
			"if_id %d, session open failed, response=%d", if_id,
			le32_to_cpu(open_session_resp->response));
		return NULL;
	}

	return open_session_resp;
}

/* Open the first IP session. */
static bool ipc_mux_session_open(struct iosm_mux *ipc_mux,
				 struct mux_session_open *session_open)
{
	struct mux_cmd_open_session_resp *open_session_resp;
	int if_id;

	/* Copy the session interface id and check its range. */
	if_id = le32_to_cpu(session_open->if_id);
	if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "invalid interface id=%d", if_id);
		return false;
	}

	/* Create and send the session open command.
	 * It is a blocking function call, until CP responds or timeout.
	 */
	open_session_resp = ipc_mux_session_open_send(ipc_mux, if_id);
	if (!open_session_resp) {
		ipc_mux_session_free(ipc_mux, if_id);
		session_open->if_id = cpu_to_le32(-1);
		return false;
	}

	/* Initialize the uplink skb accumulator. */
	skb_queue_head_init(&ipc_mux->session[if_id].ul_list);

	ipc_mux->session[if_id].dl_head_pad_len = IPC_MEM_DL_ETH_OFFSET;
	ipc_mux->session[if_id].ul_head_pad_len =
		le32_to_cpu(open_session_resp->ul_head_pad_len);
	ipc_mux->session[if_id].wwan = ipc_mux->wwan;

	/* Reset the flow ctrl stats of the session. */
	ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
	ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
	ipc_mux->session[if_id].ul_flow_credits = 0;
	ipc_mux->session[if_id].net_tx_stop = false;
	ipc_mux->session[if_id].flow_ctl_mask = 0;

	/* Save and return the assigned if id. */
	session_open->if_id = cpu_to_le32(if_id);
	ipc_mux->nr_sessions++;

	return true;
}

/* Free pending session UL packets. */
static void ipc_mux_session_reset(struct iosm_mux *ipc_mux, int if_id)
{
	/* Reset the session/if id state. */
	ipc_mux_session_free(ipc_mux, if_id);

	/* Empty the uplink skb accumulator. */
	skb_queue_purge(&ipc_mux->session[if_id].ul_list);
}

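/* Send the blocking session close command to CP and reset the local state. */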
static void ipc_mux_session_close(struct iosm_mux *ipc_mux,
				  struct mux_session_close *msg)
{
	int if_id;

	/* Copy the session interface id. */
	if_id = le32_to_cpu(msg->if_id);

	if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "invalid session id %d", if_id);
		return;
	}

	/* Create and send the session close command.
	 * It is a blocking function call, until CP responds or timeout.
	 */
	if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_CLOSE_SESSION, if_id, 0,
				     NULL, 0, true, false))
		dev_err(ipc_mux->dev, "if_id %d: CLOSE_SESSION send failed",
			if_id);

	/* Reset the flow ctrl stats of the session. */
	ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
	ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
	ipc_mux->session[if_id].flow_ctl_mask = 0;

	ipc_mux_session_reset(ipc_mux, if_id);
	ipc_mux->nr_sessions--;
}

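/* Reset all remaining sessions and close the MUX channel. */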
static void ipc_mux_channel_close(struct iosm_mux *ipc_mux,
				  struct mux_channel_close *channel_close_p)
{
	int i;

	/* Free pending session UL packets. */
	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
		if (ipc_mux->session[i].wwan)
			ipc_mux_session_reset(ipc_mux, i);

	ipc_imem_channel_close(ipc_mux->imem, ipc_mux->channel_id);

	/* Reset the MUX object. */
	ipc_mux->state = MUX_S_INACTIVE;
	ipc_mux->event = MUX_E_INACTIVE;
}

/* CP has interrupted AP. If AP is in IP MUX mode, execute the pending ops. */
static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg)
{
	enum mux_event order;
	bool success;
	int ret = -EIO;

	if (!ipc_mux->initialized) {
		ret = -EAGAIN;
		goto out;
	}

	order = msg->common.event;

	switch (ipc_mux->state) {
	case MUX_S_INACTIVE:
		if (order != MUX_E_MUX_SESSION_OPEN)
			goto out; /* Wait for the request to open a session. */

		if (ipc_mux->event == MUX_E_INACTIVE)
			/* Establish the MUX channel and the new state. */
			ipc_mux->channel_id = ipc_mux_channel_create(ipc_mux);

		if (ipc_mux->state != MUX_S_ACTIVE) {
			ret = ipc_mux->channel_id; /* Missing the MUX channel */
			goto out;
		}

		/* Disable the TD update timer and open the first IP session. */
		ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
		ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
		success = ipc_mux_session_open(ipc_mux, &msg->session_open);

		ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
		if (success)
			ret = ipc_mux->channel_id;
		goto out;

	case MUX_S_ACTIVE:
		switch (order) {
		case MUX_E_MUX_SESSION_OPEN:
			/* Disable the TD update timer and open a session. */
			ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
			ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
			success = ipc_mux_session_open(ipc_mux,
						       &msg->session_open);
			ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
			if (success)
				ret = ipc_mux->channel_id;
			goto out;

		case MUX_E_MUX_SESSION_CLOSE:
			/* Release an IP session. */
			ipc_mux->event = MUX_E_MUX_SESSION_CLOSE;
			ipc_mux_session_close(ipc_mux, &msg->session_close);
			if (!ipc_mux->nr_sessions) {
				ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
				ipc_mux_channel_close(ipc_mux,
						      &msg->channel_close);
			}
			ret = ipc_mux->channel_id;
			goto out;

		case MUX_E_MUX_CHANNEL_CLOSE:
			/* Close the MUX channel pipes. */
			ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
			ipc_mux_channel_close(ipc_mux, &msg->channel_close);
			ret = ipc_mux->channel_id;
			goto out;

		default:
			/* Invalid order. */
			goto out;
		}

	default:
		dev_err(ipc_mux->dev,
			"unexpected MUX transition: state=%d, event=%d",
			ipc_mux->state, ipc_mux->event);
	}
out:
	return ret;
}

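/* Allocate the MUX instance, take over the configuration and pre-allocate the
 * pool of free UL ADB skbs. Returns NULL on any allocation failure.
 */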
struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
			      struct iosm_imem *imem)
{
	struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
	int i, j, ul_tds, ul_td_size;
	struct sk_buff_head *free_list;
	struct sk_buff *skb;
	int qlt_size;

	if (!ipc_mux)
		return NULL;

	ipc_mux->protocol = mux_cfg->protocol;
	ipc_mux->ul_flow = mux_cfg->ul_flow;
	ipc_mux->instance_id = mux_cfg->instance_id;
	ipc_mux->wwan_q_offset = 0;

	ipc_mux->pcie = imem->pcie;
	ipc_mux->imem = imem;
	ipc_mux->ipc_protocol = imem->ipc_protocol;
	ipc_mux->dev = imem->dev;
	ipc_mux->wwan = imem->wwan;

	/* Get the reference to the UL ADB list. */
	free_list = &ipc_mux->ul_adb.free_list;

	/* Initialize the list with free ADB. */
	skb_queue_head_init(free_list);

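	/* Start from the MUX Lite TD geometry; it is overridden below when
	 * the aggregation protocol is active.
	 */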
	ul_td_size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
	ul_tds = IPC_MEM_MAX_TDS_MUX_LITE_UL;

	ipc_mux->ul_adb.dest_skb = NULL;

	ipc_mux->initialized = true;
	ipc_mux->adb_prep_ongoing = false;
	ipc_mux->size_needed = 0;
	ipc_mux->ul_data_pend_bytes = 0;
	ipc_mux->state = MUX_S_INACTIVE;
	ipc_mux->ev_mux_net_transmit_pending = false;
	ipc_mux->tx_transaction_id = 0;
	ipc_mux->rr_next_session = 0;
	ipc_mux->event = MUX_E_INACTIVE;
	ipc_mux->channel_id = -1;
	ipc_mux->channel = NULL;

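	/* For the aggregation protocol, allocate one queue level table per
	 * IP session and switch to the larger ADB geometry.
	 */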
	if (ipc_mux->protocol != MUX_LITE) {
		qlt_size = offsetof(struct mux_qlth, ql) +
			   MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

		for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
			ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size,
							    GFP_ATOMIC);
			if (!ipc_mux->ul_adb.pp_qlt[i]) {
				for (j = i - 1; j >= 0; j--)
					kfree(ipc_mux->ul_adb.pp_qlt[j]);
				kfree(ipc_mux);
				return NULL;
			}
		}

		ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE;
		ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
	}

	/* Allocate the list of UL ADB. */
	for (i = 0; i < ul_tds; i++) {
		dma_addr_t mapping;

		skb = ipc_pcie_alloc_skb(ipc_mux->pcie, ul_td_size, GFP_ATOMIC,
					 &mapping, DMA_TO_DEVICE, 0);
		if (!skb) {
			ipc_mux_deinit(ipc_mux);
			return NULL;
		}
		/* Extend the UL ADB list. */
		skb_queue_tail(free_list, skb);
	}

	return ipc_mux;
}

/* Inform the network stack to restart transmission for all opened sessions
 * for which flow control is not ON.
 */
static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
	struct mux_session *session;
	int idx;

	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
		session = &ipc_mux->session[idx];

		if (!session->wwan)
			continue;

		/* If flow control of the session is OFF and if there was tx
		 * stop then restart. Inform the network interface to restart
		 * sending data.
		 */
		if (session->flow_ctl_mask == 0) {
			session->net_tx_stop = false;
			ipc_mux_netif_tx_flowctrl(session, idx, false);
		}
	}
}

/* Inform the network stack to stop sending further packets for all opened
 * sessions.
 */
static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux)
{
	struct mux_session *session;
	int idx;

	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
		session = &ipc_mux->session[idx];

		if (!session->wwan)
			continue;

		ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
	}
}

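/* Restart UL TX for all sessions once the pending UL bytes have drained below
 * the low threshold.
 */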
void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux)
{
	if (ipc_mux->ul_flow == MUX_UL) {
		int low_thresh = IPC_MEM_MUX_UL_FLOWCTRL_LOW_B;

		if (ipc_mux->ul_data_pend_bytes < low_thresh)
			ipc_mux_restart_tx_for_all_sessions(ipc_mux);
	}
}

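/* Return the maximum number of supported IP sessions. */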
int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux)
{
	return ipc_mux ? IPC_MEM_MUX_IP_SESSION_ENTRIES : -EFAULT;
}

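/* Return the active MUX protocol, or MUX_UNKNOWN if MUX is not set up. */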
enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux)
{
	return ipc_mux ? ipc_mux->protocol : MUX_UNKNOWN;
}

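/* Build a session open message and hand it to the MUX state machine. */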
int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr)
{
	struct mux_session_open *session_open;
	union mux_msg mux_msg;

	session_open = &mux_msg.session_open;
	session_open->event = MUX_E_MUX_SESSION_OPEN;

	session_open->if_id = cpu_to_le32(session_nr);
	ipc_mux->session[session_nr].flags |= IPC_MEM_WWAN_MUX;
	return ipc_mux_schedule(ipc_mux, &mux_msg);
}

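/* Build a session close message and hand it to the MUX state machine. */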
int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr)
{
	struct mux_session_close *session_close;
	union mux_msg mux_msg;
	int ret_val;

	session_close = &mux_msg.session_close;
	session_close->event = MUX_E_MUX_SESSION_CLOSE;

	session_close->if_id = cpu_to_le32(session_nr);
	ret_val = ipc_mux_schedule(ipc_mux, &mux_msg);
	ipc_mux->session[session_nr].flags &= ~IPC_MEM_WWAN_MUX;

	return ret_val;
}

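/* Stop netif TX, close the MUX channel if still active, drain the UL ADB pool
 * and release the MUX instance.
 */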
void ipc_mux_deinit(struct iosm_mux *ipc_mux)
{
	struct mux_channel_close *channel_close;
	struct sk_buff_head *free_list;
	union mux_msg mux_msg;
	struct sk_buff *skb;
	int i;

	if (!ipc_mux->initialized)
		return;
	ipc_mux_stop_netif_for_all_sessions(ipc_mux);

	if (ipc_mux->state == MUX_S_ACTIVE) {
		channel_close = &mux_msg.channel_close;
		channel_close->event = MUX_E_MUX_CHANNEL_CLOSE;
		ipc_mux_schedule(ipc_mux, &mux_msg);
	}

	/* Empty the ADB free list. */
	free_list = &ipc_mux->ul_adb.free_list;

	/* Remove the skbs from the head of the UL ADB free list. */
	while ((skb = skb_dequeue(free_list)))
		ipc_pcie_kfree_skb(ipc_mux->pcie, skb);

	if (ipc_mux->channel) {
		ipc_mux->channel->ul_pipe.is_open = false;
		ipc_mux->channel->dl_pipe.is_open = false;
	}

	/* Free the per-session queue level tables allocated for the
	 * aggregation protocol (the entries are NULL for MUX Lite).
	 */
	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
		kfree(ipc_mux->ul_adb.pp_qlt[i]);

	kfree(ipc_mux);
}