// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_task_queue.h"

/* Open a packet data online channel between the network layer and CP. */
int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
{
	dev_dbg(ipc_imem->dev, "%s if id: %d",
		ipc_imem_phase_get_string(ipc_imem->phase), if_id);

	/* The network interface is only supported in the runtime phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
			ipc_imem_phase_get_string(ipc_imem->phase));
		return -EIO;
	}

	return ipc_mux_open_session(ipc_imem->mux, if_id);
}

/* Release a net link to CP. */
void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
			     int channel_id)
{
	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
	    if_id <= IP_MUX_SESSION_END)
		ipc_mux_close_session(ipc_imem->mux, if_id);
}

/* Tasklet call to do uplink transfer. */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
				  void *msg, size_t size)
{
	ipc_imem_ul_send(ipc_imem);

	return 0;
}

/* Through tasklet to do sio write. */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
					NULL, 0, false);
}

/* Function to transfer UL data */
int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
			       int if_id, int channel_id, struct sk_buff *skb)
{
	int ret = -EINVAL;

	if (!ipc_imem || channel_id < 0)
		goto out;

	/* Is CP Running? */
	if (ipc_imem->phase != IPC_P_RUN) {
		dev_dbg(ipc_imem->dev, "phase %s transmit",
			ipc_imem_phase_get_string(ipc_imem->phase));
		ret = -EIO;
		goto out;
	}

	/* Route the UL packet through IP MUX Layer */
	ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
out:
	return ret;
}

/* Initialize wwan channel */
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
				enum ipc_mux_protocol mux_type)
{
	struct ipc_chnl_cfg chnl_cfg = { 0 };

	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);

	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
	if (ipc_imem->cp_version == -1) {
		dev_err(ipc_imem->dev, "invalid CP version");
		return;
	}

	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
			      IRQ_MOD_OFF);

	/* WWAN registration. */
	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
	if (!ipc_imem->wwan)
		dev_err(ipc_imem->dev,
			"failed to register the ipc_wwan interfaces");
}

/* Map SKB to DMA for transfer */
static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
				   struct sk_buff *skb)
{
	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
	char *buf = skb->data;
	int len = skb->len;
	dma_addr_t mapping;
	int ret;

	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);

	if (ret)
		goto err;

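	/* Compile-time check: the IOSM control block must fit into the
	 * skb control buffer (skb->cb).
	 */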
	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));

	IPC_CB(skb)->mapping = mapping;
	IPC_CB(skb)->direction = DMA_TO_DEVICE;
	IPC_CB(skb)->len = len;
	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;

err:
	return ret;
}

/* return true if channel is ready for use */
static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
				       struct ipc_mem_channel *channel)
{
	enum ipc_phase phase;

	/* Update the current operation phase. */
	phase = ipc_imem->phase;

	/* Select the operation depending on the execution stage. */
	switch (phase) {
	case IPC_P_RUN:
	case IPC_P_PSI:
	case IPC_P_EBL:
		break;

	case IPC_P_ROM:
		/* Prepare the PSI image for the CP ROM driver and
		 * suspend the flash app.
		 */
		if (channel->state != IMEM_CHANNEL_RESERVED) {
			dev_err(ipc_imem->dev,
				"ch[%d]:invalid channel state %d,expected %d",
				channel->channel_id, channel->state,
				IMEM_CHANNEL_RESERVED);
			goto channel_unavailable;
		}
		goto channel_available;

	default:
		/* Ignore uplink actions in all other phases. */
		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
			channel->channel_id, phase);
		goto channel_unavailable;
	}
	/* Check the full availability of the channel. */
	if (channel->state != IMEM_CHANNEL_ACTIVE) {
		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
			channel->channel_id, channel->state);
		goto channel_unavailable;
	}

channel_available:
	return true;

channel_unavailable:
	return false;
}

/**
 * ipc_imem_sys_port_close - Release a sio link to CP.
 * @ipc_imem:          Imem instance.
 * @channel:           Channel instance.
 */
void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
			     struct ipc_mem_channel *channel)
{
	enum ipc_phase curr_phase;
	int status = 0;
	u32 tail = 0;

	curr_phase = ipc_imem->phase;

	/* If the current phase is IPC_P_OFF or the SIO ID is negative then
	 * the channel is already freed. Nothing to do.
	 */
	if (curr_phase == IPC_P_OFF) {
		dev_err(ipc_imem->dev,
			"nothing to do. Current Phase: %s",
			ipc_imem_phase_get_string(curr_phase));
		return;
	}

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel->channel_id, channel->state);
		return;
	}

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		ipc_imem->app_notify_ul_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * UL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}

		ipc_imem->app_notify_ul_pend = 0;
	}


	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		ipc_imem->app_notify_dl_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * DL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}

		ipc_imem->app_notify_dl_pend = 0;
	}

	/* Due to the wait for completion in messages, there is a small window
	 * between closing the pipe and marking the channel as closed. In this
	 * small window there could be an HP update from the Host Driver. Hence
	 * update the channel state to CLOSING to avoid an unnecessary
	 * interrupt towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

	ipc_imem_channel_free(channel);
}

/* Open a PORT link to CP and return the channel */
struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
					       int chl_id, int hp_id)
{
	struct ipc_mem_channel *channel;
	int ch_id;

	/* The PORT interface is only supported in the runtime phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
			ipc_imem_phase_get_string(ipc_imem->phase));
		return NULL;
	}

	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);

	if (ch_id < 0) {
		dev_err(ipc_imem->dev, "reservation of a PORT chnl id failed");
		return NULL;
	}

	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);

	if (!channel) {
		dev_err(ipc_imem->dev, "PORT channel id open failed");
		return NULL;
	}

	return channel;
}

/* transfer skb to modem */
int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
{
	struct ipc_mem_channel *channel = ipc_cdev->channel;
	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
	int ret = -EIO;

	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
	    ipc_imem->phase == IPC_P_OFF_REQ)
		goto out;

	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);

	if (ret)
		goto out;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	ret = ipc_imem_call_cdev_write(ipc_imem);

	if (ret) {
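		/* Scheduling the uplink write task failed, undo the queuing. */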
		skb_dequeue_tail(&channel->ul_list);
		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
			ipc_cdev->channel->channel_id);
	}
out:
	return ret;
}

/* Open a SIO link to CP and return the channel instance */
struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	enum ipc_phase phase;
	int channel_id;

	phase = ipc_imem_phase_update(ipc_imem);
	switch (phase) {
	case IPC_P_OFF:
	case IPC_P_ROM:
		/* Get a channel id as flash id and reserve it. */
		channel_id = ipc_imem_channel_alloc(ipc_imem,
						    IPC_MEM_CTRL_CHL_ID_7,
						    IPC_CTYPE_CTRL);

		if (channel_id < 0) {
			dev_err(ipc_imem->dev,
				"reservation of a flash channel id failed");
			goto error;
		}

		ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
		channel = &ipc_imem->channels[channel_id];

		/* Enqueue chip info data to be read */
		if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
			dev_err(ipc_imem->dev, "Enqueue of chip info failed");
			channel->state = IMEM_CHANNEL_FREE;
			goto error;
		}

		return channel;

	case IPC_P_PSI:
	case IPC_P_EBL:
		ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
		if (ipc_imem->cp_version == -1) {
			dev_err(ipc_imem->dev, "invalid CP version");
			goto error;
		}

		channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
		return ipc_imem_channel_open(ipc_imem, channel_id,
					     IPC_HP_CDEV_OPEN);

	default:
		/* CP is in the wrong state (e.g. CRASH or CD_READY) */
		dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
	}
error:
	return NULL;
}

/* Release a SIO channel link to CP. */
void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;
	struct ipc_mem_channel *channel;
	int status = 0;
	u32 tail = 0;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	/* Wait up to boot_check_timeout for CP to reach RUN or PSI stage */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
		    exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;
		msleep(20);
		boot_check_timeout -= 20;
	} while (boot_check_timeout > 0);

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->ul_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}
	}

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

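	/* Likewise wait for any pending DL data before the pipe cleanup. */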
	if (tail != channel->dl_pipe.old_tail) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->dl_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}
	}

	/* Due to the wait for completion in messages, there is a small window
	 * between closing the pipe and marking the channel as closed. In this
	 * small window there could be an HP update from the Host Driver. Hence
	 * update the channel state to CLOSING to avoid an unnecessary
	 * interrupt towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;
	/* Release the pipe resources */
	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
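	/* The devlink channel is released, drop it from the channel count. */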
	ipc_imem->nr_of_channels--;
}

void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
				    struct sk_buff *skb)
{
	skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
	complete(&ipc_devlink->devlink_sio.read_sem);
}

/* PSI transfer */
static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
				     struct ipc_mem_channel *channel,
				     unsigned char *buf, int count)
{
	int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;

	dma_addr_t mapping = 0;
	int ret;

	ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
				DMA_TO_DEVICE);
	if (ret)
		goto pcie_addr_map_fail;

	/* Save the PSI information for the CP ROM driver on the doorbell
	 * scratchpad.
	 */
	ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
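	/* Ring doorbell 0 so the CP ROM driver can start the PSI transfer. */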
	ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);

	ret = wait_for_completion_interruptible_timeout
		(&channel->ul_sem,
		 msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));

	if (ret <= 0) {
		dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
			ret);
		goto psi_transfer_fail;
	}
	/* If the PSI download fails, return the CP boot ROM exit code */
	if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
	    ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
		ret = (-1) * ((int)ipc_imem->rom_exit_code);
		goto psi_transfer_fail;
	}

	dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");

	/* Wait psi_start_timeout milliseconds until the CP PSI image is
	 * running and updates the execution_stage field with
	 * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);

		if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;

		msleep(20);
		psi_start_timeout -= 20;
	} while (psi_start_timeout > 0);

	if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
		goto psi_transfer_fail; /* Unknown status of CP PSI process. */

	ipc_imem->phase = IPC_P_PSI;

	/* Enter the PSI phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);

	/* Request the RUNNING state from CP and wait until it is reached
	 * or a timeout occurs.
	 */
	ipc_imem_ipc_init_check(ipc_imem);

	ret = wait_for_completion_interruptible_timeout
		(&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
	if (ret <= 0) {
		dev_err(ipc_imem->dev,
			"Failed PSI RUNNING state on CP, Error-%d", ret);
		goto psi_transfer_fail;
	}

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
			IPC_MEM_DEVICE_IPC_RUNNING) {
		dev_err(ipc_imem->dev,
			"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
			channel->channel_id,
			ipc_imem_phase_get_string(ipc_imem->phase),
			ipc_mmio_get_ipc_state(ipc_imem->mmio));

		goto psi_transfer_fail;
	}

	/* Create the flash channel for the transfer of the images. */
	if (!ipc_imem_sys_devlink_open(ipc_imem)) {
		dev_err(ipc_imem->dev, "can't open flash_channel");
		goto psi_transfer_fail;
	}

	ret = 0;
psi_transfer_fail:
	ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
pcie_addr_map_fail:
	return ret;
}

int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
			       unsigned char *buf, int count)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	struct ipc_mem_channel *channel;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;

	/* In the ROM phase the PSI image is passed to CP via a specific
	 * shared memory area and the doorbell scratchpad directly.
	 */
	if (ipc_imem->phase == IPC_P_ROM) {
		ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
		/* If the PSI transfer fails then send the crash
		 * signature.
		 */
		if (ret > 0)
			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      false);
		goto out;
	}

	/* Allocate skb memory for the uplink buffer. */
	skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
				 DMA_TO_DEVICE, 0);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(skb_put(skb, count), buf, count);

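	/* Mark the buffer as a blocking transfer; the code below waits on
	 * ul_sem for the CP confirmation.
	 */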
	IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	/* Inform the IPC tasklet to pass uplink IP packets to CP. */
	if (!ipc_imem_call_cdev_write(ipc_imem)) {
		ret = wait_for_completion_interruptible(&channel->ul_sem);

		if (ret < 0) {
			dev_err(ipc_imem->dev,
				"ch[%d] no CP confirmation, status = %d",
				channel->channel_id, ret);
			ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
			goto out;
		}
	}
	ret = 0;
out:
	return ret;
}

int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
			      u32 bytes_to_read, u32 *bytes_read)
{
	struct sk_buff *skb = NULL;
	int rc = 0;

	/* check skb is available in rx_list or wait for skb */
	devlink->devlink_sio.devlink_read_pend = 1;
	while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
		if (!wait_for_completion_interruptible_timeout
				(&devlink->devlink_sio.read_sem,
				 msecs_to_jiffies(IPC_READ_TIMEOUT))) {
			dev_err(devlink->dev, "Read timed out");
			rc = -ETIMEDOUT;
			goto devlink_read_fail;
		}
	}
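	/* An skb is available, clear the pending read indication. */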
	devlink->devlink_sio.devlink_read_pend = 0;
	if (bytes_to_read < skb->len) {
		dev_err(devlink->dev, "Invalid size,expected len %d", skb->len);
		rc = -EINVAL;
		goto devlink_read_fail;
	}
	*bytes_read = skb->len;
	memcpy(data, skb->data, skb->len);

devlink_read_fail:
	dev_kfree_skb(skb);
	return rc;
}