// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_cmds.h"
#include "vpu_rpc.h"
#include "vpu_mbox.h"

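/*
 * Pairs a command id with the response message that completes it;
 * 'handled' must match the handled flag reported with that response.
 */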
struct vpu_cmd_request {
	u32 request;
	u32 response;
	u32 handled;
};

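/*
 * A queued command: the packed RPC packet, an optional entry from
 * vpu_cmd_requests for commands that expect a response, and a per-core
 * sequence key used to wait for completion.
 */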
struct vpu_cmd_t {
	struct list_head list;
	u32 id;
	struct vpu_cmd_request *request;
	struct vpu_rpc_event *pkt;
	unsigned long key;
};

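/*
 * Commands listed here are synchronous: once sent, they block further
 * dispatch from the instance's queue until the matching response arrives.
 */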
static struct vpu_cmd_request vpu_cmd_requests[] = {
	{
		.request = VPU_CMD_ID_CONFIGURE_CODEC,
		.response = VPU_MSG_ID_MEM_REQUEST,
		.handled = 1,
	},
	{
		.request = VPU_CMD_ID_START,
		.response = VPU_MSG_ID_START_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_STOP,
		.response = VPU_MSG_ID_STOP_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_ABORT,
		.response = VPU_MSG_ID_ABORT_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_RST_BUF,
		.response = VPU_MSG_ID_BUF_RST,
		.handled = 1,
	},
};

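/* write the packed command into the shared buffer and ring the mailbox */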
static int vpu_cmd_send(struct vpu_core *core, struct vpu_rpc_event *pkt)
{
	int ret = 0;

	ret = vpu_iface_send_cmd(core, pkt);
	if (ret)
		return ret;

	/* ensure cmd data is written to the cmd buffer before triggering a cmd interrupt */
	mb();
	vpu_mbox_send_type(core, COMMAND);

	return ret;
}

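/*
 * Allocate a command, pack it for the firmware interface, and attach the
 * matching vpu_cmd_requests entry, if any.
 */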
static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	struct vpu_cmd_t *cmd;
	int i;
	int ret;

	cmd = vzalloc(sizeof(*cmd));
	if (!cmd)
		return NULL;

	cmd->pkt = vzalloc(sizeof(*cmd->pkt));
	if (!cmd->pkt) {
		vfree(cmd);
		return NULL;
	}

	cmd->id = id;
	ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
	if (ret) {
		dev_err(inst->dev, "iface pack cmd(%d) fail\n", id);
		vfree(cmd->pkt);
		vfree(cmd);
		return NULL;
	}
	for (i = 0; i < ARRAY_SIZE(vpu_cmd_requests); i++) {
		if (vpu_cmd_requests[i].request == id) {
			cmd->request = &vpu_cmd_requests[i];
			break;
		}
	}

	return cmd;
}

static void vpu_free_cmd(struct vpu_cmd_t *cmd)
{
	if (!cmd)
		return;
	vfree(cmd->pkt);
	vfree(cmd);
}

static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
{
	int ret;

	dev_dbg(inst->dev, "[%d]send cmd(0x%x)\n", inst->id, cmd->id);
	vpu_iface_pre_send_cmd(inst);
	ret = vpu_cmd_send(inst->core, cmd->pkt);
	if (!ret) {
		vpu_iface_post_send_cmd(inst);
		vpu_inst_record_flow(inst, cmd->id);
	} else {
		dev_err(inst->dev, "[%d] iface send cmd(0x%x) fail\n", inst->id, cmd->id);
	}

	return ret;
}

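/*
 * Dispatch queued commands in order; a synchronous command becomes
 * inst->pending and stalls the queue until its response clears it.
 */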
static void vpu_process_cmd_request(struct vpu_inst *inst)
{
	struct vpu_cmd_t *cmd;
	struct vpu_cmd_t *tmp;

	if (!inst || inst->pending)
		return;

	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
		list_del_init(&cmd->list);
		if (vpu_session_process_cmd(inst, cmd))
			dev_err(inst->dev, "[%d] process cmd(%d) fail\n", inst->id, cmd->id);
		if (cmd->request) {
			inst->pending = (void *)cmd;
			break;
		}
		vpu_free_cmd(cmd);
	}
}

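/*
 * Queue a command under core->cmd_lock; returns the sequence key the
 * caller can wait on, and whether the command is synchronous.
 */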
static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
			   unsigned long *key, int *sync)
{
	struct vpu_core *core;
	struct vpu_cmd_t *cmd;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	cmd = vpu_alloc_cmd(inst, id, data);
	if (!cmd)
		return -ENOMEM;

	mutex_lock(&core->cmd_lock);
	cmd->key = core->cmd_seq++;
	if (key)
		*key = cmd->key;
	if (sync)
		*sync = cmd->request ? true : false;
	list_add_tail(&cmd->list, &inst->cmd_q);
	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}

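/* must be called with core->cmd_lock held */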
static void vpu_clear_pending(struct vpu_inst *inst)
{
	if (!inst || !inst->pending)
		return;

	vpu_free_cmd(inst->pending);
	wake_up_all(&inst->core->ack_wq);
	inst->pending = NULL;
}

static bool vpu_check_response(struct vpu_cmd_t *cmd, u32 response, u32 handled)
{
	struct vpu_cmd_request *request;

	if (!cmd || !cmd->request)
		return false;

	request = cmd->request;
	if (request->response != response)
		return false;
	if (request->handled != handled)
		return false;

	return true;
}

int vpu_response_cmd(struct vpu_inst *inst, u32 response, u32 handled)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	mutex_lock(&core->cmd_lock);
	if (vpu_check_response(inst->pending, response, handled))
		vpu_clear_pending(inst);

	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}

void vpu_clear_request(struct vpu_inst *inst)
{
	struct vpu_cmd_t *cmd;
	struct vpu_cmd_t *tmp;

	mutex_lock(&inst->core->cmd_lock);
	if (inst->pending)
		vpu_clear_pending(inst);

	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
		list_del_init(&cmd->list);
		vpu_free_cmd(cmd);
	}
	mutex_unlock(&inst->core->cmd_lock);
}

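/* a command is considered responded once it is neither pending nor queued */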
static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
{
	struct vpu_core *core = inst->core;
	struct vpu_cmd_t *cmd;
	bool flag = true;

	mutex_lock(&core->cmd_lock);
	cmd = inst->pending;
	if (cmd && key == cmd->key) {
		flag = false;
		goto exit;
	}
	list_for_each_entry(cmd, &inst->cmd_q, list) {
		if (key == cmd->key) {
			flag = false;
			break;
		}
	}
exit:
	mutex_unlock(&core->cmd_lock);

	return flag;
}

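/*
 * Wait up to VPU_TIMEOUT for the command identified by key to be
 * responded; on timeout, mark the instance as hung and drop the
 * pending command.
 */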
static int sync_session_response(struct vpu_inst *inst, unsigned long key)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;

	call_void_vop(inst, wait_prepare);
	wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), VPU_TIMEOUT);
	call_void_vop(inst, wait_finish);

	if (!check_is_responsed(inst, key)) {
		dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
		set_bit(inst->id, &core->hang_mask);
		mutex_lock(&inst->core->cmd_lock);
		vpu_clear_pending(inst);
		mutex_unlock(&inst->core->cmd_lock);
		return -EINVAL;
	}

	return 0;
}

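/* queue the command and, for synchronous commands, wait for the response */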
static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	unsigned long key;
	int sync = false;
	int ret = -EINVAL;

	if (inst->id < 0)
		return -EINVAL;

	ret = vpu_request_cmd(inst, id, data, &key, &sync);
	if (!ret && sync)
		ret = sync_session_response(inst, key);

	if (ret)
		dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);

	return ret;
}

int vpu_session_configure_codec(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_CONFIGURE_CODEC, NULL);
}

int vpu_session_start(struct vpu_inst *inst)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_START, NULL);
}

int vpu_session_stop(struct vpu_inst *inst)
{
	int ret;

	vpu_trace(inst->dev, "[%d]\n", inst->id);

	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_STOP, NULL);
	/*
	 * Workaround for a firmware bug: if the next command follows the
	 * stop command too closely, the firmware may wrongly enter WFI.
	 */
	usleep_range(3000, 5000);
	return ret;
}

int vpu_session_encode_frame(struct vpu_inst *inst, s64 timestamp)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FRAME_ENCODE, &timestamp);
}

int vpu_session_alloc_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_ALLOC, fs);
}

int vpu_session_release_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_RELEASE, fs);
}

int vpu_session_abort(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_ABORT, NULL);
}

int vpu_session_rst_buf(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_RST_BUF, NULL);
}

int vpu_session_fill_timestamp(struct vpu_inst *inst, struct vpu_ts_info *info)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_TIMESTAMP, info);
}

int vpu_session_update_parameters(struct vpu_inst *inst, void *arg)
{
	if (inst->type & VPU_CORE_TYPE_DEC)
		vpu_iface_set_decode_params(inst, arg, 1);
	else
		vpu_iface_set_encode_params(inst, arg, 1);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_UPDATE_PARAMETER, arg);
}

int vpu_session_debug(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_DEBUG, NULL);
}

int vpu_core_snapshot(struct vpu_core *core)
{
	struct vpu_inst *inst;
	int ret;

	if (!core || list_empty(&core->instances))
		return 0;

	inst = list_first_entry(&core->instances, struct vpu_inst, list);

	reinit_completion(&core->cmp);
	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_SNAPSHOT, NULL);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "snapshot timeout\n");
		return -EINVAL;
	}

	return 0;
}

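/*
 * Send a firmware reset command directly on the core (no instance), then
 * wait for the firmware to signal completion.
 */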
int vpu_core_sw_reset(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;
	int ret;

	memset(&pkt, 0, sizeof(pkt));
	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_FIRM_RESET, NULL);

	reinit_completion(&core->cmp);
	mutex_lock(&core->cmd_lock);
	ret = vpu_cmd_send(core, &pkt);
	mutex_unlock(&core->cmd_lock);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "sw reset timeout\n");
		return -EINVAL;
	}

	return 0;
}