// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- *
 *
 * ALSA driver for the digigram lx6464es interface
 * low-level interface
 *
 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
 */

/* #define RMH_DEBUG 1 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "lx6464es.h"
#include "lx_core.h"

/* low-level register access */

static const unsigned long dsp_port_offsets[] = {
	0,
	0x400,
	0x401,
	0x402,
	0x403,
	0x404,
	0x405,
	0x406,
	0x407,
	0x408,
	0x409,
	0x40a,
	0x40b,
	0x40c,

	0x410,
	0x411,
	0x412,
	0x413,
	0x414,
	0x415,
	0x416,

	0x420,
	0x430,
	0x431,
	0x432,
	0x433,
	0x434,
	0x440
};

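/*
 * Note: these DSP offsets index 32-bit registers; lx_dsp_register() below
 * multiplies them by 4 to obtain a byte offset.  The PLX offsets further
 * down are already byte offsets and are used unscaled.
 */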
static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
{
	void __iomem *base_address = chip->port_dsp_bar;
	return base_address + dsp_port_offsets[port]*4;
}

unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
{
	void __iomem *address = lx_dsp_register(chip, port);
	return ioread32(address);
}

static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
			       u32 len)
{
	u32 __iomem *address = lx_dsp_register(chip, port);
	int i;

	/* we cannot use memcpy_fromio */
	for (i = 0; i != len; ++i)
		data[i] = ioread32(address + i);
}


void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
{
	void __iomem *address = lx_dsp_register(chip, port);
	iowrite32(data, address);
}

static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
				const u32 *data, u32 len)
{
	u32 __iomem *address = lx_dsp_register(chip, port);
	int i;

	/* we cannot use memcpy_toio */
	for (i = 0; i != len; ++i)
		iowrite32(data[i], address + i);
}


static const unsigned long plx_port_offsets[] = {
	0x04,
	0x40,
	0x44,
	0x48,
	0x4c,
	0x50,
	0x54,
	0x58,
	0x5c,
	0x64,
	0x68,
	0x6C
};

static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
{
	void __iomem *base_address = chip->port_plx_remapped;
	return base_address + plx_port_offsets[port];
}

unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
{
	void __iomem *address = lx_plx_register(chip, port);
	return ioread32(address);
}

void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
{
	void __iomem *address = lx_plx_register(chip, port);
	iowrite32(data, address);
}

/* rmh */

#ifdef CONFIG_SND_DEBUG
#define CMD_NAME(a) a
#else
#define CMD_NAME(a) NULL
#endif

#define Reg_CSM_MR	0x00000002
#define Reg_CSM_MC	0x00000001

struct dsp_cmd_info {
	u32 dcCodeOp;		/* Op Code of the command (usually 1st 24-bits
				 * word). */
	u16 dcCmdLength;	/* Command length in words of 24 bits. */
	u16 dcStatusType;	/* Status type: 0 for fixed length, 1 for
				 * random. */
	u16 dcStatusLength;	/* Status length (if fixed). */
	char *dcOpName;
};

/*
  Initialization and control data for the Microblaze interface
  - OpCode:
    the opcode field of the command set at the proper offset
  - CmdLength
    the number of command words
  - StatusType
    offset in the status registers: 0 means that the return value may be
    different from 0, and must be read
  - StatusLength
    the number of status words (in addition to the return value)
*/

static const struct dsp_cmd_info dsp_commands[] =
{
	{ (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/
	  , 1 , 0 /**/ , CMD_NAME("INFO_DEBUG") },
	{ (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/
	  , 1 , 2 /**/ , CMD_NAME("GET_SYS_CFG") },
	{ (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/ , CMD_NAME("SET_GRANULARITY") },
	{ (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/ , CMD_NAME("SET_TIMER_IRQ") },
	{ (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /*up to 10*/ , CMD_NAME("GET_EVENT") },
	{ (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/
	  , 1 , 2 /*up to 4*/ , CMD_NAME("GET_PIPES") },
	{ (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /**/ , CMD_NAME("ALLOCATE_PIPE") },
	{ (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /**/ , CMD_NAME("RELEASE_PIPE") },
	{ (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/
	  , 1 , MAX_STREAM_BUFFER , CMD_NAME("ASK_BUFFERS") },
	{ (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /*up to 2*/ , CMD_NAME("STOP_PIPE") },
	{ (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /*up to 2*/ , CMD_NAME("GET_PIPE_SPL_COUNT") },
	{ (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/
	  , 1 , 0 /**/ , CMD_NAME("TOGGLE_PIPE_STATE") },
	{ (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/
	  , 1 , 0 /**/ , CMD_NAME("DEF_STREAM") },
	{ (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/
	  , 1 , 0 /**/ , CMD_NAME("SET_MUTE") },
	{ (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
	  , 1 , 2 /**/ , CMD_NAME("GET_STREAM_SPL_COUNT") },
	{ (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/
	  , 0 , 1 /**/ , CMD_NAME("UPDATE_BUFFER") },
	{ (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/
	  , 1 , 4 /**/ , CMD_NAME("GET_BUFFER") },
	{ (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /*up to 4*/ , CMD_NAME("CANCEL_BUFFER") },
	{ (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /**/ , CMD_NAME("GET_PEAK") },
	{ (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/ , CMD_NAME("SET_STREAM_STATE") },
};

static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
{
	snd_BUG_ON(cmd >= CMD_14_INVALID);

	rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
	rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
	rmh->stat_len = dsp_commands[cmd].dcStatusLength;
	rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
	rmh->cmd_idx = cmd;
	memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));

#ifdef CONFIG_SND_DEBUG
	memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
#endif
#ifdef RMH_DEBUG
	rmh->cmd_idx = cmd;
#endif
}

#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
static void lx_message_dump(struct lx_rmh *rmh)
{
	u8 idx = rmh->cmd_idx;
	int i;

	snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

	for (i = 0; i != rmh->cmd_len; ++i)
		snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

	for (i = 0; i != rmh->stat_len; ++i)
		snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
	snd_printk("\n");
}
#else
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif



/* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */
#define XILINX_TIMEOUT_MS	40
#define XILINX_POLL_NO_SLEEP	100
#define XILINX_POLL_ITERATIONS	150


static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
	u32 reg = ED_DSP_TIMED_OUT;
	int dwloop;

	if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
		dev_err(chip->card->dev, "PIOSendMessage eReg_CSM %x\n", reg);
		return -EBUSY;
	}

	/* write command */
	lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

	/* MicroBlaze gogogo */
	lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

	/* wait for device to answer */
	for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
		if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
			if (rmh->dsp_stat == 0)
				reg = lx_dsp_reg_read(chip, eReg_CRM1);
			else
				reg = 0;
			goto polling_successful;
		} else
			udelay(1);
	}
	dev_warn(chip->card->dev,
		 "TIMEOUT lx_message_send_atomic! polling failed\n");

polling_successful:
	if ((reg & ERROR_VALUE) == 0) {
		/* read response */
		if (rmh->stat_len) {
			snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
			lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
					   rmh->stat_len);
		}
	} else
		dev_err(chip->card->dev, "rmh error: %08x\n", reg);

	/* clear Reg_CSM_MR */
	lx_dsp_reg_write(chip, eReg_CSM, 0);

	switch (reg) {
	case ED_DSP_TIMED_OUT:
		dev_warn(chip->card->dev, "lx_message_send: dsp timeout\n");
		return -ETIMEDOUT;

	case ED_DSP_CRASHED:
		dev_warn(chip->card->dev, "lx_message_send: dsp crashed\n");
		return -EAGAIN;
	}

	lx_message_dump(rmh);

	return reg;
}

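/*
 * All higher-level helpers in this file follow the same round-trip with the
 * microblaze: take chip->msg_lock, build the message, send it, read the
 * status words, unlock.  A minimal sketch (illustrative only, mirroring
 * lx_dsp_get_version() below):
 *
 *	mutex_lock(&chip->msg_lock);
 *	lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
 *	err = lx_message_send_atomic(chip, &chip->rmh);
 *	if (err == 0)
 *		value = chip->rmh.stat[1];	// status words, if any
 *	mutex_unlock(&chip->msg_lock);
 */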

/* low-level dsp access */
int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
	u16 ret;

	mutex_lock(&chip->msg_lock);

	lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
	ret = lx_message_send_atomic(chip, &chip->rmh);

	*rdsp_version = chip->rmh.stat[1];
	mutex_unlock(&chip->msg_lock);
	return ret;
}

int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
	u16 ret = 0;
	u32 freq_raw = 0;
	u32 freq = 0;
	u32 frequency = 0;

	mutex_lock(&chip->msg_lock);

	lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
	ret = lx_message_send_atomic(chip, &chip->rmh);

	if (ret == 0) {
		freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
		freq = freq_raw & XES_FREQ_COUNT8_MASK;

		if ((freq < XES_FREQ_COUNT8_48_MAX) ||
		    (freq > XES_FREQ_COUNT8_44_MIN))
			frequency = 0; /* unknown */
		else if (freq >= XES_FREQ_COUNT8_44_MAX)
			frequency = 44100;
		else
			frequency = 48000;
	}

	mutex_unlock(&chip->msg_lock);

	*rfreq = frequency * chip->freq_ratio;

	return ret;
}

int lx_dsp_get_mac(struct lx6464es *chip)
{
	u32 macmsb, maclsb;

	macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
	maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;

	/* todo: endianness handling */
	chip->mac_address[5] = ((u8 *)(&maclsb))[0];
	chip->mac_address[4] = ((u8 *)(&maclsb))[1];
	chip->mac_address[3] = ((u8 *)(&maclsb))[2];
	chip->mac_address[2] = ((u8 *)(&macmsb))[0];
	chip->mac_address[1] = ((u8 *)(&macmsb))[1];
	chip->mac_address[0] = ((u8 *)(&macmsb))[2];

	return 0;
}

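/*
 * The byte casts in lx_dsp_get_mac() assume a little-endian host, hence the
 * todo above.  An endian-neutral sketch would extract the bytes of each
 * 24-bit register with explicit shifts instead, e.g.:
 *
 *	chip->mac_address[5] = maclsb & 0xff;
 *	chip->mac_address[4] = (maclsb >> 8) & 0xff;
 *	chip->mac_address[3] = (maclsb >> 16) & 0xff;
 *	(and likewise for macmsb)
 *
 * This is only an illustration; the driver keeps the original casts.
 */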

int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
{
	int ret;

	mutex_lock(&chip->msg_lock);

	lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
	chip->rmh.cmd[0] |= gran;

	ret = lx_message_send_atomic(chip, &chip->rmh);
	mutex_unlock(&chip->msg_lock);
	return ret;
}

int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
	int ret;

	mutex_lock(&chip->msg_lock);

	lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
	chip->rmh.stat_len = 9;	/* we don't necessarily need the full length */

	ret = lx_message_send_atomic(chip, &chip->rmh);

	if (!ret)
		memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));

	mutex_unlock(&chip->msg_lock);
	return ret;
}

#define PIPE_INFO_TO_CMD(capture, pipe)					\
	((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)

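/*
 * For example, PIPE_INFO_TO_CMD(1, 2) expands to
 * ((u32)(2 | ID_IS_CAPTURE) << ID_OFFSET): the pipe number and the capture
 * flag packed into the ID field of the first command word.
 */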


/* low-level pipe handling */
int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
		     int channels)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= channels;

	err = lx_message_send_atomic(chip, &chip->rmh);
	mutex_unlock(&chip->msg_lock);

	if (err != 0)
		dev_err(chip->card->dev, "could not allocate pipe\n");

	return err;
}

int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);
	mutex_unlock(&chip->msg_lock);

	return err;
}

int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
		  u32 *r_needed, u32 *r_freed, u32 *size_array)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

#ifdef CONFIG_SND_DEBUG
	if (size_array)
		memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
#endif

	*r_needed = 0;
	*r_freed = 0;

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (!err) {
		int i;
		for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
			u32 stat = chip->rmh.stat[i];
			if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
				/* finished */
				*r_freed += 1;
				if (size_array)
					size_array[i] = stat & MASK_DATA_SIZE;
			} else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
				   == 0)
				/* free */
				*r_needed += 1;
		}

		dev_dbg(chip->card->dev,
			"CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
			*r_needed, *r_freed);
		for (i = 0; i != chip->rmh.stat_len; ++i)
			dev_dbg(chip->card->dev,
				" stat[%d]: %x, %x\n", i,
				chip->rmh.stat[i],
				chip->rmh.stat[i] & MASK_DATA_SIZE);
	}

	mutex_unlock(&chip->msg_lock);
	return err;
}


int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	mutex_unlock(&chip->msg_lock);
	return err;
}

static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	mutex_unlock(&chip->msg_lock);
	return err;
}


int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
	int err;

	err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
	if (err < 0)
		return err;

	err = lx_pipe_toggle_state(chip, pipe, is_capture);

	return err;
}

int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
{
	int err = 0;

	err = lx_pipe_wait_for_start(chip, pipe, is_capture);
	if (err < 0)
		return err;

	err = lx_pipe_toggle_state(chip, pipe, is_capture);

	return err;
}


int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
			 u64 *rsample_count)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.stat_len = 2;	/* need all words here! */

	err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */

	if (err != 0)
		dev_err(chip->card->dev,
			"could not query pipe's sample count\n");
	else {
		*rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
				  << 24)	/* hi part */
			+ chip->rmh.stat[1];	/* lo part */
	}

	mutex_unlock(&chip->msg_lock);
	return err;
}

int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err != 0)
		dev_err(chip->card->dev, "could not query pipe's state\n");
	else
		*rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;

	mutex_unlock(&chip->msg_lock);
	return err;
}

static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
				  int is_capture, u16 state)
{
	int i;

	/* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
	 * timeout 50 ms */
	for (i = 0; i != 50; ++i) {
		u16 current_state;
		int err = lx_pipe_state(chip, pipe, is_capture, &current_state);

		if (err < 0)
			return err;

		if (!err && current_state == state)
			return 0;

		mdelay(1);
	}

	return -ETIMEDOUT;
}

int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}

int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}

/* low-level stream handling */
int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
			int is_capture, enum stream_state_t state)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= state;

	err = lx_message_send_atomic(chip, &chip->rmh);
	mutex_unlock(&chip->msg_lock);

	return err;
}

int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
			 u32 pipe, int is_capture)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
	u32 channels = runtime->channels;

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);

	chip->rmh.cmd[0] |= pipe_cmd;

	if (runtime->sample_bits == 16)
		/* 16 bit format */
		chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);

	if (snd_pcm_format_little_endian(runtime->format))
		/* little endian/intel format */
		chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);

	chip->rmh.cmd[0] |= channels-1;

	err = lx_message_send_atomic(chip, &chip->rmh);
	mutex_unlock(&chip->msg_lock);

	return err;
}

int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
		    int *rstate)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	*rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;

	mutex_unlock(&chip->msg_lock);
	return err;
}

int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
			      u64 *r_bytepos)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	*r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
		      << 32)	/* hi part */
		+ chip->rmh.stat[1];	/* lo part */

	mutex_unlock(&chip->msg_lock);
	return err;
}

/* low-level buffer handling */
int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
		   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
		   u32 *r_buffer_index)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */

	/* todo: pause request, circular buffer */

	chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
	chip->rmh.cmd[2] = buf_address_lo;

	if (buf_address_hi) {
		chip->rmh.cmd_len = 4;
		chip->rmh.cmd[3] = buf_address_hi;
		chip->rmh.cmd[0] |= BF_64BITS_ADR;
	}

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err == 0) {
		*r_buffer_index = chip->rmh.stat[0];
		goto done;
	}

	if (err == EB_RBUFFERS_TABLE_OVERFLOW)
		dev_err(chip->card->dev,
			"lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

	if (err == EB_INVALID_STREAM)
		dev_err(chip->card->dev,
			"lx_buffer_give EB_INVALID_STREAM\n");

	if (err == EB_CMD_REFUSED)
		dev_err(chip->card->dev,
			"lx_buffer_give EB_CMD_REFUSED\n");

done:
	mutex_unlock(&chip->msg_lock);
	return err;
}

int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
		   u32 *r_buffer_size)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
					     * microblaze will seek for it */

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err == 0)
		*r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;

	mutex_unlock(&chip->msg_lock);
	return err;
}

int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
		     u32 buffer_index)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= buffer_index;

	err = lx_message_send_atomic(chip, &chip->rmh);

	mutex_unlock(&chip->msg_lock);
	return err;
}


/* low-level gain/peak handling
 *
 * \todo: can we unmute capture/playback channels independently?
 *
 * */
int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
{
	int err;
	/* bit set to 1: channel muted */
	u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);

	chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);

	chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32);	       /* hi part */
	chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */

	dev_dbg(chip->card->dev,
		"mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
		chip->rmh.cmd[2]);

	err = lx_message_send_atomic(chip, &chip->rmh);

	mutex_unlock(&chip->msg_lock);
	return err;
}

static const u32 peak_map[] = {
	0x00000109, /* -90.308dB */
	0x0000083B, /* -72.247dB */
	0x000020C4, /* -60.205dB */
	0x00008273, /* -48.030dB */
	0x00020756, /* -36.005dB */
	0x00040C37, /* -30.001dB */
	0x00081385, /* -24.002dB */
	0x00101D3F, /* -18.000dB */
	0x0016C310, /* -15.000dB */
	0x002026F2, /* -12.001dB */
	0x002D6A86, /* -9.000dB */
	0x004026E6, /* -6.004dB */
	0x005A9DF6, /* -3.000dB */
	0x0065AC8B, /* -2.000dB */
	0x00721481, /* -1.000dB */
	0x007FFFFF, /* FS */
};
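/*
 * peak_map[] translates the 4-bit peak code returned by CMD_12_GET_PEAK into
 * a 24-bit linear amplitude (0x007FFFFF being digital full scale); see
 * lx_level_peaks() below.
 */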

int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
		   u32 *r_levels)
{
	int err = 0;
	int i;

	mutex_lock(&chip->msg_lock);
	for (i = 0; i < channels; i += 4) {
		u32 s0, s1, s2, s3;

		lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
		chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);

		err = lx_message_send_atomic(chip, &chip->rmh);

		if (err == 0) {
			s0 = peak_map[chip->rmh.stat[0] & 0x0F];
			s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf];
			s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf];
			s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
		} else
			s0 = s1 = s2 = s3 = 0;

		r_levels[0] = s0;
		r_levels[1] = s1;
		r_levels[2] = s2;
		r_levels[3] = s3;

		r_levels += 4;
	}

	mutex_unlock(&chip->msg_lock);
	return err;
}

/* interrupt handling */
#define PCX_IRQ_NONE 0
#define IRQCS_ACTIVE_PCIDB	BIT(13)
#define IRQCS_ENABLE_PCIIRQ	BIT(8)
#define IRQCS_ENABLE_PCIDB	BIT(9)

static u32 lx_interrupt_test_ack(struct lx6464es *chip)
{
	u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);

	/* Test if PCI Doorbell interrupt is active */
	if (irqcs & IRQCS_ACTIVE_PCIDB) {
		u32 temp;
		irqcs = PCX_IRQ_NONE;

		while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
			/* RAZ interrupt */
			irqcs |= temp;
			lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
		}

		return irqcs;
	}
	return PCX_IRQ_NONE;
}

static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
			    int *r_async_pending, int *r_async_escmd)
{
	u32 irq_async;
	u32 irqsrc = lx_interrupt_test_ack(chip);

	if (irqsrc == PCX_IRQ_NONE)
		return 0;

	*r_irqsrc = irqsrc;

	irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
						     * (set by xilinx) + EOB */

	if (irq_async & MASK_SYS_STATUS_ESA) {
		irq_async &= ~MASK_SYS_STATUS_ESA;
		*r_async_escmd = 1;
	}

	if (irq_async) {
		/* dev_dbg(chip->card->dev, "interrupt: async event pending\n"); */
		*r_async_pending = 1;
	}

	return 1;
}

static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
					    int *r_freq_changed,
					    u64 *r_notified_in_pipe_mask,
					    u64 *r_notified_out_pipe_mask)
{
	int err;
	u32 stat[9];	/* answer from CMD_04_GET_EVENT */

	/* We can optimize this to not read dumb events.
	 * Answer words are in the following order:
	 * Stat[0] general status
	 * Stat[1] end of buffer OUT pF
	 * Stat[2] end of buffer OUT pf
	 * Stat[3] end of buffer IN pF
	 * Stat[4] end of buffer IN pf
	 * Stat[5] MSB underrun
	 * Stat[6] LSB underrun
	 * Stat[7] MSB overrun
	 * Stat[8] LSB overrun
	 * */

	int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
	int eb_pending_in = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

	*r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

	err = lx_dsp_read_async_events(chip, stat);
	if (err < 0)
		return err;

	if (eb_pending_in) {
		*r_notified_in_pipe_mask = ((u64)stat[3] << 32)
			+ stat[4];
		dev_dbg(chip->card->dev, "interrupt: EOBI pending %llx\n",
			*r_notified_in_pipe_mask);
	}
	if (eb_pending_out) {
		*r_notified_out_pipe_mask = ((u64)stat[1] << 32)
			+ stat[2];
		dev_dbg(chip->card->dev, "interrupt: EOBO pending %llx\n",
			*r_notified_out_pipe_mask);
	}

	/* todo: handle xrun notification */

	return err;
}

static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
					   struct lx_stream *lx_stream)
{
	struct snd_pcm_substream *substream = lx_stream->stream;
	const unsigned int is_capture = lx_stream->is_capture;
	int err;

	const u32 channels = substream->runtime->channels;
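	/* 3 bytes per channel and per frame: the buffers carry 24-bit samples */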
	const u32 bytes_per_frame = channels * 3;
	const u32 period_size = substream->runtime->period_size;
	const u32 period_bytes = period_size * bytes_per_frame;
	const u32 pos = lx_stream->frame_pos;
	const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
		0 : pos + 1;

	dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
	u32 buf_hi = 0;
	u32 buf_lo = 0;
	u32 buffer_index = 0;

	u32 needed, freed;
	u32 size_array[MAX_STREAM_BUFFER];

	dev_dbg(chip->card->dev, "->lx_interrupt_request_new_buffer\n");

	mutex_lock(&chip->lock);

	err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
	dev_dbg(chip->card->dev,
		"interrupt: needed %d, freed %d\n", needed, freed);

	unpack_pointer(buf, &buf_lo, &buf_hi);
	err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
			     &buffer_index);
	dev_dbg(chip->card->dev,
		"interrupt: gave buffer index %x on 0x%lx (%d bytes)\n",
		buffer_index, (unsigned long)buf, period_bytes);

	lx_stream->frame_pos = next_pos;
	mutex_unlock(&chip->lock);

	return err;
}

irqreturn_t lx_interrupt(int irq, void *dev_id)
{
	struct lx6464es *chip = dev_id;
	int async_pending, async_escmd;
	u32 irqsrc;
	bool wake_thread = false;

	dev_dbg(chip->card->dev,
		"**************************************************\n");

	if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
		dev_dbg(chip->card->dev, "IRQ_NONE\n");
		return IRQ_NONE; /* this device did not cause the interrupt */
	}

	if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
		return IRQ_HANDLED;

	if (irqsrc & MASK_SYS_STATUS_EOBI)
		dev_dbg(chip->card->dev, "interrupt: EOBI\n");

	if (irqsrc & MASK_SYS_STATUS_EOBO)
		dev_dbg(chip->card->dev, "interrupt: EOBO\n");

	if (irqsrc & MASK_SYS_STATUS_URUN)
		dev_dbg(chip->card->dev, "interrupt: URUN\n");

	if (irqsrc & MASK_SYS_STATUS_ORUN)
		dev_dbg(chip->card->dev, "interrupt: ORUN\n");

	if (async_pending) {
		wake_thread = true;
		chip->irqsrc = irqsrc;
	}

	if (async_escmd) {
		/* backdoor for ethersound commands
		 *
		 * for now, we do not need this
		 *
		 * */

		dev_dbg(chip->card->dev, "interrupt requests escmd handling\n");
	}

	return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

irqreturn_t lx_threaded_irq(int irq, void *dev_id)
{
	struct lx6464es *chip = dev_id;
	u64 notified_in_pipe_mask = 0;
	u64 notified_out_pipe_mask = 0;
	int freq_changed;
	int err;

	/* handle async events */
	err = lx_interrupt_handle_async_events(chip, chip->irqsrc,
					       &freq_changed,
					       &notified_in_pipe_mask,
					       &notified_out_pipe_mask);
	if (err)
		dev_err(chip->card->dev, "error handling async events\n");

	if (notified_in_pipe_mask) {
		struct lx_stream *lx_stream = &chip->capture_stream;

		dev_dbg(chip->card->dev,
			"requesting audio transfer for capture\n");
		err = lx_interrupt_request_new_buffer(chip, lx_stream);
		if (err < 0)
			dev_err(chip->card->dev,
				"cannot request new buffer for capture\n");
		snd_pcm_period_elapsed(lx_stream->stream);
	}

	if (notified_out_pipe_mask) {
		struct lx_stream *lx_stream = &chip->playback_stream;

		dev_dbg(chip->card->dev,
			"requesting audio transfer for playback\n");
		err = lx_interrupt_request_new_buffer(chip, lx_stream);
		if (err < 0)
			dev_err(chip->card->dev,
				"cannot request new buffer for playback\n");
		snd_pcm_period_elapsed(lx_stream->stream);
	}

	return IRQ_HANDLED;
}


static void lx_irq_set(struct lx6464es *chip, int enable)
{
	u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);

	/* enable/disable interrupts
	 *
	 * Set the Doorbell and PCI interrupt enable bits
	 *
	 * */
	if (enable)
		reg |=  (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
	else
		reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
	lx_plx_reg_write(chip, ePLX_IRQCS, reg);
}

void lx_irq_enable(struct lx6464es *chip)
{
	dev_dbg(chip->card->dev, "->lx_irq_enable\n");
	lx_irq_set(chip, 1);
}

void lx_irq_disable(struct lx6464es *chip)
{
	dev_dbg(chip->card->dev, "->lx_irq_disable\n");
	lx_irq_set(chip, 0);
}