// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//	    Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for audio DSP on Cannonlake.
 */

#include <sound/sof/ext_manifest4.h>
#include <sound/sof/ipc4/header.h>
#include <trace/events/sof_intel.h>
#include "../ipc4-priv.h"
#include "../ops.h"
#include "hda.h"
#include "hda-ipc.h"
#include "../sof-audio.h"

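/* debugfs entries exposing the HDA, PP and DSP register windows */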
static const struct snd_sof_debugfs_map cnl_dsp_debugfs[] = {
	{"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
	{"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
	{"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
};

static void cnl_ipc_host_done(struct snd_sof_dev *sdev);
static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev);

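/*
 * IPC4 interrupt thread: handles both the DONE interrupt (the DSP has
 * accepted a host-initiated message) and the BUSY interrupt (a reply or
 * notification has arrived from the DSP).
 */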
irqreturn_t cnl_ipc4_irq_thread(int irq, void *context)
{
	struct sof_ipc4_msg notification_data = {{ 0 }};
	struct snd_sof_dev *sdev = context;
	bool ack_received = false;
	bool ipc_irq = false;
	u32 hipcida, hipctdr;

	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
	if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
		/* DSP received the message */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
					CNL_DSP_REG_HIPCCTL,
					CNL_DSP_REG_HIPCCTL_DONE, 0);
		cnl_ipc_dsp_done(sdev);

		ipc_irq = true;
		ack_received = true;
	}

	if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
		/* Message from DSP (reply or notification) */
		u32 hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
					       CNL_DSP_REG_HIPCTDD);
		u32 primary = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
		u32 extension = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;

		if (primary & SOF_IPC4_MSG_DIR_MASK) {
			/* Reply received */
			if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
				struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;

				data->primary = primary;
				data->extension = extension;

				spin_lock_irq(&sdev->ipc_lock);

				snd_sof_ipc_get_reply(sdev);
				cnl_ipc_host_done(sdev);
				snd_sof_ipc_reply(sdev, data->primary);

				spin_unlock_irq(&sdev->ipc_lock);
			} else {
				dev_dbg_ratelimited(sdev->dev,
						    "IPC reply before FW_READY: %#x|%#x\n",
						    primary, extension);
			}
		} else {
			/* Notification received */
			notification_data.primary = primary;
			notification_data.extension = extension;

			sdev->ipc->msg.rx_data = &notification_data;
			snd_sof_ipc_msgs_rx(sdev);
			sdev->ipc->msg.rx_data = NULL;

			/* Let DSP know that we have finished processing the message */
			cnl_ipc_host_done(sdev);
		}

		ipc_irq = true;
	}

	if (!ipc_irq)
		/* This interrupt is not shared so no need to return IRQ_NONE. */
		dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");

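	/*
	 * If a message was deferred in cnl_ipc4_send_msg() because the DSP
	 * was busy, try sending it now that the ack has arrived.
	 */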
	if (ack_received) {
		struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;

		if (hdev->delayed_ipc_tx_msg)
			cnl_ipc4_send_msg(sdev, hdev->delayed_ipc_tx_msg);
	}

	return IRQ_HANDLED;
}

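/*
 * IPC3 interrupt thread: processes the DONE interrupt (reply to a
 * host-initiated message) and the BUSY interrupt (a new message from the
 * DSP, including firmware panic notifications).
 */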
irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	u32 hipci;
	u32 hipcida;
	u32 hipctdr;
	u32 hipctdd;
	u32 msg;
	u32 msg_ext;
	bool ipc_irq = false;

	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
	hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDD);
	hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR);

	/* reply message from DSP */
	if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
		msg_ext = hipci & CNL_DSP_REG_HIPCIDR_MSG_MASK;
		msg = hipcida & CNL_DSP_REG_HIPCIDA_MSG_MASK;

		trace_sof_intel_ipc_firmware_response(sdev, msg, msg_ext);

		/* mask Done interrupt */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
					CNL_DSP_REG_HIPCCTL,
					CNL_DSP_REG_HIPCCTL_DONE, 0);

		if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
			spin_lock_irq(&sdev->ipc_lock);

			/* handle immediate reply from DSP core */
			hda_dsp_ipc_get_reply(sdev);
			snd_sof_ipc_reply(sdev, msg);

			cnl_ipc_dsp_done(sdev);

			spin_unlock_irq(&sdev->ipc_lock);
		} else {
			dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
					    msg);
		}

		ipc_irq = true;
	}

	/* new message from DSP */
	if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
		msg = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
		msg_ext = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;

		trace_sof_intel_ipc_firmware_initiated(sdev, msg, msg_ext);

		/* handle messages from DSP */
		if ((hipctdr & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
			struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
			bool non_recoverable = true;

			/*
			 * This is a PANIC message!
			 *
			 * If it arrives during firmware boot and this is not
			 * the last boot attempt, set non_recoverable to false
			 * as the DSP might be able to boot in the next
			 * iteration(s).
			 */
			if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS &&
			    hda->boot_iteration < HDA_FW_BOOT_ATTEMPTS)
				non_recoverable = false;

			snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext),
					  non_recoverable);
		} else {
			snd_sof_ipc_msgs_rx(sdev);
		}

		cnl_ipc_host_done(sdev);

		ipc_irq = true;
	}

	if (!ipc_irq) {
		/*
		 * This interrupt is not shared so no need to return IRQ_NONE.
		 */
		dev_dbg_ratelimited(sdev->dev,
				    "nothing to do in IPC IRQ thread\n");
	}

	return IRQ_HANDLED;
}

static void cnl_ipc_host_done(struct snd_sof_dev *sdev)
{
	/*
	 * clear the BUSY interrupt to tell the DSP controller that this
	 * interrupt has been accepted and should not be triggered again
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       CNL_DSP_REG_HIPCTDR,
				       CNL_DSP_REG_HIPCTDR_BUSY,
				       CNL_DSP_REG_HIPCTDR_BUSY);
	/*
	 * set the DONE bit to ack the DSP that the message has been
	 * processed and to send the reply back to the DSP
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       CNL_DSP_REG_HIPCTDA,
				       CNL_DSP_REG_HIPCTDA_DONE,
				       CNL_DSP_REG_HIPCTDA_DONE);
}

static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev)
{
	/*
	 * set the DONE bit - tell the DSP we have received and processed
	 * its reply, so it does not keep the reply pending
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       CNL_DSP_REG_HIPCIDA,
				       CNL_DSP_REG_HIPCIDA_DONE,
				       CNL_DSP_REG_HIPCIDA_DONE);

	/* unmask Done interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
				CNL_DSP_REG_HIPCCTL,
				CNL_DSP_REG_HIPCCTL_DONE,
				CNL_DSP_REG_HIPCCTL_DONE);
}

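/*
 * A compact IPC fits entirely in the HIPCIDR/HIPCIDD register pair, so
 * no mailbox transfer is needed. Returns true and fills in the register
 * values if the message can be sent in compact form.
 */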
static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
				     u32 *dr, u32 *dd)
{
	struct sof_ipc_pm_gate *pm_gate = msg->msg_data;

	if (pm_gate->hdr.cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
		/* send the compact message via the primary register */
		*dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;

		/* send payload via the extended data register */
		*dd = pm_gate->flags;

		return true;
	}

	return false;
}

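/*
 * If the downlink is still busy with a previous message, stash the new
 * message in delayed_ipc_tx_msg; it is sent from the IRQ thread once the
 * pending ack arrives.
 */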
int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
	struct sof_ipc4_msg *msg_data = msg->msg_data;

	if (hda_ipc4_tx_is_busy(sdev)) {
		hdev->delayed_ipc_tx_msg = msg;
		return 0;
	}

	hdev->delayed_ipc_tx_msg = NULL;

	/* send the message via mailbox */
	if (msg_data->data_size)
		sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
				  msg_data->data_size);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD, msg_data->extension);
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
			  msg_data->primary | CNL_DSP_REG_HIPCIDR_BUSY);

	hda_dsp_ipc4_schedule_d0i3_work(hdev, msg);

	return 0;
}

int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
	struct sof_ipc_cmd_hdr *hdr;
	u32 dr = 0;
	u32 dd = 0;

	/*
	 * Currently the only compact IPC supported is the PM_GATE
	 * IPC, which is used for transitioning the DSP between the
	 * D0I0 and D0I3 states, and it is sent only during the
	 * set_power_state() op. Therefore, there will never be a case
	 * where a compact IPC results in the DSP exiting D0I3 without
	 * the host and FW being in sync.
	 */
	if (cnl_compact_ipc_compress(msg, &dr, &dd)) {
		/* send the message via IPC registers */
		snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD,
				  dd);
		snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
				  CNL_DSP_REG_HIPCIDR_BUSY | dr);
		return 0;
	}

	/* send the message via mailbox */
	sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
			  msg->msg_size);
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
			  CNL_DSP_REG_HIPCIDR_BUSY);

	hdr = msg->msg_data;

	/*
	 * Use mod_delayed_work() to schedule the delayed work
	 * to avoid scheduling multiple workqueue items when
	 * IPCs are sent at a high rate. mod_delayed_work()
	 * modifies the timer if the work is pending.
	 * Also, a new delayed work should not be queued after the
	 * CTX_SAVE IPC, which is sent before the DSP enters D3.
	 */
	if (hdr->cmd != (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE))
		mod_delayed_work(system_wq, &hdev->d0i3_work,
				 msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));

	return 0;
}

void cnl_ipc_dump(struct snd_sof_dev *sdev)
{
	u32 hipcctl;
	u32 hipcida;
	u32 hipctdr;

	hda_ipc_irq_dump(sdev);

	/* read IPC status */
	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
	hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);

	/* dump the IPC regs */
	/* TODO: parse the raw msg */
	dev_err(sdev->dev,
		"error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
		hipcida, hipctdr, hipcctl);
}

void cnl_ipc4_dump(struct snd_sof_dev *sdev)
{
	u32 hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl;

	hda_ipc_irq_dump(sdev);

	hipcidr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR);
	hipcidd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD);
	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
	hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDD);
	hipctda = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDA);
	hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);

	/* dump the IPC regs */
	/* TODO: parse the raw msg */
	dev_err(sdev->dev,
		"Host IPC initiator: %#x|%#x|%#x, target: %#x|%#x|%#x, ctl: %#x\n",
		hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl);
}

/* cannonlake ops */
struct snd_sof_dsp_ops sof_cnl_ops;
EXPORT_SYMBOL_NS(sof_cnl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);

int sof_cnl_ops_init(struct snd_sof_dev *sdev)
{
	/* common defaults */
	memcpy(&sof_cnl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));

	/* probe/remove/shutdown */
	sof_cnl_ops.shutdown = hda_dsp_shutdown;

	/* ipc */
	if (sdev->pdata->ipc_type == SOF_IPC) {
		/* doorbell */
		sof_cnl_ops.irq_thread = cnl_ipc_irq_thread;

		/* ipc */
		sof_cnl_ops.send_msg = cnl_ipc_send_msg;

		/* debug */
		sof_cnl_ops.ipc_dump = cnl_ipc_dump;

		sof_cnl_ops.set_power_state = hda_dsp_set_power_state_ipc3;
	}

	if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
		struct sof_ipc4_fw_data *ipc4_data;

		sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
		if (!sdev->private)
			return -ENOMEM;

		ipc4_data = sdev->private;
		ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;

		ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_1_8;

		/* External library loading support */
		ipc4_data->load_library = hda_dsp_ipc4_load_library;

		/* doorbell */
		sof_cnl_ops.irq_thread = cnl_ipc4_irq_thread;

		/* ipc */
		sof_cnl_ops.send_msg = cnl_ipc4_send_msg;

		/* debug */
		sof_cnl_ops.ipc_dump = cnl_ipc4_dump;

		sof_cnl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
	}

	/* set DAI driver ops */
	hda_set_dai_drv_ops(sdev, &sof_cnl_ops);

	/* debug */
	sof_cnl_ops.debug_map = cnl_dsp_debugfs;
	sof_cnl_ops.debug_map_count = ARRAY_SIZE(cnl_dsp_debugfs);

	/* pre/post fw run */
	sof_cnl_ops.post_fw_run = hda_dsp_post_fw_run;

	/* firmware run */
	sof_cnl_ops.run = hda_dsp_cl_boot_firmware;

	/* dsp core get/put */
	sof_cnl_ops.core_get = hda_dsp_core_get;

	return 0;
}
EXPORT_SYMBOL_NS(sof_cnl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);

const struct sof_intel_dsp_desc cnl_chip_info = {
	/* Cannonlake */
	.cores_num = 4,
	.init_core_mask = 1,
	.host_managed_cores_mask = GENMASK(3, 0),
	.ipc_req = CNL_DSP_REG_HIPCIDR,
	.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
	.ipc_ack = CNL_DSP_REG_HIPCIDA,
	.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
	.ipc_ctl = CNL_DSP_REG_HIPCCTL,
	.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
	.rom_init_timeout = 300,
	.ssp_count = CNL_SSP_COUNT,
	.ssp_base_offset = CNL_SSP_BASE_OFFSET,
	.sdw_shim_base = SDW_SHIM_BASE,
	.sdw_alh_base = SDW_ALH_BASE,
	.d0i3_offset = SOF_HDA_VS_D0I3C,
	.read_sdw_lcount = hda_sdw_check_lcount_common,
	.enable_sdw_irq = hda_common_enable_sdw_irq,
	.check_sdw_irq = hda_common_check_sdw_irq,
	.check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
	.check_ipc_irq = hda_dsp_check_ipc_irq,
	.cl_init = cl_dsp_init,
	.power_down_dsp = hda_power_down_dsp,
	.disable_interrupts = hda_dsp_disable_interrupts,
	.hw_ip_version = SOF_INTEL_CAVS_1_8,
};
EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);

/*
 * JasperLake is technically derived from IceLake and should be described
 * in icl.c. However, since JasperLake was designed with two cores, it
 * cannot support the IceLake-specific power-up sequences, which rely on
 * core3. To simplify, JasperLake uses the CannonLake ops and is described
 * in cnl.c.
 */
const struct sof_intel_dsp_desc jsl_chip_info = {
	/* Jasperlake */
	.cores_num = 2,
	.init_core_mask = 1,
	.host_managed_cores_mask = GENMASK(1, 0),
	.ipc_req = CNL_DSP_REG_HIPCIDR,
	.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
	.ipc_ack = CNL_DSP_REG_HIPCIDA,
	.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
	.ipc_ctl = CNL_DSP_REG_HIPCCTL,
	.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
	.rom_init_timeout = 300,
	.ssp_count = ICL_SSP_COUNT,
	.ssp_base_offset = CNL_SSP_BASE_OFFSET,
	.sdw_shim_base = SDW_SHIM_BASE,
	.sdw_alh_base = SDW_ALH_BASE,
	.d0i3_offset = SOF_HDA_VS_D0I3C,
	.read_sdw_lcount = hda_sdw_check_lcount_common,
	.enable_sdw_irq = hda_common_enable_sdw_irq,
	.check_sdw_irq = hda_common_check_sdw_irq,
	.check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
	.check_ipc_irq = hda_dsp_check_ipc_irq,
	.cl_init = cl_dsp_init,
	.power_down_dsp = hda_power_down_dsp,
	.disable_interrupts = hda_dsp_disable_interrupts,
	.hw_ip_version = SOF_INTEL_CAVS_2_0,
};
EXPORT_SYMBOL_NS(jsl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);