//------------------------------------------------------------------------------
// <copyright file="ar6k.c" company="Atheros">
// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
//
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
//
//------------------------------------------------------------------------------
//==============================================================================
// AR6K device layer that handles register level I/O
//
// Author(s): ="Atheros"
//==============================================================================

#include "a_config.h"
#include "athdefs.h"
#include "a_types.h"
#include "AR6002/hw2.0/hw/mbox_host_reg.h"
#include "a_osapi.h"
#include "../htc_debug.h"
#include "hif.h"
#include "htc_packet.h"
#include "ar6k.h"

#define MAILBOX_FOR_BLOCK_SIZE 1

int DevEnableInterrupts(struct ar6k_device *pDev);
int DevDisableInterrupts(struct ar6k_device *pDev);

static void DevCleanupVirtualScatterSupport(struct ar6k_device *pDev);

void AR6KFreeIOPacket(struct ar6k_device *pDev, struct htc_packet *pPacket)
{
    LOCK_AR6K(pDev);
    HTC_PACKET_ENQUEUE(&pDev->RegisterIOList,pPacket);
    UNLOCK_AR6K(pDev);
}

struct htc_packet *AR6KAllocIOPacket(struct ar6k_device *pDev)
{
    struct htc_packet *pPacket;

    LOCK_AR6K(pDev);
    pPacket = HTC_PACKET_DEQUEUE(&pDev->RegisterIOList);
    UNLOCK_AR6K(pDev);

    return pPacket;
}

void DevCleanup(struct ar6k_device *pDev)
{
    DevCleanupGMbox(pDev);

    if (pDev->HifAttached) {
        HIFDetachHTC(pDev->HIFDevice);
        pDev->HifAttached = false;
    }

    DevCleanupVirtualScatterSupport(pDev);

    if (A_IS_MUTEX_VALID(&pDev->Lock)) {
        A_MUTEX_DELETE(&pDev->Lock);
    }
}

int DevSetup(struct ar6k_device *pDev)
{
    u32 blocksizes[AR6K_MAILBOXES];
    int status = 0;
    int i;
    HTC_CALLBACKS htcCallbacks;

    do {

        DL_LIST_INIT(&pDev->ScatterReqHead);
        /* initialize our free list of IO packets */
        INIT_HTC_PACKET_QUEUE(&pDev->RegisterIOList);
        A_MUTEX_INIT(&pDev->Lock);

        A_MEMZERO(&htcCallbacks, sizeof(HTC_CALLBACKS));
        /* the device layer handles these */
        htcCallbacks.rwCompletionHandler = DevRWCompletionHandler;
        htcCallbacks.dsrHandler = DevDsrHandler;
        htcCallbacks.context = pDev;

        status = HIFAttachHTC(pDev->HIFDevice, &htcCallbacks);

        if (status) {
            break;
        }

        pDev->HifAttached = true;

        /* get the addresses for all 4 mailboxes */
        status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
                                    &pDev->MailBoxInfo, sizeof(pDev->MailBoxInfo));

        if (status) {
            A_ASSERT(false);
            break;
        }

        /* carve up register I/O packets (these are for ASYNC register I/O) */
        for (i = 0; i < AR6K_MAX_REG_IO_BUFFERS; i++) {
            struct htc_packet *pIOPacket;
            pIOPacket = &pDev->RegIOBuffers[i].HtcPacket;
            SET_HTC_PACKET_INFO_RX_REFILL(pIOPacket,
                                          pDev,
                                          pDev->RegIOBuffers[i].Buffer,
                                          AR6K_REG_IO_BUFFER_SIZE,
                                          0); /* don't care */
            AR6KFreeIOPacket(pDev,pIOPacket);
        }

        /* get the block sizes */
        status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
                                    blocksizes, sizeof(blocksizes));

        if (status) {
            A_ASSERT(false);
            break;
        }

        /* note: we actually get the block size of a mailbox other than 0; for SDIO the block
         * size on mailbox 0 is artificially set to 1, so we use the block size that is set
         * for the other 3 mailboxes */
        pDev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
        /* must be a power of 2 */
        A_ASSERT((pDev->BlockSize & (pDev->BlockSize - 1)) == 0);

        /* assemble mask, used for padding to a block */
        pDev->BlockMask = pDev->BlockSize - 1;
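
        /* Worked example (illustrative value, not read from hardware): with a
         * BlockSize of 128, BlockMask is 0x7F, and a transfer of length len is
         * padded to a block boundary as (len + pDev->BlockMask) & ~pDev->BlockMask,
         * e.g. 100 bytes -> 128 bytes. */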

        AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("BlockSize: %d, MailboxAddress:0x%X \n",
                        pDev->BlockSize, pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX]));

        pDev->GetPendingEventsFunc = NULL;
        /* see if the HIF layer implements the get pending events function */
        HIFConfigureDevice(pDev->HIFDevice,
                           HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
                           &pDev->GetPendingEventsFunc,
                           sizeof(pDev->GetPendingEventsFunc));

        /* assume we can process HIF interrupt events asynchronously */
        pDev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;

        /* see if the HIF layer overrides this assumption */
        HIFConfigureDevice(pDev->HIFDevice,
                           HIF_DEVICE_GET_IRQ_PROC_MODE,
                           &pDev->HifIRQProcessingMode,
                           sizeof(pDev->HifIRQProcessingMode));

        switch (pDev->HifIRQProcessingMode) {
        case HIF_DEVICE_IRQ_SYNC_ONLY:
            AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("HIF Interrupt processing is SYNC ONLY\n"));
            /* see if HIF layer wants HTC to yield */
            HIFConfigureDevice(pDev->HIFDevice,
                               HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
                               &pDev->HifIRQYieldParams,
                               sizeof(pDev->HifIRQYieldParams));

            if (pDev->HifIRQYieldParams.RecvPacketYieldCount > 0) {
                AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
                    ("HIF requests that DSR yield per %d RECV packets \n",
                     pDev->HifIRQYieldParams.RecvPacketYieldCount));
                pDev->DSRCanYield = true;
            }
            break;
        case HIF_DEVICE_IRQ_ASYNC_SYNC:
            AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF Interrupt processing is ASYNC and SYNC\n"));
            break;
        default:
            A_ASSERT(false);
        }

        pDev->HifMaskUmaskRecvEvent = NULL;

        /* see if the HIF layer implements the mask/unmask recv events function */
        HIFConfigureDevice(pDev->HIFDevice,
                           HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
                           &pDev->HifMaskUmaskRecvEvent,
                           sizeof(pDev->HifMaskUmaskRecvEvent));

        AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF special overrides : 0x%lX , 0x%lX\n",
                        (unsigned long)pDev->GetPendingEventsFunc, (unsigned long)pDev->HifMaskUmaskRecvEvent));

        status = DevDisableInterrupts(pDev);

        if (status) {
            break;
        }

        status = DevSetupGMbox(pDev);

    } while (false);

    if (status) {
        if (pDev->HifAttached) {
            HIFDetachHTC(pDev->HIFDevice);
            pDev->HifAttached = false;
        }
    }

    return status;
}

int DevEnableInterrupts(struct ar6k_device *pDev)
{
    int status;
    struct ar6k_irq_enable_registers regs;

    LOCK_AR6K(pDev);

    /* Enable all the interrupts except for the internal AR6000 CPU interrupt */
    pDev->IrqEnableRegisters.int_status_enable = INT_STATUS_ENABLE_ERROR_SET(0x01) |
                                                 INT_STATUS_ENABLE_CPU_SET(0x01) |
                                                 INT_STATUS_ENABLE_COUNTER_SET(0x01);

    if (NULL == pDev->GetPendingEventsFunc) {
        pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    } else {
        /* The HIF layer provided us with a pending events function which means that
         * the detection of pending mbox messages is handled in the HIF layer.
         * This is the case for the SPI2 interface.
         * In the normal case we enable MBOX interrupts; for HIFs
         * that offer this mechanism, we keep these interrupts
         * masked */
        pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    }

    /* Set up the CPU Interrupt Status Register */
    pDev->IrqEnableRegisters.cpu_int_status_enable = CPU_INT_STATUS_ENABLE_BIT_SET(0x00);

    /* Set up the Error Interrupt Status Register */
    pDev->IrqEnableRegisters.error_status_enable =
        ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01) |
        ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01);

    /* Set up the Counter Interrupt Status Register (only for debug interrupt to catch fatal errors) */
    pDev->IrqEnableRegisters.counter_int_status_enable =
        COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK);

    /* copy into our temp area */
    memcpy(&regs, &pDev->IrqEnableRegisters, AR6K_IRQ_ENABLE_REGS_SIZE);

    UNLOCK_AR6K(pDev);

    /* always synchronous */
    status = HIFReadWrite(pDev->HIFDevice,
                          INT_STATUS_ENABLE_ADDRESS,
                          &regs.int_status_enable,
                          AR6K_IRQ_ENABLE_REGS_SIZE,
                          HIF_WR_SYNC_BYTE_INC,
                          NULL);

    if (status) {
        /* Can't write it for some reason */
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            ("Failed to update interrupt control registers err: %d\n", status));
    }

    return status;
}

int DevDisableInterrupts(struct ar6k_device *pDev)
{
    struct ar6k_irq_enable_registers regs;

    LOCK_AR6K(pDev);
    /* Disable all interrupts */
    pDev->IrqEnableRegisters.int_status_enable = 0;
    pDev->IrqEnableRegisters.cpu_int_status_enable = 0;
    pDev->IrqEnableRegisters.error_status_enable = 0;
    pDev->IrqEnableRegisters.counter_int_status_enable = 0;
    /* copy into our temp area */
    memcpy(&regs, &pDev->IrqEnableRegisters, AR6K_IRQ_ENABLE_REGS_SIZE);

    UNLOCK_AR6K(pDev);

    /* always synchronous */
    return HIFReadWrite(pDev->HIFDevice,
                        INT_STATUS_ENABLE_ADDRESS,
                        &regs.int_status_enable,
                        AR6K_IRQ_ENABLE_REGS_SIZE,
                        HIF_WR_SYNC_BYTE_INC,
                        NULL);
}

/* enable device interrupts */
int DevUnmaskInterrupts(struct ar6k_device *pDev)
{
    /* for good measure, make sure interrupts are disabled before unmasking at the HIF
     * layer.
     * The rationale here is that between device insertion (where we clear the interrupts the first time)
     * and when HTC is finally ready to handle interrupts, other software can perform target "soft" resets.
     * The AR6K interrupt enables reset back to an "enabled" state when this happens.
     * */
    int IntStatus = 0;
    DevDisableInterrupts(pDev);

#ifdef THREAD_X
    // To be verified...
    IntStatus = DevEnableInterrupts(pDev);
    /* Unmask the host controller interrupts */
    HIFUnMaskInterrupt(pDev->HIFDevice);
#else
    /* Unmask the host controller interrupts */
    HIFUnMaskInterrupt(pDev->HIFDevice);
    IntStatus = DevEnableInterrupts(pDev);
#endif

    return IntStatus;
}

/* disable all device interrupts */
int DevMaskInterrupts(struct ar6k_device *pDev)
{
    /* mask the interrupt at the HIF layer, we don't want a stray interrupt taken while
     * we zero out our shadow registers in DevDisableInterrupts() */
    HIFMaskInterrupt(pDev->HIFDevice);

    return DevDisableInterrupts(pDev);
}

/* callback when our fetch to enable/disable completes */
static void DevDoEnableDisableRecvAsyncHandler(void *Context, struct htc_packet *pPacket)
{
    struct ar6k_device *pDev = (struct ar6k_device *)Context;

    AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDoEnableDisableRecvAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));

    if (pPacket->Status) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            (" Failed to disable receiver, status:%d \n", pPacket->Status));
    }
    /* free this IO packet */
    AR6KFreeIOPacket(pDev,pPacket);
    AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDoEnableDisableRecvAsyncHandler \n"));
}

/* enable/disable packet reception (disabling is used in case the host runs out of buffers);
 * this is the "override" method, used when the HIF reports an alternate method to
 * disable recv events */
static int DevDoEnableDisableRecvOverride(struct ar6k_device *pDev, bool EnableRecv, bool AsyncMode)
{
    int status = 0;
    struct htc_packet *pIOPacket = NULL;

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("DevDoEnableDisableRecvOverride: Enable:%d Mode:%d\n",
                    EnableRecv,AsyncMode));

    do {

        if (AsyncMode) {

            pIOPacket = AR6KAllocIOPacket(pDev);

            if (NULL == pIOPacket) {
                status = A_NO_MEMORY;
                A_ASSERT(false);
                break;
            }

            /* stick in our completion routine when the I/O operation completes */
            pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
            pIOPacket->pContext = pDev;

            /* call the HIF layer override and do this asynchronously */
            status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
                                                 EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
                                                 pIOPacket);
            break;
        }

        /* if we get here we are doing it synchronously */
        status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
                                             EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
                                             NULL);

    } while (false);

    if (status && (pIOPacket != NULL)) {
        AR6KFreeIOPacket(pDev,pIOPacket);
    }

    return status;
}

/* enable/disable packet reception (disabling is used in case the host runs out of buffers);
 * this is the "normal" method, using the interrupt enable registers through
 * the host I/F */
static int DevDoEnableDisableRecvNormal(struct ar6k_device *pDev, bool EnableRecv, bool AsyncMode)
{
    int status = 0;
    struct htc_packet *pIOPacket = NULL;
    struct ar6k_irq_enable_registers regs;

    /* take the lock to protect interrupt enable shadows */
    LOCK_AR6K(pDev);

    if (EnableRecv) {
        pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    } else {
        pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    }

    /* copy into our temp area */
    memcpy(&regs, &pDev->IrqEnableRegisters, AR6K_IRQ_ENABLE_REGS_SIZE);
    UNLOCK_AR6K(pDev);

    do {

        if (AsyncMode) {

            pIOPacket = AR6KAllocIOPacket(pDev);

            if (NULL == pIOPacket) {
                status = A_NO_MEMORY;
                A_ASSERT(false);
                break;
            }

            /* copy values to write to our async I/O buffer */
            memcpy(pIOPacket->pBuffer, &regs, AR6K_IRQ_ENABLE_REGS_SIZE);

            /* stick in our completion routine when the I/O operation completes */
            pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
            pIOPacket->pContext = pDev;

            /* write it out asynchronously */
            HIFReadWrite(pDev->HIFDevice,
                         INT_STATUS_ENABLE_ADDRESS,
                         pIOPacket->pBuffer,
                         AR6K_IRQ_ENABLE_REGS_SIZE,
                         HIF_WR_ASYNC_BYTE_INC,
                         pIOPacket);
            break;
        }

        /* if we get here we are doing it synchronously */

        status = HIFReadWrite(pDev->HIFDevice,
                              INT_STATUS_ENABLE_ADDRESS,
                              &regs.int_status_enable,
                              AR6K_IRQ_ENABLE_REGS_SIZE,
                              HIF_WR_SYNC_BYTE_INC,
                              NULL);

    } while (false);

    if (status && (pIOPacket != NULL)) {
        AR6KFreeIOPacket(pDev,pIOPacket);
    }

    return status;
}

int DevStopRecv(struct ar6k_device *pDev, bool AsyncMode)
{
    if (NULL == pDev->HifMaskUmaskRecvEvent) {
        return DevDoEnableDisableRecvNormal(pDev,false,AsyncMode);
    } else {
        return DevDoEnableDisableRecvOverride(pDev,false,AsyncMode);
    }
}

int DevEnableRecv(struct ar6k_device *pDev, bool AsyncMode)
{
    if (NULL == pDev->HifMaskUmaskRecvEvent) {
        return DevDoEnableDisableRecvNormal(pDev,true,AsyncMode);
    } else {
        return DevDoEnableDisableRecvOverride(pDev,true,AsyncMode);
    }
}

int DevWaitForPendingRecv(struct ar6k_device *pDev, u32 TimeoutInMs, bool *pbIsRecvPending)
{
    int status = 0;
    u8 host_int_status = 0x0;
    u32 counter = 0x0;

    /* poll in 100 ms intervals, enforcing a minimum timeout of one interval */
    if (TimeoutInMs < 100) {
        TimeoutInMs = 100;
    }

    counter = TimeoutInMs / 100;

    do {
        /* Read the Host Interrupt Status Register */
        status = HIFReadWrite(pDev->HIFDevice,
                              HOST_INT_STATUS_ADDRESS,
                              &host_int_status,
                              sizeof(u8),
                              HIF_RD_SYNC_BYTE_INC,
                              NULL);
        if (status) {
            AR_DEBUG_PRINTF(ATH_LOG_ERR,("DevWaitForPendingRecv:Read HOST_INT_STATUS_ADDRESS Failed 0x%X\n",status));
            break;
        }

        /* isolate bit 0, the mailbox 0 data-pending indication */
        host_int_status &= (1 << 0);
        if (!host_int_status) {
            status = 0;
            *pbIsRecvPending = false;
            break;
        } else {
            *pbIsRecvPending = true;
        }

        A_MDELAY(100);

        counter--;

    } while (counter);
    return status;
}

void DevDumpRegisters(struct ar6k_device *pDev,
                      struct ar6k_irq_proc_registers *pIrqProcRegs,
                      struct ar6k_irq_enable_registers *pIrqEnableRegs)
{

    AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\n<------- Register Table -------->\n"));

    if (pIrqProcRegs != NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Host Int Status: 0x%x\n",pIrqProcRegs->host_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("CPU Int Status: 0x%x\n",pIrqProcRegs->cpu_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Error Int Status: 0x%x\n",pIrqProcRegs->error_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Counter Int Status: 0x%x\n",pIrqProcRegs->counter_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Mbox Frame: 0x%x\n",pIrqProcRegs->mbox_frame));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead Valid: 0x%x\n",pIrqProcRegs->rx_lookahead_valid));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead 0: 0x%x\n",pIrqProcRegs->rx_lookahead[0]));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead 1: 0x%x\n",pIrqProcRegs->rx_lookahead[1]));

        if (pDev->MailBoxInfo.GMboxAddress != 0) {
            /* if the target supports GMBOX hardware, dump some additional state */
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX Host Int Status 2: 0x%x\n",pIrqProcRegs->host_int_status2));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX RX Avail: 0x%x\n",pIrqProcRegs->gmbox_rx_avail));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX lookahead alias 0: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[0]));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX lookahead alias 1: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[1]));
        }

    }

    if (pIrqEnableRegs != NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Int Status Enable: 0x%x\n",pIrqEnableRegs->int_status_enable));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Counter Int Status Enable: 0x%x\n",pIrqEnableRegs->counter_int_status_enable));
    }
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("<------------------------------->\n"));
}


#define DEV_GET_VIRT_DMA_INFO(p) ((struct dev_scatter_dma_virtual_info *)((p)->HIFPrivate[0]))

static struct hif_scatter_req *DevAllocScatterReq(struct hif_device *Context)
{
    struct dl_list *pItem;
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    LOCK_AR6K(pDev);
    pItem = DL_ListRemoveItemFromHead(&pDev->ScatterReqHead);
    UNLOCK_AR6K(pDev);
    if (pItem != NULL) {
        return A_CONTAINING_STRUCT(pItem, struct hif_scatter_req, ListLink);
    }
    return NULL;
}

static void DevFreeScatterReq(struct hif_device *Context, struct hif_scatter_req *pReq)
{
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    LOCK_AR6K(pDev);
    DL_ListInsertTail(&pDev->ScatterReqHead, &pReq->ListLink);
    UNLOCK_AR6K(pDev);
}

int DevCopyScatterListToFromDMABuffer(struct hif_scatter_req *pReq, bool FromDMA)
{
    u8 *pDMABuffer = NULL;
    int i, remaining;
    u32 length;

    pDMABuffer = pReq->pScatterBounceBuffer;

    if (pDMABuffer == NULL) {
        A_ASSERT(false);
        return A_EINVAL;
    }

    remaining = (int)pReq->TotalLength;

    for (i = 0; i < pReq->ValidScatterEntries; i++) {

        length = min((int)pReq->ScatterList[i].Length, remaining);

        if (length != (int)pReq->ScatterList[i].Length) {
            A_ASSERT(false);
            /* there is a problem with the scatter list */
            return A_EINVAL;
        }

        if (FromDMA) {
            /* from DMA buffer */
            memcpy(pReq->ScatterList[i].pBuffer, pDMABuffer, length);
        } else {
            /* to DMA buffer */
            memcpy(pDMABuffer, pReq->ScatterList[i].pBuffer, length);
        }

        pDMABuffer += length;
        remaining -= length;
    }

    return 0;
}

static void DevReadWriteScatterAsyncHandler(void *Context, struct htc_packet *pPacket)
{
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    struct hif_scatter_req *pReq = (struct hif_scatter_req *)pPacket->pPktContext;

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevReadWriteScatterAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));

    pReq->CompletionStatus = pPacket->Status;

    AR6KFreeIOPacket(pDev,pPacket);

    pReq->CompletionRoutine(pReq);

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevReadWriteScatterAsyncHandler \n"));
}

static int DevReadWriteScatter(struct hif_device *Context, struct hif_scatter_req *pReq)
{
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    int status = 0;
    struct htc_packet *pIOPacket = NULL;
    u32 request = pReq->Request;

    do {

        if (pReq->TotalLength > AR6K_MAX_TRANSFER_SIZE_PER_SCATTER) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("Invalid length: %d \n", pReq->TotalLength));
            break;
        }

        if (pReq->TotalLength == 0) {
            A_ASSERT(false);
            break;
        }

        if (request & HIF_ASYNCHRONOUS) {
            /* use an I/O packet to carry this request */
            pIOPacket = AR6KAllocIOPacket(pDev);
            if (NULL == pIOPacket) {
                status = A_NO_MEMORY;
                break;
            }

            /* save the request */
            pIOPacket->pPktContext = pReq;
            /* stick in our completion routine when the I/O operation completes */
            pIOPacket->Completion = DevReadWriteScatterAsyncHandler;
            pIOPacket->pContext = pDev;
        }

        if (request & HIF_WRITE) {
            /* in virtual DMA, we are issuing the requests through the legacy HIFReadWrite API,
             * which will adjust the address automatically for the last byte to fall on the mailbox
             * EOM. */

            /* if the address is an extended address, we can adjust the address here since the extended
             * address will bypass the normal checks in legacy HIF layers */
            if (pReq->Address == pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress) {
                pReq->Address += pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize - pReq->TotalLength;
            }
        }
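
        /* Illustrative example (values hypothetical, not taken from hardware):
         * with an ExtendedSize of 2048 and a TotalLength of 512, the write
         * address above is advanced by 1536 so that the last byte of the
         * transfer falls exactly on the mailbox end-of-message (EOM) boundary. */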

        /* use legacy readwrite */
        status = HIFReadWrite(pDev->HIFDevice,
                              pReq->Address,
                              DEV_GET_VIRT_DMA_INFO(pReq)->pVirtDmaBuffer,
                              pReq->TotalLength,
                              request,
                              (request & HIF_ASYNCHRONOUS) ? pIOPacket : NULL);

    } while (false);

    if ((status != A_PENDING) && status && (request & HIF_ASYNCHRONOUS)) {
        if (pIOPacket != NULL) {
            AR6KFreeIOPacket(pDev,pIOPacket);
        }
        pReq->CompletionStatus = status;
        pReq->CompletionRoutine(pReq);
        status = 0;
    }

    return status;
}


static void DevCleanupVirtualScatterSupport(struct ar6k_device *pDev)
{
    struct hif_scatter_req *pReq;

    while (1) {
        pReq = DevAllocScatterReq((struct hif_device *)pDev);
        if (NULL == pReq) {
            break;
        }
        A_FREE(pReq);
    }
}

/* function to set up virtual scatter support if HIF layer has not implemented the interface */
static int DevSetupVirtualScatterSupport(struct ar6k_device *pDev)
{
    int status = 0;
    int bufferSize, sgreqSize;
    int i;
    struct dev_scatter_dma_virtual_info *pVirtualInfo;
    struct hif_scatter_req *pReq;

    bufferSize = sizeof(struct dev_scatter_dma_virtual_info) +
        2 * (A_GET_CACHE_LINE_BYTES()) + AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;

    sgreqSize = sizeof(struct hif_scatter_req) +
        (AR6K_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(struct hif_scatter_item));
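
    /* Sketch of the layout produced by the single A_MALLOC below:
     *
     *   [hif_scatter_req + extra scatter items][dev_scatter_dma_virtual_info]
     *                                           \-> DataArea, aligned up to a
     *                                               cache line and used as the
     *                                               virtual DMA bounce buffer
     *
     * The two extra cache lines in bufferSize leave room for that alignment. */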

    for (i = 0; i < AR6K_SCATTER_REQS; i++) {
        /* allocate the scatter request, buffer info and the actual virtual buffer itself */
        pReq = (struct hif_scatter_req *)A_MALLOC(sgreqSize + bufferSize);

        if (NULL == pReq) {
            status = A_NO_MEMORY;
            break;
        }

        A_MEMZERO(pReq, sgreqSize);

        /* the virtual DMA starts after the scatter request struct */
        pVirtualInfo = (struct dev_scatter_dma_virtual_info *)((u8 *)pReq + sgreqSize);
        A_MEMZERO(pVirtualInfo, sizeof(struct dev_scatter_dma_virtual_info));

        pVirtualInfo->pVirtDmaBuffer = &pVirtualInfo->DataArea[0];
        /* align buffer to cache line in case host controller can actually DMA this */
        pVirtualInfo->pVirtDmaBuffer = A_ALIGN_TO_CACHE_LINE(pVirtualInfo->pVirtDmaBuffer);
        /* store the structure in the private area */
        pReq->HIFPrivate[0] = pVirtualInfo;
        /* we emulate a DMA bounce interface */
        pReq->ScatterMethod = HIF_SCATTER_DMA_BOUNCE;
        pReq->pScatterBounceBuffer = pVirtualInfo->pVirtDmaBuffer;
        /* free request to the list */
        DevFreeScatterReq((struct hif_device *)pDev,pReq);
    }

    if (status) {
        DevCleanupVirtualScatterSupport(pDev);
    } else {
        pDev->HifScatterInfo.pAllocateReqFunc = DevAllocScatterReq;
        pDev->HifScatterInfo.pFreeReqFunc = DevFreeScatterReq;
        pDev->HifScatterInfo.pReadWriteScatterFunc = DevReadWriteScatter;
        if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
            AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K: SPI bus requires RX scatter limits\n"));
            pDev->HifScatterInfo.MaxScatterEntries = AR6K_MIN_SCATTER_ENTRIES_PER_REQ;
            pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MIN_TRANSFER_SIZE_PER_SCATTER;
        } else {
            pDev->HifScatterInfo.MaxScatterEntries = AR6K_SCATTER_ENTRIES_PER_REQ;
            pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
        }
        pDev->ScatterIsVirtual = true;
    }

    return status;
}

int DevCleanupMsgBundling(struct ar6k_device *pDev)
{
    if (NULL != pDev) {
        DevCleanupVirtualScatterSupport(pDev);
    }

    return 0;
}

int DevSetupMsgBundling(struct ar6k_device *pDev, int MaxMsgsPerTransfer)
{
    int status;

    if (pDev->MailBoxInfo.Flags & HIF_MBOX_FLAG_NO_BUNDLING) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HIF requires bundling disabled\n"));
        return A_ENOTSUP;
    }

    status = HIFConfigureDevice(pDev->HIFDevice,
                                HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
                                &pDev->HifScatterInfo,
                                sizeof(pDev->HifScatterInfo));

    if (status) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
            ("AR6K: ** HIF layer does not support scatter requests (%d) \n",status));

        /* we can try to use a virtual DMA scatter mechanism using legacy HIFReadWrite() */
        status = DevSetupVirtualScatterSupport(pDev);

        if (!status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("AR6K: virtual scatter transfers enabled (max scatter items:%d: maxlen:%d) \n",
                 DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
        }

    } else {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("AR6K: HIF layer supports scatter requests (max scatter items:%d: maxlen:%d) \n",
             DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
    }

    if (!status) {
        /* for the recv path, the maximum number of bytes per recv bundle is just limited
         * by the maximum transfer size at the HIF layer */
        pDev->MaxRecvBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;

        if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
            AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K: SPI bus requires TX bundling disabled\n"));
            pDev->MaxSendBundleSize = 0;
        } else {
            /* for the send path, the max transfer size is limited by the existence and size of
             * the extended mailbox address range */
            if (pDev->MailBoxInfo.MboxProp[0].ExtendedAddress != 0) {
                pDev->MaxSendBundleSize = pDev->MailBoxInfo.MboxProp[0].ExtendedSize;
            } else {
                /* legacy */
                pDev->MaxSendBundleSize = AR6K_LEGACY_MAX_WRITE_LENGTH;
            }

            if (pDev->MaxSendBundleSize > pDev->HifScatterInfo.MaxTransferSizePerScatterReq) {
                /* limit send bundle size to what the HIF can support for scatter requests */
                pDev->MaxSendBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
            }
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("AR6K: max recv: %d max send: %d \n",
             DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev), DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev)));

    }
    return status;
}

int DevSubmitScatterRequest(struct ar6k_device *pDev, struct hif_scatter_req *pScatterReq, bool Read, bool Async)
{
    int status;

    if (Read) {
        /* read operation */
        pScatterReq->Request = (Async) ? HIF_RD_ASYNC_BLOCK_FIX : HIF_RD_SYNC_BLOCK_FIX;
        pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
        A_ASSERT(pScatterReq->TotalLength <= (u32)DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev));
    } else {
        u32 mailboxWidth;

        /* write operation */
        pScatterReq->Request = (Async) ? HIF_WR_ASYNC_BLOCK_INC : HIF_WR_SYNC_BLOCK_INC;
        A_ASSERT(pScatterReq->TotalLength <= (u32)DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev));
        if (pScatterReq->TotalLength > AR6K_LEGACY_MAX_WRITE_LENGTH) {
            /* for large writes use the extended address */
            pScatterReq->Address = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress;
            mailboxWidth = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize;
        } else {
            pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
            mailboxWidth = AR6K_LEGACY_MAX_WRITE_LENGTH;
        }

        if (!pDev->ScatterIsVirtual) {
            /* we are passing this scatter list down to the HIF layer's scatter request handler;
             * fix up the address so that the last byte falls on the EOM. We do this for those
             * HIFs that support the scatter API */
            pScatterReq->Address += (mailboxWidth - pScatterReq->TotalLength);
        }

    }

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV | ATH_DEBUG_SEND,
        ("DevSubmitScatterRequest, Entries: %d, Total Length: %d Mbox:0x%X (mode: %s : %s)\n",
         pScatterReq->ValidScatterEntries,
         pScatterReq->TotalLength,
         pScatterReq->Address,
         Async ? "ASYNC" : "SYNC",
         (Read) ? "RD" : "WR"));

    status = DEV_PREPARE_SCATTER_OPERATION(pScatterReq);

    if (status) {
        if (Async) {
            pScatterReq->CompletionStatus = status;
            pScatterReq->CompletionRoutine(pScatterReq);
            return 0;
        }
        return status;
    }

    status = pDev->HifScatterInfo.pReadWriteScatterFunc(pDev->ScatterIsVirtual ? pDev : pDev->HIFDevice,
                                                        pScatterReq);
    if (!Async) {
        /* in sync mode, we can touch the scatter request */
        pScatterReq->CompletionStatus = status;
        DEV_FINISH_SCATTER_OPERATION(pScatterReq);
    } else {
        if (status == A_PENDING) {
            status = 0;
        }
    }

    return status;
}


#ifdef MBOXHW_UNIT_TEST


/* This is a mailbox hardware unit test that must be called in a schedulable context.
 * The test is very simple: it sends a list of buffers with a counting pattern,
 * and the target inverts the data and sends the message back.
 *
 * The unit test has the following constraints:
 *
 * The target has at least 8 buffers of 256 bytes each. The host will send
 * the following pattern of buffers in rapid succession :
 *
 * 1 buffer - 128 bytes
 * 1 buffer - 256 bytes
 * 1 buffer - 512 bytes
 * 1 buffer - 1024 bytes
 *
 * The host will send the buffers to one mailbox and wait for buffers to be reflected
 * back from the same mailbox. The target sends the buffers back in FIFO order.
 * Once the final buffer has been received for a mailbox, the next mailbox is tested.
 *
 * Note: To simplify the test, we assume that the chosen buffer sizes
 * will fall on a nice block pad.
 *
 * It is expected that higher-order tests will be written to stress the mailboxes using
 * a message-based protocol (with some performance timing) that can create more
 * randomness in the packets sent over mailboxes.
 *
 * */

#define A_ROUND_UP_PWR2(x, align) (((int) (x) + ((align)-1)) & ~((align)-1))
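/* e.g. A_ROUND_UP_PWR2(80, 128) == 128 and A_ROUND_UP_PWR2(200, 128) == 256;
 * align must be a power of 2 for this mask arithmetic to work */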

#define BUFFER_BLOCK_PAD 128

#if 0
#define BUFFER1 128
#define BUFFER2 256
#define BUFFER3 512
#define BUFFER4 1024
#endif

#if 1
#define BUFFER1 80
#define BUFFER2 200
#define BUFFER3 444
#define BUFFER4 800
#endif

#define TOTAL_BYTES (A_ROUND_UP_PWR2(BUFFER1,BUFFER_BLOCK_PAD) + \
                     A_ROUND_UP_PWR2(BUFFER2,BUFFER_BLOCK_PAD) + \
                     A_ROUND_UP_PWR2(BUFFER3,BUFFER_BLOCK_PAD) + \
                     A_ROUND_UP_PWR2(BUFFER4,BUFFER_BLOCK_PAD) )

#define TEST_BYTES (BUFFER1 + BUFFER2 + BUFFER3 + BUFFER4)

#define TEST_CREDITS_RECV_TIMEOUT 100

static u8 g_Buffer[TOTAL_BYTES];
static u32 g_MailboxAddrs[AR6K_MAILBOXES];
static u32 g_BlockSizes[AR6K_MAILBOXES];

#define BUFFER_PROC_LIST_DEPTH 4

struct buffer_proc_list {
    u8 *pBuffer;
    u32 length;
};


#define PUSH_BUFF_PROC_ENTRY(pList,len,pCurrpos) \
{                                                \
    (pList)->pBuffer = (pCurrpos);               \
    (pList)->length = (len);                     \
    (pCurrpos) += (len);                         \
    (pList)++;                                   \
}

/* a simple and crude way to send different "message" sizes */
static void AssembleBufferList(struct buffer_proc_list *pList)
{
    u8 *pBuffer = g_Buffer;

#if BUFFER_PROC_LIST_DEPTH < 4
#error "Buffer processing list depth is not deep enough!!"
#endif

    PUSH_BUFF_PROC_ENTRY(pList,BUFFER1,pBuffer);
    PUSH_BUFF_PROC_ENTRY(pList,BUFFER2,pBuffer);
    PUSH_BUFF_PROC_ENTRY(pList,BUFFER3,pBuffer);
    PUSH_BUFF_PROC_ENTRY(pList,BUFFER4,pBuffer);
}

#define FILL_ZERO     true
#define FILL_COUNTING false

static void InitBuffers(bool Zero)
{
    u16 *pBuffer16 = (u16 *)g_Buffer;
    int i;

    /* fill buffer with 16 bit counting pattern or zeros */
    for (i = 0; i < (TOTAL_BYTES / 2); i++) {
        if (!Zero) {
            pBuffer16[i] = (u16)i;
        } else {
            pBuffer16[i] = 0;
        }
    }
}


static bool CheckOneBuffer(u16 *pBuffer16, int Length)
{
    int i;
    u16 startCount;
    bool success = true;

    /* get the starting count */
    startCount = pBuffer16[0];
    /* invert it, this is the expected value */
    startCount = ~startCount;
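    /* Example: if the host sent 0x0000,0x0001,... the target returns
     * 0xFFFF,0xFFFE,...; inverting the first received word recovers the
     * original starting count, so the loop below can check each word
     * against (u16)~startCount as startCount increments. */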
    /* scan the buffer and verify */
    for (i = 0; i < (Length / 2); i++, startCount++) {
        /* target will invert all the data */
        if ((u16)pBuffer16[i] != (u16)~startCount) {
            success = false;
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Data Got:0x%X, Expecting:0x%X (offset:%d, total:%d) \n",
                    pBuffer16[i], ((u16)~startCount), i, Length));
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("0x%X 0x%X 0x%X 0x%X \n",
                    pBuffer16[i], pBuffer16[i + 1], pBuffer16[i + 2], pBuffer16[i + 3]));
            break;
        }
    }

    return success;
}

static bool CheckBuffers(void)
{
    int i;
    bool success = true;
    struct buffer_proc_list checkList[BUFFER_PROC_LIST_DEPTH];

    /* assemble the list */
    AssembleBufferList(checkList);

    /* scan the buffers and verify */
    for (i = 0; i < BUFFER_PROC_LIST_DEPTH; i++) {
        success = CheckOneBuffer((u16 *)checkList[i].pBuffer, checkList[i].length);
        if (!success) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer : 0x%X, Length:%d failed verify \n",
                    (u32)checkList[i].pBuffer, checkList[i].length));
            break;
        }
    }

    return success;
}

/* find the end marker for the last buffer we will be sending */
static u16 GetEndMarker(void)
{
    u8 *pBuffer;
    struct buffer_proc_list checkList[BUFFER_PROC_LIST_DEPTH];

    /* fill up buffers with the normal counting pattern */
    InitBuffers(FILL_COUNTING);

    /* assemble the list we will be sending down */
    AssembleBufferList(checkList);
    /* point to the last 2 bytes of the last buffer */
    pBuffer = &(checkList[BUFFER_PROC_LIST_DEPTH - 1].pBuffer[(checkList[BUFFER_PROC_LIST_DEPTH - 1].length) - 2]);

    /* the last count in the last buffer is the marker */
    return (u16)pBuffer[0] | ((u16)pBuffer[1] << 8);
}

#define ATH_PRINT_OUT_ZONE ATH_DEBUG_ERR

/* send the ordered buffers to the target */
static int SendBuffers(struct ar6k_device *pDev, int mbox)
{
    int status = 0;
    u32 request = HIF_WR_SYNC_BLOCK_INC;
    struct buffer_proc_list sendList[BUFFER_PROC_LIST_DEPTH];
    int i;
    int totalBytes = 0;
    int paddedLength;
    int totalwPadding = 0;

    AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sending buffers on mailbox : %d \n",mbox));

    /* fill buffer with counting pattern */
    InitBuffers(FILL_COUNTING);

    /* assemble the order in which we send */
    AssembleBufferList(sendList);

    for (i = 0; i < BUFFER_PROC_LIST_DEPTH; i++) {

        /* we are doing block transfers, so we need to pad everything to a block size */
        paddedLength = (sendList[i].length + (g_BlockSizes[mbox] - 1)) &
                       (~(g_BlockSizes[mbox] - 1));

        /* send each buffer synchronously */
        status = HIFReadWrite(pDev->HIFDevice,
                              g_MailboxAddrs[mbox],
                              sendList[i].pBuffer,
                              paddedLength,
                              request,
                              NULL);
        if (status) {
            break;
        }
        totalBytes += sendList[i].length;
        totalwPadding += paddedLength;
    }

    AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sent %d bytes (%d padded bytes) to mailbox : %d \n",totalBytes,totalwPadding,mbox));

    return status;
}

/* poll the mailbox credit counter until we get a credit or timeout */
static int GetCredits(struct ar6k_device *pDev, int mbox, int *pCredits)
{
    int status = 0;
    int timeout = TEST_CREDITS_RECV_TIMEOUT;
    u8 credits = 0;
    u32 address;

    while (true) {

        /* Read the counter register to get credits, this auto-decrements */
        address = COUNT_DEC_ADDRESS + (AR6K_MAILBOXES + mbox) * 4;
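        /* Assumption, inferred from the offset math above: the decrementing
         * credit counters occupy a second bank of 4-byte registers, one per
         * mailbox, starting AR6K_MAILBOXES entries past COUNT_DEC_ADDRESS. */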
        status = HIFReadWrite(pDev->HIFDevice, address, &credits, sizeof(credits),
                              HIF_RD_SYNC_BYTE_FIX, NULL);
        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("Unable to decrement the command credit count register (mbox=%d)\n",mbox));
            status = A_ERROR;
            break;
        }

        if (credits) {
            break;
        }

        timeout--;

        if (timeout <= 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                (" Timeout reading credit registers (mbox=%d, address:0x%X) \n",mbox,address));
            status = A_ERROR;
            break;
        }

        /* delay a little, target may not be ready */
        A_MDELAY(1000);

    }

    if (status == 0) {
        *pCredits = credits;
    }

    return status;
}


/* wait for the buffers to come back */
static int RecvBuffers(struct ar6k_device *pDev, int mbox)
{
    int status = 0;
    u32 request = HIF_RD_SYNC_BLOCK_INC;
    struct buffer_proc_list recvList[BUFFER_PROC_LIST_DEPTH];
    int curBuffer;
    int credits;
    int i;
    int totalBytes = 0;
    int paddedLength;
    int totalwPadding = 0;

    AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for buffers on mailbox : %d \n",mbox));

    /* zero the buffers */
    InitBuffers(FILL_ZERO);

    /* assemble the order in which we should receive */
    AssembleBufferList(recvList);

    curBuffer = 0;

    while (curBuffer < BUFFER_PROC_LIST_DEPTH) {

        /* get number of buffers that have been completed, this blocks
         * until we get at least 1 credit or it times out */
        status = GetCredits(pDev, mbox, &credits);

        if (status) {
            break;
        }

        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got %d messages on mailbox : %d \n",credits, mbox));

        /* get all the buffers that are sitting on the queue */
        for (i = 0; i < credits; i++) {
            A_ASSERT(curBuffer < BUFFER_PROC_LIST_DEPTH);
            /* recv the current buffer synchronously, the buffers should come back in
             * order... with padding applied by the target */
            paddedLength = (recvList[curBuffer].length + (g_BlockSizes[mbox] - 1)) &
                           (~(g_BlockSizes[mbox] - 1));

            status = HIFReadWrite(pDev->HIFDevice,
                                  g_MailboxAddrs[mbox],
                                  recvList[curBuffer].pBuffer,
                                  paddedLength,
                                  request,
                                  NULL);
            if (status) {
                AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to read %d bytes on mailbox:%d : address:0x%X \n",
                        recvList[curBuffer].length, mbox, g_MailboxAddrs[mbox]));
                break;
            }

            totalwPadding += paddedLength;
            totalBytes += recvList[curBuffer].length;
            curBuffer++;
        }

        if (status) {
            break;
        }
        /* go back and get some more */
        credits = 0;
    }

    if (totalBytes != TEST_BYTES) {
        A_ASSERT(false);
    } else {
        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got all buffers on mbox:%d total recv :%d (w/Padding : %d) \n",
                mbox, totalBytes, totalwPadding));
    }

    return status;
}

static int DoOneMboxHWTest(struct ar6k_device *pDev, int mbox)
{
    int status;

    do {
        /* send out buffers */
        status = SendBuffers(pDev,mbox);

        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Sending buffers Failed : %d mbox:%d\n",status,mbox));
            break;
        }

        /* go get them, this will block */
        status = RecvBuffers(pDev, mbox);

        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Recv buffers Failed : %d mbox:%d\n",status,mbox));
            break;
        }

        /* check the returned data patterns */
        if (!CheckBuffers()) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer Verify Failed : mbox:%d\n",mbox));
            status = A_ERROR;
            break;
        }

        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" Send/Recv success! mailbox : %d \n",mbox));

    } while (false);

    return status;
}

/* here is where the test starts */
int DoMboxHWTest(struct ar6k_device *pDev)
{
    int i;
    int status;
    int credits = 0;
    u8 params[4];
    int numBufs;
    int bufferSize;
    u16 temp;

    AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest START - \n"));

    do {
        /* get the addresses for all 4 mailboxes */
        status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
                                    g_MailboxAddrs, sizeof(g_MailboxAddrs));

        if (status) {
            A_ASSERT(false);
            break;
        }

        /* get the block sizes */
        status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
                                    g_BlockSizes, sizeof(g_BlockSizes));

        if (status) {
            A_ASSERT(false);
            break;
        }

        /* note, the HIF layer usually reports mbox 0 to have a block size of
         * 1, but our test wants to run in block-mode for all mailboxes, so we treat all mailboxes
         * the same. */
        g_BlockSizes[0] = g_BlockSizes[1];
        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Block Size to use: %d \n",g_BlockSizes[0]));

        if (g_BlockSizes[1] > BUFFER_BLOCK_PAD) {
            AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("%d Block size is too large for buffer pad %d\n",
                    g_BlockSizes[1], BUFFER_BLOCK_PAD));
            break;
        }

        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for target.... \n"));

        /* the target lets us know it is ready by giving us 1 credit on
         * mailbox 0 */
        status = GetCredits(pDev, 0, &credits);

        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait for target ready \n"));
            break;
        }

        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Target is ready ...\n"));

        /* read the first 4 scratch registers */
        status = HIFReadWrite(pDev->HIFDevice,
                              SCRATCH_ADDRESS,
                              params,
                              4,
                              HIF_RD_SYNC_BYTE_INC,
                              NULL);

        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get parameters \n"));
            break;
        }

        numBufs = params[0];
        bufferSize = (int)(((u16)params[2] << 8) | (u16)params[1]);
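
        /* Assumed scratch register layout, inferred from the accesses in this
         * test: byte 0 = buffers per mailbox, bytes 1-2 = buffer size (little
         * endian), bytes 4-5 = end marker and bytes 6-7 = block mask (both
         * written below). */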

        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE,
            ("Target parameters: bufs per mailbox:%d, buffer size:%d bytes (total space: %d, minimum required space (w/padding): %d) \n",
             numBufs, bufferSize, (numBufs * bufferSize), TOTAL_BYTES));

        if ((numBufs * bufferSize) < TOTAL_BYTES) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Not Enough buffer space to run test! need:%d, got:%d \n",
                    TOTAL_BYTES, (numBufs * bufferSize)));
            status = A_ERROR;
            break;
        }

        temp = GetEndMarker();

        status = HIFReadWrite(pDev->HIFDevice,
                              SCRATCH_ADDRESS + 4,
                              (u8 *)&temp,
                              2,
                              HIF_WR_SYNC_BYTE_INC,
                              NULL);

        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write end marker \n"));
            break;
        }

        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("End Marker: 0x%X \n",temp));

        temp = (u16)g_BlockSizes[1];
        /* convert to a mask */
        temp = temp - 1;
        status = HIFReadWrite(pDev->HIFDevice,
                              SCRATCH_ADDRESS + 6,
                              (u8 *)&temp,
                              2,
                              HIF_WR_SYNC_BYTE_INC,
                              NULL);

        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write block mask \n"));
            break;
        }

        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Set Block Mask: 0x%X \n",temp));

        /* execute the test on each mailbox */
        for (i = 0; i < AR6K_MAILBOXES; i++) {
            status = DoOneMboxHWTest(pDev, i);
            if (status) {
                break;
            }
        }

    } while (false);

    if (status == 0) {
        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - SUCCESS! - \n"));
    } else {
        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - FAILED! - \n"));
    }
    /* don't let HTC_Start continue, the target is actually not running any HTC code */
    return A_ERROR;
}
#endif