1 /*
2  * $Id: musycc.c,v 2.1 2007/08/15 23:32:17 rickd PMCC4_3_1B $
3  */
4 
/* High-water marks for interrupt-queue depth and bottom-half activity;
 * reported and reset to zero by musycc_dump_ring(). */
unsigned int max_intcnt = 0;
unsigned int max_bh = 0;
7 
8 /*-----------------------------------------------------------------------------
9  * musycc.c -
10  *
11  * Copyright (C) 2007  One Stop Systems, Inc.
12  * Copyright (C) 2003-2006  SBE, Inc.
13  *
14  *   This program is free software; you can redistribute it and/or modify
15  *   it under the terms of the GNU General Public License as published by
16  *   the Free Software Foundation; either version 2 of the License, or
17  *   (at your option) any later version.
18  *
19  *   This program is distributed in the hope that it will be useful,
20  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
21  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  *   GNU General Public License for more details.
23  *
24  * For further information, contact via email: support@onestopsystems.com
25  * One Stop Systems, Inc.  Escondido, California  U.S.A.
26  *-----------------------------------------------------------------------------
27  * RCS info:
28  * RCS revision: $Revision: 2.1 $
29  * Last changed on $Date: 2007/08/15 23:32:17 $
30  * Changed by $Author: rickd $
31  *-----------------------------------------------------------------------------
32  * $Log: musycc.c,v $
33  * Revision 2.1  2007/08/15 23:32:17  rickd
 * Use 'if 0' instead of GNU comment delimiter to avoid line wrap induced compiler errors.
35  *
36  * Revision 2.0  2007/08/15 22:13:20  rickd
 * Update to printf pointer %p usage and correct some UINT to ULONG for
 * 64bit compatibility.
39  *
40  * Revision 1.7  2006/04/21 00:56:40  rickd
41  * workqueue files now prefixed with <sbecom> prefix.
42  *
43  * Revision 1.6  2005/10/27 18:54:19  rickd
44  * Clean out old code.  Default to HDLC_FCS16, not TRANS.
45  *
46  * Revision 1.5  2005/10/17 23:55:28  rickd
47  * Initial port of NCOMM support patches from original work found
48  * in pmc_c4t1e1 as updated by NCOMM.  Ref: CONFIG_SBE_PMCC4_NCOMM.
49  *
50  * Revision 1.4  2005/10/13 20:35:25  rickd
51  * Cleanup warning for unused <flags> variable.
52  *
53  * Revision 1.3  2005/10/13 19:19:22  rickd
54  * Disable redundant driver removal cleanup code.
55  *
56  * Revision 1.2  2005/10/11 18:36:16  rickd
57  * Clean up warning messages caused by de-implemented some <flags> associated
58  * with spin_lock() removals.
59  *
60  * Revision 1.1  2005/10/05 00:45:28  rickd
61  * Re-enable xmit on flow-controlled and full channel to fix restart hang.
62  * Add some temp spin-lock debug code (rld_spin_owner).
63  *
64  * Revision 1.0  2005/09/28 00:10:06  rickd
65  * Initial release for C4T1E1 support. Lots of transparent
66  * mode updates.
67  *
68  *-----------------------------------------------------------------------------
69  */
70 
/* Embedded identification string ("@(#)" SCCS what-marker) so the file's
 * revision/copyright is visible in the compiled object via what(1)/strings(1). */
char        SBEid_pmcc4_musyccc[] =
"@(#)musycc.c - $Revision: 2.1 $      (c) Copyright 2004-2006 SBE, Inc.";
73 
74 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
75 
76 #include <linux/types.h>
77 #include "pmcc4_sysdep.h"
78 #include <linux/kernel.h>
79 #include <linux/errno.h>
80 #include <linux/init.h>
81 #include "sbecom_inline_linux.h"
82 #include "libsbew.h"
83 #include "pmcc4_private.h"
84 #include "pmcc4.h"
85 #include "musycc.h"
86 
#ifdef SBE_INCLUDE_SYMBOLS
#define STATIC                  /* expose internal symbols for debugging builds */
#else
#define STATIC  static          /* normal builds keep helpers file-local */
#endif

/* Channel lookup: the board argument <ci> is intentionally ignored here;
 * c4_find_chan() resolves the channel from its number alone. */
#define sd_find_chan(ci,ch)   c4_find_chan(ch)
94 
95 
/*******************************************************************/
/* global driver variables (defined elsewhere in the driver) */
extern ci_t *c4_list;
extern int  drvr_state;
extern int  cxt1e1_log_level;

extern int  cxt1e1_max_mru;
extern int  cxt1e1_max_mtu;
extern int  max_rxdesc_used;
extern int  max_txdesc_used;
extern ci_t *CI;                /* dummy pointer to board ZERO's data - DEBUG
                                 * USAGE */
108 
109 
/*******************************************************************/
/* forward references (implementations below or in sibling files) */
void        c4_fifo_free (mpi_t *, int);
void        c4_wk_chan_restart (mch_t *);
void        musycc_bh_tx_eom (mpi_t *, int);
int         musycc_chan_up (ci_t *, int);
status_t __init musycc_init (ci_t *);
STATIC void __init musycc_init_port (mpi_t *);
void        musycc_intr_bh_tasklet (ci_t *);
void        musycc_serv_req (mpi_t *, u_int32_t);
void        musycc_update_timeslots (mpi_t *);
121 
122 /*******************************************************************/
123 
124 #if 1
125 STATIC int
musycc_dump_rxbuffer_ring(mch_t * ch,int lockit)126 musycc_dump_rxbuffer_ring (mch_t * ch, int lockit)
127 {
128     struct mdesc *m;
129     unsigned long flags = 0;
130 
131     u_int32_t status;
132     int         n;
133 
134     if (lockit)
135     {
136         spin_lock_irqsave (&ch->ch_rxlock, flags);
137     }
138     if (ch->rxd_num == 0)
139     {
140         pr_info("  ZERO receive buffers allocated for this channel.");
141     } else
142     {
143         FLUSH_MEM_READ ();
144         m = &ch->mdr[ch->rxix_irq_srv];
145         for (n = ch->rxd_num; n; n--)
146         {
147             status = le32_to_cpu (m->status);
148             {
149                 pr_info("%c  %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
150                         (m == &ch->mdr[ch->rxix_irq_srv]) ? 'F' : ' ',
151                         (unsigned long) m, n,
152                         status,
153                         m->data ? (status & HOST_RX_OWNED ? 'H' : 'M') : '-',
154                         status & POLL_DISABLED ? 'P' : '-',
155                         status & EOBIRQ_ENABLE ? 'b' : '-',
156                         status & EOMIRQ_ENABLE ? 'm' : '-',
157                         status & LENGTH_MASK,
158                         le32_to_cpu (m->data), le32_to_cpu (m->next));
159 #ifdef RLD_DUMP_BUFDATA
160                 {
161                     u_int32_t  *dp;
162                     int         len = status & LENGTH_MASK;
163 
164 #if 1
165                     if (m->data && (status & HOST_RX_OWNED))
166 #else
167                     if (m->data)    /* always dump regardless of valid RX
168                                      * data */
169 #endif
170                     {
171                         dp = (u_int32_t *) OS_phystov ((void *) (le32_to_cpu (m->data)));
172                         if (len >= 0x10)
173                             pr_info("    %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
174                                     *dp, *(dp + 1), *(dp + 2), *(dp + 3));
175                         else if (len >= 0x08)
176                             pr_info("    %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
177                                     *dp, *(dp + 1));
178                         else
179                             pr_info("    %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
180                     }
181                 }
182 #endif
183             }
184             m = m->snext;
185         }
186     }                               /* -for- */
187     pr_info("\n");
188 
189     if (lockit)
190     {
191         spin_unlock_irqrestore (&ch->ch_rxlock, flags);
192     }
193     return 0;
194 }
195 #endif
196 
197 #if 1
198 STATIC int
musycc_dump_txbuffer_ring(mch_t * ch,int lockit)199 musycc_dump_txbuffer_ring (mch_t * ch, int lockit)
200 {
201     struct mdesc *m;
202     unsigned long flags = 0;
203     u_int32_t   status;
204     int         n;
205 
206     if (lockit)
207     {
208         spin_lock_irqsave (&ch->ch_txlock, flags);
209     }
210     if (ch->txd_num == 0)
211     {
212         pr_info("  ZERO transmit buffers allocated for this channel.");
213     } else
214     {
215         FLUSH_MEM_READ ();
216         m = ch->txd_irq_srv;
217         for (n = ch->txd_num; n; n--)
218         {
219             status = le32_to_cpu (m->status);
220             {
221                 pr_info("%c%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
222                         (m == ch->txd_usr_add) ? 'F' : ' ',
223                         (m == ch->txd_irq_srv) ? 'L' : ' ',
224                         (unsigned long) m, n,
225                         status,
226                      m->data ? (status & MUSYCC_TX_OWNED ? 'M' : 'H') : '-',
227                         status & POLL_DISABLED ? 'P' : '-',
228                         status & EOBIRQ_ENABLE ? 'b' : '-',
229                         status & EOMIRQ_ENABLE ? 'm' : '-',
230                         status & LENGTH_MASK,
231                         le32_to_cpu (m->data), le32_to_cpu (m->next));
232 #ifdef RLD_DUMP_BUFDATA
233                 {
234                     u_int32_t  *dp;
235                     int         len = status & LENGTH_MASK;
236 
237                     if (m->data)
238                     {
239                         dp = (u_int32_t *) OS_phystov ((void *) (le32_to_cpu (m->data)));
240                         if (len >= 0x10)
241                             pr_info("    %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
242                                     *dp, *(dp + 1), *(dp + 2), *(dp + 3));
243                         else if (len >= 0x08)
244                             pr_info("    %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
245                                     *dp, *(dp + 1));
246                         else
247                             pr_info("    %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
248                     }
249                 }
250 #endif
251             }
252             m = m->snext;
253         }
254     }                               /* -for- */
255     pr_info("\n");
256 
257     if (lockit)
258     {
259         spin_unlock_irqrestore (&ch->ch_txlock, flags);
260     }
261     return 0;
262 }
263 #endif
264 
265 
/*
 * The following supports a backdoor debug facility which can be used to
 * display the state of a board's channel.
 */

/*
 * Dump a channel's interrupt/bottom-half counters plus its TX and RX
 * descriptor rings to the kernel log.  Reading also resets the max_bh and
 * max_intcnt high-water marks.
 *
 * Returns SBE_DRVR_SUCCESS, SBE_DRVR_FAIL when <chan> is out of range, or
 * ENOENT when the channel is not up.
 * NOTE(review): the positive ENOENT mixes errno values into a status_t
 * return -- confirm callers only test for non-zero.
 */
status_t
musycc_dump_ring (ci_t * ci, unsigned int chan)
{
    mch_t      *ch;

    if (chan >= MAX_CHANS_USED)
    {
        return SBE_DRVR_FAIL;       /* E2BIG */
    }
    {
        int         bh;

        /* snapshot and report the interrupt/bottom-half activity counters */
        bh = atomic_read (&ci->bh_pending);
        pr_info(">> bh_pend %d [%d] ihead %d itail %d [%d] th_cnt %d bh_cnt %d wdcnt %d note %d\n",
                bh, max_bh, ci->iqp_headx, ci->iqp_tailx, max_intcnt,
                ci->intlog.drvr_intr_thcount,
                ci->intlog.drvr_intr_bhcount,
                ci->wdcount, ci->wd_notify);
        max_bh = 0;                 /* reset counter */
        max_intcnt = 0;             /* reset counter */
    }

    /* sd_find_chan() discards its first argument (see macro above), so the
     * <dummy> token here is never evaluated */
    if (!(ch = sd_find_chan (dummy, chan)))
    {
        pr_info(">> musycc_dump_ring: channel %d not up.\n", chan);
        return ENOENT;
    }
    pr_info(">> CI %p CHANNEL %3d @ %p: state %x status/p %x/%x\n", ci, chan, ch, ch->state,
            ch->status, ch->p.status);
    pr_info("--------------------------------\nTX Buffer Ring - Channel %d, txd_num %d. (bd/ch pend %d %d), TXD required %d, txpkt %lu\n",
            chan, ch->txd_num,
            (u_int32_t) atomic_read (&ci->tx_pending), (u_int32_t) atomic_read (&ch->tx_pending), ch->txd_required, ch->s.tx_packets);
    pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
            ch->user, ch->txd_irq_srv, ch->txd_usr_add,
            sd_queue_stopped (ch->user),
            ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
    musycc_dump_txbuffer_ring (ch, 1);
    pr_info("RX Buffer Ring - Channel %d, rxd_num %d. IRQ_SRV[%d] 0x%p, start_rx %x rxpkt %lu\n",
            chan, ch->rxd_num, ch->rxix_irq_srv,
            &ch->mdr[ch->rxix_irq_srv], ch->ch_start_rx, ch->s.rx_packets);
    musycc_dump_rxbuffer_ring (ch, 1);

    return SBE_DRVR_SUCCESS;
}
315 
316 
317 status_t
musycc_dump_rings(ci_t * ci,unsigned int start_chan)318 musycc_dump_rings (ci_t * ci, unsigned int start_chan)
319 {
320     unsigned int chan;
321 
322     for (chan = start_chan; chan < (start_chan + 5); chan++)
323         musycc_dump_ring (ci, chan);
324     return SBE_DRVR_SUCCESS;
325 }
326 
327 
328 /*
329  * NOTE on musycc_init_mdt():  These MUSYCC writes are only operational after
330  * a MUSYCC GROUP_INIT command has been issued.
331  */
332 
333 void
musycc_init_mdt(mpi_t * pi)334 musycc_init_mdt (mpi_t * pi)
335 {
336     u_int32_t  *addr, cfg;
337     int         i;
338 
339     /*
340      * This Idle Code insertion takes effect prior to channel's first
341      * transmitted  message.  After that, each message contains its own Idle
342      * Code information which is to be issued after the message is
343      * transmitted (Ref.MUSYCC 5.2.2.3: MCENBL bit in Group Configuration
344      * Descriptor).
345      */
346 
347     addr = (u_int32_t *) ((u_long) pi->reg + MUSYCC_MDT_BASE03_ADDR);
348     cfg = CFG_CH_FLAG_7E << IDLE_CODE;
349 
350     for (i = 0; i < 32; addr++, i++)
351     {
352         pci_write_32 (addr, cfg);
353     }
354 }
355 
356 
/* Set TX thp to the next unprocessed md */

/*
 * Advance the MUSYCC transmit head pointer (thp) for <ch> to the next
 * message descriptor still owned by the MUSYCC, reclaiming any completed
 * host-owned descriptors along the way via musycc_bh_tx_eom().  If the
 * channel had been flow controlled (tx_full), clear that state and
 * re-enable the attached transmit queue.  The whole walk runs under
 * ch->ch_txlock with interrupts disabled.
 */
void
musycc_update_tx_thp (mch_t * ch)
{
    struct mdesc *md;
    unsigned long flags;

    spin_lock_irqsave (&ch->ch_txlock, flags);
    while (1)
    {
        md = ch->txd_irq_srv;
        FLUSH_MEM_READ ();
        if (!md->data)
        {
            /* No MDs with buffers to process */
            spin_unlock_irqrestore (&ch->ch_txlock, flags);
            return;
        }
        if ((le32_to_cpu (md->status)) & MUSYCC_TX_OWNED)
        {
            /* this is the MD to restart TX with */
            break;
        }
        /*
         * Otherwise, we have a valid, host-owned message descriptor which
         * has been successfully transmitted and whose buffer can be freed,
         * so... process this MD, it's owned by the host.  (This might give
         * as a new, updated txd_irq_srv.)
         */
        musycc_bh_tx_eom (ch->up, ch->gchan);
    }
    md = ch->txd_irq_srv;
    /* hand the hardware the physical address of the restart descriptor */
    ch->up->regram->thp[ch->gchan] = cpu_to_le32 (OS_vtophys (md));
    FLUSH_MEM_WRITE ();

    if (ch->tx_full)
    {
        ch->tx_full = 0;
        ch->txd_required = 0;
        sd_enable_xmit (ch->user);  /* re-enable to catch flow controlled
                                     * channel */
    }
    spin_unlock_irqrestore (&ch->ch_txlock, flags);

#ifdef RLD_TRANS_DEBUG
    pr_info("++ musycc_update_tx_thp[%d]: setting thp = %p, sts %x\n", ch->channum, md, md->status);
#endif
}
406 
407 
/*
 * This is the workq task executed by the OS when our queue_work() is
 * scheduled and run.  It can fire off either RX or TX ACTIVATION depending
 * upon the channel's ch_start_tx and ch_start_rx variables.  This routine
 * is implemented as a work queue so that the call to the service request is
 * able to sleep, awaiting an interrupt acknowledgment response (SACK) from
 * the hardware.
 */

void
musycc_wq_chan_restart (void *arg)      /* channel private structure */
{
    mch_t      *ch;
    mpi_t      *pi;
    struct mdesc *md;
#if 0
    unsigned long flags;
#endif

    /* recover the channel from its embedded work_struct */
    ch = container_of(arg, struct c4_chan_info, ch_work);
    pi = ch->up;

#ifdef RLD_TRANS_DEBUG
    pr_info("wq_chan_restart[%d]: start_RT[%d/%d] status %x\n",
            ch->channum, ch->ch_start_rx, ch->ch_start_tx, ch->status);

#endif

    /**********************************/
    /** check for RX restart request **/
    /**********************************/

    if ((ch->ch_start_rx) && (ch->status & RX_ENABLED))
    {

        ch->ch_start_rx = 0;        /* consume the restart request */
#if defined(RLD_TRANS_DEBUG) || defined(RLD_RXACT_DEBUG)
        {
            /* only log the first few RX activations to avoid log flooding */
            static int  hereb4 = 7;

            if (hereb4)             /* RLD DEBUG */
            {
                hereb4--;
#ifdef RLD_TRANS_DEBUG
                md = &ch->mdr[ch->rxix_irq_srv];
                pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
                ch->channum, ch->rxix_irq_srv, md, le32_to_cpu (md->status),
                        ch->s.rx_packets);
#elif defined(RLD_RXACT_DEBUG)
                md = &ch->mdr[ch->rxix_irq_srv];
                pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
                ch->channum, ch->rxix_irq_srv, md, le32_to_cpu (md->status),
                        ch->s.rx_packets);
                musycc_dump_rxbuffer_ring (ch, 1);      /* RLD DEBUG */
#endif
            }
        }
#endif
        musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | ch->gchan);
    }
    /**********************************/
    /** check for TX restart request **/
    /**********************************/

    if ((ch->ch_start_tx) && (ch->status & TX_ENABLED))
    {
        /* find next unprocessed message, then set TX thp to it */
        musycc_update_tx_thp (ch);

#if 0
        spin_lock_irqsave (&ch->ch_txlock, flags);
#endif
        md = ch->txd_irq_srv;
        if (!md)
        {
#ifdef RLD_TRANS_DEBUG
            pr_info("-- musycc_wq_chan_restart[%d]: WARNING, starting NULL md\n", ch->channum);
#endif
#if 0
            spin_unlock_irqrestore (&ch->ch_txlock, flags);
#endif
        } else if (md->data && ((le32_to_cpu (md->status)) & MUSYCC_TX_OWNED))
        {
            /* MUSYCC owns a descriptor with data: safe to (re)activate TX */
            ch->ch_start_tx = 0;
#if 0
            spin_unlock_irqrestore (&ch->ch_txlock, flags);   /* allow interrupts for service request */
#endif
#ifdef RLD_TRANS_DEBUG
            pr_info("++ musycc_wq_chan_restart() CHAN TX ACTIVATE: chan %d txd_irq_srv %p = sts %x, txpkt %lu\n",
                    ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status, ch->s.tx_packets);
#endif
            musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION | ch->gchan);
        }
#ifdef RLD_RESTART_DEBUG
        else
        {
            /* retain request to start until retried and we have data to xmit */
            pr_info("-- musycc_wq_chan_restart[%d]: DELAYED due to md %p sts %x data %x, start_tx %x\n",
                    ch->channum, md,
                    le32_to_cpu (md->status),
                    le32_to_cpu (md->data), ch->ch_start_tx);
            musycc_dump_txbuffer_ring (ch, 0);
#if 0
            spin_unlock_irqrestore (&ch->ch_txlock, flags);   /* allow interrupts for service request */
#endif
        }
#endif
    }
}
517 
518 
 /*
  * Channel restart either fires off a workqueue request (2.6) or lodges a
  * watchdog activation sequence (2.4).
  */
523 
524 void
musycc_chan_restart(mch_t * ch)525 musycc_chan_restart (mch_t * ch)
526 {
527 #ifdef RLD_RESTART_DEBUG
528     pr_info("++ musycc_chan_restart[%d]: txd_irq_srv @ %p = sts %x\n",
529             ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status);
530 #endif
531 
532     /* 2.6 - find next unprocessed message, then set TX thp to it */
533 #ifdef RLD_RESTART_DEBUG
534     pr_info(">> musycc_chan_restart: scheduling Chan %x workQ @ %p\n", ch->channum, &ch->ch_work);
535 #endif
536     c4_wk_chan_restart (ch);        /* work queue mechanism fires off: Ref:
537                                      * musycc_wq_chan_restart () */
538 
539 }
540 
541 
542 void
rld_put_led(mpi_t * pi,u_int32_t ledval)543 rld_put_led (mpi_t * pi, u_int32_t ledval)
544 {
545     static u_int32_t led = 0;
546 
547     if (ledval == 0)
548         led = 0;
549     else
550         led |= ledval;
551 
552     pci_write_32 ((u_int32_t *) &pi->up->cpldbase->leds, led);  /* RLD DEBUG TRANHANG */
553 }
554 
555 
#define MUSYCC_SR_RETRY_CNT  9

/*
 * Issue service request <req> to the MUSYCC for group <pi>, serialized by
 * pi->sr_sem_busy.  The write is verified by read-back and reissued up to
 * MUSYCC_SR_RETRY_CNT times; on success the caller sleeps on
 * pi->sr_sem_wait until the SACK interrupt (except for SR_CHIP_RESET,
 * which is not acknowledged and is instead followed by a fixed delay).
 * May sleep, so callers must be in process context.
 */
void
musycc_serv_req (mpi_t * pi, u_int32_t req)
{
    volatile u_int32_t r;
    int         rcnt;

    /*
     * PORT NOTE: Semaphore protect service loop guarantees only a single
     * operation at a time.  Per MUSYCC Manual - "Issuing service requests to
     * the same channel group without first receiving ACK from each request
     * may cause the host to lose track of which service request has been
     * acknowledged."
     */

    SD_SEM_TAKE (&pi->sr_sem_busy, "serv");     /* only 1 thru here, per
                                                 * group */

    if (pi->sr_last == req)
    {
#ifdef RLD_TRANS_DEBUG
        pr_info(">> same SR, Port %d Req %x\n", pi->portnum, req);
#endif

        /*
         * The most likely repeated request is the channel activation command
         * which follows the occurrence of a Transparent mode TX ONR or a
         * BUFF error.  If the previous command was a CHANNEL ACTIVATE,
         * precede it with a NOOP command in order maintain coherent control
         * of this current (re)ACTIVATE.
         */

        r = (pi->sr_last & ~SR_GCHANNEL_MASK);
        if ((r == (SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION)) ||
            (r == (SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION)))
        {
#ifdef RLD_TRANS_DEBUG
            pr_info(">> same CHAN ACT SR, Port %d Req %x => issue SR_NOOP CMD\n", pi->portnum, req);
#endif
            /* recurse for the NOOP; the semaphore must be released first */
            SD_SEM_GIVE (&pi->sr_sem_busy);     /* allow this next request */
            musycc_serv_req (pi, SR_NOOP);
            SD_SEM_TAKE (&pi->sr_sem_busy, "serv");     /* relock & continue w/
                                                         * original req */
        } else if (req == SR_NOOP)
        {
            /* no need to issue back-to-back SR_NOOP commands at this time */
#ifdef RLD_TRANS_DEBUG
            pr_info(">> same Port SR_NOOP skipped, Port %d\n", pi->portnum);
#endif
            SD_SEM_GIVE (&pi->sr_sem_busy);     /* allow this next request */
            return;
        }
    }
    rcnt = 0;
    pi->sr_last = req;
rewrite:
    pci_write_32 ((u_int32_t *) &pi->reg->srd, req);
    FLUSH_MEM_WRITE ();

    /*
     * Per MUSYCC Manual, Section 6.1,2 - "When writing an SCR service
     * request, the host must ensure at least one PCI bus clock cycle has
     * elapsed before writing another service request.  To meet this minimum
     * elapsed service request write timing interval, it is recommended that
     * the host follow any SCR write with another operation which reads from
     * the same address."
     */
    r = pci_read_32 ((u_int32_t *) &pi->reg->srd);      /* adhere to write
                                                         * timing imposition */


    /* verify the request latched; CHIP_RESET is exempt from verification */
    if ((r != req) && (req != SR_CHIP_RESET) && (++rcnt <= MUSYCC_SR_RETRY_CNT))
    {
        if (cxt1e1_log_level >= LOG_MONITOR)
            pr_info("%s: %d - reissue srv req/last %x/%x (hdw reads %x), Chan %d.\n",
                    pi->up->devname, rcnt, req, pi->sr_last, r,
                    (pi->portnum * MUSYCC_NCHANS) + (req & 0x1f));
        OS_uwait_dummy ();          /* this delay helps reduce reissue counts
                                     * (reason not yet researched) */
        goto rewrite;
    }
    if (rcnt > MUSYCC_SR_RETRY_CNT)
    {
        pr_warning("%s: failed service request (#%d)= %x, group %d.\n",
                   pi->up->devname, MUSYCC_SR_RETRY_CNT, req, pi->portnum);
        SD_SEM_GIVE (&pi->sr_sem_busy); /* allow any next request */
        return;
    }
    if (req == SR_CHIP_RESET)
    {
        /*
         * PORT NOTE: the CHIP_RESET command is NOT ack'd by the MUSYCC, thus
         * the upcoming delay is used.  Though the MUSYCC documentation
         * suggests a read-after-write would supply the required delay, it's
         * unclear what CPU/BUS clock speeds might have been assumed when
         * suggesting this 'lack of ACK' workaround.  Thus the use of uwait.
         */
        OS_uwait (100000, "icard"); /* 100ms */
    } else
    {
        FLUSH_MEM_READ ();
        SD_SEM_TAKE (&pi->sr_sem_wait, "sakack");       /* sleep until SACK
                                                         * interrupt occurs */
    }
    SD_SEM_GIVE (&pi->sr_sem_busy); /* allow any next request */
}
663 
664 
#ifdef  SBE_PMCC4_ENABLE
/*
 * Rebuild port <pi>'s RX/TX timeslot maps (rtsm/ttsm) -- and, where a
 * timeslot is shared or partially used, the subchannel maps (rscm/tscm) --
 * from the bitmasks of all UP channels, then issue the TIMESLOT_MAP and
 * SUBCHANNEL_MAP service requests so the hardware adopts the new maps.
 */
void
musycc_update_timeslots (mpi_t * pi)
{
    int         i, ch;
    char        e1mode = IS_FRAME_ANY_E1 (pi->p.port_mode);

    for (i = 0; i < 32; i++)
    {
        int         usedby = 0, last = 0, ts, j, bits[8];

        u_int8_t lastval = 0;

        /* slots 0 (E1 framing), 16 (E1 CAS signalling) and >23 (T1) are
         * reserved depending on the port's framing mode */
        if (((i == 0) && e1mode) || /* disable if  E1 mode */
            ((i == 16) && ((pi->p.port_mode == CFG_FRAME_E1CRC_CAS) || (pi->p.port_mode == CFG_FRAME_E1CRC_CAS_AMI)))
            || ((i > 23) && (!e1mode))) /* disable if T1 mode */
        {
            pi->tsm[i] = 0xff;      /* make tslot unavailable for this mode */
        } else
        {
            pi->tsm[i] = 0x00;      /* make tslot available for assignment */
        }
        for (j = 0; j < 8; j++)
            bits[j] = -1;           /* -1 == bit unclaimed by any channel */
        /* record which UP channel claims each bit of this timeslot */
        for (ch = 0; ch < MUSYCC_NCHANS; ch++)
        {
            if ((pi->chan[ch]->state == UP) && (pi->chan[ch]->p.bitmask[i]))
            {
                usedby++;
                last = ch;
                lastval = pi->chan[ch]->p.bitmask[i];
                for (j = 0; j < 8; j++)
                    if (lastval & (1 << j))
                        bits[j] = ch;
                pi->tsm[i] |= lastval;
            }
        }
        /* encode the map entry: high 3 bits select the mapping mode, low
         * bits the channel (values 4..7 appear to be full-64k / 56k /
         * single- / multi-subchannel modes -- confirm vs MUSYCC datasheet) */
        if (!usedby)
            ts = 0;
        else if ((usedby == 1) && (lastval == 0xff))
            ts = (4 << 5) | last;
        else if ((usedby == 1) && (lastval == 0x7f))
            ts = (5 << 5) | last;
        else
        {
            int         idx;

            if (bits[0] < 0)
                ts = (6 << 5) | (idx = last);
            else
                ts = (7 << 5) | (idx = bits[0]);
            for (j = 1; j < 8; j++)
            {
                pi->regram->rscm[idx * 8 + j] = (bits[j] < 0) ? 0 : (0x80 | bits[j]);
                pi->regram->tscm[idx * 8 + j] = (bits[j] < 0) ? 0 : (0x80 | bits[j]);
            }
        }
        pi->regram->rtsm[i] = ts;
        pi->regram->ttsm[i] = ts;
    }
    FLUSH_MEM_WRITE ();

    musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION);
    musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION);
    musycc_serv_req (pi, SR_SUBCHANNEL_MAP | SR_RX_DIRECTION);
    musycc_serv_req (pi, SR_SUBCHANNEL_MAP | SR_TX_DIRECTION);
}
#endif
733 
734 
#ifdef SBE_WAN256T3_ENABLE
/*
 * WAN256T3 variant of the timeslot-map update: program all 128 rtsm/ttsm
 * entries from the hypermask and per-channel 56k/64k mode, then issue the
 * TIMESLOT_MAP service requests.
 * NOTE(review): <hyperdummy> is referenced below but is not declared in
 * this file -- confirm this build path still compiles.
 */
void
musycc_update_timeslots (mpi_t * pi)
{
    mch_t      *ch;

    u_int8_t    ts, hmask, tsen;
    int         gchan;
    int         i;

#ifdef SBE_PMCC4_ENABLE
    hmask = (0x1f << pi->up->p.hypersize) & 0x1f;
#endif
#ifdef SBE_WAN256T3_ENABLE
    hmask = (0x1f << hyperdummy) & 0x1f;
#endif
    for (i = 0; i < 128; i++)
    {
        gchan = ((pi->portnum * MUSYCC_NCHANS) + (i & hmask)) % MUSYCC_NCHANS;
        ch = pi->chan[gchan];
        if (ch->p.mode_56k)
            tsen = MODE_56KBPS;
        else
            tsen = MODE_64KBPS;     /* also the default */
        /* only the 32-slot window belonging to this port is enabled */
        ts = ((pi->portnum % 4) == (i / 32)) ? (tsen << 5) | (i & hmask) : 0;
        pi->regram->rtsm[i] = ts;
        pi->regram->ttsm[i] = ts;
    }
    FLUSH_MEM_WRITE ();
    musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION);
    musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION);
}
#endif
768 
769 
770  /*
771   * This routine converts a generic library channel configuration parameter
772   * into a hardware specific register value (IE. MUSYCC CCD Register).
773   */
774 u_int32_t
musycc_chan_proto(int proto)775 musycc_chan_proto (int proto)
776 {
777     int         reg;
778 
779     switch (proto)
780     {
781     case CFG_CH_PROTO_TRANS:        /* 0 */
782         reg = MUSYCC_CCD_TRANS;
783         break;
784     case CFG_CH_PROTO_SS7:          /* 1 */
785         reg = MUSYCC_CCD_SS7;
786         break;
787     default:
788     case CFG_CH_PROTO_ISLP_MODE:   /* 4 */
789     case CFG_CH_PROTO_HDLC_FCS16:  /* 2 */
790         reg = MUSYCC_CCD_HDLC_FCS16;
791         break;
792     case CFG_CH_PROTO_HDLC_FCS32:  /* 3 */
793         reg = MUSYCC_CCD_HDLC_FCS32;
794         break;
795     }
796 
797     return reg;
798 }
799 
#ifdef SBE_WAN256T3_ENABLE
/*
 * One-time group initialization for a WAN256T3 port: program the group
 * base pointer and the group/port/message-length configuration
 * descriptors, issue GROUP_INIT for both directions, then set up the
 * idle-code table and timeslot maps (both of which require GROUP_INIT
 * to have completed first).
 */
STATIC void __init
musycc_init_port (mpi_t * pi)
{
    pci_write_32 ((u_int32_t *) &pi->reg->gbp, OS_vtophys (pi->regram));

    /* Group configuration descriptor */
    pi->regram->grcd =
        __constant_cpu_to_le32 (MUSYCC_GRCD_RX_ENABLE |
                                MUSYCC_GRCD_TX_ENABLE |
                                MUSYCC_GRCD_SF_ALIGN |
                                MUSYCC_GRCD_SUBCHAN_DISABLE |
                                MUSYCC_GRCD_OOFMP_DISABLE |
                                MUSYCC_GRCD_COFAIRQ_DISABLE |
                                MUSYCC_GRCD_MC_ENABLE |
                       (MUSYCC_GRCD_POLLTH_32 << MUSYCC_GRCD_POLLTH_SHIFT));

    /* Port configuration descriptor */
    pi->regram->pcd =
        __constant_cpu_to_le32 (MUSYCC_PCD_E1X4_MODE |
                                MUSYCC_PCD_TXDATA_RISING |
                                MUSYCC_PCD_TX_DRIVEN);

    /* Message length descriptor */
       pi->regram->mld = __constant_cpu_to_le32 (cxt1e1_max_mru | (cxt1e1_max_mru << 16));
    FLUSH_MEM_WRITE ();

    musycc_serv_req (pi, SR_GROUP_INIT | SR_RX_DIRECTION);
    musycc_serv_req (pi, SR_GROUP_INIT | SR_TX_DIRECTION);

    musycc_init_mdt (pi);

    musycc_update_timeslots (pi);
}
#endif
833 
834 
835 status_t    __init
musycc_init(ci_t * ci)836 musycc_init (ci_t * ci)
837 {
838     char       *regaddr;        /* temp for address boundary calculations */
839     int         i, gchan;
840 
841     OS_sem_init (&ci->sem_wdbusy, SEM_AVAILABLE);       /* watchdog exclusion */
842 
843     /*
844      * Per MUSYCC manual, Section 6.3.4 - "The host must allocate a dword
845      * aligned memory segment for interrupt queue pointers."
846      */
847 
848 #define INT_QUEUE_BOUNDARY  4
849 
850     regaddr = OS_kmalloc ((INT_QUEUE_SIZE + 1) * sizeof (u_int32_t));
851     if (regaddr == 0)
852         return ENOMEM;
853     ci->iqd_p_saved = regaddr;      /* save orig value for free's usage */
854     ci->iqd_p = (u_int32_t *) ((unsigned long) (regaddr + INT_QUEUE_BOUNDARY - 1) &
855                                (~(INT_QUEUE_BOUNDARY - 1)));    /* this calculates
856                                                                  * closest boundary */
857 
858     for (i = 0; i < INT_QUEUE_SIZE; i++)
859     {
860         ci->iqd_p[i] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
861     }
862 
863     for (i = 0; i < ci->max_port; i++)
864     {
865         mpi_t      *pi = &ci->port[i];
866 
867         /*
868          * Per MUSYCC manual, Section 6.3.2 - "The host must allocate a 2KB
869          * bound memory segment for Channel Group 0."
870          */
871 
872 #define GROUP_BOUNDARY   0x800
873 
874         regaddr = OS_kmalloc (sizeof (struct musycc_groupr) + GROUP_BOUNDARY);
875         if (regaddr == 0)
876         {
877             for (gchan = 0; gchan < i; gchan++)
878             {
879                 pi = &ci->port[gchan];
880                 OS_kfree (pi->reg);
881                 pi->reg = 0;
882             }
883             return ENOMEM;
884         }
885         pi->regram_saved = regaddr; /* save orig value for free's usage */
886         pi->regram = (struct musycc_groupr *) ((unsigned long) (regaddr + GROUP_BOUNDARY - 1) &
887                                                (~(GROUP_BOUNDARY - 1)));        /* this calculates
888                                                                                  * closest boundary */
889     }
890 
891     /* any board centric MUSYCC commands will use group ZERO as its "home" */
892     ci->regram = ci->port[0].regram;
893     musycc_serv_req (&ci->port[0], SR_CHIP_RESET);
894 
895     pci_write_32 ((u_int32_t *) &ci->reg->gbp, OS_vtophys (ci->regram));
896     pci_flush_write (ci);
897 #ifdef CONFIG_SBE_PMCC4_NCOMM
898     ci->regram->__glcd = __constant_cpu_to_le32 (GCD_MAGIC);
899 #else
900     /* standard driver POLLS for INTB via CPLD register */
901     ci->regram->__glcd = __constant_cpu_to_le32 (GCD_MAGIC | MUSYCC_GCD_INTB_DISABLE);
902 #endif
903 
904     ci->regram->__iqp = cpu_to_le32 (OS_vtophys (&ci->iqd_p[0]));
905     ci->regram->__iql = __constant_cpu_to_le32 (INT_QUEUE_SIZE - 1);
906     pci_write_32 ((u_int32_t *) &ci->reg->dacbp, 0);
907     FLUSH_MEM_WRITE ();
908 
909     ci->state = C_RUNNING;          /* mark as full interrupt processing
910                                      * available */
911 
912     musycc_serv_req (&ci->port[0], SR_GLOBAL_INIT);     /* FIRST INTERRUPT ! */
913 
914     /* sanity check settable parameters */
915 
916        if (cxt1e1_max_mru > 0xffe)
917     {
918         pr_warning("Maximum allowed MRU exceeded, resetting %d to %d.\n",
919                                   cxt1e1_max_mru, 0xffe);
920                cxt1e1_max_mru = 0xffe;
921     }
922        if (cxt1e1_max_mtu > 0xffe)
923     {
924         pr_warning("Maximum allowed MTU exceeded, resetting %d to %d.\n",
925                                   cxt1e1_max_mtu, 0xffe);
926                cxt1e1_max_mtu = 0xffe;
927     }
928 #ifdef SBE_WAN256T3_ENABLE
929     for (i = 0; i < MUSYCC_NPORTS; i++)
930         musycc_init_port (&ci->port[i]);
931 #endif
932 
933     return SBE_DRVR_SUCCESS;        /* no error */
934 }
935 
936 
/*
 * musycc_bh_tx_eom - bottom-half service of a transmit End-Of-Message /
 * End-Of-Buffer event on group <pi>, channel <gchan>.
 *
 * Walks the channel's transmit descriptor ring starting at <txd_irq_srv>,
 * reclaiming each descriptor handed back by the MUSYCC: the attached memory
 * token is released, per-channel and per-card pending counters are
 * decremented, and the descriptor is zeroed for reuse.  Once enough free
 * descriptors accumulate, flow control is released via sd_enable_xmit().
 *
 * The "#if 0" spin_lock scaffolding is a historical locking scheme that is
 * currently compiled out.
 */
void
musycc_bh_tx_eom (mpi_t * pi, int gchan)
{
    mch_t      *ch;
    struct mdesc *md;

#if 0
#ifndef SBE_ISR_INLINE
    unsigned long flags;

#endif
#endif
    volatile u_int32_t status;

    ch = pi->chan[gchan];
    if (ch == 0 || ch->state != UP)
    {
        /* log only; fall through so the mdt check below decides whether to bail */
        if (cxt1e1_log_level >= LOG_ERROR)
            pr_info("%s: intr: xmit EOM on uninitialized channel %d\n",
                    pi->up->devname, gchan);
    }
    if (ch == 0 || ch->mdt == 0)
        return;                     /* note: mdt==0 implies a malloc()
                                     * failure w/in chan_up() routine */

#if 0
#ifdef SBE_ISR_INLINE
    spin_lock_irq (&ch->ch_txlock);
#else
    spin_lock_irqsave (&ch->ch_txlock, flags);
#endif
#endif
    do
    {
        FLUSH_MEM_READ ();
        md = ch->txd_irq_srv;       /* next descriptor awaiting service */
        status = le32_to_cpu (md->status);

        /*
         * Note: Per MUSYCC Ref 6.4.9, the host does not poll a host-owned
         * Transmit Buffer Descriptor during Transparent Mode.
         */
        if (status & MUSYCC_TX_OWNED)
        {
            int         readCount, loopCount;

            /***********************************************************/
            /* HW Bug Fix                                              */
            /* ----------                                              */
            /* Under certain PCI Bus loading conditions, the data      */
            /* associated with an update of Shared Memory is delayed   */
            /* relative to its PCI Interrupt.  This is caught when     */
            /* the host determines it does not yet OWN the descriptor. */
            /***********************************************************/

            readCount = 0;
            /* bounded busy-wait for the delayed shared-memory update to land */
            while (status & MUSYCC_TX_OWNED)
            {
                for (loopCount = 0; loopCount < 0x30; loopCount++)
                    OS_uwait_dummy ();  /* use call to avoid optimization
                                         * removal of dummy delay */
                FLUSH_MEM_READ ();
                status = le32_to_cpu (md->status);
                if (readCount++ > 40)
                    break;          /* don't wait any longer */
            }
            if (status & MUSYCC_TX_OWNED)
            {
                /* workaround failed: chip still owns the descriptor */
                if (cxt1e1_log_level >= LOG_MONITOR)
                {
                    pr_info("%s: Port %d Chan %2d - unexpected TX msg ownership intr (md %p sts %x)\n",
                            pi->up->devname, pi->portnum, ch->channum,
                            md, status);
                    pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
                            ch->user, ch->txd_irq_srv, ch->txd_usr_add,
                            sd_queue_stopped (ch->user),
                            ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
                    musycc_dump_txbuffer_ring (ch, 0);
                }
                break;              /* Not our mdesc, done */
            } else
            {
                if (cxt1e1_log_level >= LOG_MONITOR)
                    pr_info("%s: Port %d Chan %2d - recovered TX msg ownership [%d] (md %p sts %x)\n",
                            pi->up->devname, pi->portnum, ch->channum, readCount, md, status);
            }
        }
        /* advance the service pointer along the ring */
        ch->txd_irq_srv = md->snext;

        md->data = 0;
        if (md->mem_token != 0)
        {
            /* upcount channel */
            atomic_sub (OS_mem_token_tlen (md->mem_token), &ch->tx_pending);
            /* upcount card */
            atomic_sub (OS_mem_token_tlen (md->mem_token), &pi->up->tx_pending);
#ifdef SBE_WAN256T3_ENABLE
            if (!atomic_read (&pi->up->tx_pending))
                wan256t3_led (pi->up, LED_TX, 0);
#endif

#ifdef CONFIG_SBE_WAN256T3_NCOMM
            /* callback that our packet was sent */
            {
                int         hdlcnum = (pi->portnum * 32 + gchan);

                if (hdlcnum >= 228)
                {
                    if (nciProcess_TX_complete)
                        (*nciProcess_TX_complete) (hdlcnum,
                                                   getuserbychan (gchan));
                }
            }
#endif                              /*** CONFIG_SBE_WAN256T3_NCOMM ***/

            OS_mem_token_free_irq (md->mem_token);
            md->mem_token = 0;
        }
        md->status = 0;             /* descriptor now empty and host-owned */
#ifdef RLD_TXFULL_DEBUG
        if (cxt1e1_log_level >= LOG_MONITOR2)
            pr_info("~~ tx_eom: tx_full %x  txd_free %d -> %d\n",
                    ch->tx_full, ch->txd_free, ch->txd_free + 1);
#endif
        ++ch->txd_free;
        FLUSH_MEM_WRITE ();

        if ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && (status & EOBIRQ_ENABLE))
        {
            if (cxt1e1_log_level >= LOG_MONITOR)
                pr_info("%s: Mode (%x) incorrect EOB status (%x)\n",
                        pi->up->devname, ch->p.chan_mode, status);
            if ((status & EOMIRQ_ENABLE) == 0)
                break;
        }
    }
    while ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && ((status & EOMIRQ_ENABLE) == 0));
    /*
     * NOTE: (The above 'while' is coupled w/ previous 'do', way above.) Each
     * Transparent data buffer has the EOB bit, and NOT the EOM bit, set and
     * will furthermore have a separate IQD associated with each messages
     * buffer.
     */

    FLUSH_MEM_READ ();
    /*
     * Smooth flow control hysterisis by maintaining task stoppage until half
     * the available write buffers are available.
     */
    if (ch->tx_full && (ch->txd_free >= (ch->txd_num / 2)))
    {
        /*
         * Then, only releave task stoppage if we actually have enough
         * buffers to service the last requested packet.  It may require MORE
         * than half the available!
         */
        if (ch->txd_free >= ch->txd_required)
        {

#ifdef RLD_TXFULL_DEBUG
            if (cxt1e1_log_level >= LOG_MONITOR2)
                pr_info("tx_eom[%d]: enable xmit tx_full no more, txd_free %d txd_num/2 %d\n",
                        ch->channum,
                        ch->txd_free, ch->txd_num / 2);
#endif
            ch->tx_full = 0;
            ch->txd_required = 0;
            sd_enable_xmit (ch->user);  /* re-enable to catch flow controlled
                                         * channel */
        }
    }
#ifdef RLD_TXFULL_DEBUG
    else if (ch->tx_full)
    {
        if (cxt1e1_log_level >= LOG_MONITOR2)
            pr_info("tx_eom[%d]: bypass TX enable though room available? (txd_free %d txd_num/2 %d)\n",
                    ch->channum,
                    ch->txd_free, ch->txd_num / 2);
    }
#endif

    FLUSH_MEM_WRITE ();
#if 0
#ifdef SBE_ISR_INLINE
    spin_unlock_irq (&ch->ch_txlock);
#else
    spin_unlock_irqrestore (&ch->ch_txlock, flags);
#endif
#endif
}
1127 
1128 
1129 STATIC void
musycc_bh_rx_eom(mpi_t * pi,int gchan)1130 musycc_bh_rx_eom (mpi_t * pi, int gchan)
1131 {
1132     mch_t      *ch;
1133     void       *m, *m2;
1134     struct mdesc *md;
1135     volatile u_int32_t status;
1136     u_int32_t   error;
1137 
1138     ch = pi->chan[gchan];
1139     if (ch == 0 || ch->state != UP)
1140     {
1141         if (cxt1e1_log_level > LOG_ERROR)
1142             pr_info("%s: intr: receive EOM on uninitialized channel %d\n",
1143                     pi->up->devname, gchan);
1144         return;
1145     }
1146     if (ch->mdr == 0)
1147         return;                     /* can this happen ? */
1148 
1149     for (;;)
1150     {
1151         FLUSH_MEM_READ ();
1152         md = &ch->mdr[ch->rxix_irq_srv];
1153         status = le32_to_cpu (md->status);
1154         if (!(status & HOST_RX_OWNED))
1155             break;                  /* Not our mdesc, done */
1156         m = md->mem_token;
1157         error = (status >> 16) & 0xf;
1158         if (error == 0)
1159         {
1160 #ifdef CONFIG_SBE_WAN256T3_NCOMM
1161             int         hdlcnum = (pi->portnum * 32 + gchan);
1162 
1163             /*
1164              * if the packet number belongs to NCOMM, then send it to the TMS
1165              * driver
1166              */
1167             if (hdlcnum >= 228)
1168             {
1169                 if (nciProcess_RX_packet)
1170                     (*nciProcess_RX_packet) (hdlcnum, status & 0x3fff, m, ch->user);
1171             } else
1172 #endif                              /*** CONFIG_SBE_WAN256T3_NCOMM ***/
1173 
1174             {
1175                                if ((m2 = OS_mem_token_alloc (cxt1e1_max_mru)))
1176                 {
1177                     /* substitute the mbuf+cluster */
1178                     md->mem_token = m2;
1179                     md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m2)));
1180 
1181                     /* pass the received mbuf upward */
1182                     sd_recv_consume (m, status & LENGTH_MASK, ch->user);
1183                     ch->s.rx_packets++;
1184                     ch->s.rx_bytes += status & LENGTH_MASK;
1185                 } else
1186                 {
1187                     ch->s.rx_dropped++;
1188                 }
1189             }
1190         } else if (error == ERR_FCS)
1191         {
1192             ch->s.rx_crc_errors++;
1193         } else if (error == ERR_ALIGN)
1194         {
1195             ch->s.rx_missed_errors++;
1196         } else if (error == ERR_ABT)
1197         {
1198             ch->s.rx_missed_errors++;
1199         } else if (error == ERR_LNG)
1200         {
1201             ch->s.rx_length_errors++;
1202         } else if (error == ERR_SHT)
1203         {
1204             ch->s.rx_length_errors++;
1205         }
1206         FLUSH_MEM_WRITE ();
1207                status = cxt1e1_max_mru;
1208         if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
1209             status |= EOBIRQ_ENABLE;
1210         md->status = cpu_to_le32 (status);
1211 
1212         /* Check next mdesc in the ring */
1213         if (++ch->rxix_irq_srv >= ch->rxd_num)
1214             ch->rxix_irq_srv = 0;
1215         FLUSH_MEM_WRITE ();
1216     }
1217 }
1218 
1219 
/*
 * musycc_intr_th_handler - top-half (hard) interrupt handler.
 *
 * Reads the MUSYCC Interrupt Status Descriptor (ISD), applies two
 * documented hardware-bug workarounds (bogus 'nextInt' on heavy PCI load,
 * and the full-queue write-back index fix), records the new interrupt
 * queue tail, then dispatches the bottom half according to the build
 * configuration (tasklet, immediate queue, or inline call).
 *
 * Returns IRQ_NONE when the event is not ours (chip not ready, or a
 * zero-count ISD seen while walking a shared interrupt chain), otherwise
 * IRQ_HANDLED.
 */
irqreturn_t
musycc_intr_th_handler (void *devp)
{
    ci_t       *ci = (ci_t *) devp;
    volatile u_int32_t status, currInt = 0;
    u_int32_t   nextInt, intCnt;

    /*
     * Hardware not available, potential interrupt hang.  But since interrupt
     * might be shared, just return.
     */
    if (ci->state == C_INIT)
    {
        return IRQ_NONE;
    }
    /*
     * Marked as hardware available. Don't service interrupts, just clear the
     * event.
     */

    if (ci->state == C_IDLE)
    {
        status = pci_read_32 ((u_int32_t *) &ci->reg->isd);

        /* clear the interrupt but process nothing else */
        pci_write_32 ((u_int32_t *) &ci->reg->isd, status);
        return IRQ_HANDLED;
    }
    FLUSH_PCI_READ ();
    FLUSH_MEM_READ ();

    /* decompose the ISD into queue index (nextInt) and entry count (intCnt) */
    status = pci_read_32 ((u_int32_t *) &ci->reg->isd);
    nextInt = INTRPTS_NEXTINT (status);
    intCnt = INTRPTS_INTCNT (status);
    ci->intlog.drvr_intr_thcount++;

    /*********************************************************/
    /* HW Bug Fix                                            */
    /* ----------                                            */
    /* Under certain PCI Bus loading conditions, the         */
    /* MUSYCC looses the data associated with an update      */
    /* of its ISD and erroneously returns the immediately    */
    /* preceding 'nextInt' value.  However, the 'intCnt'     */
    /* value appears to be correct.  By not starting service */
    /* where the 'missing' 'nextInt' SHOULD point causes     */
    /* the IQD not to be serviced - the 'not serviced'       */
    /* entries then remain and continue to increase as more  */
    /* incorrect ISD's are encountered.                      */
    /*********************************************************/

    if (nextInt != INTRPTS_NEXTINT (ci->intlog.this_status_new))
    {
        if (cxt1e1_log_level >= LOG_MONITOR)
        {
            pr_info("%s: note - updated ISD from %08x to %08x\n",
                    ci->devname, status,
              (status & (~INTRPTS_NEXTINT_M)) | ci->intlog.this_status_new);
        }
        /*
         * Replace bogus status with software corrected value.
         *
         * It's not known whether, during this problem occurrence, if the
         * INTFULL bit is correctly reported or not.
         */
        status = (status & (~INTRPTS_NEXTINT_M)) | (ci->intlog.this_status_new);
        nextInt = INTRPTS_NEXTINT (status);
    }
    /**********************************************/
    /* Cn847x Bug Fix                             */
    /* --------------                             */
    /* Fix for inability to write back same index */
    /* as read for a full interrupt queue.        */
    /**********************************************/

    if (intCnt == INT_QUEUE_SIZE)
    {
        /* full queue: back the write-back index off by one entry */
        currInt = ((intCnt - 1) + nextInt) & (INT_QUEUE_SIZE - 1);
    } else
        /************************************************/
        /* Interrupt Write Location Issues              */
        /* -------------------------------              */
        /* When the interrupt status descriptor is      */
        /* written, the interrupt line is de-asserted   */
        /* by the Cn847x.  In the case of MIPS          */
        /* microprocessors, this must occur at the      */
        /* beginning of the interrupt handler so that   */
        /* the interrupt handle is not re-entered due   */
        /* to interrupt dis-assertion latency.          */
        /* In the case of all other processors, this    */
        /* action should occur at the end of the        */
        /* interrupt handler to avoid overwriting the   */
        /* interrupt queue.                             */
        /************************************************/

    if (intCnt)
    {
        currInt = (intCnt + nextInt) & (INT_QUEUE_SIZE - 1);
    } else
    {
        /*
         * NOTE: Servicing an interrupt whose ISD contains a count of ZERO
         * can be indicative of a Shared Interrupt chain.  Our driver can be
         * called from the system's interrupt handler as a matter of the OS
         * walking the chain.  As the chain is walked, the interrupt will
         * eventually be serviced by the correct driver/handler.
         */
#if 0
        /* chained interrupt = not ours */
        pr_info(">> %s: intCnt NULL, sts %x, possibly a chained interrupt!\n",
                ci->devname, status);
#endif
        return IRQ_NONE;
    }

    /* publish new tail for the bottom half, then log rotation of status */
    ci->iqp_tailx = currInt;

    currInt <<= INTRPTS_NEXTINT_S;
    ci->intlog.last_status_new = ci->intlog.this_status_new;
    ci->intlog.this_status_new = currInt;

    if ((cxt1e1_log_level >= LOG_WARN) && (status & INTRPTS_INTFULL_M))
    {
        pr_info("%s: Interrupt queue full condition occurred\n", ci->devname);
    }
    if (cxt1e1_log_level >= LOG_DEBUG)
        pr_info("%s: interrupts pending, isd @ 0x%p: %x curr %d cnt %d NEXT %d\n",
                ci->devname, &ci->reg->isd,
        status, nextInt, intCnt, (intCnt + nextInt) & (INT_QUEUE_SIZE - 1));

    FLUSH_MEM_WRITE ();
    /* write-back of the ISD (which de-asserts the line, per comment above)
     * happens before or after the BH depending on the dispatch model */
#if defined(SBE_ISR_TASKLET)
    pci_write_32 ((u_int32_t *) &ci->reg->isd, currInt);
    atomic_inc (&ci->bh_pending);
    tasklet_schedule (&ci->ci_musycc_isr_tasklet);
#elif defined(SBE_ISR_IMMEDIATE)
    pci_write_32 ((u_int32_t *) &ci->reg->isd, currInt);
    atomic_inc (&ci->bh_pending);
    queue_task (&ci->ci_musycc_isr_tq, &tq_immediate);
    mark_bh (IMMEDIATE_BH);
#elif defined(SBE_ISR_INLINE)
    (void) musycc_intr_bh_tasklet (ci);
    pci_write_32 ((u_int32_t *) &ci->reg->isd, currInt);
#endif
    return IRQ_HANDLED;
}
1365 
1366 
1367 #if defined(SBE_ISR_IMMEDIATE)
1368 unsigned long
1369 #else
1370 void
1371 #endif
musycc_intr_bh_tasklet(ci_t * ci)1372 musycc_intr_bh_tasklet (ci_t * ci)
1373 {
1374     mpi_t      *pi;
1375     mch_t      *ch;
1376     unsigned int intCnt;
1377     volatile u_int32_t currInt = 0;
1378     volatile unsigned int headx, tailx;
1379     int         readCount, loopCount;
1380     int         group, gchan, event, err, tx;
1381     u_int32_t   badInt = INT_EMPTY_ENTRY;
1382     u_int32_t   badInt2 = INT_EMPTY_ENTRY2;
1383 
1384     /*
1385      * Hardware not available, potential interrupt hang.  But since interrupt
1386      * might be shared, just return.
1387      */
1388     if ((drvr_state != SBE_DRVR_AVAILABLE) || (ci->state == C_INIT))
1389     {
1390 #if defined(SBE_ISR_IMMEDIATE)
1391         return 0L;
1392 #else
1393         return;
1394 #endif
1395     }
1396 #if defined(SBE_ISR_TASKLET) || defined(SBE_ISR_IMMEDIATE)
1397     if (drvr_state != SBE_DRVR_AVAILABLE)
1398     {
1399 #if defined(SBE_ISR_TASKLET)
1400         return;
1401 #elif defined(SBE_ISR_IMMEDIATE)
1402         return 0L;
1403 #endif
1404     }
1405 #elif defined(SBE_ISR_INLINE)
1406     /* no semaphore taken, no double checks */
1407 #endif
1408 
1409     ci->intlog.drvr_intr_bhcount++;
1410     FLUSH_MEM_READ ();
1411     {
1412         unsigned int bh = atomic_read (&ci->bh_pending);
1413 
1414         max_bh = max (bh, max_bh);
1415     }
1416     atomic_set (&ci->bh_pending, 0);/* if here, no longer pending */
1417     while ((headx = ci->iqp_headx) != (tailx = ci->iqp_tailx))
1418     {
1419         intCnt = (tailx >= headx) ? (tailx - headx) : (tailx - headx + INT_QUEUE_SIZE);
1420         currInt = le32_to_cpu (ci->iqd_p[headx]);
1421 
1422         max_intcnt = max (intCnt, max_intcnt);  /* RLD DEBUG */
1423 
1424         /**************************************************/
1425         /* HW Bug Fix                                     */
1426         /* ----------                                     */
1427         /* The following code checks for the condition    */
1428         /* of interrupt assertion before interrupt        */
1429         /* queue update.  This is a problem on several    */
1430         /* PCI-Local bridge chips found on some products. */
1431         /**************************************************/
1432 
1433         readCount = 0;
1434         if ((currInt == badInt) || (currInt == badInt2))
1435             ci->intlog.drvr_int_failure++;
1436 
1437         while ((currInt == badInt) || (currInt == badInt2))
1438         {
1439             for (loopCount = 0; loopCount < 0x30; loopCount++)
1440                 OS_uwait_dummy ();  /* use call to avoid optimization removal
1441                                      * of dummy delay */
1442             FLUSH_MEM_READ ();
1443             currInt = le32_to_cpu (ci->iqd_p[headx]);
1444             if (readCount++ > 20)
1445                 break;
1446         }
1447 
1448         if ((currInt == badInt) || (currInt == badInt2))        /* catch failure of Bug
1449                                                                  * Fix checking */
1450         {
1451             if (cxt1e1_log_level >= LOG_WARN)
1452                 pr_info("%s: Illegal Interrupt Detected @ 0x%p, mod %d.)\n",
1453                         ci->devname, &ci->iqd_p[headx], headx);
1454 
1455             /*
1456              * If the descriptor has not recovered, then leaving the EMPTY
1457              * entry set will not signal to the MUSYCC that this descriptor
1458              * has been serviced. The Interrupt Queue can then start losing
1459              * available descriptors and MUSYCC eventually encounters and
1460              * reports the INTFULL condition.  Per manual, changing any bit
1461              * marks descriptor as available, thus the use of different
1462              * EMPTY_ENTRY values.
1463              */
1464 
1465             if (currInt == badInt)
1466             {
1467                 ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY2);
1468             } else
1469             {
1470                 ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
1471             }
1472             ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1); /* insure wrapness */
1473             FLUSH_MEM_WRITE ();
1474             FLUSH_MEM_READ ();
1475             continue;
1476         }
1477         group = INTRPT_GRP (currInt);
1478         gchan = INTRPT_CH (currInt);
1479         event = INTRPT_EVENT (currInt);
1480         err = INTRPT_ERROR (currInt);
1481         tx = currInt & INTRPT_DIR_M;
1482 
1483         ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
1484         FLUSH_MEM_WRITE ();
1485 
1486         if (cxt1e1_log_level >= LOG_DEBUG)
1487         {
1488             if (err != 0)
1489                 pr_info(" %08x -> err: %2d,", currInt, err);
1490 
1491             pr_info("+ interrupt event: %d, grp: %d, chan: %2d, side: %cX\n",
1492                     event, group, gchan, tx ? 'T' : 'R');
1493         }
1494         pi = &ci->port[group];      /* notice that here we assume 1-1 group -
1495                                      * port mapping */
1496         ch = pi->chan[gchan];
1497         switch (event)
1498         {
1499         case EVE_SACK:              /* Service Request Acknowledge */
1500             if (cxt1e1_log_level >= LOG_DEBUG)
1501             {
1502                 volatile u_int32_t r;
1503 
1504                 r = pci_read_32 ((u_int32_t *) &pi->reg->srd);
1505                 pr_info("- SACK cmd: %08x (hdw= %08x)\n", pi->sr_last, r);
1506             }
1507             SD_SEM_GIVE (&pi->sr_sem_wait);     /* wake up waiting process */
1508             break;
1509         case EVE_CHABT:     /* Change To Abort Code (0x7e -> 0xff) */
1510         case EVE_CHIC:              /* Change To Idle Code (0xff -> 0x7e) */
1511             break;
1512         case EVE_EOM:               /* End Of Message */
1513         case EVE_EOB:               /* End Of Buffer (Transparent mode) */
1514             if (tx)
1515             {
1516                 musycc_bh_tx_eom (pi, gchan);
1517             } else
1518             {
1519                 musycc_bh_rx_eom (pi, gchan);
1520             }
1521 #if 0
1522             break;
1523 #else
1524             /*
1525              * MUSYCC Interrupt Descriptor section states that EOB and EOM
1526              * can be combined with the NONE error (as well as others).  So
1527              * drop thru to catch this...
1528              */
1529 #endif
1530         case EVE_NONE:
1531             if (err == ERR_SHT)
1532             {
1533                 ch->s.rx_length_errors++;
1534             }
1535             break;
1536         default:
1537             if (cxt1e1_log_level >= LOG_WARN)
1538                 pr_info("%s: unexpected interrupt event: %d, iqd[%d]: %08x, port: %d\n", ci->devname,
1539                         event, headx, currInt, group);
1540             break;
1541         }                           /* switch on event */
1542 
1543 
1544         /*
1545          * Per MUSYCC Manual, Section 6.4.8.3 [Transmit Errors], TX errors
1546          * are service-affecting and require action to resume normal
1547          * bit-level processing.
1548          */
1549 
1550         switch (err)
1551         {
1552         case ERR_ONR:
1553             /*
1554              * Per MUSYCC manual, Section  6.4.8.3 [Transmit Errors], this
1555              * error requires Transmit channel reactivation.
1556              *
1557              * Per MUSYCC manual, Section  6.4.8.4 [Receive Errors], this error
1558              * requires Receive channel reactivation.
1559              */
1560             if (tx)
1561             {
1562 
1563                 /*
1564                  * TX ONR Error only occurs when channel is configured for
1565                  * Transparent Mode.  However, this code will catch and
1566                  * re-activate on ANY TX ONR error.
1567                  */
1568 
1569                 /*
1570                  * Set flag to re-enable on any next transmit attempt.
1571                  */
1572                 ch->ch_start_tx = CH_START_TX_ONR;
1573 
1574                 {
1575 #ifdef RLD_TRANS_DEBUG
1576                     if (1 || cxt1e1_log_level >= LOG_MONITOR)
1577 #else
1578                     if (cxt1e1_log_level >= LOG_MONITOR)
1579 #endif
1580                     {
1581                         pr_info("%s: TX buffer underflow [ONR] on channel %d, mode %x QStopped %x free %d\n",
1582                                 ci->devname, ch->channum, ch->p.chan_mode, sd_queue_stopped (ch->user), ch->txd_free);
1583 #ifdef RLD_DEBUG
1584                         if (ch->p.chan_mode == 2)       /* problem = ONR on HDLC
1585                                                          * mode */
1586                         {
1587                             pr_info("++ Failed Last %x Next %x QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
1588                                     (u_int32_t) ch->txd_irq_srv, (u_int32_t) ch->txd_usr_add,
1589                                     sd_queue_stopped (ch->user),
1590                                     ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
1591                             musycc_dump_txbuffer_ring (ch, 0);
1592                         }
1593 #endif
1594                     }
1595                 }
1596             } else                  /* RX buffer overrun */
1597             {
1598                 /*
1599                  * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors],
1600                  * channel recovery for this RX ONR error IS required.  It is
1601                  * also suggested to increase the number of receive buffers
1602                  * for this channel.  Receive channel reactivation IS
1603                  * required, and data has been lost.
1604                  */
1605                 ch->s.rx_over_errors++;
1606                 ch->ch_start_rx = CH_START_RX_ONR;
1607 
1608                 if (cxt1e1_log_level >= LOG_WARN)
1609                 {
1610                     pr_info("%s: RX buffer overflow [ONR] on channel %d, mode %x\n",
1611                             ci->devname, ch->channum, ch->p.chan_mode);
1612                     //musycc_dump_rxbuffer_ring (ch, 0);        /* RLD DEBUG */
1613                 }
1614             }
1615             musycc_chan_restart (ch);
1616             break;
1617         case ERR_BUF:
1618             if (tx)
1619             {
1620                 ch->s.tx_fifo_errors++;
1621                 ch->ch_start_tx = CH_START_TX_BUF;
1622                 /*
1623                  * Per MUSYCC manual, Section  6.4.8.3 [Transmit Errors],
1624                  * this BUFF error requires Transmit channel reactivation.
1625                  */
1626                 if (cxt1e1_log_level >= LOG_MONITOR)
1627                     pr_info("%s: TX buffer underrun [BUFF] on channel %d, mode %x\n",
1628                             ci->devname, ch->channum, ch->p.chan_mode);
1629             } else                  /* RX buffer overrun */
1630             {
1631                 ch->s.rx_over_errors++;
1632                 /*
1633                  * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors], HDLC
1634                  * mode requires NO recovery for this RX BUFF error is
1635                  * required.  It is suggested to increase the FIFO buffer
1636                  * space for this channel.  Receive channel reactivation is
1637                  * not required, but data has been lost.
1638                  */
1639                 if (cxt1e1_log_level >= LOG_WARN)
1640                     pr_info("%s: RX buffer overrun [BUFF] on channel %d, mode %x\n",
1641                             ci->devname, ch->channum, ch->p.chan_mode);
1642                 /*
1643                  * Per MUSYCC manual, Section 6.4.9.4 [Receive Errors],
1644                  * Transparent mode DOES require recovery for the RX BUFF
1645                  * error.  It is suggested to increase the FIFO buffer space
1646                  * for this channel.  Receive channel reactivation IS
1647                  * required and data has been lost.
1648                  */
1649                 if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
1650                     ch->ch_start_rx = CH_START_RX_BUF;
1651             }
1652 
1653             if (tx || (ch->p.chan_mode == CFG_CH_PROTO_TRANS))
1654                 musycc_chan_restart (ch);
1655             break;
1656         default:
1657             break;
1658         }                           /* switch on err */
1659 
1660         /* Check for interrupt lost condition */
1661         if ((currInt & INTRPT_ILOST_M) && (cxt1e1_log_level >= LOG_ERROR))
1662         {
1663             pr_info("%s: Interrupt queue overflow - ILOST asserted\n",
1664                     ci->devname);
1665         }
1666         ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1);     /* insure wrapness */
1667         FLUSH_MEM_WRITE ();
1668         FLUSH_MEM_READ ();
1669     }                               /* while */
1670     if ((cxt1e1_log_level >= LOG_MONITOR2) && (ci->iqp_headx != ci->iqp_tailx))
1671     {
1672         int         bh;
1673 
1674         bh = atomic_read (&CI->bh_pending);
1675         pr_info("_bh_: late arrivals, head %d != tail %d, pending %d\n",
1676                 ci->iqp_headx, ci->iqp_tailx, bh);
1677     }
1678 #if defined(SBE_ISR_IMMEDIATE)
1679     return 0L;
1680 #endif
1681     /* else, nothing returned */
1682 }
1683 
#if 0
/*
 * musycc_new_chan - (compiled out) claim an UNASSIGNED channel slot and
 * seed it with default parameters.
 *
 * Maps the linear channel number onto its port/channel table entry,
 * refuses slots already in use (EEXIST), then records the caller's
 * cookie and installs HDLC/FCS16 defaults.  Returns 0 on success.
 * Error codes are returned as positive values, matching the rest of
 * this file's convention.
 */
int         __init
musycc_new_chan (ci_t * ci, int channum, void *user)
{
    mch_t      *ch;

    /* linear channum -> (port, channel-within-port) lookup */
    ch = ci->port[channum / MUSYCC_NCHANS].chan[channum % MUSYCC_NCHANS];

    if (ch->state != UNASSIGNED)
        return EEXIST;              /* slot already claimed by someone */
    /* NOTE: mch_t already cleared during OS_kmalloc() */
    ch->state = DOWN;
    ch->user = user;                /* opaque caller cookie (net device?) */
#if 0
    ch->status = 0;
    ch->p.status = 0;
    ch->p.intr_mask = 0;
#endif
    ch->p.chan_mode = CFG_CH_PROTO_HDLC_FCS16;  /* default protocol mode */
    ch->p.idlecode = CFG_CH_FLAG_7E;            /* idle/flag fill pattern */
    ch->p.pad_fill_count = 2;
    spin_lock_init (&ch->ch_rxlock);
    spin_lock_init (&ch->ch_txlock);

    return 0;
}
#endif
1711 
1712 
#ifdef SBE_PMCC4_ENABLE
/*
 * musycc_chan_down - deactivate a channel and release its descriptor rings.
 *
 * Issues RX and TX DEACTIVATE service requests for the channel's group
 * slot, clears the group register-image head/message pointers, frees any
 * buffer tokens still attached to the TX/RX descriptor rings, then frees
 * the rings themselves and rebuilds the port's timeslot map.
 *
 * @dummy:   card/device handle used only to locate the channel
 * @channum: linear channel number
 *
 * Returns 0 on success (including the already-DOWN case, which still
 * re-issues the deactivate requests), or EINVAL (positive, per this
 * file's convention) if the channel cannot be found.
 */
status_t
musycc_chan_down (ci_t * dummy, int channum)
{
    mpi_t      *pi;
    mch_t      *ch;
    int         i, gchan;

    ch = sd_find_chan (dummy, channum);
    if (!ch)
        return EINVAL;
    pi = ch->up;
    gchan = ch->gchan;

    /* Deactivate the channel in both directions */
    musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_RX_DIRECTION | gchan);
    ch->ch_start_rx = 0;
    musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_TX_DIRECTION | gchan);
    ch->ch_start_tx = 0;

    if (ch->state == DOWN)          /* already torn down; nothing to free */
        return 0;
    ch->state = DOWN;

    /* Clear head- and message-pointer slots in the group register image */
    pi->regram->thp[gchan] = 0;
    pi->regram->tmp[gchan] = 0;
    pi->regram->rhp[gchan] = 0;
    pi->regram->rmp[gchan] = 0;
    FLUSH_MEM_WRITE ();

    /* Release buffer tokens still owned by the descriptor rings */
    for (i = 0; i < ch->txd_num; i++)
    {
        if (ch->mdt[i].mem_token != NULL)
            OS_mem_token_free (ch->mdt[i].mem_token);
    }

    for (i = 0; i < ch->rxd_num; i++)
    {
        if (ch->mdr[i].mem_token != NULL)
            OS_mem_token_free (ch->mdr[i].mem_token);
    }

    /* Free the rings and mark them gone so stale pointers can't be reused */
    OS_kfree (ch->mdr);
    ch->mdr = NULL;
    ch->rxd_num = 0;
    OS_kfree (ch->mdt);
    ch->mdt = NULL;
    ch->txd_num = 0;

    musycc_update_timeslots (pi);
    c4_fifo_free (pi, ch->gchan);

    pi->openchans--;
    return 0;
}
#endif
1767 
1768 
1769 int
musycc_del_chan(ci_t * ci,int channum)1770 musycc_del_chan (ci_t * ci, int channum)
1771 {
1772     mch_t      *ch;
1773 
1774     if ((channum < 0) || (channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)))  /* sanity chk param */
1775         return ECHRNG;
1776     if (!(ch = sd_find_chan (ci, channum)))
1777         return ENOENT;
1778     if (ch->state == UP)
1779         musycc_chan_down (ci, channum);
1780     ch->state = UNASSIGNED;
1781     return 0;
1782 }
1783 
1784 
1785 int
musycc_del_chan_stats(ci_t * ci,int channum)1786 musycc_del_chan_stats (ci_t * ci, int channum)
1787 {
1788     mch_t      *ch;
1789 
1790     if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS))      /* sanity chk param */
1791         return ECHRNG;
1792     if (!(ch = sd_find_chan (ci, channum)))
1793         return ENOENT;
1794 
1795     memset (&ch->s, 0, sizeof (struct sbecom_chan_stats));
1796     return 0;
1797 }
1798 
1799 
/*
 * musycc_start_xmit - queue a user buffer chain onto a channel's TX
 * descriptor ring and hand ownership to the MUSYCC for transmission.
 *
 * @ci:        card/device handle
 * @channum:   linear channel number
 * @mem_token: opaque buffer token (walked via the OS_mem_token_* accessors;
 *             may describe several chained segments)
 *
 * Returns 0 on success or when the buffer held no data (token freed),
 * ENOENT/EINVAL/EROFS for bad channel/state, or EBUSY after flow-controlling
 * the line when too few descriptors are currently free.  Error codes are
 * positive, per this file's convention.
 *
 * On success, ownership of @mem_token passes to the driver: it is attached
 * to the LAST descriptor of the chain and released only when all segments
 * have been transmitted.  On the 0-length and too-large paths the token is
 * freed here; on the EBUSY path the caller keeps it and retries.
 */
int
musycc_start_xmit (ci_t * ci, int channum, void *mem_token)
{
    mch_t      *ch;
    struct mdesc *md;
    void       *m2;
#if 0
    unsigned long flags;
#endif
    int         txd_need_cnt;
    u_int32_t   len;

    if (!(ch = sd_find_chan (ci, channum)))
        return ENOENT;

    if (ci->state != C_RUNNING)     /* full interrupt processing available */
        return EINVAL;
    if (ch->state != UP)
        return EINVAL;

    if (!(ch->status & TX_ENABLED))
        return EROFS;               /* how else to flag unwritable state ? */

#ifdef RLD_TRANS_DEBUGx
    if (1 || cxt1e1_log_level >= LOG_MONITOR2)
#else
    if (cxt1e1_log_level >= LOG_MONITOR2)
#endif
    {
        pr_info("++ start_xmt[%d]: state %x start %x full %d free %d required %d stopped %x\n",
                channum, ch->state, ch->ch_start_tx, ch->tx_full,
                ch->txd_free, ch->txd_required, sd_queue_stopped (ch->user));
    }
    /***********************************************/
    /** Determine total amount of data to be sent **/
    /***********************************************/
    /* Walk the segment chain counting how many descriptors are needed;
     * zero-length segments are skipped and consume no descriptor. */
    m2 = mem_token;
    txd_need_cnt = 0;
    for (len = OS_mem_token_tlen (m2); len > 0;
         m2 = (void *) OS_mem_token_next (m2))
    {
        if (!OS_mem_token_len (m2))
            continue;
        txd_need_cnt++;
        len -= OS_mem_token_len (m2);
    }

    if (txd_need_cnt == 0)
    {
        if (cxt1e1_log_level >= LOG_MONITOR2)
            pr_info("%s channel %d: no TX data in User buffer\n", ci->devname, channum);
        OS_mem_token_free (mem_token);
        return 0;                   /* no data to send */
    }
    /*************************************************/
    /** Are there sufficient descriptors available? **/
    /*************************************************/
    if (txd_need_cnt > ch->txd_num) /* never enough descriptors for this
                                     * large a buffer */
    {
        if (cxt1e1_log_level >= LOG_DEBUG)
        {
            pr_info("start_xmit: discarding buffer, insufficient descriptor cnt %d, need %d.\n",
                    ch->txd_num, txd_need_cnt + 1);
        }
        /* buffer can NEVER fit this ring: drop it rather than stall */
        ch->s.tx_dropped++;
        OS_mem_token_free (mem_token);
        return 0;
    }
#if 0
    spin_lock_irqsave (&ch->ch_txlock, flags);
#endif
    /************************************************************/
    /** flow control the line if not enough descriptors remain **/
    /************************************************************/
    if (txd_need_cnt > ch->txd_free)
    {
        if (cxt1e1_log_level >= LOG_MONITOR2)
        {
            pr_info("start_xmit[%d]: EBUSY - need more descriptors, have %d of %d need %d\n",
                    channum, ch->txd_free, ch->txd_num, txd_need_cnt);
        }
        /* record how many descriptors must free up before the queue is
         * re-enabled, then stop the upper layer's transmit queue */
        ch->tx_full = 1;
        ch->txd_required = txd_need_cnt;
        sd_disable_xmit (ch->user);
#if 0
        spin_unlock_irqrestore (&ch->ch_txlock, flags);
#endif
        return EBUSY;               /* tell user to try again later */
    }
    /**************************************************/
    /** Put the user data into MUSYCC data buffer(s) **/
    /**************************************************/
    m2 = mem_token;
    md = ch->txd_usr_add;           /* get current available descriptor */

    for (len = OS_mem_token_tlen (m2); len > 0; m2 = OS_mem_token_next (m2))
    {
        int         u = OS_mem_token_len (m2);

        if (!u)
            continue;
        len -= u;

        /*
         * Enable following chunks, yet wait to enable the FIRST chunk until
         * after ALL subsequent chunks are setup.
         */
        if (md != ch->txd_usr_add)  /* not first chunk */
            u |= MUSYCC_TX_OWNED;   /* transfer ownership from HOST to MUSYCC */

        if (len)                    /* not last chunk */
            u |= EOBIRQ_ENABLE;
        else if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
        {
            /*
             * Per MUSYCC Ref 6.4.9 for Transparent Mode, the host must
             * always clear EOMIRQ_ENABLE in every Transmit Buffer Descriptor
             * (IE. don't set herein).
             */
            u |= EOBIRQ_ENABLE;
        } else
            u |= EOMIRQ_ENABLE;     /* EOM, last HDLC chunk */


        /* last chunk in hdlc mode */
        u |= (ch->p.idlecode << IDLE_CODE);
        if (ch->p.pad_fill_count)
        {
#if 0
            /* NOOP NOTE: u_int8_t cannot be > 0xFF */
            /* sanitize pad_fill_count for maximums allowed by hardware */
            if (ch->p.pad_fill_count > EXTRA_FLAGS_MASK)
                ch->p.pad_fill_count = EXTRA_FLAGS_MASK;
#endif
            u |= (PADFILL_ENABLE | (ch->p.pad_fill_count << EXTRA_FLAGS));
        }
        md->mem_token = len ? 0 : mem_token;    /* Fill in mds on last
                                                 * segment, others set ZERO
                                                 * so that entire token is
                                                 * removed ONLY when ALL
                                                 * segments have been
                                                 * transmitted. */

        /* publish the buffer address BEFORE the status word: the MUSYCC
         * polls status for the ownership bit, so data must land first */
        md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m2)));
        FLUSH_MEM_WRITE ();
        md->status = cpu_to_le32 (u);
        --ch->txd_free;
        md = md->snext;
    }
    FLUSH_MEM_WRITE ();


    /*
     * Now transfer ownership of first chunk from HOST to MUSYCC in order to
     * fire-off this XMIT.
     */
    ch->txd_usr_add->status |= __constant_cpu_to_le32 (MUSYCC_TX_OWNED);
    FLUSH_MEM_WRITE ();
    ch->txd_usr_add = md;

    /* account the whole chain's byte count against channel and card */
    len = OS_mem_token_tlen (mem_token);
    atomic_add (len, &ch->tx_pending);
    atomic_add (len, &ci->tx_pending);
    ch->s.tx_packets++;
    ch->s.tx_bytes += len;
    /*
     * If an ONR was seen, then channel requires poking to restart
     * transmission.
     */
    if (ch->ch_start_tx)
    {
        musycc_chan_restart (ch);
    }
#ifdef SBE_WAN256T3_ENABLE
    wan256t3_led (ci, LED_TX, LEDV_G);
#endif
    return 0;
}
1979 
1980 
1981 /*** End-of-File ***/
1982