1 /* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $
2 *
3 * low level driver for CCD's hfc-pci based cards
4 *
5 * Author Werner Cornelius
6 * based on existing driver for CCD hfc ISA cards
7 * Copyright by Werner Cornelius <werner@isdn4linux.de>
8 * by Karsten Keil <keil@isdn4linux.de>
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 * For changes and modifications please read
14 * Documentation/isdn/HiSax.cert
15 *
16 */
17
18 #include <linux/init.h>
19 #include "hisax.h"
20 #include "hfc_pci.h"
21 #include "isdnl1.h"
22 #include <linux/pci.h>
23 #include <linux/sched.h>
24 #include <linux/interrupt.h>
25
26 static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
27
28 /* table entry in the PCI devices list */
29 typedef struct {
30 int vendor_id;
31 int device_id;
32 char *vendor_name;
33 char *card_name;
34 } PCI_ENTRY;
35
36 #define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */
37 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
38 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
39
40 static const PCI_ENTRY id_list[] =
41 {
42 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"},
43 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"},
44 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"},
45 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"},
46 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"},
47 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"},
48 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"},
49 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
50 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
51 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
52 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, "Primux II S0", "B700"},
53 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, "Primux II S0 NT", "B701"},
54 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
55 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
56 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
57 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"},
58 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"},
59 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"},
60 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,"Digi International", "Digi DataFire Micro V IOM2 (Europe)"},
61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"},
62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"},
63 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"},
64 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
65 {0, 0, NULL, NULL},
66 };
67
68
69 /******************************************/
70 /* free hardware resources used by driver */
71 /******************************************/
72 static void
73 release_io_hfcpci(struct IsdnCardState *cs)
74 {
75 printk(KERN_INFO "HiSax: release hfcpci at %p\n",
76 cs->hw.hfcpci.pci_io);
77 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
78 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
79 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
80 mdelay(10);
81 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
82 mdelay(10);
83 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
84 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0); /* disable memory mapped ports + busmaster */
85 del_timer(&cs->hw.hfcpci.timer);
86 pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
87 cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
88 cs->hw.hfcpci.fifos = NULL;
89 iounmap((void *)cs->hw.hfcpci.pci_io);
90 }
91
92 /********************************************************************************/
93 /* function called to reset the HFC PCI chip. A complete software reset of chip */
94 /* and fifos is done. */
95 /********************************************************************************/
96 static void
97 reset_hfcpci(struct IsdnCardState *cs)
98 {
99 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
100 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
101 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
102
103 printk(KERN_INFO "HFC_PCI: resetting card\n");
104 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER); /* enable memory ports + busmaster */
105 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
106 mdelay(10);
107 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
108 mdelay(10);
109 if (Read_hfc(cs, HFCPCI_STATUS) & 2)
110 printk(KERN_WARNING "HFC-PCI init bit busy\n");
111
112 cs->hw.hfcpci.fifo_en = 0x30; /* only D fifos enabled */
113 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
114
115 cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
116 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
117
118 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE); /* ST-Bit delay for TE-Mode */
119 cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE;
120 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e); /* S/T Auto awake */
121 cs->hw.hfcpci.bswapped = 0; /* no exchange */
122 cs->hw.hfcpci.nt_mode = 0; /* we are in TE mode */
123 cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
124 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
125
126 cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
127 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
128 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
129
130 /* Clear already pending ints */
131 if (Read_hfc(cs, HFCPCI_INT_S1));
132
133 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2); /* HFC ST 2 */
134 udelay(10);
135 Write_hfc(cs, HFCPCI_STATES, 2); /* HFC ST 2 */
136 cs->hw.hfcpci.mst_m = HFCPCI_MASTER; /* HFC Master Mode */
137
138 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
139 cs->hw.hfcpci.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
140 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
141 cs->hw.hfcpci.sctrl_r = 0;
142 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
143
144 /* Init GCI/IOM2 in master mode */
145 /* Slots 0 and 1 are set for B-chan 1 and 2 */
146 /* D- and monitor/CI channel are not enabled */
147 /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
148 /* STIO2 is used as data input, B1+B2 from IOM->ST */
149 /* ST B-channel send disabled -> continuous 1s */
150 /* The IOM slots are always enabled */
151 cs->hw.hfcpci.conn = 0x36; /* set data flow directions */
152 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
153 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */
154 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */
155 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */
156 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */
157
158 /* Finally enable IRQ output */
159 cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
160 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
161 if (Read_hfc(cs, HFCPCI_INT_S1));
162 }
163
164 /***************************************************/
165 /* Timer function called when kernel timer expires */
166 /***************************************************/
167 static void
168 hfcpci_Timer(struct IsdnCardState *cs)
169 {
170 cs->hw.hfcpci.timer.expires = jiffies + 75;
171 /* WD RESET */
172 /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
173 add_timer(&cs->hw.hfcpci.timer);
174 */
175 }
176
177
178 /*********************************/
179 /* schedule a new D-channel task */
180 /*********************************/
181 static void
182 sched_event_D_pci(struct IsdnCardState *cs, int event)
183 {
184 test_and_set_bit(event, &cs->event);
185 schedule_work(&cs->tqueue);
186 }
187
188 /*********************************/
189 /* schedule a new b_channel task */
190 /*********************************/
191 static void
192 hfcpci_sched_event(struct BCState *bcs, int event)
193 {
194 test_and_set_bit(event, &bcs->event);
195 schedule_work(&bcs->tqueue);
196 }
197
198 /************************************************/
199 /* select a b-channel entry matching and active */
200 /************************************************/
201 static
202 struct BCState *
203 Sel_BCS(struct IsdnCardState *cs, int channel)
204 {
205 if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
206 return (&cs->bcs[0]);
207 else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
208 return (&cs->bcs[1]);
209 else
210 return (NULL);
211 }
212
213 /***************************************/
214 /* clear the desired B-channel rx fifo */
215 /***************************************/
216 static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
217 { u_char fifo_state;
218 bzfifo_type *bzr;
219
220 if (fifo) {
221 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
222 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX;
223 } else {
224 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
225 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX;
226 }
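/* briefly take the FIFO out of FIFO_EN while its F/Z counters are reset
   to the empty position, then switch it back on */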
227 if (fifo_state)
228 cs->hw.hfcpci.fifo_en ^= fifo_state;
229 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
230 cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0;
231 bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
232 bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1;
233 bzr->f1 = MAX_B_FRAMES;
234 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
235 if (fifo_state)
236 cs->hw.hfcpci.fifo_en |= fifo_state;
237 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
238 }
239
240 /***************************************/
241 /* clear the desired B-channel tx fifo */
242 /***************************************/
243 static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
244 { u_char fifo_state;
245 bzfifo_type *bzt;
246
247 if (fifo) {
248 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
249 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX;
250 } else {
251 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
252 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX;
253 }
254 if (fifo_state)
255 cs->hw.hfcpci.fifo_en ^= fifo_state;
256 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
257 bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
258 bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1;
259 bzt->f1 = MAX_B_FRAMES;
260 bzt->f2 = bzt->f1; /* init F pointers to remain constant */
261 if (fifo_state)
262 cs->hw.hfcpci.fifo_en |= fifo_state;
263 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
264 }
265
266 /*********************************************/
267 /* read a complete B-frame out of the buffer */
268 /*********************************************/
269 static struct sk_buff
270 *
271 hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type * bz, u_char * bdata, int count)
272 {
273 u_char *ptr, *ptr1, new_f2;
274 struct sk_buff *skb;
275 struct IsdnCardState *cs = bcs->cs;
276 int total, maxlen, new_z2;
277 z_type *zp;
278
279 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
280 debugl1(cs, "hfcpci_empty_fifo");
281 zp = &bz->za[bz->f2]; /* point to Z-Regs */
282 new_z2 = zp->z2 + count; /* new position in fifo */
283 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
284 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
285 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
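/* the byte at Z1 is the HDLC STAT byte appended by the chip; a nonzero
   value marks a CRC error or an aborted frame */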
286 if ((count > HSCX_BUFMAX + 3) || (count < 4) ||
287 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
288 if (cs->debug & L1_DEB_WARN)
289 debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count);
290 #ifdef ERROR_STATISTIC
291 bcs->err_inv++;
292 #endif
293 bz->za[new_f2].z2 = new_z2;
294 bz->f2 = new_f2; /* next buffer */
295 skb = NULL;
296 } else if (!(skb = dev_alloc_skb(count - 3)))
297 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
298 else {
299 total = count;
300 count -= 3;
301 ptr = skb_put(skb, count);
302
303 if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL)
304 maxlen = count; /* complete transfer */
305 else
306 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
307
308 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
309 memcpy(ptr, ptr1, maxlen); /* copy data */
310 count -= maxlen;
311
312 if (count) { /* rest remaining */
313 ptr += maxlen;
314 ptr1 = bdata; /* start of buffer */
315 memcpy(ptr, ptr1, count); /* rest */
316 }
317 bz->za[new_f2].z2 = new_z2;
318 bz->f2 = new_f2; /* next buffer */
319
320 }
321 return (skb);
322 }
323
324 /*******************************/
325 /* D-channel receive procedure */
326 /*******************************/
327 static
328 int
329 receive_dmsg(struct IsdnCardState *cs)
330 {
331 struct sk_buff *skb;
332 int maxlen;
333 int rcnt, total;
334 int count = 5;
335 u_char *ptr, *ptr1;
336 dfifo_type *df;
337 z_type *zp;
338
339 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx;
340 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
341 debugl1(cs, "rec_dmsg blocked");
342 return (1);
343 }
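/* F1 is the chip's write frame counter, F2 the driver's read counter;
   frames are waiting in the D receive fifo as long as they differ */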
344 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
345 zp = &df->za[df->f2 & D_FREG_MASK];
346 rcnt = zp->z1 - zp->z2;
347 if (rcnt < 0)
348 rcnt += D_FIFO_SIZE;
349 rcnt++;
350 if (cs->debug & L1_DEB_ISAC)
351 debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
352 df->f1, df->f2, zp->z1, zp->z2, rcnt);
353
354 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
355 (df->data[zp->z1])) {
356 if (cs->debug & L1_DEB_WARN)
357 debugl1(cs, "empty_fifo hfcpci packet inv. len %d or crc %d", rcnt, df->data[zp->z1]);
358 #ifdef ERROR_STATISTIC
359 cs->err_rx++;
360 #endif
361 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
362 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1);
363 } else if ((skb = dev_alloc_skb(rcnt - 3))) {
364 total = rcnt;
365 rcnt -= 3;
366 ptr = skb_put(skb, rcnt);
367
368 if (zp->z2 + rcnt <= D_FIFO_SIZE)
369 maxlen = rcnt; /* complete transfer */
370 else
371 maxlen = D_FIFO_SIZE - zp->z2; /* maximum */
372
373 ptr1 = df->data + zp->z2; /* start of data */
374 memcpy(ptr, ptr1, maxlen); /* copy data */
375 rcnt -= maxlen;
376
377 if (rcnt) { /* rest remaining */
378 ptr += maxlen;
379 ptr1 = df->data; /* start of buffer */
380 memcpy(ptr, ptr1, rcnt); /* rest */
381 }
382 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
383 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1);
384
385 skb_queue_tail(&cs->rq, skb);
386 sched_event_D_pci(cs, D_RCVBUFREADY);
387 } else
388 printk(KERN_WARNING "HFC-PCI: D receive out of memory\n");
389 }
390 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
391 return (1);
392 }
393
394 /*******************************************************************************/
395 /* check for transparent receive data and read max one threshold size if avail */
396 /*******************************************************************************/
397 static int
398 hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type * bz, u_char * bdata)
399 {
400 unsigned short *z1r, *z2r;
401 int new_z2, fcnt, maxlen;
402 struct sk_buff *skb;
403 u_char *ptr, *ptr1;
404
405 z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
406 z2r = z1r + 1;
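/* transparent mode has no frame structure; only the Z counters of the
   last za[] entry are used to track the fill level */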
407
408 if (!(fcnt = *z1r - *z2r))
409 return (0); /* no data avail */
410
411 if (fcnt <= 0)
412 fcnt += B_FIFO_SIZE; /* bytes actually buffered */
413 if (fcnt > HFCPCI_BTRANS_THRESHOLD)
414 fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
415
416 new_z2 = *z2r + fcnt; /* new position in fifo */
417 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
418 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
419
420 if (!(skb = dev_alloc_skb(fcnt)))
421 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
422 else {
423 ptr = skb_put(skb, fcnt);
424 if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
425 maxlen = fcnt; /* complete transfer */
426 else
427 maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r; /* maximum */
428
429 ptr1 = bdata + (*z2r - B_SUB_VAL); /* start of data */
430 memcpy(ptr, ptr1, maxlen); /* copy data */
431 fcnt -= maxlen;
432
433 if (fcnt) { /* rest remaining */
434 ptr += maxlen;
435 ptr1 = bdata; /* start of buffer */
436 memcpy(ptr, ptr1, fcnt); /* rest */
437 }
438 skb_queue_tail(&bcs->rqueue, skb);
439 hfcpci_sched_event(bcs, B_RCVBUFREADY);
440 }
441
442 *z2r = new_z2; /* new position */
443 return (1);
444 } /* hfcpci_empty_fifo_trans */
445
446 /**********************************/
447 /* B-channel main receive routine */
448 /**********************************/
449 static void
450 main_rec_hfcpci(struct BCState *bcs)
451 {
452 struct IsdnCardState *cs = bcs->cs;
453 int rcnt, real_fifo;
454 int receive, count = 5;
455 struct sk_buff *skb;
456 bzfifo_type *bz;
457 u_char *bdata;
458 z_type *zp;
459
460
461 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
462 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
463 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
464 real_fifo = 1;
465 } else {
466 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
467 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1;
468 real_fifo = 0;
469 }
470 Begin:
471 count--;
472 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
473 debugl1(cs, "rec_data %d blocked", bcs->channel);
474 return;
475 }
476 if (bz->f1 != bz->f2) {
477 if (cs->debug & L1_DEB_HSCX)
478 debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)",
479 bcs->channel, bz->f1, bz->f2);
480 zp = &bz->za[bz->f2];
481
482 rcnt = zp->z1 - zp->z2;
483 if (rcnt < 0)
484 rcnt += B_FIFO_SIZE;
485 rcnt++;
486 if (cs->debug & L1_DEB_HSCX)
487 debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)",
488 bcs->channel, zp->z1, zp->z2, rcnt);
489 if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) {
490 skb_queue_tail(&bcs->rqueue, skb);
491 hfcpci_sched_event(bcs, B_RCVBUFREADY);
492 }
493 rcnt = bz->f1 - bz->f2;
494 if (rcnt < 0)
495 rcnt += MAX_B_FRAMES + 1;
496 if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) {
497 rcnt = 0;
498 hfcpci_clear_fifo_rx(cs, real_fifo);
499 }
500 cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt;
501 if (rcnt > 1)
502 receive = 1;
503 else
504 receive = 0;
505 } else if (bcs->mode == L1_MODE_TRANS)
506 receive = hfcpci_empty_fifo_trans(bcs, bz, bdata);
507 else
508 receive = 0;
509 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
510 if (count && receive)
511 goto Begin;
512 }
513
514 /**************************/
515 /* D-channel send routine */
516 /**************************/
517 static void
518 hfcpci_fill_dfifo(struct IsdnCardState *cs)
519 {
520 int fcnt;
521 int count, new_z1, maxlen;
522 dfifo_type *df;
523 u_char *src, *dst, new_f1;
524
525 if (!cs->tx_skb)
526 return;
527 if (cs->tx_skb->len <= 0)
528 return;
529
530 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx;
531
532 if (cs->debug & L1_DEB_ISAC)
533 debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)",
534 df->f1, df->f2,
535 df->za[df->f1 & D_FREG_MASK].z1);
536 fcnt = df->f1 - df->f2; /* frame count actually buffered */
537 if (fcnt < 0)
538 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
539 if (fcnt > (MAX_D_FRAMES - 1)) {
540 if (cs->debug & L1_DEB_ISAC)
541 debugl1(cs, "hfcpci_fill_Dfifo more than 14 frames");
542 #ifdef ERROR_STATISTIC
543 cs->err_tx++;
544 #endif
545 return;
546 }
547 /* now determine free bytes in FIFO buffer */
548 count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1;
549 if (count <= 0)
550 count += D_FIFO_SIZE; /* count now contains available bytes */
551
552 if (cs->debug & L1_DEB_ISAC)
553 debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)",
554 cs->tx_skb->len, count);
555 if (count < cs->tx_skb->len) {
556 if (cs->debug & L1_DEB_ISAC)
557 debugl1(cs, "hfcpci_fill_Dfifo no fifo mem");
558 return;
559 }
560 count = cs->tx_skb->len; /* get frame len */
561 new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1);
562 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
563 src = cs->tx_skb->data; /* source pointer */
564 dst = df->data + df->za[df->f1 & D_FREG_MASK].z1;
565 maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1; /* end fifo */
566 if (maxlen > count)
567 maxlen = count; /* limit size */
568 memcpy(dst, src, maxlen); /* first copy */
569
570 count -= maxlen; /* remaining bytes */
571 if (count) {
572 dst = df->data; /* start of buffer */
573 src += maxlen; /* new position */
574 memcpy(dst, src, count);
575 }
576 df->za[new_f1 & D_FREG_MASK].z1 = new_z1; /* for next buffer */
577 df->za[df->f1 & D_FREG_MASK].z1 = new_z1; /* new pos actual buffer */
578 df->f1 = new_f1; /* next frame */
579
580 dev_kfree_skb_any(cs->tx_skb);
581 cs->tx_skb = NULL;
582 }
583
584 /**************************/
585 /* B-channel send routine */
586 /**************************/
587 static void
588 hfcpci_fill_fifo(struct BCState *bcs)
589 {
590 struct IsdnCardState *cs = bcs->cs;
591 int maxlen, fcnt;
592 int count, new_z1;
593 bzfifo_type *bz;
594 u_char *bdata;
595 u_char new_f1, *src, *dst;
596 unsigned short *z1t, *z2t;
597
598 if (!bcs->tx_skb)
599 return;
600 if (bcs->tx_skb->len <= 0)
601 return;
602
603 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
604 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
605 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2;
606 } else {
607 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
608 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1;
609 }
610
611 if (bcs->mode == L1_MODE_TRANS) {
612 z1t = &bz->za[MAX_B_FRAMES].z1;
613 z2t = z1t + 1;
614 if (cs->debug & L1_DEB_HSCX)
615 debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)",
616 bcs->channel, *z1t, *z2t);
617 fcnt = *z2t - *z1t;
618 if (fcnt <= 0)
619 fcnt += B_FIFO_SIZE; /* fcnt contains available bytes in fifo */
620 fcnt = B_FIFO_SIZE - fcnt; /* remaining bytes to send */
621
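/* keep filling until at least two transmit thresholds worth of data are
   buffered or no more frames are queued */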
622 while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) {
623 if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) {
624 /* data is suitable for fifo */
625 count = bcs->tx_skb->len;
626
627 new_z1 = *z1t + count; /* new buffer Position */
628 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
629 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
630 src = bcs->tx_skb->data; /* source pointer */
631 dst = bdata + (*z1t - B_SUB_VAL);
632 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t; /* end of fifo */
633 if (maxlen > count)
634 maxlen = count; /* limit size */
635 memcpy(dst, src, maxlen); /* first copy */
636
637 count -= maxlen; /* remaining bytes */
638 if (count) {
639 dst = bdata; /* start of buffer */
640 src += maxlen; /* new position */
641 memcpy(dst, src, count);
642 }
643 bcs->tx_cnt -= bcs->tx_skb->len;
644 fcnt += bcs->tx_skb->len;
645 *z1t = new_z1; /* now send data */
646 } else if (cs->debug & L1_DEB_HSCX)
647 debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded",
648 bcs->channel, bcs->tx_skb->len);
649
650 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
651 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
652 u_long flags;
653 spin_lock_irqsave(&bcs->aclock, flags);
654 bcs->ackcnt += bcs->tx_skb->len;
655 spin_unlock_irqrestore(&bcs->aclock, flags);
656 schedule_event(bcs, B_ACKPENDING);
657 }
658
659 dev_kfree_skb_any(bcs->tx_skb);
660 bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */
661 }
662 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
663 return;
664 }
665 if (cs->debug & L1_DEB_HSCX)
666 debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)",
667 bcs->channel, bz->f1, bz->f2,
668 bz->za[bz->f1].z1);
669
670 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
671 if (fcnt < 0)
672 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
673 if (fcnt > (MAX_B_FRAMES - 1)) {
674 if (cs->debug & L1_DEB_HSCX)
675 debugl1(cs, "hfcpci_fill_Bfifo more than 14 frames");
676 return;
677 }
678 /* now determine free bytes in FIFO buffer */
679 count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1;
680 if (count <= 0)
681 count += B_FIFO_SIZE; /* count now contains available bytes */
682
683 if (cs->debug & L1_DEB_HSCX)
684 debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx",
685 bcs->channel, bcs->tx_skb->len,
686 count, current->state);
687
688 if (count < bcs->tx_skb->len) {
689 if (cs->debug & L1_DEB_HSCX)
690 debugl1(cs, "hfcpci_fill_fifo no fifo mem");
691 return;
692 }
693 count = bcs->tx_skb->len; /* get frame len */
694 new_z1 = bz->za[bz->f1].z1 + count; /* new buffer Position */
695 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
696 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
697
698 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
699 src = bcs->tx_skb->data; /* source pointer */
700 dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL);
701 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1; /* end fifo */
702 if (maxlen > count)
703 maxlen = count; /* limit size */
704 memcpy(dst, src, maxlen); /* first copy */
705
706 count -= maxlen; /* remaining bytes */
707 if (count) {
708 dst = bdata; /* start of buffer */
709 src += maxlen; /* new position */
710 memcpy(dst, src, count);
711 }
712 bcs->tx_cnt -= bcs->tx_skb->len;
713 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
714 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
715 u_long flags;
716 spin_lock_irqsave(&bcs->aclock, flags);
717 bcs->ackcnt += bcs->tx_skb->len;
718 spin_unlock_irqrestore(&bcs->aclock, flags);
719 schedule_event(bcs, B_ACKPENDING);
720 }
721
722 bz->za[new_f1].z1 = new_z1; /* for next buffer */
723 bz->f1 = new_f1; /* next frame */
724
725 dev_kfree_skb_any(bcs->tx_skb);
726 bcs->tx_skb = NULL;
727 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
728 }
729
730 /**********************************************/
731 /* D-channel l1 state call for leased NT-mode */
732 /**********************************************/
733 static void
734 dch_nt_l2l1(struct PStack *st, int pr, void *arg)
735 {
736 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
737
738 switch (pr) {
739 case (PH_DATA | REQUEST):
740 case (PH_PULL | REQUEST):
741 case (PH_PULL | INDICATION):
742 st->l1.l1hw(st, pr, arg);
743 break;
744 case (PH_ACTIVATE | REQUEST):
745 st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
746 break;
747 case (PH_TESTLOOP | REQUEST):
748 if (1 & (long) arg)
749 debugl1(cs, "PH_TEST_LOOP B1");
750 if (2 & (long) arg)
751 debugl1(cs, "PH_TEST_LOOP B2");
752 if (!(3 & (long) arg))
753 debugl1(cs, "PH_TEST_LOOP DISABLED");
754 st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
755 break;
756 default:
757 if (cs->debug)
758 debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
759 break;
760 }
761 }
762
763
764
765 /***********************/
766 /* set/reset echo mode */
767 /***********************/
768 static int
769 hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl * ic)
770 {
771 u_long flags;
772 int i = *(unsigned int *) ic->parm.num;
773
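/* arg == 98 switches the card into NT mode (leased line operation);
   arg == 12 enables/disables D-channel echo logging, which borrows the
   B2 receive fifo */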
774 if ((ic->arg == 98) &&
775 (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) {
776 spin_lock_irqsave(&cs->lock, flags);
777 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT); /* ST-Bit delay for NT-Mode */
778 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0); /* HFC ST G0 */
779 udelay(10);
780 cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT;
781 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); /* set NT-mode */
782 udelay(10);
783 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1); /* HFC ST G1 */
784 udelay(10);
785 Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
786 cs->dc.hfcpci.ph_state = 1;
787 cs->hw.hfcpci.nt_mode = 1;
788 cs->hw.hfcpci.nt_timer = 0;
789 cs->stlist->l2.l2l1 = dch_nt_l2l1;
790 spin_unlock_irqrestore(&cs->lock, flags);
791 debugl1(cs, "NT mode activated");
792 return (0);
793 }
794 if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) ||
795 (cs->hw.hfcpci.nt_mode) || (ic->arg != 12))
796 return (-EINVAL);
797
798 spin_lock_irqsave(&cs->lock, flags);
799 if (i) {
800 cs->logecho = 1;
801 cs->hw.hfcpci.trm |= 0x20; /* enable echo chan */
802 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC;
803 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX;
804 } else {
805 cs->logecho = 0;
806 cs->hw.hfcpci.trm &= ~0x20; /* disable echo chan */
807 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC;
808 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX;
809 }
810 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
811 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
812 cs->hw.hfcpci.conn |= 0x10; /* B2-IOM -> B2-ST */
813 cs->hw.hfcpci.ctmt &= ~2;
814 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
815 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
816 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
817 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
818 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
819 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
820 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
821 spin_unlock_irqrestore(&cs->lock, flags);
822 return (0);
823 } /* hfcpci_auxcmd */
824
825 /*****************************/
826 /* E-channel receive routine */
827 /*****************************/
828 static void
829 receive_emsg(struct IsdnCardState *cs)
830 {
831 int rcnt;
832 int receive, count = 5;
833 bzfifo_type *bz;
834 u_char *bdata;
835 z_type *zp;
836 u_char *ptr, *ptr1, new_f2;
837 int total, maxlen, new_z2;
838 u_char e_buffer[256];
839
840 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
841 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
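/* when echo logging is enabled the E-channel data arrives through the
   B2 receive fifo (see hfcpci_auxcmd) */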
842 Begin:
843 count--;
844 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
845 debugl1(cs, "echo_rec_data blocked");
846 return;
847 }
848 if (bz->f1 != bz->f2) {
849 if (cs->debug & L1_DEB_ISAC)
850 debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)",
851 bz->f1, bz->f2);
852 zp = &bz->za[bz->f2];
853
854 rcnt = zp->z1 - zp->z2;
855 if (rcnt < 0)
856 rcnt += B_FIFO_SIZE;
857 rcnt++;
858 if (cs->debug & L1_DEB_ISAC)
859 debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)",
860 zp->z1, zp->z2, rcnt);
861 new_z2 = zp->z2 + rcnt; /* new position in fifo */
862 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
863 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
864 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
865 if ((rcnt > 256 + 3) || (rcnt < 4) ||
866 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
867 if (cs->debug & L1_DEB_WARN)
868 debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt);
869 bz->za[new_f2].z2 = new_z2;
870 bz->f2 = new_f2; /* next buffer */
871 } else {
872 total = rcnt;
873 rcnt -= 3;
874 ptr = e_buffer;
875
876 if (zp->z2 <= B_FIFO_SIZE + B_SUB_VAL)
877 maxlen = rcnt; /* complete transfer */
878 else
879 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
880
881 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
882 memcpy(ptr, ptr1, maxlen); /* copy data */
883 rcnt -= maxlen;
884
885 if (rcnt) { /* rest remaining */
886 ptr += maxlen;
887 ptr1 = bdata; /* start of buffer */
888 memcpy(ptr, ptr1, rcnt); /* rest */
889 }
890 bz->za[new_f2].z2 = new_z2;
891 bz->f2 = new_f2; /* next buffer */
892 if (cs->debug & DEB_DLOG_HEX) {
893 ptr = cs->dlog;
894 if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) {
895 *ptr++ = 'E';
896 *ptr++ = 'C';
897 *ptr++ = 'H';
898 *ptr++ = 'O';
899 *ptr++ = ':';
900 ptr += QuickHex(ptr, e_buffer, total - 3);
901 ptr--;
902 *ptr++ = '\n';
903 *ptr = 0;
904 HiSax_putstatus(cs, NULL, cs->dlog);
905 } else
906 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
907 }
908 }
909
910 rcnt = bz->f1 - bz->f2;
911 if (rcnt < 0)
912 rcnt += MAX_B_FRAMES + 1;
913 if (rcnt > 1)
914 receive = 1;
915 else
916 receive = 0;
917 } else
918 receive = 0;
919 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
920 if (count && receive)
921 goto Begin;
922 } /* receive_emsg */
923
924 /*********************/
925 /* Interrupt handler */
926 /*********************/
927 static irqreturn_t
928 hfcpci_interrupt(int intno, void *dev_id)
929 {
930 u_long flags;
931 struct IsdnCardState *cs = dev_id;
932 u_char exval;
933 struct BCState *bcs;
934 int count = 15;
935 u_char val, stat;
936
937 if (!(cs->hw.hfcpci.int_m2 & 0x08)) {
938 debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2);
939 return IRQ_NONE; /* not initialised */
940 }
941 spin_lock_irqsave(&cs->lock, flags);
942 if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) {
943 val = Read_hfc(cs, HFCPCI_INT_S1);
944 if (cs->debug & L1_DEB_ISAC)
945 debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val);
946 } else {
947 spin_unlock_irqrestore(&cs->lock, flags);
948 return IRQ_NONE;
949 }
950 if (cs->debug & L1_DEB_ISAC)
951 debugl1(cs, "HFC-PCI irq %x %s", val,
952 test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
953 "locked" : "unlocked");
954 val &= cs->hw.hfcpci.int_m1;
955 if (val & 0x40) { /* state machine irq */
956 exval = Read_hfc(cs, HFCPCI_STATES) & 0xf;
957 if (cs->debug & L1_DEB_ISAC)
958 debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state,
959 exval);
960 cs->dc.hfcpci.ph_state = exval;
961 sched_event_D_pci(cs, D_L1STATECHANGE);
962 val &= ~0x40;
963 }
964 if (val & 0x80) { /* timer irq */
965 if (cs->hw.hfcpci.nt_mode) {
966 if ((--cs->hw.hfcpci.nt_timer) < 0)
967 sched_event_D_pci(cs, D_L1STATECHANGE);
968 }
969 val &= ~0x80;
970 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
971 }
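/* if another code path holds the atomic hardware lock, the remaining
   interrupt bits are parked in int_s1 and picked up again later */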
972 while (val) {
973 if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
974 cs->hw.hfcpci.int_s1 |= val;
975 spin_unlock_irqrestore(&cs->lock, flags);
976 return IRQ_HANDLED;
977 }
978 if (cs->hw.hfcpci.int_s1 & 0x18) {
979 exval = val;
980 val = cs->hw.hfcpci.int_s1;
981 cs->hw.hfcpci.int_s1 = exval;
982 }
983 if (val & 0x08) {
984 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
985 if (cs->debug)
986 debugl1(cs, "hfcpci spurious 0x08 IRQ");
987 } else
988 main_rec_hfcpci(bcs);
989 }
990 if (val & 0x10) {
991 if (cs->logecho)
992 receive_emsg(cs);
993 else if (!(bcs = Sel_BCS(cs, 1))) {
994 if (cs->debug)
995 debugl1(cs, "hfcpci spurious 0x10 IRQ");
996 } else
997 main_rec_hfcpci(bcs);
998 }
999 if (val & 0x01) {
1000 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
1001 if (cs->debug)
1002 debugl1(cs, "hfcpci spurious 0x01 IRQ");
1003 } else {
1004 if (bcs->tx_skb) {
1005 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1006 hfcpci_fill_fifo(bcs);
1007 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1008 } else
1009 debugl1(cs, "fill_data %d blocked", bcs->channel);
1010 } else {
1011 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1012 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1013 hfcpci_fill_fifo(bcs);
1014 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1015 } else
1016 debugl1(cs, "fill_data %d blocked", bcs->channel);
1017 } else {
1018 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1019 }
1020 }
1021 }
1022 }
1023 if (val & 0x02) {
1024 if (!(bcs = Sel_BCS(cs, 1))) {
1025 if (cs->debug)
1026 debugl1(cs, "hfcpci spurious 0x02 IRQ");
1027 } else {
1028 if (bcs->tx_skb) {
1029 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1030 hfcpci_fill_fifo(bcs);
1031 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1032 } else
1033 debugl1(cs, "fill_data %d blocked", bcs->channel);
1034 } else {
1035 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1036 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1037 hfcpci_fill_fifo(bcs);
1038 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1039 } else
1040 debugl1(cs, "fill_data %d blocked", bcs->channel);
1041 } else {
1042 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1043 }
1044 }
1045 }
1046 }
1047 if (val & 0x20) { /* receive dframe */
1048 receive_dmsg(cs);
1049 }
1050 if (val & 0x04) { /* dframe transmitted */
1051 if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
1052 del_timer(&cs->dbusytimer);
1053 if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
1054 sched_event_D_pci(cs, D_CLEARBUSY);
1055 if (cs->tx_skb) {
1056 if (cs->tx_skb->len) {
1057 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1058 hfcpci_fill_dfifo(cs);
1059 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1060 } else {
1061 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1062 }
1063 goto afterXPR;
1064 } else {
1065 dev_kfree_skb_irq(cs->tx_skb);
1066 cs->tx_cnt = 0;
1067 cs->tx_skb = NULL;
1068 }
1069 }
1070 if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
1071 cs->tx_cnt = 0;
1072 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1073 hfcpci_fill_dfifo(cs);
1074 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1075 } else {
1076 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1077 }
1078 } else
1079 sched_event_D_pci(cs, D_XMTBUFREADY);
1080 }
1081 afterXPR:
1082 if (cs->hw.hfcpci.int_s1 && count--) {
1083 val = cs->hw.hfcpci.int_s1;
1084 cs->hw.hfcpci.int_s1 = 0;
1085 if (cs->debug & L1_DEB_ISAC)
1086 debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count);
1087 } else
1088 val = 0;
1089 }
1090 spin_unlock_irqrestore(&cs->lock, flags);
1091 return IRQ_HANDLED;
1092 }
1093
1094 /********************************************************************/
1095 /* timer callback for D-chan busy resolution. Currently no function */
1096 /********************************************************************/
1097 static void
1098 hfcpci_dbusy_timer(struct IsdnCardState *cs)
1099 {
1100 }
1101
1102 /*************************************/
1103 /* Layer 1 D-channel hardware access */
1104 /*************************************/
1105 static void
1106 HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
1107 {
1108 u_long flags;
1109 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
1110 struct sk_buff *skb = arg;
1111
1112 switch (pr) {
1113 case (PH_DATA | REQUEST):
1114 if (cs->debug & DEB_DLOG_HEX)
1115 LogFrame(cs, skb->data, skb->len);
1116 if (cs->debug & DEB_DLOG_VERBOSE)
1117 dlogframe(cs, skb, 0);
1118 spin_lock_irqsave(&cs->lock, flags);
1119 if (cs->tx_skb) {
1120 skb_queue_tail(&cs->sq, skb);
1121 #ifdef L2FRAME_DEBUG /* psa */
1122 if (cs->debug & L1_DEB_LAPD)
1123 Logl2Frame(cs, skb, "PH_DATA Queued", 0);
1124 #endif
1125 } else {
1126 cs->tx_skb = skb;
1127 cs->tx_cnt = 0;
1128 #ifdef L2FRAME_DEBUG /* psa */
1129 if (cs->debug & L1_DEB_LAPD)
1130 Logl2Frame(cs, skb, "PH_DATA", 0);
1131 #endif
1132 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1133 hfcpci_fill_dfifo(cs);
1134 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1135 } else
1136 debugl1(cs, "hfcpci_fill_dfifo blocked");
1137
1138 }
1139 spin_unlock_irqrestore(&cs->lock, flags);
1140 break;
1141 case (PH_PULL | INDICATION):
1142 spin_lock_irqsave(&cs->lock, flags);
1143 if (cs->tx_skb) {
1144 if (cs->debug & L1_DEB_WARN)
1145 debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
1146 skb_queue_tail(&cs->sq, skb);
1147 spin_unlock_irqrestore(&cs->lock, flags);
1148 break;
1149 }
1150 if (cs->debug & DEB_DLOG_HEX)
1151 LogFrame(cs, skb->data, skb->len);
1152 if (cs->debug & DEB_DLOG_VERBOSE)
1153 dlogframe(cs, skb, 0);
1154 cs->tx_skb = skb;
1155 cs->tx_cnt = 0;
1156 #ifdef L2FRAME_DEBUG /* psa */
1157 if (cs->debug & L1_DEB_LAPD)
1158 Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
1159 #endif
1160 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1161 hfcpci_fill_dfifo(cs);
1162 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1163 } else
1164 debugl1(cs, "hfcpci_fill_dfifo blocked");
1165 spin_unlock_irqrestore(&cs->lock, flags);
1166 break;
1167 case (PH_PULL | REQUEST):
1168 #ifdef L2FRAME_DEBUG /* psa */
1169 if (cs->debug & L1_DEB_LAPD)
1170 debugl1(cs, "-> PH_REQUEST_PULL");
1171 #endif
1172 if (!cs->tx_skb) {
1173 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1174 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1175 } else
1176 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1177 break;
1178 case (HW_RESET | REQUEST):
1179 spin_lock_irqsave(&cs->lock, flags);
1180 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); /* HFC ST 3 */
1181 udelay(6);
1182 Write_hfc(cs, HFCPCI_STATES, 3); /* HFC ST 3 */
1183 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1184 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1185 Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
1186 spin_unlock_irqrestore(&cs->lock, flags);
1187 l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
1188 break;
1189 case (HW_ENABLE | REQUEST):
1190 spin_lock_irqsave(&cs->lock, flags);
1191 Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION);
1192 spin_unlock_irqrestore(&cs->lock, flags);
1193 break;
1194 case (HW_DEACTIVATE | REQUEST):
1195 spin_lock_irqsave(&cs->lock, flags);
1196 cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER;
1197 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1198 spin_unlock_irqrestore(&cs->lock, flags);
1199 break;
1200 case (HW_INFO3 | REQUEST):
1201 spin_lock_irqsave(&cs->lock, flags);
1202 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1203 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1204 spin_unlock_irqrestore(&cs->lock, flags);
1205 break;
1206 case (HW_TESTLOOP | REQUEST):
1207 spin_lock_irqsave(&cs->lock, flags);
1208 switch ((long) arg) {
1209 case (1):
1210 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* tx slot */
1211 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* rx slot */
1212 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1;
1213 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1214 break;
1215
1216 case (2):
1217 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* tx slot */
1218 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* rx slot */
1219 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08;
1220 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1221 break;
1222
1223 default:
1224 spin_unlock_irqrestore(&cs->lock, flags);
1225 if (cs->debug & L1_DEB_WARN)
1226 debugl1(cs, "hfcpci_l1hw loop invalid %4lx", (long) arg);
1227 return;
1228 }
1229 cs->hw.hfcpci.trm |= 0x80; /* enable IOM-loop */
1230 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
1231 spin_unlock_irqrestore(&cs->lock, flags);
1232 break;
1233 default:
1234 if (cs->debug & L1_DEB_WARN)
1235 debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr);
1236 break;
1237 }
1238 }
1239
1240 /***********************************************/
1241 /* called during init setting l1 stack pointer */
1242 /***********************************************/
1243 static void
1244 setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs)
1245 {
1246 st->l1.l1hw = HFCPCI_l1hw;
1247 }
1248
1249 /**************************************/
1250 /* send B-channel data if not blocked */
1251 /**************************************/
1252 static void
1253 hfcpci_send_data(struct BCState *bcs)
1254 {
1255 struct IsdnCardState *cs = bcs->cs;
1256
1257 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1258 hfcpci_fill_fifo(bcs);
1259 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1260 } else
1261 debugl1(cs, "send_data %d blocked", bcs->channel);
1262 }
1263
1264 /***************************************************************/
1265 /* activate/deactivate hardware for selected channels and mode */
1266 /***************************************************************/
1267 static void
1268 mode_hfcpci(struct BCState *bcs, int mode, int bc)
1269 {
1270 struct IsdnCardState *cs = bcs->cs;
1271 int fifo2;
1272
1273 if (cs->debug & L1_DEB_HSCX)
1274 debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d",
1275 mode, bc, bcs->channel);
1276 bcs->mode = mode;
1277 bcs->channel = bc;
1278 fifo2 = bc;
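/* with only one usable B-channel (chanlimit == 1) a request for B2 is
   served by the B1 fifo while B1 and B2 are exchanged in hardware
   (bswapped) */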
1279 if (cs->chanlimit > 1) {
1280 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1281 cs->hw.hfcpci.sctrl_e &= ~0x80;
1282 } else {
1283 if (bc) {
1284 if (mode != L1_MODE_NULL) {
1285 cs->hw.hfcpci.bswapped = 1; /* B1 and B2 exchanged */
1286 cs->hw.hfcpci.sctrl_e |= 0x80;
1287 } else {
1288 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1289 cs->hw.hfcpci.sctrl_e &= ~0x80;
1290 }
1291 fifo2 = 0;
1292 } else {
1293 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1294 cs->hw.hfcpci.sctrl_e &= ~0x80;
1295 }
1296 }
1297 switch (mode) {
1298 case (L1_MODE_NULL):
1299 if (bc) {
1300 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
1301 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
1302 } else {
1303 cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA;
1304 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA;
1305 }
1306 if (fifo2) {
1307 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1308 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1309 } else {
1310 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1311 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1312 }
1313 break;
1314 case (L1_MODE_TRANS):
1315 hfcpci_clear_fifo_rx(cs, fifo2);
1316 hfcpci_clear_fifo_tx(cs, fifo2);
1317 if (bc) {
1318 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1319 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1320 } else {
1321 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1322 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1323 }
1324 if (fifo2) {
1325 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1326 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1327 cs->hw.hfcpci.ctmt |= 2;
1328 cs->hw.hfcpci.conn &= ~0x18;
1329 } else {
1330 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1331 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1332 cs->hw.hfcpci.ctmt |= 1;
1333 cs->hw.hfcpci.conn &= ~0x03;
1334 }
1335 break;
1336 case (L1_MODE_HDLC):
1337 hfcpci_clear_fifo_rx(cs, fifo2);
1338 hfcpci_clear_fifo_tx(cs, fifo2);
1339 if (bc) {
1340 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1341 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1342 } else {
1343 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1344 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1345 }
1346 if (fifo2) {
1347 cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
1348 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1349 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1350 cs->hw.hfcpci.ctmt &= ~2;
1351 cs->hw.hfcpci.conn &= ~0x18;
1352 } else {
1353 cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
1354 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1355 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1356 cs->hw.hfcpci.ctmt &= ~1;
1357 cs->hw.hfcpci.conn &= ~0x03;
1358 }
1359 break;
1360 case (L1_MODE_EXTRN):
1361 if (bc) {
1362 cs->hw.hfcpci.conn |= 0x10;
1363 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1364 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1365 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1366 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1367 } else {
1368 cs->hw.hfcpci.conn |= 0x02;
1369 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1370 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1371 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1372 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1373 }
1374 break;
1375 }
1376 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);
1377 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1378 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
1379 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
1380 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
1381 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
1382 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1383 }
1384
1385 /******************************/
1386 /* Layer2 -> Layer 1 Transfer */
1387 /******************************/
1388 static void
1389 hfcpci_l2l1(struct PStack *st, int pr, void *arg)
1390 {
1391 struct BCState *bcs = st->l1.bcs;
1392 u_long flags;
1393 struct sk_buff *skb = arg;
1394
1395 switch (pr) {
1396 case (PH_DATA | REQUEST):
1397 spin_lock_irqsave(&bcs->cs->lock, flags);
1398 if (bcs->tx_skb) {
1399 skb_queue_tail(&bcs->squeue, skb);
1400 } else {
1401 bcs->tx_skb = skb;
1402 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1403 bcs->cs->BC_Send_Data(bcs);
1404 }
1405 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1406 break;
1407 case (PH_PULL | INDICATION):
1408 spin_lock_irqsave(&bcs->cs->lock, flags);
1409 if (bcs->tx_skb) {
1410 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1411 printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
1412 break;
1413 }
1414 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1415 bcs->tx_skb = skb;
1416 bcs->cs->BC_Send_Data(bcs);
1417 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1418 break;
1419 case (PH_PULL | REQUEST):
1420 if (!bcs->tx_skb) {
1421 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1422 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1423 } else
1424 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1425 break;
1426 case (PH_ACTIVATE | REQUEST):
1427 spin_lock_irqsave(&bcs->cs->lock, flags);
1428 test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
1429 mode_hfcpci(bcs, st->l1.mode, st->l1.bc);
1430 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1431 l1_msg_b(st, pr, arg);
1432 break;
1433 case (PH_DEACTIVATE | REQUEST):
1434 l1_msg_b(st, pr, arg);
1435 break;
1436 case (PH_DEACTIVATE | CONFIRM):
1437 spin_lock_irqsave(&bcs->cs->lock, flags);
1438 test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
1439 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1440 mode_hfcpci(bcs, 0, st->l1.bc);
1441 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1442 st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
1443 break;
1444 }
1445 }
1446
1447 /******************************************/
1448 /* deactivate B-channel access and queues */
1449 /******************************************/
1450 static void
1451 close_hfcpci(struct BCState *bcs)
1452 {
1453 mode_hfcpci(bcs, 0, bcs->channel);
1454 if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
1455 skb_queue_purge(&bcs->rqueue);
1456 skb_queue_purge(&bcs->squeue);
1457 if (bcs->tx_skb) {
1458 dev_kfree_skb_any(bcs->tx_skb);
1459 bcs->tx_skb = NULL;
1460 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1461 }
1462 }
1463 }
1464
1465 /*************************************/
1466 /* init B-channel queues and control */
1467 /*************************************/
1468 static int
1469 open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs)
1470 {
1471 if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
1472 skb_queue_head_init(&bcs->rqueue);
1473 skb_queue_head_init(&bcs->squeue);
1474 }
1475 bcs->tx_skb = NULL;
1476 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1477 bcs->event = 0;
1478 bcs->tx_cnt = 0;
1479 return (0);
1480 }
1481
1482 /*********************************/
1483 /* inits the stack for B-channel */
1484 /*********************************/
1485 static int
1486 setstack_2b(struct PStack *st, struct BCState *bcs)
1487 {
1488 bcs->channel = st->l1.bc;
1489 if (open_hfcpcistate(st->l1.hardware, bcs))
1490 return (-1);
1491 st->l1.bcs = bcs;
1492 st->l2.l2l1 = hfcpci_l2l1;
1493 setstack_manager(st);
1494 bcs->st = st;
1495 setstack_l1_B(st);
1496 return (0);
1497 }
1498
1499 /***************************/
1500 /* handle L1 state changes */
1501 /***************************/
1502 static void
1503 hfcpci_bh(struct work_struct *work)
1504 {
1505 struct IsdnCardState *cs =
1506 container_of(work, struct IsdnCardState, tqueue);
1507 u_long flags;
1508 // struct PStack *stptr;
1509
1510 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
1511 if (!cs->hw.hfcpci.nt_mode)
1512 switch (cs->dc.hfcpci.ph_state) {
1513 case (0):
1514 l1_msg(cs, HW_RESET | INDICATION, NULL);
1515 break;
1516 case (3):
1517 l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
1518 break;
1519 case (8):
1520 l1_msg(cs, HW_RSYNC | INDICATION, NULL);
1521 break;
1522 case (6):
1523 l1_msg(cs, HW_INFO2 | INDICATION, NULL);
1524 break;
1525 case (7):
1526 l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
1527 break;
1528 default:
1529 break;
1530 } else {
1531 spin_lock_irqsave(&cs->lock, flags);
1532 switch (cs->dc.hfcpci.ph_state) {
1533 case (2):
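/* state G2: if the T1 supervision timer already expired, force state 4;
   otherwise (re)start the timer and allow the G2 -> G3 transition */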
1534 if (cs->hw.hfcpci.nt_timer < 0) {
1535 cs->hw.hfcpci.nt_timer = 0;
1536 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1537 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1538 /* Clear already pending ints */
1539 if (Read_hfc(cs, HFCPCI_INT_S1));
1540 Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
1541 udelay(10);
1542 Write_hfc(cs, HFCPCI_STATES, 4);
1543 cs->dc.hfcpci.ph_state = 4;
1544 } else {
1545 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER;
1546 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1547 cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER;
1548 cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125;
1549 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1550 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1551 cs->hw.hfcpci.nt_timer = NT_T1_COUNT;
1552 Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); /* allow G2 -> G3 transition */
1553 }
1554 break;
1555 case (1):
1556 case (3):
1557 case (4):
1558 cs->hw.hfcpci.nt_timer = 0;
1559 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1560 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1561 break;
1562 default:
1563 break;
1564 }
1565 spin_unlock_irqrestore(&cs->lock, flags);
1566 }
1567 }
1568 if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
1569 DChannel_proc_rcv(cs);
1570 if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
1571 DChannel_proc_xmt(cs);
1572 }
1573
1574
1575 /********************************/
1576 /* called for card init message */
1577 /********************************/
1578 static void
1579 inithfcpci(struct IsdnCardState *cs)
1580 {
1581 cs->bcs[0].BC_SetStack = setstack_2b;
1582 cs->bcs[1].BC_SetStack = setstack_2b;
1583 cs->bcs[0].BC_Close = close_hfcpci;
1584 cs->bcs[1].BC_Close = close_hfcpci;
1585 cs->dbusytimer.function = (void *) hfcpci_dbusy_timer;
1586 cs->dbusytimer.data = (long) cs;
1587 init_timer(&cs->dbusytimer);
1588 mode_hfcpci(cs->bcs, 0, 0);
1589 mode_hfcpci(cs->bcs + 1, 0, 1);
1590 }
1591
1592
1593
1594 /*******************************************/
1595 /* handle card messages from control layer */
1596 /*******************************************/
1597 static int
1598 hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
1599 {
1600 u_long flags;
1601
1602 if (cs->debug & L1_DEB_ISAC)
1603 debugl1(cs, "HFCPCI: card_msg %x", mt);
1604 switch (mt) {
1605 case CARD_RESET:
1606 spin_lock_irqsave(&cs->lock, flags);
1607 reset_hfcpci(cs);
1608 spin_unlock_irqrestore(&cs->lock, flags);
1609 return (0);
1610 case CARD_RELEASE:
1611 release_io_hfcpci(cs);
1612 return (0);
1613 case CARD_INIT:
1614 spin_lock_irqsave(&cs->lock, flags);
1615 inithfcpci(cs);
1616 reset_hfcpci(cs);
1617 spin_unlock_irqrestore(&cs->lock, flags);
1618 msleep(80); /* Timeout 80ms */
1619 /* now switch timer interrupt off */
1620 spin_lock_irqsave(&cs->lock, flags);
1621 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1622 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1623 /* reinit mode reg */
1624 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1625 spin_unlock_irqrestore(&cs->lock, flags);
1626 return (0);
1627 case CARD_TEST:
1628 return (0);
1629 }
1630 return (0);
1631 }
1632
1633
1634 /* this pointer remembers the last HFC-PCI device found, so the probe continues the search when more than one card is present */
1635 static struct pci_dev *dev_hfcpci __devinitdata = NULL;
1636
1637 int __devinit
1638 setup_hfcpci(struct IsdnCard *card)
1639 {
1640 u_long flags;
1641 struct IsdnCardState *cs = card->cs;
1642 char tmp[64];
1643 int i;
1644 struct pci_dev *tmp_hfcpci = NULL;
1645
1646 #ifdef __BIG_ENDIAN
1647 #error "not running on big endian machines now"
1648 #endif
1649
1650 strcpy(tmp, hfcpci_revision);
1651 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1652
1653 cs->hw.hfcpci.int_s1 = 0;
1654 cs->dc.hfcpci.ph_state = 0;
1655 cs->hw.hfcpci.fifo = 255;
1656 if (cs->typ != ISDN_CTYPE_HFC_PCI)
1657 return(0);
1658
1659 i = 0;
1660 while (id_list[i].vendor_id) {
1661 tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id,
1662 id_list[i].device_id,
1663 dev_hfcpci);
1664 i++;
1665 if (tmp_hfcpci) {
1666 dma_addr_t dma_mask = DMA_BIT_MASK(32) & ~0x7fffUL;
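/* clearing the low 15 bits of the DMA mask forces the consistent
   allocation below to come back aligned to the 32K fifo window the
   chip requires */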
1667 if (pci_enable_device(tmp_hfcpci))
1668 continue;
1669 if (pci_set_dma_mask(tmp_hfcpci, dma_mask)) {
1670 printk(KERN_WARNING
1671 "HiSax hfc_pci: No suitable DMA available.\n");
1672 continue;
1673 }
1674 if (pci_set_consistent_dma_mask(tmp_hfcpci, dma_mask)) {
1675 printk(KERN_WARNING
1676 "HiSax hfc_pci: No suitable consistent DMA available.\n");
1677 continue;
1678 }
1679 pci_set_master(tmp_hfcpci);
1680 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[0].start & PCI_BASE_ADDRESS_IO_MASK)))
1681 continue;
1682 else
1683 break;
1684 }
1685 }
1686
1687 if (!tmp_hfcpci) {
1688 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1689 return (0);
1690 }
1691
1692 i--;
1693 dev_hfcpci = tmp_hfcpci; /* old device */
1694 cs->hw.hfcpci.dev = dev_hfcpci;
1695 cs->irq = dev_hfcpci->irq;
1696 if (!cs->irq) {
1697 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1698 return (0);
1699 }
1700 cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
1701 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1702
1703 if (!cs->hw.hfcpci.pci_io) {
1704 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1705 return (0);
1706 }
1707
1708 /* Allocate memory for FIFOS */
1709 cs->hw.hfcpci.fifos = pci_alloc_consistent(cs->hw.hfcpci.dev,
1710 0x8000, &cs->hw.hfcpci.dma);
1711 if (!cs->hw.hfcpci.fifos) {
1712 printk(KERN_WARNING "HFC-PCI: Error allocating FIFO memory!\n");
1713 return 0;
1714 }
1715 if (cs->hw.hfcpci.dma & 0x7fff) {
1716 printk(KERN_WARNING
1717 "HFC-PCI: Error DMA memory not on 32K boundary (%lx)\n",
1718 (u_long)cs->hw.hfcpci.dma);
1719 pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
1720 cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
1721 return 0;
1722 }
1723 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u32)cs->hw.hfcpci.dma);
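/* config space register 0x80 holds the memory window base address (MWBA)
   through which the chip accesses the 32K fifo buffer as bus master */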
1724 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
1725 printk(KERN_INFO
1726 "HFC-PCI: defined at mem %p fifo %p(%lx) IRQ %d HZ %d\n",
1727 cs->hw.hfcpci.pci_io,
1728 cs->hw.hfcpci.fifos,
1729 (u_long)cs->hw.hfcpci.dma,
1730 cs->irq, HZ);
1731
1732 spin_lock_irqsave(&cs->lock, flags);
1733
1734 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
1735 cs->hw.hfcpci.int_m2 = 0; /* disable all interrupts */
1736 cs->hw.hfcpci.int_m1 = 0;
1737 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1738 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1739 /* At this point the needed PCI config is done */
1740 /* fifos are still not enabled */
1741
1742 INIT_WORK(&cs->tqueue, hfcpci_bh);
1743 cs->setstack_d = setstack_hfcpci;
1744 cs->BC_Send_Data = &hfcpci_send_data;
1745 cs->readisac = NULL;
1746 cs->writeisac = NULL;
1747 cs->readisacfifo = NULL;
1748 cs->writeisacfifo = NULL;
1749 cs->BC_Read_Reg = NULL;
1750 cs->BC_Write_Reg = NULL;
1751 cs->irq_func = &hfcpci_interrupt;
1752 cs->irq_flags |= IRQF_SHARED;
1753 cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
1754 cs->hw.hfcpci.timer.data = (long) cs;
1755 init_timer(&cs->hw.hfcpci.timer);
1756 cs->cardmsg = &hfcpci_card_msg;
1757 cs->auxcmd = &hfcpci_auxcmd;
1758
1759 spin_unlock_irqrestore(&cs->lock, flags);
1760
1761 return (1);
1762 }
1763