1 /*
2  * ohci1394.c - driver for OHCI 1394 boards
3  * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4  *                        Gord Peters <GordPeters@smarttech.com>
5  *              2001      Ben Collins <bcollins@debian.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software Foundation,
19  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  */
21 
22 /*
23  * Things known to be working:
24  * . Async Request Transmit
25  * . Async Response Receive
26  * . Async Request Receive
27  * . Async Response Transmit
28  * . Iso Receive
29  * . DMA mmap for iso receive
30  * . Config ROM generation
31  *
32  * Things implemented, but still in test phase:
33  * . Iso Transmit
34  * . Async Stream Packets Transmit (Receive done via Iso interface)
35  *
36  * Things not implemented:
37  * . DMA error recovery
38  *
39  * Known bugs:
40  * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41  *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42  */
43 
44 /*
45  * Acknowledgments:
46  *
47  * Adam J Richter <adam@yggdrasil.com>
48  *  . Use of pci_class to find device
49  *
50  * Andreas Tobler <toa@pop.agri.ch>
51  *  . Updated proc_fs calls
52  *
53  * Emilie Chung	<emilie.chung@axis.com>
54  *  . Tip on Async Request Filter
55  *
56  * Pascal Drolet <pascal.drolet@informission.ca>
 * . Various tips for optimization and functionality
58  *
59  * Robert Ficklin <rficklin@westengineering.com>
60  *  . Loop in irq_handler
61  *
62  * James Goodwin <jamesg@Filanet.com>
63  *  . Various tips on initialization, self-id reception, etc.
64  *
65  * Albrecht Dress <ad@mpifr-bonn.mpg.de>
66  *  . Apple PowerBook detection
67  *
68  * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
69  *  . Reset the board properly before leaving + misc cleanups
70  *
71  * Leon van Stuivenberg <leonvs@iae.nl>
72  *  . Bug fixes
73  *
74  * Ben Collins <bcollins@debian.org>
75  *  . Working big-endian support
 * . Updated to 2.4.x module scheme (PCI as well)
77  *  . Removed procfs support since it trashes random mem
78  *  . Config ROM generation
79  *
80  * Manfred Weihs <weihs@ict.tuwien.ac.at>
81  *  . Reworked code for initiating bus resets
82  *    (long, short, with or without hold-off)
83  *
84  * Nandu Santhi <contactnandu@users.sourceforge.net>
85  *  . Added support for nVidia nForce2 onboard Firewire chipset
86  *
87  */
88 
89 #include <linux/config.h>
90 #include <linux/kernel.h>
91 #include <linux/list.h>
92 #include <linux/slab.h>
93 #include <linux/interrupt.h>
94 #include <linux/wait.h>
95 #include <linux/errno.h>
96 #include <linux/module.h>
97 #include <linux/pci.h>
98 #include <linux/fs.h>
99 #include <linux/poll.h>
100 #include <linux/irq.h>
101 #include <asm/byteorder.h>
102 #include <asm/atomic.h>
103 #include <asm/uaccess.h>
104 #include <linux/delay.h>
105 #include <linux/spinlock.h>
106 
107 #include <asm/pgtable.h>
108 #include <asm/page.h>
109 #include <linux/sched.h>
110 #include <linux/types.h>
111 #include <linux/wrapper.h>
112 #include <linux/vmalloc.h>
113 #include <linux/init.h>
114 
115 #ifdef CONFIG_ALL_PPC
116 #include <asm/machdep.h>
117 #include <asm/pmac_feature.h>
118 #include <asm/prom.h>
119 #include <asm/pci-bridge.h>
120 #endif
121 
122 #include "ieee1394.h"
123 #include "ieee1394_types.h"
124 #include "hosts.h"
125 #include "dma.h"
126 #include "iso.h"
127 #include "ieee1394_core.h"
128 #include "highlevel.h"
129 #include "ohci1394.h"
130 
131 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
132 #define OHCI1394_DEBUG
133 #endif
134 
135 #ifdef DBGMSG
136 #undef DBGMSG
137 #endif
138 
139 #ifdef OHCI1394_DEBUG
140 #define DBGMSG(card, fmt, args...) \
141 printk(KERN_INFO "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
142 #else
143 #define DBGMSG(card, fmt, args...)
144 #endif
145 
146 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
147 #define OHCI_DMA_ALLOC(fmt, args...) \
148 	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
149 		++global_outstanding_dmas, ## args)
150 #define OHCI_DMA_FREE(fmt, args...) \
151 	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
152 		--global_outstanding_dmas, ## args)
153 static int global_outstanding_dmas = 0;
154 #else
155 #define OHCI_DMA_ALLOC(fmt, args...)
156 #define OHCI_DMA_FREE(fmt, args...)
157 #endif
158 
159 /* print general (card independent) information */
160 #define PRINT_G(level, fmt, args...) \
161 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
162 
163 /* print card specific information */
164 #define PRINT(level, card, fmt, args...) \
165 printk(level "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
166 
167 static char version[] __devinitdata =
168 	"$Rev: 1045 $ Ben Collins <bcollins@debian.org>";
169 
170 /* Module Parameters */
171 MODULE_PARM(phys_dma,"i");
172 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
173 static int phys_dma = 1;
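/* Usage sketch (assuming the module is built as ohci1394): physical DMA
 * can be disabled at load time with e.g. "modprobe ohci1394 phys_dma=0". */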
174 
175 static void dma_trm_tasklet(unsigned long data);
176 static void dma_trm_reset(struct dma_trm_ctx *d);
177 
178 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
179 			     enum context_type type, int ctx, int num_desc,
180 			     int buf_size, int split_buf_size, int context_base);
181 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
182 
183 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
184 			     enum context_type type, int ctx, int num_desc,
185 			     int context_base);
186 
187 static void ohci1394_pci_remove(struct pci_dev *pdev);
188 
189 #ifndef __LITTLE_ENDIAN
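/* Header length in quadlets, indexed by transaction code (tcode) */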
190 static unsigned hdr_sizes[] =
191 {
192 	3,	/* TCODE_WRITEQ */
193 	4,	/* TCODE_WRITEB */
194 	3,	/* TCODE_WRITE_RESPONSE */
195 	0,	/* ??? */
196 	3,	/* TCODE_READQ */
197 	4,	/* TCODE_READB */
198 	3,	/* TCODE_READQ_RESPONSE */
199 	4,	/* TCODE_READB_RESPONSE */
200 	1,	/* TCODE_CYCLE_START (???) */
201 	4,	/* TCODE_LOCK_REQUEST */
202 	2,	/* TCODE_ISO_DATA */
203 	4,	/* TCODE_LOCK_RESPONSE */
204 };
205 
206 /* Swap headers */
static inline void packet_swab(quadlet_t *data, int tcode)
{
	size_t size;

	if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
		return;

	size = hdr_sizes[tcode];

	while (size--)
		data[size] = swab32(data[size]);
}
217 #else
/* Don't waste cycles when host and bus byte order already match */
219 #define packet_swab(w,x)
220 #endif /* !LITTLE_ENDIAN */
221 
222 /***********************************
223  * IEEE-1394 functionality section *
224  ***********************************/
225 
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
227 {
228 	int i;
229 	unsigned long flags;
230 	quadlet_t r;
231 
232 	spin_lock_irqsave (&ohci->phy_reg_lock, flags);
233 
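	/* PhyControl layout (per OHCI 1.1): regAddr lives in bits 11:8 and
	 * rdReg is bit 15; the controller raises rdDone (bit 31) once
	 * rdData (bits 23:16) holds the value read from the PHY. */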
234 	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
235 
236 	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
237 		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
238 			break;
239 
240 		mdelay(1);
241 	}
242 
243 	r = reg_read(ohci, OHCI1394_PhyControl);
244 
245 	if (i >= OHCI_LOOP_COUNT)
246 		PRINT (KERN_ERR, ohci->id, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
247 		       r, r & 0x80000000, i);
248 
249 	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
250 
251 	return (r & 0x00ff0000) >> 16;
252 }
253 
static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
255 {
256 	int i;
257 	unsigned long flags;
258 	u32 r = 0;
259 
260 	spin_lock_irqsave (&ohci->phy_reg_lock, flags);
261 
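	/* wrData occupies bits 7:0 and regAddr bits 11:8; setting wrReg
	 * (bit 14) starts the write, and the loop below waits for the
	 * controller to clear it on completion. */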
262 	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
263 
264 	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
265 		r = reg_read(ohci, OHCI1394_PhyControl);
266 		if (!(r & 0x00004000))
267 			break;
268 
269 		mdelay(1);
270 	}
271 
	if (i >= OHCI_LOOP_COUNT)
273 		PRINT (KERN_ERR, ohci->id, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
274 		       r, r & 0x00004000, i);
275 
276 	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
277 
278 	return;
279 }
280 
/* ORs our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
283 {
284 	u8 old;
285 
286 	old = get_phy_reg (ohci, addr);
287 	old |= data;
288 	set_phy_reg (ohci, addr, old);
289 
290 	return;
291 }
292 
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
			  int phyid, int isroot)
295 {
296 	quadlet_t *q = ohci->selfid_buf_cpu;
297 	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
298 	size_t size;
299 	quadlet_t q0, q1;
300 
301 	/* Check status of self-id reception */
302 
303 	if (ohci->selfid_swap)
304 		q0 = le32_to_cpu(q[0]);
305 	else
306 		q0 = q[0];
307 
308 	if ((self_id_count & 0x80000000) ||
309 	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
310 		PRINT(KERN_ERR, ohci->id,
311 		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
312 		      self_id_count, q0, ohci->self_id_errors);
313 
314 		/* Tip by James Goodwin <jamesg@Filanet.com>:
315 		 * We had an error, generate another bus reset in response.  */
316 		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
317 			set_phy_reg_mask (ohci, 1, 0x40);
318 			ohci->self_id_errors++;
319 		} else {
320 			PRINT(KERN_ERR, ohci->id,
321 			      "Too many errors on SelfID error reception, giving up!");
322 		}
323 		return;
324 	}
325 
326 	/* SelfID Ok, reset error counter. */
327 	ohci->self_id_errors = 0;
328 
329 	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
330 	q++;
331 
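	/* Each self-ID packet occupies two quadlets here: the packet and
	 * its bitwise complement, which the q0 == ~q1 check below uses to
	 * verify that the pair arrived intact. */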
332 	while (size > 0) {
333 		if (ohci->selfid_swap) {
334 			q0 = le32_to_cpu(q[0]);
335 			q1 = le32_to_cpu(q[1]);
336 		} else {
337 			q0 = q[0];
338 			q1 = q[1];
339 		}
340 
341 		if (q0 == ~q1) {
342 			DBGMSG (ohci->id, "SelfID packet 0x%x received", q0);
343 			hpsb_selfid_received(host, cpu_to_be32(q0));
344 			if (((q0 & 0x3f000000) >> 24) == phyid)
345 				DBGMSG (ohci->id, "SelfID for this node is 0x%08x", q0);
346 		} else {
347 			PRINT(KERN_ERR, ohci->id,
348 			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
349 		}
350 		q += 2;
351 		size -= 2;
352 	}
353 
354 	DBGMSG(ohci->id, "SelfID complete");
355 
356 	return;
357 }
358 
static void ohci_soft_reset(struct ti_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG (ohci->id, "Soft reset finished");
}
371 
static int run_context(struct ti_ohci *ohci, int reg, char *msg)
373 {
374 	u32 nodeId;
375 
376 	/* check that the node id is valid */
377 	nodeId = reg_read(ohci, OHCI1394_NodeID);
378 	if (!(nodeId&0x80000000)) {
379 		PRINT(KERN_ERR, ohci->id,
380 		      "Running dma failed because Node ID is not valid");
381 		return -1;
382 	}
383 
384 	/* check that the node number != 63 */
385 	if ((nodeId&0x3f)==63) {
386 		PRINT(KERN_ERR, ohci->id,
387 		      "Running dma failed because Node ID == 63");
388 		return -1;
389 	}
390 
391 	/* Run the dma context */
392 	reg_write(ohci, reg, 0x8000);
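	/* 0x8000 sets contextControl.run (bit 15) */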
393 
394 	if (msg) PRINT(KERN_DEBUG, ohci->id, "%s", msg);
395 
396 	return 0;
397 }
398 
399 /* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
401 {
402 	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
403 	int i;
404 
405 	ohci1394_stop_context(ohci, d->ctrlClear, NULL);
406 
407 	for (i=0; i<d->num_desc; i++) {
408 		u32 c;
409 
410 		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
411 		if (generate_irq)
412 			c |= DMA_CTL_IRQ;
413 
414 		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
415 
416 		/* End of descriptor list? */
417 		if (i + 1 < d->num_desc) {
418 			d->prg_cpu[i]->branchAddress =
419 				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
420 		} else {
421 			d->prg_cpu[i]->branchAddress =
422 				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
423 		}
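		/* The low nibble of branchAddress is the Z value: Z=1 says
		 * one descriptor follows, while the Z=0 branch back to the
		 * first program stalls the context at the end of the ring
		 * until the branch is patched with a fresh buffer. */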
424 
425 		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
426 		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
427 	}
428 
429         d->buf_ind = 0;
430         d->buf_offset = 0;
431 
432 	if (d->type == DMA_CTX_ISO) {
433 		/* Clear contextControl */
434 		reg_write(ohci, d->ctrlClear, 0xffffffff);
435 
436 		/* Set bufferFill, isochHeader, multichannel for IR context */
437 		reg_write(ohci, d->ctrlSet, 0xd0000000);
438 
439 		/* Set the context match register to match on all tags */
440 		reg_write(ohci, d->ctxtMatch, 0xf0000000);
441 
442 		/* Clear the multi channel mask high and low registers */
443 		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
444 		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
445 
446 		/* Set up isoRecvIntMask to generate interrupts */
447 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
448 	}
449 
450 	/* Tell the controller where the first AR program is */
451 	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
452 
453 	/* Run context */
454 	reg_write(ohci, d->ctrlSet, 0x00008000);
455 
456 	DBGMSG(ohci->id, "Receive DMA ctx=%d initialized", d->ctx);
457 }
458 
459 /* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
461 {
462 	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
463 
464 	/* Stop the context */
465 	ohci1394_stop_context(ohci, d->ctrlClear, NULL);
466 
467         d->prg_ind = 0;
468 	d->sent_ind = 0;
469 	d->free_prgs = d->num_desc;
470         d->branchAddrPtr = NULL;
471 	INIT_LIST_HEAD(&d->fifo_list);
472 	INIT_LIST_HEAD(&d->pending_list);
473 
474 	if (d->type == DMA_CTX_ISO) {
475 		/* enable interrupts */
476 		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
477 	}
478 
479 	DBGMSG(ohci->id, "Transmit DMA ctx=%d initialized", d->ctx);
480 }
481 
482 /* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
484 {
485 	int i,ctx=0;
486 	u32 tmp;
487 
488 	reg_write(ohci, reg, 0xffffffff);
489 	tmp = reg_read(ohci, reg);
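	/* Writing all ones to the ISO interrupt mask-set register and
	 * reading it back leaves a bit set for every context the chip
	 * actually implements. */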
490 
491 	DBGMSG(ohci->id,"Iso contexts reg: %08x implemented: %08x", reg, tmp);
492 
493 	/* Count the number of contexts */
494 	for (i=0; i<32; i++) {
495 	    	if (tmp & 1) ctx++;
496 		tmp >>= 1;
497 	}
498 	return ctx;
499 }
500 
501 static void ohci_init_config_rom(struct ti_ohci *ohci);
502 
503 /* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
505 {
506 	char irq_buf[16];
507 	quadlet_t buf;
508 
509 	spin_lock_init(&ohci->phy_reg_lock);
510 	spin_lock_init(&ohci->event_lock);
511 
	/* Set sane defaults for the undefined bus options */
513 	buf = reg_read(ohci, OHCI1394_BusOptions);
514 	buf |=  0xE0000000; /* Enable IRMC, CMC and ISC */
515 	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
516 	buf &= ~0x18000000; /* Disable PMC and BMC */
517 	reg_write(ohci, OHCI1394_BusOptions, buf);
518 
519 	/* Set the bus number */
520 	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
521 
522 	/* Enable posted writes */
523 	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
524 
525 	/* Clear link control register */
526 	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
527 
528 	/* Enable cycle timer and cycle master and set the IRM
529 	 * contender bit in our self ID packets. */
530 	reg_write(ohci, OHCI1394_LinkControlSet, 0x00300000);
531 	set_phy_reg_mask(ohci, 4, 0xc0);
532 
533 	/* Clear interrupt registers */
534 	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
535 	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
536 
537 	/* Set up self-id dma buffer */
538 	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
539 
540 	/* enable self-id dma */
541 	reg_write(ohci, OHCI1394_LinkControlSet, 0x00000200);
542 
543 	/* Set the Config ROM mapping register */
544 	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
545 
546 	/* Initialize the Config ROM */
547 	ohci_init_config_rom(ohci);
548 
549 	/* Now get our max packet size */
550 	ohci->max_packet_size =
551 		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
552 
553 	/* Don't accept phy packets into AR request context */
554 	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
555 
	/* Clear the ISO receive interrupt mask and events */
557 	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
558 	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
559 
	/* Clear the ISO transmit interrupt mask and events */
561 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
562 	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
563 
564 	/* Initialize AR dma */
565 	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
566 	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
567 
568 	/* Initialize AT dma */
569 	initialize_dma_trm_ctx(&ohci->at_req_context);
570 	initialize_dma_trm_ctx(&ohci->at_resp_context);
571 
572 	/*
573 	 * Accept AT requests from all nodes. This probably
574 	 * will have to be controlled from the subsystem
575 	 * on a per node basis.
576 	 */
577 	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
578 
579 	/* Specify AT retries */
580 	reg_write(ohci, OHCI1394_ATRetries,
581 		  OHCI1394_MAX_AT_REQ_RETRIES |
582 		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
583 		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
584 
585 	/* We don't want hardware swapping */
586 	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
587 
588 	/* Enable interrupts */
589 	reg_write(ohci, OHCI1394_IntMaskSet,
590 		  OHCI1394_unrecoverableError |
591 		  OHCI1394_masterIntEnable |
592 		  OHCI1394_busReset |
593 		  OHCI1394_selfIDComplete |
594 		  OHCI1394_RSPkt |
595 		  OHCI1394_RQPkt |
596 		  OHCI1394_respTxComplete |
597 		  OHCI1394_reqTxComplete |
598 		  OHCI1394_isochRx |
599 		  OHCI1394_isochTx |
600 		  OHCI1394_cycleInconsistent);
601 
602 	/* Enable link */
603 	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
604 
605 	buf = reg_read(ohci, OHCI1394_Version);
606 #ifndef __sparc__
607 	sprintf (irq_buf, "%d", ohci->dev->irq);
608 #else
609 	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
610 #endif
611 	PRINT(KERN_INFO, ohci->id, "OHCI-1394 %d.%d (PCI): IRQ=[%s]  "
612 	      "MMIO=[%lx-%lx]  Max Packet=[%d]",
613 	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
614 	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
615 	      pci_resource_start(ohci->dev, 0),
616 	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
617 	      ohci->max_packet_size);
618 }
619 
620 /*
621  * Insert a packet in the DMA fifo and generate the DMA prg
622  * FIXME: rewrite the program in order to accept packets crossing
623  *        page boundaries.
624  *        check also that a single dma descriptor doesn't cross a
625  *        page boundary.
626  */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
629 {
630 	u32 cycleTimer;
631 	int idx = d->prg_ind;
632 
633 	DBGMSG(ohci->id, "Inserting packet for node " NODE_BUS_FMT
634 	       ", tlabel=%d, tcode=0x%x, speed=%d",
635 	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
636 	       packet->tcode, packet->speed_code);
637 
638 	d->prg_cpu[idx]->begin.address = 0;
639 	d->prg_cpu[idx]->begin.branchAddress = 0;
640 
641 	if (d->type == DMA_CTX_ASYNC_RESP) {
642 		/*
643 		 * For response packets, we need to put a timeout value in
644 		 * the 16 lower bits of the status... let's try 1 sec timeout
645 		 */
646 		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
647 		d->prg_cpu[idx]->begin.status = cpu_to_le32(
648 			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
649 			((cycleTimer&0x01fff000)>>12));
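		/* The timeStamp is 3 bits of cycleSeconds (bits 15:13) over
		 * a 13-bit cycleCount; taking the current second plus one,
		 * mod 8, gives the response roughly a one-second deadline. */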
650 
651 		DBGMSG(ohci->id, "cycleTimer: %08x timeStamp: %08x",
652 		       cycleTimer, d->prg_cpu[idx]->begin.status);
653 	} else
654 		d->prg_cpu[idx]->begin.status = 0;
655 
656         if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
657 
658                 if (packet->type == hpsb_raw) {
659 			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
660                         d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
661                         d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
662                 } else {
663                         d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
664                                 (packet->header[0] & 0xFFFF);
665 
666 			if (packet->tcode == TCODE_ISO_DATA) {
667 				/* Sending an async stream packet */
668 				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
669 			} else {
670 				/* Sending a normal async request or response */
671 				d->prg_cpu[idx]->data[1] =
672 					(packet->header[1] & 0xFFFF) |
673 					(packet->header[0] & 0xFFFF0000);
674 				d->prg_cpu[idx]->data[2] = packet->header[2];
675 				d->prg_cpu[idx]->data[3] = packet->header[3];
676 			}
677 			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
678                 }
679 
680                 if (packet->data_size) { /* block transmit */
681 			if (packet->tcode == TCODE_STREAM_DATA){
682 				d->prg_cpu[idx]->begin.control =
683 					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
684 						    DMA_CTL_IMMEDIATE | 0x8);
685 			} else {
686 				d->prg_cpu[idx]->begin.control =
687 					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
688 						    DMA_CTL_IMMEDIATE | 0x10);
689 			}
690                         d->prg_cpu[idx]->end.control =
691                                 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
692 					    DMA_CTL_IRQ |
693 					    DMA_CTL_BRANCH |
694 					    packet->data_size);
695                         /*
696                          * Check that the packet data buffer
697                          * does not cross a page boundary.
698                          */
699                         if (cross_bound((unsigned long)packet->data,
700                                         packet->data_size)>0) {
701                                 /* FIXME: do something about it */
702                                 PRINT(KERN_ERR, ohci->id,
703                                       "%s: packet data addr: %p size %Zd bytes "
704                                       "cross page boundary", __FUNCTION__,
705                                       packet->data, packet->data_size);
706                         }
707 
708                         d->prg_cpu[idx]->end.address = cpu_to_le32(
709                                 pci_map_single(ohci->dev, packet->data,
710                                                packet->data_size,
711                                                PCI_DMA_TODEVICE));
712 			OHCI_DMA_ALLOC("single, block transmit packet");
713 
714                         d->prg_cpu[idx]->end.branchAddress = 0;
715                         d->prg_cpu[idx]->end.status = 0;
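                        /* Chain onto the previous program: Z=3 because a
                         * block transmit is a two-slot immediate OUTPUT_MORE
                         * plus one OUTPUT_LAST descriptor. */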
716                         if (d->branchAddrPtr)
717                                 *(d->branchAddrPtr) =
718 					cpu_to_le32(d->prg_bus[idx] | 0x3);
719                         d->branchAddrPtr =
720                                 &(d->prg_cpu[idx]->end.branchAddress);
721                 } else { /* quadlet transmit */
722                         if (packet->type == hpsb_raw)
723                                 d->prg_cpu[idx]->begin.control =
724 					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
725 						    DMA_CTL_IMMEDIATE |
726 						    DMA_CTL_IRQ |
727 						    DMA_CTL_BRANCH |
728 						    (packet->header_size + 4));
729                         else
730                                 d->prg_cpu[idx]->begin.control =
731 					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
732 						    DMA_CTL_IMMEDIATE |
733 						    DMA_CTL_IRQ |
734 						    DMA_CTL_BRANCH |
735 						    packet->header_size);
736 
737                         if (d->branchAddrPtr)
738                                 *(d->branchAddrPtr) =
739 					cpu_to_le32(d->prg_bus[idx] | 0x2);
740                         d->branchAddrPtr =
741                                 &(d->prg_cpu[idx]->begin.branchAddress);
742                 }
743 
744         } else { /* iso packet */
745                 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
746                         (packet->header[0] & 0xFFFF);
747                 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
748 		packet_swab(d->prg_cpu[idx]->data, packet->tcode);
749 
750                 d->prg_cpu[idx]->begin.control =
751 			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
752 				    DMA_CTL_IMMEDIATE | 0x8);
753                 d->prg_cpu[idx]->end.control =
754 			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
755 				    DMA_CTL_UPDATE |
756 				    DMA_CTL_IRQ |
757 				    DMA_CTL_BRANCH |
758 				    packet->data_size);
759                 d->prg_cpu[idx]->end.address = cpu_to_le32(
760 				pci_map_single(ohci->dev, packet->data,
761 				packet->data_size, PCI_DMA_TODEVICE));
762 		OHCI_DMA_ALLOC("single, iso transmit packet");
763 
764                 d->prg_cpu[idx]->end.branchAddress = 0;
765                 d->prg_cpu[idx]->end.status = 0;
766                 DBGMSG(ohci->id, "Iso xmit context info: header[%08x %08x]\n"
767                        "                       begin=%08x %08x %08x %08x\n"
768                        "                             %08x %08x %08x %08x\n"
769                        "                       end  =%08x %08x %08x %08x",
770                        d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
771                        d->prg_cpu[idx]->begin.control,
772                        d->prg_cpu[idx]->begin.address,
773                        d->prg_cpu[idx]->begin.branchAddress,
774                        d->prg_cpu[idx]->begin.status,
775                        d->prg_cpu[idx]->data[0],
776                        d->prg_cpu[idx]->data[1],
777                        d->prg_cpu[idx]->data[2],
778                        d->prg_cpu[idx]->data[3],
779                        d->prg_cpu[idx]->end.control,
780                        d->prg_cpu[idx]->end.address,
781                        d->prg_cpu[idx]->end.branchAddress,
782                        d->prg_cpu[idx]->end.status);
783                 if (d->branchAddrPtr)
784   		        *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
785                 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
786         }
787 	d->free_prgs--;
788 
789 	/* queue the packet in the appropriate context queue */
790 	list_add_tail(&packet->driver_list, &d->fifo_list);
791 	d->prg_ind = (d->prg_ind+1)%d->num_desc;
792 }
793 
/*
 * This function fills the FIFO with any pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with d->lock held.
 */
static int dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
801 {
802 	struct hpsb_packet *p;
803 	int idx,z;
804 
805 	if (list_empty(&d->pending_list) || d->free_prgs == 0)
806 		return 0;
807 
808 	p = driver_packet(d->pending_list.next);
809 	idx = d->prg_ind;
810 	z = (p->data_size) ? 3 : 2;
811 
812 	/* insert the packets into the dma fifo */
813 	while (d->free_prgs > 0 && !list_empty(&d->pending_list)) {
814 		struct hpsb_packet *p = driver_packet(d->pending_list.next);
815 		list_del(&p->driver_list);
816 		insert_packet(ohci, d, p);
817 	}
818 
819 	if (d->free_prgs == 0)
820 		DBGMSG(ohci->id, "Transmit DMA FIFO ctx=%d is full... waiting", d->ctx);
821 
	/* Is the context running? (It should be, unless this is the
	   first packet to be sent in this context.) */
824 	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
825 		DBGMSG(ohci->id,"Starting transmit DMA ctx=%d",d->ctx);
826 		reg_write(ohci, d->cmdPtr, d->prg_bus[idx]|z);
827 		run_context(ohci, d->ctrlSet, NULL);
828 	}
829 	else {
830 		/* Wake up the dma context if necessary */
831 		if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
832 			DBGMSG(ohci->id,"Waking transmit DMA ctx=%d",d->ctx);
833 		}
834 
835 		/* do this always, to avoid race condition */
836 		reg_write(ohci, d->ctrlSet, 0x1000);
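		/* 0x1000 is contextControl.wake (bit 12); setting it while
		 * the context is already awake is harmless. */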
837 	}
838 	return 1;
839 }
840 
841 /* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
843 {
844 	struct ti_ohci *ohci = host->hostdata;
845 	struct dma_trm_ctx *d;
846 	unsigned long flags;
847 
848 	if (packet->data_size > ohci->max_packet_size) {
849 		PRINT(KERN_ERR, ohci->id,
850 		      "Transmit packet size %Zd is too big",
851 		      packet->data_size);
852 		return 0;
853 	}
854 
855 	/* Decide whether we have an iso, a request, or a response packet */
856 	if (packet->type == hpsb_raw)
857 		d = &ohci->at_req_context;
858 	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
859 		/* The legacy IT DMA context is initialized on first
860 		 * use.  However, the alloc cannot be run from
861 		 * interrupt context, so we bail out if that is the
862 		 * case. I don't see anyone sending ISO packets from
863 		 * interrupt context anyway... */
864 
865 		if (ohci->it_legacy_context.ohci == NULL) {
866 			if (in_interrupt()) {
867 				PRINT(KERN_ERR, ohci->id,
868 				      "legacy IT context cannot be initialized during interrupt");
869 				return 0;
870 			}
871 
872 			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
873 					      DMA_CTX_ISO, 0, IT_NUM_DESC,
874 					      OHCI1394_IsoXmitContextBase) < 0) {
875 				PRINT(KERN_ERR, ohci->id,
876 				      "error initializing legacy IT context");
877 				return 0;
878 			}
879 
880 			initialize_dma_trm_ctx(&ohci->it_legacy_context);
881 		}
882 
883 		d = &ohci->it_legacy_context;
884 	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
885 		d = &ohci->at_resp_context;
886 	else
887 		d = &ohci->at_req_context;
888 
889 	spin_lock_irqsave(&d->lock,flags);
890 
891 	list_add_tail(&packet->driver_list, &d->pending_list);
892 
893 	dma_trm_flush(ohci, d);
894 
895 	spin_unlock_irqrestore(&d->lock,flags);
896 
897 	return 1;
898 }
899 
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
901 {
902 	struct ti_ohci *ohci = host->hostdata;
903 	int retval = 0;
904 	unsigned long flags;
905 	int phy_reg;
906 
907 	switch (cmd) {
908 	case RESET_BUS:
909 		switch (arg) {
910 		case SHORT_RESET:
911 			phy_reg = get_phy_reg(ohci, 5);
912 			phy_reg |= 0x40;
913 			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
914 			break;
915 		case LONG_RESET:
916 			phy_reg = get_phy_reg(ohci, 1);
917 			phy_reg |= 0x40;
918 			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
919 			break;
920 		case SHORT_RESET_NO_FORCE_ROOT:
921 			phy_reg = get_phy_reg(ohci, 1);
922 			if (phy_reg & 0x80) {
923 				phy_reg &= ~0x80;
924 				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
925 			}
926 
927 			phy_reg = get_phy_reg(ohci, 5);
928 			phy_reg |= 0x40;
929 			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
930 			break;
931 		case LONG_RESET_NO_FORCE_ROOT:
932 			phy_reg = get_phy_reg(ohci, 1);
933 			phy_reg &= ~0x80;
934 			phy_reg |= 0x40;
935 			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
936 			break;
937 		case SHORT_RESET_FORCE_ROOT:
938 			phy_reg = get_phy_reg(ohci, 1);
939 			if (!(phy_reg & 0x80)) {
940 				phy_reg |= 0x80;
941 				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
942 			}
943 
944 			phy_reg = get_phy_reg(ohci, 5);
945 			phy_reg |= 0x40;
946 			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
947 			break;
948 		case LONG_RESET_FORCE_ROOT:
949 			phy_reg = get_phy_reg(ohci, 1);
950 			phy_reg |= 0xc0;
951 			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
952 			break;
953 		default:
954 			retval = -1;
955 		}
956 		break;
957 
958 	case GET_CYCLE_COUNTER:
959 		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
960 		break;
961 
962 	case SET_CYCLE_COUNTER:
963 		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
964 		break;
965 
966 	case SET_BUS_ID:
967 		PRINT(KERN_ERR, ohci->id, "devctl command SET_BUS_ID err");
968 		break;
969 
970 	case ACT_CYCLE_MASTER:
971 		if (arg) {
972 			/* check if we are root and other nodes are present */
973 			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
974 			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
975 				/*
976 				 * enable cycleTimer, cycleMaster
977 				 */
978 				DBGMSG(ohci->id, "Cycle master enabled");
979 				reg_write(ohci, OHCI1394_LinkControlSet,
980 					  0x00300000);
981 			}
982 		} else {
983 			/* disable cycleTimer, cycleMaster, cycleSource */
984 			reg_write(ohci, OHCI1394_LinkControlClear, 0x00700000);
985 		}
986 		break;
987 
988 	case CANCEL_REQUESTS:
989 		DBGMSG(ohci->id, "Cancel request received");
990 		dma_trm_reset(&ohci->at_req_context);
991 		dma_trm_reset(&ohci->at_resp_context);
992 		break;
993 
994 	case MODIFY_USAGE:
995                 if (arg) {
996                         MOD_INC_USE_COUNT;
997                 } else {
998                         MOD_DEC_USE_COUNT;
999                 }
1000 		retval = 1;
1001                 break;
1002 
1003 	case ISO_LISTEN_CHANNEL:
1004         {
1005 		u64 mask;
1006 
1007 		if (arg<0 || arg>63) {
1008 			PRINT(KERN_ERR, ohci->id,
1009 			      "%s: IS0 listen channel %d is out of range",
1010 			      __FUNCTION__, arg);
1011 			return -EFAULT;
1012 		}
1013 
1014 		/* activate the legacy IR context */
1015 		if (ohci->ir_legacy_context.ohci == NULL) {
1016 			if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
1017 					      DMA_CTX_ISO, 0, IR_NUM_DESC,
1018 					      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
1019 					      OHCI1394_IsoRcvContextBase) < 0) {
1020 				PRINT(KERN_ERR, ohci->id, "%s: failed to allocate an IR context",
1021 				      __FUNCTION__);
1022 				return -ENOMEM;
1023 			}
1024 			ohci->ir_legacy_channels = 0;
1025 			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1026 
1027 			DBGMSG(ohci->id, "ISO receive legacy context activated");
1028 		}
1029 
1030 		mask = (u64)0x1<<arg;
1031 
1032                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1033 
1034 		if (ohci->ISO_channel_usage & mask) {
1035 			PRINT(KERN_ERR, ohci->id,
1036 			      "%s: IS0 listen channel %d is already used",
1037 			      __FUNCTION__, arg);
1038 			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1039 			return -EFAULT;
1040 		}
1041 
1042 		ohci->ISO_channel_usage |= mask;
1043 		ohci->ir_legacy_channels |= mask;
1044 
1045 		if (arg>31)
1046 			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1047 				  1<<(arg-32));
1048 		else
1049 			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1050 				  1<<arg);
1051 
1052                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1053                 DBGMSG(ohci->id, "Listening enabled on channel %d", arg);
1054                 break;
1055         }
1056 	case ISO_UNLISTEN_CHANNEL:
1057         {
1058 		u64 mask;
1059 
1060 		if (arg<0 || arg>63) {
1061 			PRINT(KERN_ERR, ohci->id,
1062 			      "%s: IS0 unlisten channel %d is out of range",
1063 			      __FUNCTION__, arg);
1064 			return -EFAULT;
1065 		}
1066 
1067 		mask = (u64)0x1<<arg;
1068 
1069                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1070 
1071 		if (!(ohci->ISO_channel_usage & mask)) {
1072 			PRINT(KERN_ERR, ohci->id,
1073 			      "%s: IS0 unlisten channel %d is not used",
1074 			      __FUNCTION__, arg);
1075 			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1076 			return -EFAULT;
1077 		}
1078 
1079 		ohci->ISO_channel_usage &= ~mask;
1080 		ohci->ir_legacy_channels &= ~mask;
1081 
1082 		if (arg>31)
1083 			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1084 				  1<<(arg-32));
1085 		else
1086 			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1087 				  1<<arg);
1088 
1089                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1090                 DBGMSG(ohci->id, "Listening disabled on channel %d", arg);
1091 
1092 		if (ohci->ir_legacy_channels == 0) {
1093 			free_dma_rcv_ctx(&ohci->ir_legacy_context);
1094 			DBGMSG(ohci->id, "ISO receive legacy context deactivated");
1095 		}
1096                 break;
1097         }
1098 	default:
1099 		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1100 			cmd);
1101 		break;
1102 	}
1103 	return retval;
1104 }
1105 
1106 /***********************************
1107  * rawiso ISO reception            *
1108  ***********************************/
1109 
1110 /*
1111   We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1112   buffer is split into "blocks" (regions described by one DMA
1113   descriptor). Each block must be one page or less in size, and
1114   must not cross a page boundary.
1115 
1116   There is one little wrinkle with buffer-fill mode: a packet that
1117   starts in the final block may wrap around into the first block. But
1118   the user API expects all packets to be contiguous. Our solution is
1119   to keep the very last page of the DMA buffer in reserve - if a
1120   packet spans the gap, we copy its tail into this page.
1121 */
1122 
1123 struct ohci_iso_recv {
1124 	struct ti_ohci *ohci;
1125 
1126 	struct ohci1394_iso_tasklet task;
1127 	int task_active;
1128 
1129 	enum { BUFFER_FILL_MODE,
1130 	       PACKET_PER_BUFFER_MODE } dma_mode;
1131 
1132 	/* memory and PCI mapping for the DMA descriptors */
1133 	struct dma_prog_region prog;
1134 	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
1135 
1136 	/* how many DMA blocks fit in the buffer */
1137 	unsigned int nblocks;
1138 
1139 	/* stride of DMA blocks */
1140 	unsigned int buf_stride;
1141 
1142 	/* number of blocks to batch between interrupts */
1143 	int block_irq_interval;
1144 
1145 	/* block that DMA will finish next */
1146 	int block_dma;
1147 
1148 	/* (buffer-fill only) block that the reader will release next */
1149 	int block_reader;
1150 
1151 	/* (buffer-fill only) bytes of buffer the reader has released,
1152 	   less than one block */
1153 	int released_bytes;
1154 
1155 	/* (buffer-fill only) buffer offset at which the next packet will appear */
1156 	int dma_offset;
1157 
1158 	/* OHCI DMA context control registers */
1159 	u32 ContextControlSet;
1160 	u32 ContextControlClear;
1161 	u32 CommandPtr;
1162 	u32 ContextMatch;
1163 };
1164 
1165 static void ohci_iso_recv_task(unsigned long data);
1166 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1167 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1168 static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1169 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1170 
static int ohci_iso_recv_init(struct hpsb_iso *iso)
1172 {
1173 	struct ti_ohci *ohci = iso->host->hostdata;
1174 	struct ohci_iso_recv *recv;
1175 	int ctx;
1176 	int ret = -ENOMEM;
1177 
1178 	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1179 	if (!recv)
1180 		return -ENOMEM;
1181 
1182 	iso->hostdata = recv;
1183 	recv->ohci = ohci;
1184 	recv->task_active = 0;
1185 	dma_prog_region_init(&recv->prog);
1186 	recv->block = NULL;
1187 
1188 	/* use buffer-fill mode, unless irq_interval is 1
1189 	   (note: multichannel requires buffer-fill) */
1190 
1191 	if (iso->irq_interval == 1 && iso->channel != -1) {
1192 		recv->dma_mode = PACKET_PER_BUFFER_MODE;
1193 	} else {
1194 		recv->dma_mode = BUFFER_FILL_MODE;
1195 	}
1196 
1197 	/* set nblocks, buf_stride, block_irq_interval */
1198 
1199 	if (recv->dma_mode == BUFFER_FILL_MODE) {
1200 		recv->buf_stride = PAGE_SIZE;
1201 
1202 		/* one block per page of data in the DMA buffer, minus the final guard page */
1203 		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1204 		if (recv->nblocks < 3) {
1205 			DBGMSG(ohci->id, "ohci_iso_recv_init: DMA buffer too small");
1206 			goto err;
1207 		}
1208 
1209 		/* iso->irq_interval is in packets - translate that to blocks */
1210 		/* (err, sort of... 1 is always the safest value) */
1211 		recv->block_irq_interval = iso->irq_interval / recv->nblocks;
1212 		if (recv->block_irq_interval*4 > recv->nblocks)
1213 			recv->block_irq_interval = recv->nblocks/4;
1214 		if (recv->block_irq_interval < 1)
1215 			recv->block_irq_interval = 1;
1216 
1217 	} else {
1218 		int max_packet_size;
1219 
1220 		recv->nblocks = iso->buf_packets;
1221 		recv->block_irq_interval = 1;
1222 
1223 		/* choose a buffer stride */
1224 		/* must be a power of 2, and <= PAGE_SIZE */
1225 
1226 		max_packet_size = iso->buf_size / iso->buf_packets;
1227 
1228 		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1229 		    recv->buf_stride *= 2);
1230 
1231 		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1232 		   recv->buf_stride > PAGE_SIZE) {
1233 			/* this shouldn't happen, but anyway... */
1234 			DBGMSG(ohci->id, "ohci_iso_recv_init: problem choosing a buffer stride");
1235 			goto err;
1236 		}
1237 	}
1238 
1239 	recv->block_reader = 0;
1240 	recv->released_bytes = 0;
1241 	recv->block_dma = 0;
1242 	recv->dma_offset = 0;
1243 
1244 	/* size of DMA program = one descriptor per block */
1245 	if (dma_prog_region_alloc(&recv->prog,
1246 				 sizeof(struct dma_cmd) * recv->nblocks,
1247 				 recv->ohci->dev))
1248 		goto err;
1249 
1250 	recv->block = (struct dma_cmd*) recv->prog.kvirt;
1251 
1252 	ohci1394_init_iso_tasklet(&recv->task,
1253 				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1254 				                       OHCI_ISO_RECEIVE,
1255 				  ohci_iso_recv_task, (unsigned long) iso);
1256 
1257 	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
1258 		goto err;
1259 
1260 	recv->task_active = 1;
1261 
1262 	/* recv context registers are spaced 32 bytes apart */
1263 	ctx = recv->task.context;
1264 	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1265 	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1266 	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1267 	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1268 
1269 	if (iso->channel == -1) {
1270 		/* clear multi-channel selection mask */
1271 		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1272 		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1273 	}
1274 
1275 	/* write the DMA program */
1276 	ohci_iso_recv_program(iso);
1277 
1278 	DBGMSG(ohci->id, "ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1279 	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1280 	       recv->dma_mode == BUFFER_FILL_MODE ?
1281 	       "buffer-fill" : "packet-per-buffer",
1282 	       iso->buf_size/PAGE_SIZE, iso->buf_size,
1283 	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1284 
1285 	return 0;
1286 
1287 err:
1288 	ohci_iso_recv_shutdown(iso);
1289 	return ret;
1290 }
1291 
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1293 {
1294 	struct ohci_iso_recv *recv = iso->hostdata;
1295 
1296 	/* disable interrupts */
1297 	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1298 
1299 	/* halt DMA */
1300 	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1301 }
1302 
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1304 {
1305 	struct ohci_iso_recv *recv = iso->hostdata;
1306 
1307 	if (recv->task_active) {
1308 		ohci_iso_recv_stop(iso);
1309 		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1310 		recv->task_active = 0;
1311 	}
1312 
1313 	dma_prog_region_free(&recv->prog);
1314 	kfree(recv);
1315 	iso->hostdata = NULL;
1316 }
1317 
1318 /* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
1320 {
1321 	struct ohci_iso_recv *recv = iso->hostdata;
1322 	int blk;
1323 
1324 	/* address of 'branch' field in previous DMA descriptor */
1325 	u32 *prev_branch = NULL;
1326 
1327 	for (blk = 0; blk < recv->nblocks; blk++) {
1328 		u32 control;
1329 
1330 		/* the DMA descriptor */
1331 		struct dma_cmd *cmd = &recv->block[blk];
1332 
1333 		/* offset of the DMA descriptor relative to the DMA prog buffer */
1334 		unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1335 
1336 		/* offset of this packet's data within the DMA buffer */
1337 		unsigned long buf_offset = blk * recv->buf_stride;
1338 
1339 		if (recv->dma_mode == BUFFER_FILL_MODE) {
1340 			control = 2 << 28; /* INPUT_MORE */
1341 		} else {
1342 			control = 3 << 28; /* INPUT_LAST */
1343 		}
1344 
1345 		control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1346 
1347 		/* interrupt on last block, and at intervals */
1348 		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1349 			control |= 3 << 20; /* want interrupt */
1350 		}
1351 
1352 		control |= 3 << 18; /* enable branch to address */
1353 		control |= recv->buf_stride;
1354 
1355 		cmd->control = cpu_to_le32(control);
1356 		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1357 		cmd->branchAddress = 0; /* filled in on next loop */
1358 		cmd->status = cpu_to_le32(recv->buf_stride);
1359 
1360 		/* link the previous descriptor to this one */
1361 		if (prev_branch) {
1362 			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1363 		}
1364 
1365 		prev_branch = &cmd->branchAddress;
1366 	}
1367 
1368 	/* the final descriptor's branch address and Z should be left at 0 */
1369 }
1370 
1371 /* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1373 {
1374 	struct ohci_iso_recv *recv = iso->hostdata;
1375 	int reg, i;
1376 
1377 	if (channel < 32) {
1378 		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1379 		i = channel;
1380 	} else {
1381 		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1382 		i = channel - 32;
1383 	}
1384 
1385 	reg_write(recv->ohci, reg, (1 << i));
1386 
1387 	/* issue a dummy read to force all PCI writes to be posted immediately */
1388 	mb();
1389 	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1390 }
1391 
static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1393 {
1394 	struct ohci_iso_recv *recv = iso->hostdata;
1395 	int i;
1396 
1397 	for (i = 0; i < 64; i++) {
1398 		if (mask & (1ULL << i)) {
1399 			if (i < 32)
1400 				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1401 			else
1402 				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1403 		} else {
1404 			if (i < 32)
1405 				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1406 			else
1407 				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1408 		}
1409 	}
1410 
1411 	/* issue a dummy read to force all PCI writes to be posted immediately */
1412 	mb();
1413 	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1414 }
1415 
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1417 {
1418 	struct ohci_iso_recv *recv = iso->hostdata;
1419 	u32 command, contextMatch;
1420 
1421 	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1422 	wmb();
1423 
1424 	/* always keep ISO headers */
1425 	command = (1 << 30);
1426 
1427 	if (recv->dma_mode == BUFFER_FILL_MODE)
1428 		command |= (1 << 31);
1429 
1430 	reg_write(recv->ohci, recv->ContextControlSet, command);
1431 
1432 	/* match on specified tags */
1433 	contextMatch = tag_mask << 28;
1434 
1435 	if (iso->channel == -1) {
1436 		/* enable multichannel reception */
1437 		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1438 	} else {
1439 		/* listen on channel */
1440 		contextMatch |= iso->channel;
1441 	}
1442 
1443 	if (cycle != -1) {
1444 		u32 seconds;
1445 
1446 		/* enable cycleMatch */
1447 		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1448 
1449 		/* set starting cycle */
1450 		cycle &= 0x1FFF;
1451 
1452 		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1453 		   just snarf them from the current time */
1454 		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1455 
1456 		/* advance one second to give some extra time for DMA to start */
1457 		seconds += 1;
1458 
1459 		cycle |= (seconds & 3) << 13;
1460 
1461 		contextMatch |= cycle << 12;
1462 	}
1463 
1464 	if (sync != -1) {
1465 		/* set sync flag on first DMA descriptor */
1466 		struct dma_cmd *cmd = &recv->block[recv->block_dma];
1467 		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1468 
1469 		/* match sync field */
1470 		contextMatch |= (sync&0xf)<<8;
1471 	}
1472 
1473 	reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1474 
1475 	/* address of first descriptor block */
1476 	command = dma_prog_region_offset_to_bus(&recv->prog,
1477 						recv->block_dma * sizeof(struct dma_cmd));
1478 	command |= 1; /* Z=1 */
1479 
1480 	reg_write(recv->ohci, recv->CommandPtr, command);
1481 
1482 	/* enable interrupts */
1483 	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1484 
1485 	wmb();
1486 
1487 	/* run */
1488 	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1489 
1490 	/* issue a dummy read of the cycle timer register to force
1491 	   all PCI writes to be posted immediately */
1492 	mb();
1493 	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1494 
1495 	/* check RUN */
1496 	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1497 		PRINT(KERN_ERR, recv->ohci->id,
1498 		      "Error starting IR DMA (ContextControl 0x%08x)\n",
1499 		      reg_read(recv->ohci, recv->ContextControlSet));
1500 		return -1;
1501 	}
1502 
1503 	return 0;
1504 }
1505 
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1507 {
	/* re-use the DMA descriptor for the block by linking the
	   previous descriptor to it */
1510 
1511 	int next_i = block;
1512 	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1513 
1514 	struct dma_cmd *next = &recv->block[next_i];
1515 	struct dma_cmd *prev = &recv->block[prev_i];
1516 
1517 	/* 'next' becomes the new end of the DMA chain,
1518 	   so disable branch and enable interrupt */
1519 	next->branchAddress = 0;
1520 	next->control |= cpu_to_le32(3 << 20);
1521 	next->status = cpu_to_le32(recv->buf_stride);
1522 
1523 	/* link prev to next */
1524 	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1525 									sizeof(struct dma_cmd) * next_i)
1526 					  | 1); /* Z=1 */
1527 
	/* the previous descriptor keeps its interrupt only at block_irq_interval boundaries */
1529 	if ((prev_i % recv->block_irq_interval) == 0) {
1530 		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1531 	} else {
1532 		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1533 	}
1534 	wmb();
1535 
1536 	/* wake up DMA in case it fell asleep */
1537 	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1538 }
1539 
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
1542 {
1543 	int len;
1544 
1545 	/* release the memory where the packet was */
1546 	len = info->len;
1547 
1548 	/* add the wasted space for padding to 4 bytes */
1549 	if (len % 4)
1550 		len += 4 - (len % 4);
1551 
1552 	/* add 8 bytes for the OHCI DMA data format overhead */
1553 	len += 8;
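	/* (one quadlet of iso header before the payload and one
	 * xferStatus/timeStamp quadlet after it, matching the layout
	 * parsed in ohci_iso_recv_bufferfill_parse) */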
1554 
1555 	recv->released_bytes += len;
1556 
1557 	/* have we released enough memory for one block? */
1558 	while (recv->released_bytes > recv->buf_stride) {
1559 		ohci_iso_recv_release_block(recv, recv->block_reader);
1560 		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1561 		recv->released_bytes -= recv->buf_stride;
1562 	}
1563 }
1564 
static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1566 {
1567 	struct ohci_iso_recv *recv = iso->hostdata;
1568 	if (recv->dma_mode == BUFFER_FILL_MODE) {
1569 		ohci_iso_recv_bufferfill_release(recv, info);
1570 	} else {
1571 		ohci_iso_recv_release_block(recv, info - iso->infos);
1572 	}
1573 }
1574 
1575 /* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1577 {
1578 	int wake = 0;
1579 	int runaway = 0;
1580 
1581 	while (1) {
1582 		/* we expect the next parsable packet to begin at recv->dma_offset */
1583 		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1584 
1585 		unsigned int offset;
1586 		unsigned short len, cycle;
1587 		unsigned char channel, tag, sy;
1588 
1589 		unsigned char *p = iso->data_buf.kvirt;
1590 
1591 		unsigned int this_block = recv->dma_offset/recv->buf_stride;
1592 
1593 		/* don't loop indefinitely */
1594 		if (runaway++ > 100000) {
1595 			atomic_inc(&iso->overflows);
1596 			PRINT(KERN_ERR, recv->ohci->id,
1597 			      "IR DMA error - Runaway during buffer parsing!\n");
1598 			break;
1599 		}
1600 
1601 		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1602 		if (this_block == recv->block_dma)
1603 			break;
1604 
1605 		wake = 1;
1606 
1607 		/* parse data length, tag, channel, and sy */
1608 
1609 		/* note: we keep our own local copies of 'len' and 'offset'
1610 		   so the user can't mess with them by poking in the mmap area */
1611 
1612 		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1613 
1614 		if (len > 4096) {
1615 			PRINT(KERN_ERR, recv->ohci->id,
1616 			      "IR DMA error - bogus 'len' value %u\n", len);
1617 		}
1618 
1619 		channel = p[recv->dma_offset+1] & 0x3F;
1620 		tag = p[recv->dma_offset+1] >> 6;
1621 		sy = p[recv->dma_offset+0] & 0xF;
1622 
1623 		/* advance to data payload */
1624 		recv->dma_offset += 4;
1625 
1626 		/* check for wrap-around */
1627 		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1628 			recv->dma_offset -= recv->buf_stride*recv->nblocks;
1629 		}
1630 
1631 		/* dma_offset now points to the first byte of the data payload */
1632 		offset = recv->dma_offset;
1633 
1634 		/* advance to xferStatus/timeStamp */
1635 		recv->dma_offset += len;
1636 
1637 		/* payload is padded to 4 bytes */
1638 		if (len % 4) {
1639 			recv->dma_offset += 4 - (len%4);
1640 		}
1641 
1642 		/* check for wrap-around */
1643 		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1644 			/* uh oh, the packet data wraps from the last
1645                            to the first DMA block - make the packet
1646                            contiguous by copying its "tail" into the
1647                            guard page */
1648 
1649 			int guard_off = recv->buf_stride*recv->nblocks;
1650 			int tail_len = len - (guard_off - offset);
1651 
1652 			if (tail_len > 0  && tail_len < recv->buf_stride) {
1653 				memcpy(iso->data_buf.kvirt + guard_off,
1654 				       iso->data_buf.kvirt,
1655 				       tail_len);
1656 			}
1657 
1658 			recv->dma_offset -= recv->buf_stride*recv->nblocks;
1659 		}
1660 
1661 		/* parse timestamp */
1662 		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1663 		cycle &= 0x1FFF;
1664 
1665 		/* advance to next packet */
1666 		recv->dma_offset += 4;
1667 
1668 		/* check for wrap-around */
1669 		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1670 			recv->dma_offset -= recv->buf_stride*recv->nblocks;
1671 		}
1672 
1673 		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
1674 	}
1675 
1676 	if (wake)
1677 		hpsb_iso_wake(iso);
1678 }
1679 
1680 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1681 {
1682 	int loop;
1683 
1684 	/* loop over all blocks */
1685 	for (loop = 0; loop < recv->nblocks; loop++) {
1686 
1687 		/* check block_dma to see if it's done */
1688 		struct dma_cmd *im = &recv->block[recv->block_dma];
1689 
1690 		/* check the DMA descriptor for new writes to xferStatus */
1691 		u16 xferstatus = le32_to_cpu(im->status) >> 16;
1692 
1693 		/* rescount is the number of bytes *remaining to be written* in the block */
1694 		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1695 
1696 		unsigned char event = xferstatus & 0x1F;
1697 
1698 		if (!event) {
1699 			/* nothing has happened to this block yet */
1700 			break;
1701 		}
1702 
1703 		if (event != 0x11) {
1704 			atomic_inc(&iso->overflows);
1705 			PRINT(KERN_ERR, recv->ohci->id,
1706 			      "IR DMA error - OHCI error code 0x%02x\n", event);
1707 		}
1708 
1709 		if (rescount != 0) {
1710 			/* the card is still writing to this block;
1711 			   we can't touch it until it's done */
1712 			break;
1713 		}
1714 
1715 		/* OK, the block is finished... */
1716 
1717 		/* sync our view of the block */
1718 		dma_region_sync(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1719 
1720 		/* reset the DMA descriptor */
1721 		im->status = recv->buf_stride;
1722 
1723 		/* advance block_dma */
1724 		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1725 
1726 		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1727 			atomic_inc(&iso->overflows);
1728 			DBGMSG(recv->ohci->id, "ISO reception overflow - "
1729 			       "ran out of DMA blocks");
1730 		}
1731 	}
1732 
1733 	/* parse any packets that have arrived */
1734 	ohci_iso_recv_bufferfill_parse(iso, recv);
1735 }
1736 
1737 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1738 {
1739 	int count;
1740 	int wake = 0;
1741 
1742 	/* loop over the entire buffer */
1743 	for (count = 0; count < recv->nblocks; count++) {
1744 		u32 packet_len = 0;
1745 
1746 		/* pointer to the DMA descriptor */
1747 		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1748 
1749 		/* check the DMA descriptor for new writes to xferStatus */
1750 		u16 xferstatus = le32_to_cpu(il->status) >> 16;
1751 		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1752 
1753 		unsigned char event = xferstatus & 0x1F;
1754 
1755 		if (!event) {
1756 			/* this packet hasn't come in yet; we are done for now */
1757 			goto out;
1758 		}
1759 
1760 		if (event == 0x11) {
1761 			/* packet received successfully! */
1762 
1763 			/* rescount is the number of bytes *remaining* in the packet buffer,
1764 			   after the packet was written */
1765 			packet_len = recv->buf_stride - rescount;
1766 
1767 		} else if (event == 0x02) {
1768 			PRINT(KERN_ERR, recv->ohci->id, "IR DMA error - packet too long for buffer\n");
1769 		} else if (event) {
1770 			PRINT(KERN_ERR, recv->ohci->id, "IR DMA error - OHCI error code 0x%02x\n", event);
1771 		}
1772 
1773 		/* sync our view of the buffer */
1774 		dma_region_sync(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1775 
1776 		/* record the per-packet info */
1777 		{
1778 			/* the 8-byte iso header precedes the data payload */
1779 			unsigned char *hdr;
1780 
1781 			unsigned int offset;
1782 			unsigned short cycle;
1783 			unsigned char channel, tag, sy;
1784 
1785 			offset = iso->pkt_dma * recv->buf_stride;
1786 			hdr = iso->data_buf.kvirt + offset;
1787 
1788 			/* skip iso header */
1789 			offset += 8;
1790 			packet_len -= 8;
1791 
1792 			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1793 			channel = hdr[5] & 0x3F;
1794 			tag = hdr[5] >> 6;
1795 			sy = hdr[4] & 0xF;
1796 
1797 			hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
1798 		}
1799 
1800 		/* reset the DMA descriptor */
1801 		il->status = recv->buf_stride;
1802 
1803 		wake = 1;
1804 		recv->block_dma = iso->pkt_dma;
1805 	}
1806 
1807 out:
1808 	if (wake)
1809 		hpsb_iso_wake(iso);
1810 }
1811 
1812 static void ohci_iso_recv_task(unsigned long data)
1813 {
1814 	struct hpsb_iso *iso = (struct hpsb_iso*) data;
1815 	struct ohci_iso_recv *recv = iso->hostdata;
1816 
1817 	if (recv->dma_mode == BUFFER_FILL_MODE)
1818 		ohci_iso_recv_bufferfill_task(iso, recv);
1819 	else
1820 		ohci_iso_recv_packetperbuf_task(iso, recv);
1821 }
1822 
1823 /***********************************
1824  * rawiso ISO transmission         *
1825  ***********************************/
1826 
1827 struct ohci_iso_xmit {
1828 	struct ti_ohci *ohci;
1829 	struct dma_prog_region prog;
1830 	struct ohci1394_iso_tasklet task;
1831 	int task_active;
1832 
1833 	u32 ContextControlSet;
1834 	u32 ContextControlClear;
1835 	u32 CommandPtr;
1836 };
1837 
1838 /* transmission DMA program:
1839    one OUTPUT_MORE_IMMEDIATE for the IT header
1840    one OUTPUT_LAST for the buffer data */
1841 
1842 struct iso_xmit_cmd {
1843 	struct dma_cmd output_more_immediate;
1844 	u8 iso_hdr[8];
1845 	u32 unused[2];
1846 	struct dma_cmd output_last;
1847 };
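
/* Each iso_xmit_cmd occupies 48 bytes: the OUTPUT_MORE_IMMEDIATE
 * descriptor, 16 bytes of immediate data (the 8-byte iso header plus
 * padding), and the OUTPUT_LAST descriptor - i.e. three 16-byte
 * descriptors, which is why the CommandPtr and branchAddress values
 * below carry Z=3. */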
1848 
1849 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1850 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1851 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1852 static void ohci_iso_xmit_task(unsigned long data);
1853 
1854 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1855 {
1856 	struct ohci_iso_xmit *xmit;
1857 	unsigned int prog_size;
1858 	int ctx;
1859 	int ret = -ENOMEM;
1860 
1861 	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
1862 	if (!xmit)
1863 		return -ENOMEM;
1864 
1865 	iso->hostdata = xmit;
1866 	xmit->ohci = iso->host->hostdata;
1867 	xmit->task_active = 0;
1868 
1869 	dma_prog_region_init(&xmit->prog);
1870 
1871 	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1872 
1873 	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1874 		goto err;
1875 
1876 	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1877 				  ohci_iso_xmit_task, (unsigned long) iso);
1878 
1879 	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
1880 		goto err;
1881 
1882 	xmit->task_active = 1;
1883 
1884 	/* xmit context registers are spaced 16 bytes apart */
1885 	ctx = xmit->task.context;
1886 	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1887 	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1888 	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1889 
1890 	return 0;
1891 
1892 err:
1893 	ohci_iso_xmit_shutdown(iso);
1894 	return ret;
1895 }
1896 
1897 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1898 {
1899 	struct ohci_iso_xmit *xmit = iso->hostdata;
1900 
1901 	/* disable interrupts */
1902 	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1903 
1904 	/* halt DMA */
1905 	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1906 		/* XXX the DMA context will lock up if you try to send too much data! */
1907 		PRINT(KERN_ERR, xmit->ohci->id,
1908 		      "you probably exceeded the OHCI card's bandwidth limit - "
1909 		      "reload the module and reduce xmit bandwidth");
1910 	}
1911 }
1912 
1913 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1914 {
1915 	struct ohci_iso_xmit *xmit = iso->hostdata;
1916 
1917 	if (xmit->task_active) {
1918 		ohci_iso_xmit_stop(iso);
1919 		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1920 		xmit->task_active = 0;
1921 	}
1922 
1923 	dma_prog_region_free(&xmit->prog);
1924 	kfree(xmit);
1925 	iso->hostdata = NULL;
1926 }
1927 
1928 static void ohci_iso_xmit_task(unsigned long data)
1929 {
1930 	struct hpsb_iso *iso = (struct hpsb_iso*) data;
1931 	struct ohci_iso_xmit *xmit = iso->hostdata;
1932 	int wake = 0;
1933 	int count;
1934 
1935 	/* check the whole buffer if necessary, starting at pkt_dma */
1936 	for (count = 0; count < iso->buf_packets; count++) {
1937 		int cycle;
1938 
1939 		/* DMA descriptor */
1940 		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
1941 
1942 		/* check for new writes to xferStatus */
1943 		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
1944 		u8  event = xferstatus & 0x1F;
1945 
1946 		if (!event) {
1947 			/* packet hasn't been sent yet; we are done for now */
1948 			break;
1949 		}
1950 
1951 		if (event != 0x11)
1952 			PRINT(KERN_ERR, xmit->ohci->id,
1953 			      "IT DMA error - OHCI error code 0x%02x\n", event);
1954 
1955 		/* at least one packet went out, so wake up the writer */
1956 		wake = 1;
1957 
1958 		/* parse cycle */
1959 		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
1960 
1961 		/* tell the subsystem the packet has gone out */
1962 		hpsb_iso_packet_sent(iso, cycle, event != 0x11);
1963 
1964 		/* reset the DMA descriptor for next time */
1965 		cmd->output_last.status = 0;
1966 	}
1967 
1968 	if (wake)
1969 		hpsb_iso_wake(iso);
1970 }
1971 
1972 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1973 {
1974 	struct ohci_iso_xmit *xmit = iso->hostdata;
1975 
1976 	int next_i, prev_i;
1977 	struct iso_xmit_cmd *next, *prev;
1978 
1979 	unsigned int offset;
1980 	unsigned short len;
1981 	unsigned char tag, sy;
1982 
1983 	/* check that the packet doesn't cross a page boundary
1984 	   (we could allow this if we added OUTPUT_MORE descriptor support) */
1985 	if (cross_bound(info->offset, info->len)) {
1986 		PRINT(KERN_ERR, xmit->ohci->id,
1987 		      "rawiso xmit: packet %u crosses a page boundary",
1988 		      iso->first_packet);
1989 		return -EINVAL;
1990 	}
1991 
1992 	offset = info->offset;
1993 	len = info->len;
1994 	tag = info->tag;
1995 	sy = info->sy;
1996 
1997 	/* sync up the card's view of the buffer */
1998 	dma_region_sync(&iso->data_buf, offset, len);
1999 
2000 	/* append first_packet to the DMA chain */
2001 	/* by linking the previous descriptor to it */
2002 	/* (next will become the new end of the DMA chain) */
2003 
2004 	next_i = iso->first_packet;
2005 	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2006 
2007 	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2008 	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2009 
2010 	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2011 	memset(next, 0, sizeof(struct iso_xmit_cmd));
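	/* 0x02000008 = OUTPUT_MORE with the IMMEDIATE key and reqCount 8,
	 * so only the 8 iso header bytes below go out on the wire */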
2012 	next->output_more_immediate.control = cpu_to_le32(0x02000008);
2013 
2014 	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2015 
2016 	/* tcode = 0xA, and sy */
2017 	next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2018 
2019 	/* tag and channel number */
2020 	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2021 
2022 	/* transmission speed */
2023 	next->iso_hdr[2] = iso->speed & 0x7;
2024 
2025 	/* payload size */
2026 	next->iso_hdr[6] = len & 0xFF;
2027 	next->iso_hdr[7] = len >> 8;
2028 
2029 	/* set up the OUTPUT_LAST */
2030 	next->output_last.control = cpu_to_le32(1 << 28);
2031 	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2032 	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2033 	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2034 	next->output_last.control |= cpu_to_le32(len);
2035 
2036 	/* payload bus address */
2037 	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2038 
2039 	/* leave branchAddress at zero for now */
2040 
2041 	/* re-write the previous DMA descriptor to chain to this one */
2042 
2043 	/* set prev branch address to point to next (Z=3) */
2044 	prev->output_last.branchAddress = cpu_to_le32(
2045 		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2046 
2047 	/* disable interrupt, unless required by the IRQ interval */
2048 	if (prev_i % iso->irq_interval) {
2049 		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2050 	} else {
2051 		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2052 	}
2053 
2054 	wmb();
2055 
2056 	/* wake DMA in case it is sleeping */
2057 	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2058 
2059 	/* issue a dummy read of the cycle timer to force all PCI
2060 	   writes to be posted immediately */
2061 	mb();
2062 	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2063 
2064 	return 0;
2065 }
2066 
2067 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2068 {
2069 	struct ohci_iso_xmit *xmit = iso->hostdata;
2070 
2071 	/* clear out the control register */
2072 	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2073 	wmb();
2074 
2075 	/* address and length of first descriptor block (Z=3) */
2076 	reg_write(xmit->ohci, xmit->CommandPtr,
2077 		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2078 
2079 	/* cycle match */
2080 	if (cycle != -1) {
2081 		u32 start = cycle & 0x1FFF;
2082 
2083 		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2084 		   just snarf them from the current time */
2085 		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2086 
2087 		/* advance one second to give some extra time for DMA to start */
2088 		seconds += 1;
2089 
2090 		start |= (seconds & 3) << 13;
2091 
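		/* cycleMatchEnable is bit 31; the 15-bit match value
		 * (two 'seconds' bits + 13 cycle bits) sits in bits 30:16 */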
2092 		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2093 	}
2094 
2095 	/* enable interrupts */
2096 	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2097 
2098 	/* run */
2099 	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2100 	mb();
2101 
2102 	/* wait 100 usec to give the card time to go active */
2103 	udelay(100);
2104 
2105 	/* check the RUN bit */
2106 	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2107 		PRINT(KERN_ERR, xmit->ohci->id, "Error starting IT DMA (ContextControl 0x%08x)\n",
2108 		      reg_read(xmit->ohci, xmit->ContextControlSet));
2109 		return -1;
2110 	}
2111 
2112 	return 0;
2113 }
2114 
2115 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2116 {
2117 
2118 	switch(cmd) {
2119 	case XMIT_INIT:
2120 		return ohci_iso_xmit_init(iso);
2121 	case XMIT_START:
2122 		return ohci_iso_xmit_start(iso, arg);
2123 	case XMIT_STOP:
2124 		ohci_iso_xmit_stop(iso);
2125 		return 0;
2126 	case XMIT_QUEUE:
2127 		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2128 	case XMIT_SHUTDOWN:
2129 		ohci_iso_xmit_shutdown(iso);
2130 		return 0;
2131 
2132 	case RECV_INIT:
2133 		return ohci_iso_recv_init(iso);
2134 	case RECV_START: {
2135 		int *args = (int*) arg;
2136 		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2137 	}
2138 	case RECV_STOP:
2139 		ohci_iso_recv_stop(iso);
2140 		return 0;
2141 	case RECV_RELEASE:
2142 		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2143 		return 0;
2144 	case RECV_FLUSH:
2145 		ohci_iso_recv_task((unsigned long) iso);
2146 		return 0;
2147 	case RECV_SHUTDOWN:
2148 		ohci_iso_recv_shutdown(iso);
2149 		return 0;
2150 	case RECV_LISTEN_CHANNEL:
2151 		ohci_iso_recv_change_channel(iso, arg, 1);
2152 		return 0;
2153 	case RECV_UNLISTEN_CHANNEL:
2154 		ohci_iso_recv_change_channel(iso, arg, 0);
2155 		return 0;
2156 	case RECV_SET_CHANNEL_MASK:
2157 		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2158 		return 0;
2159 
2160 	default:
2161 		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2162 			cmd);
2163 		break;
2164 	}
2165 	return -EINVAL;
2166 }
2167 
2168 /***************************************
2169  * IEEE-1394 functionality section END *
2170  ***************************************/
2171 
2172 
2173 /********************************************************
2174  * Global stuff (interrupt handler, init/shutdown code) *
2175  ********************************************************/
2176 
2177 static void dma_trm_reset(struct dma_trm_ctx *d)
2178 {
2179 	unsigned long flags;
2180 	LIST_HEAD(packet_list);
2181 
2182 	ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2183 
2184 	/* Lock the context, reset it and release it. Move the packets
2185 	 * that were pending in the context to packet_list and free
2186 	 * them after releasing the lock. */
2187 
2188 	spin_lock_irqsave(&d->lock, flags);
2189 
2190 	list_splice(&d->fifo_list, &packet_list);
2191 	list_splice(&d->pending_list, &packet_list);
2192 	INIT_LIST_HEAD(&d->fifo_list);
2193 	INIT_LIST_HEAD(&d->pending_list);
2194 
2195 	d->branchAddrPtr = NULL;
2196 	d->sent_ind = d->prg_ind;
2197 	d->free_prgs = d->num_desc;
2198 
2199 	spin_unlock_irqrestore(&d->lock, flags);
2200 
2201 	/* Now process subsystem callbacks for the packets from the
2202 	 * context. */
2203 
2204 	while (!list_empty(&packet_list)) {
2205 		struct hpsb_packet *p = driver_packet(packet_list.next);
2206 		PRINT(KERN_INFO, d->ohci->id,
2207 		      "AT dma reset ctx=%d, aborting transmission", d->ctx);
2208 		list_del(&p->driver_list);
2209 		hpsb_packet_sent(d->ohci->host, p, ACKX_ABORTED);
2210 	}
2211 }
2212 
2213 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2214 				       quadlet_t rx_event,
2215 				       quadlet_t tx_event)
2216 {
2217 	struct list_head *lh;
2218 	struct ohci1394_iso_tasklet *t;
2219 	unsigned long mask;
2220 
2221 	spin_lock(&ohci->iso_tasklet_list_lock);
2222 
2223 	list_for_each(lh, &ohci->iso_tasklet_list) {
2224 		t = list_entry(lh, struct ohci1394_iso_tasklet, link);
2225 		mask = 1 << t->context;
2226 
2227 		if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2228 			tasklet_schedule(&t->tasklet);
2229 		else if (rx_event & mask)
2230 			tasklet_schedule(&t->tasklet);
2231 	}
2232 
2233 	spin_unlock(&ohci->iso_tasklet_list_lock);
2235 }
2236 
2237 static void ohci_irq_handler(int irq, void *dev_id,
2238                              struct pt_regs *regs_are_unused)
2239 {
2240 	quadlet_t event, node_id;
2241 	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2242 	struct hpsb_host *host = ohci->host;
2243 	int phyid = -1, isroot = 0;
2244 	unsigned long flags;
2245 
2246 	/* Read and clear the interrupt event register.  Don't clear
2247 	 * the busReset event, though. This is done when we get the
2248 	 * selfIDComplete interrupt. */
2249 	spin_lock_irqsave(&ohci->event_lock, flags);
2250 	event = reg_read(ohci, OHCI1394_IntEventClear);
2251 	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2252 	spin_unlock_irqrestore(&ohci->event_lock, flags);
2253 
2254 	if (!event) return;
2255 
2256 	DBGMSG(ohci->id, "IntEvent: %08x", event);
2257 
2258 	if (event & OHCI1394_unrecoverableError) {
2259 		int ctx;
2260 		PRINT(KERN_ERR, ohci->id, "Unrecoverable error!");
2261 
2262 		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2263 			PRINT(KERN_ERR, ohci->id, "Async Req Tx Context died: "
2264 				"ctrl[%08x] cmdptr[%08x]",
2265 				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2266 				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2267 
2268 		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2269 			PRINT(KERN_ERR, ohci->id, "Async Rsp Tx Context died: "
2270 				"ctrl[%08x] cmdptr[%08x]",
2271 				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2272 				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2273 
2274 		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2275 			PRINT(KERN_ERR, ohci->id, "Async Req Rcv Context died: "
2276 				"ctrl[%08x] cmdptr[%08x]",
2277 				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2278 				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2279 
2280 		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2281 			PRINT(KERN_ERR, ohci->id, "Async Rsp Rcv Context died: "
2282 				"ctrl[%08x] cmdptr[%08x]",
2283 				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2284 				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2285 
2286 		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2287 			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2288 				PRINT(KERN_ERR, ohci->id, "Iso Xmit %d Context died: "
2289 					"ctrl[%08x] cmdptr[%08x]", ctx,
2290 					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2291 					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2292 		}
2293 
2294 		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2295 			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2296 				PRINT(KERN_ERR, ohci->id, "Iso Recv %d Context died: "
2297 					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2298 					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2299 					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2300 					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2301 		}
2302 
2303 		event &= ~OHCI1394_unrecoverableError;
2304 	}
2305 
2306 	if (event & OHCI1394_cycleInconsistent) {
2307 		/* We subscribe to the cycleInconsistent event only to
2308 		 * clear the corresponding event bit... otherwise,
2309 		 * isochronous cycleMatch DMA won't work. */
2310 		DBGMSG(ohci->id, "OHCI1394_cycleInconsistent");
2311 		event &= ~OHCI1394_cycleInconsistent;
2312 	}
2313 
2314 	if (event & OHCI1394_busReset) {
2315 		/* The busReset event bit can't be cleared during the
2316 		 * selfID phase, so we disable busReset interrupts, to
2317 		 * avoid burying the cpu in interrupt requests. */
2318 		spin_lock_irqsave(&ohci->event_lock, flags);
2319 		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2320 
2321 		if (ohci->check_busreset) {
2322 			int loop_count = 0;
2323 
2324 			udelay(10);
2325 
2326 			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2327 				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2328 
2329 				spin_unlock_irqrestore(&ohci->event_lock, flags);
2330 				udelay(10);
2331 				spin_lock_irqsave(&ohci->event_lock, flags);
2332 
2333 				/* The loop counter check is to prevent the driver
2334 				 * from remaining in this state forever. On the
2335 				 * initial bus reset the loop would otherwise continue
2336 				 * forever and the system would hang until some device
2337 				 * is manually plugged into or out of a port! The forced
2338 				 * reset seems to solve this problem. This mainly affects nForce2. */
2339 				if (loop_count > 10000) {
2340 					ohci_devctl(host, RESET_BUS, LONG_RESET);
2341 					DBGMSG(ohci->id, "Detected bus-reset loop. Forced a bus reset!");
2342 					loop_count = 0;
2343 				}
2344 
2345 				loop_count++;
2346 			}
2347 		}
2348 		spin_unlock_irqrestore(&ohci->event_lock, flags);
2349 		if (!host->in_bus_reset) {
2350 			DBGMSG(ohci->id, "irq_handler: Bus reset requested");
2351 
2352 			/* Subsystem call */
2353 			hpsb_bus_reset(ohci->host);
2354 		}
2355 		event &= ~OHCI1394_busReset;
2356 	}
2357 
2358 	/* XXX: We need a way to also queue the OHCI1394_reqTxComplete,
2359 	 * but for right now we simply run it upon reception, to make sure
2360 	 * we get sent acks before response packets. This sucks mainly
2361 	 * because it halts the interrupt handler.  */
2362 	if (event & OHCI1394_reqTxComplete) {
2363 		struct dma_trm_ctx *d = &ohci->at_req_context;
2364 		DBGMSG(ohci->id, "Got reqTxComplete interrupt "
2365 		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
2366 		if (reg_read(ohci, d->ctrlSet) & 0x800)
2367 			ohci1394_stop_context(ohci, d->ctrlClear,
2368 					      "reqTxComplete");
2369 		else
2370 			dma_trm_tasklet ((unsigned long)d);
2371 		event &= ~OHCI1394_reqTxComplete;
2372 	}
2373 	if (event & OHCI1394_respTxComplete) {
2374 		struct dma_trm_ctx *d = &ohci->at_resp_context;
2375 		DBGMSG(ohci->id, "Got respTxComplete interrupt "
2376 		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
2377 		if (reg_read(ohci, d->ctrlSet) & 0x800)
2378 			ohci1394_stop_context(ohci, d->ctrlClear,
2379 					      "respTxComplete");
2380 		else
2381 			tasklet_schedule(&d->task);
2382 		event &= ~OHCI1394_respTxComplete;
2383 	}
2384 	if (event & OHCI1394_RQPkt) {
2385 		struct dma_rcv_ctx *d = &ohci->ar_req_context;
2386 		DBGMSG(ohci->id, "Got RQPkt interrupt status=0x%08X",
2387 		       reg_read(ohci, d->ctrlSet));
2388 		if (reg_read(ohci, d->ctrlSet) & 0x800)
2389 			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2390 		else
2391 			tasklet_schedule(&d->task);
2392 		event &= ~OHCI1394_RQPkt;
2393 	}
2394 	if (event & OHCI1394_RSPkt) {
2395 		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2396 		DBGMSG(ohci->id, "Got RSPkt interrupt status=0x%08X",
2397 		       reg_read(ohci, d->ctrlSet));
2398 		if (reg_read(ohci, d->ctrlSet) & 0x800)
2399 			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2400 		else
2401 			tasklet_schedule(&d->task);
2402 		event &= ~OHCI1394_RSPkt;
2403 	}
2404 	if (event & OHCI1394_isochRx) {
2405 		quadlet_t rx_event;
2406 
2407 		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2408 		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2409 		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2410 		event &= ~OHCI1394_isochRx;
2411 	}
2412 	if (event & OHCI1394_isochTx) {
2413 		quadlet_t tx_event;
2414 
2415 		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2416 		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2417 		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2418 		event &= ~OHCI1394_isochTx;
2419 	}
2420 	if (event & OHCI1394_selfIDComplete) {
2421 		if (host->in_bus_reset) {
2422 			node_id = reg_read(ohci, OHCI1394_NodeID);
2423 
2424 			if (!(node_id & 0x80000000)) {
2425 				PRINT(KERN_ERR, ohci->id,
2426 				      "SelfID received, but NodeID invalid "
2427 				      "(probably a new bus reset occurred): %08X",
2428 				      node_id);
2429 				goto selfid_not_valid;
2430 			}
2431 
2432 			phyid =  node_id & 0x0000003f;
2433 			isroot = (node_id & 0x40000000) != 0;
2434 
2435 			DBGMSG(ohci->id,
2436 			      "SelfID interrupt received "
2437 			      "(phyid %d, %s)", phyid,
2438 			      (isroot ? "root" : "not root"));
2439 
2440 			handle_selfid(ohci, host, phyid, isroot);
2441 
2442 			/* Clear the bus reset event and re-enable the
2443 			 * busReset interrupt.  */
2444 			spin_lock_irqsave(&ohci->event_lock, flags);
2445 			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2446 			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2447 			spin_unlock_irqrestore(&ohci->event_lock, flags);
2448 
2449 			/* Accept Physical requests from all nodes. */
2450 			reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2451 			reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2452 
2453 			/* Turn on phys dma reception.
2454 			 *
2455 			 * TODO: Enable some sort of filtering management.
2456 			 */
2457 			if (phys_dma) {
2458 				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2459 				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2460 				reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
2461 			} else {
2462 				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
2463 				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
2464 			}
2465 
2466 			DBGMSG(ohci->id, "PhyReqFilter=%08x%08x",
2467 			       reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2468 			       reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2469 
2470 			hpsb_selfid_complete(host, phyid, isroot);
2471 		} else
2472 			PRINT(KERN_ERR, ohci->id,
2473 			      "SelfID received outside of bus reset sequence");
2474 
2475 selfid_not_valid:
2476 		event &= ~OHCI1394_selfIDComplete;
2477 	}
2478 
2479 	/* Make sure we handle everything, just in case we accidentally
2480 	 * enabled an interrupt that we didn't write a handler for.  */
2481 	if (event)
2482 		PRINT(KERN_ERR, ohci->id, "Unhandled interrupt(s) 0x%08x",
2483 		      event);
2484 
2485 	return;
2486 }
2487 
2488 /* Put the buffer back into the dma context */
2489 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2490 {
2491 	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2492 	DBGMSG(ohci->id, "Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2493 
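	/* this descriptor becomes the new end of the chain (Z=0 in its
	 * branchAddress), and the previous descriptor is relinked to it
	 * by setting its Z field to 1 */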
2494 	d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2495 	d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2496 	idx = (idx + d->num_desc - 1 ) % d->num_desc;
2497 	d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
2498 
2499 	/* wake up the dma context if necessary */
2500 	if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2501 		PRINT(KERN_INFO, ohci->id,
2502 		      "Waking dma ctx=%d ... processing is probably too slow",
2503 		      d->ctx);
2504 	}
2505 
2506 	/* do this always, to avoid race condition */
2507 	reg_write(ohci, d->ctrlSet, 0x1000);
2508 }
2509 
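/* controllers that cannot byte-swap incoming data (e.g. Apple UniNorth,
 * flagged via ohci->no_swap_incoming in the probe code) deliver quadlets
 * already in host order, hence the conditional swap */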
2510 #define cond_le32_to_cpu(data, noswap) \
2511 	(noswap ? data : le32_to_cpu(data))
2512 
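/* bytes of header + status trailer per tcode; 0 marks block packets
 * whose total size must be computed from their data_length field, and
 * -1 marks tcodes that should never show up in an AR buffer */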
2513 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2514 			    -1, 0, -1, 0, -1, -1, 16, -1};
2515 
2516 /*
2517  * Determine the length of a packet in the buffer
2518  * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2519  */
2520 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2521 			 int offset, unsigned char tcode, int noswap)
2522 {
2523 	int length = -1;
2524 
2525 	if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2526 		length = TCODE_SIZE[tcode];
2527 		if (length == 0) {
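			/* block packet: the total length comes from the
			 * data_length field in header quadlet 3, which may
			 * have wrapped into the next buffer */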
2528 			if (offset + 12 >= d->buf_size) {
2529 				length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2530 						[3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2531 			} else {
2532 				length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2533 			}
2534 			length += 20;
2535 		}
2536 	} else if (d->type == DMA_CTX_ISO) {
2537 		/* Assumption: buffer fill mode with header/trailer */
2538 		length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2539 	}
2540 
2541 	if (length > 0 && length % 4)
2542 		length += 4 - (length % 4);
2543 
2544 	return length;
2545 }
2546 
2547 /* Tasklet that processes dma receive buffers */
2548 static void dma_rcv_tasklet (unsigned long data)
2549 {
2550 	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2551 	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2552 	unsigned int split_left, idx, offset, rescount;
2553 	unsigned char tcode;
2554 	int length, bytes_left, ack;
2555 	unsigned long flags;
2556 	quadlet_t *buf_ptr;
2557 	char *split_ptr;
2558 	char msg[256];
2559 
2560 	spin_lock_irqsave(&d->lock, flags);
2561 
2562 	idx = d->buf_ind;
2563 	offset = d->buf_offset;
2564 	buf_ptr = d->buf_cpu[idx] + offset/4;
2565 
2566 	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2567 	bytes_left = d->buf_size - rescount - offset;
2568 
2569 	while (bytes_left > 0) {
2570 		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2571 
2572 		/* packet_length() will return < 4 for an error */
2573 		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2574 
2575 		if (length < 4) { /* something is wrong */
2576 			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2577 				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2578 				d->ctx, length);
2579 			ohci1394_stop_context(ohci, d->ctrlClear, msg);
2580 			spin_unlock_irqrestore(&d->lock, flags);
2581 			return;
2582 		}
2583 
2584 		/* The first case is where we have a packet that crosses
2585 		 * over more than one descriptor. The next case is where
2586 		 * it's all in the first descriptor.  */
2587 		if ((offset + length) > d->buf_size) {
2588 			DBGMSG(ohci->id,"Split packet rcv'd");
2589 			if (length > d->split_buf_size) {
2590 				ohci1394_stop_context(ohci, d->ctrlClear,
2591 					     "Split packet size exceeded");
2592 				d->buf_ind = idx;
2593 				d->buf_offset = offset;
2594 				spin_unlock_irqrestore(&d->lock, flags);
2595 				return;
2596 			}
2597 
2598 			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2599 			    == d->buf_size) {
2600 				/* Other part of packet not written yet.
2601 				 * This should never happen; in any case,
2602 				 * we'll pick it up on the next call.  */
2603 				PRINT(KERN_INFO, ohci->id,
2604 				      "Got only half a packet!");
2605 				d->buf_ind = idx;
2606 				d->buf_offset = offset;
2607 				spin_unlock_irqrestore(&d->lock, flags);
2608 				return;
2609 			}
2610 
2611 			split_left = length;
2612 			split_ptr = (char *)d->spb;
2613 			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2614 			split_left -= d->buf_size-offset;
2615 			split_ptr += d->buf_size-offset;
2616 			insert_dma_buffer(d, idx);
2617 			idx = (idx+1) % d->num_desc;
2618 			buf_ptr = d->buf_cpu[idx];
2619 			offset=0;
2620 
2621 			while (split_left >= d->buf_size) {
2622 				memcpy(split_ptr,buf_ptr,d->buf_size);
2623 				split_ptr += d->buf_size;
2624 				split_left -= d->buf_size;
2625 				insert_dma_buffer(d, idx);
2626 				idx = (idx+1) % d->num_desc;
2627 				buf_ptr = d->buf_cpu[idx];
2628 			}
2629 
2630 			if (split_left > 0) {
2631 				memcpy(split_ptr, buf_ptr, split_left);
2632 				offset = split_left;
2633 				buf_ptr += offset/4;
2634 			}
2635 		} else {
2636 			DBGMSG(ohci->id,"Single packet rcv'd");
2637 			memcpy(d->spb, buf_ptr, length);
2638 			offset += length;
2639 			buf_ptr += length/4;
2640 			if (offset==d->buf_size) {
2641 				insert_dma_buffer(d, idx);
2642 				idx = (idx+1) % d->num_desc;
2643 				buf_ptr = d->buf_cpu[idx];
2644 				offset=0;
2645 			}
2646 		}
2647 
2648 		/* We get one phy packet to the async descriptor for each
2649 		 * bus reset. We always ignore it.  */
2650 		if (tcode != OHCI1394_TCODE_PHY) {
2651 			if (!ohci->no_swap_incoming)
2652 				packet_swab(d->spb, tcode);
2653 			DBGMSG(ohci->id, "Packet received from node"
2654 				" %d ack=0x%02X spd=%d tcode=0x%X"
2655 				" length=%d ctx=%d tlabel=%d",
2656 				(d->spb[1]>>16)&0x3f,
2657 				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2658 				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2659 				tcode, length, d->ctx,
2660 				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f);
2661 
2662 			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2663 				== 0x11) ? 1 : 0;
2664 
2665 			hpsb_packet_received(ohci->host, d->spb,
2666 					     length-4, ack);
2667 		}
2668 #ifdef OHCI1394_DEBUG
2669 		else
2670 			PRINT (KERN_DEBUG, ohci->id, "Got phy packet ctx=%d ... discarded",
2671 			       d->ctx);
2672 #endif
2673 
2674 		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2675 
2676 		bytes_left = d->buf_size - rescount - offset;
2677 
2678 	}
2679 
2680 	d->buf_ind = idx;
2681 	d->buf_offset = offset;
2682 
2683 	spin_unlock_irqrestore(&d->lock, flags);
2684 }
2685 
2686 /* Bottom half that processes sent packets */
2687 static void dma_trm_tasklet (unsigned long data)
2688 {
2689 	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2690 	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2691 	struct hpsb_packet *packet;
2692 	unsigned long flags;
2693 	u32 status, ack;
2694         size_t datasize;
2695 
2696 	spin_lock_irqsave(&d->lock, flags);
2697 
2698 	while (!list_empty(&d->fifo_list)) {
2699 		packet = driver_packet(d->fifo_list.next);
2700                 datasize = packet->data_size;
2701 		if (datasize && packet->type != hpsb_raw)
2702 			status = le32_to_cpu(
2703 				d->prg_cpu[d->sent_ind]->end.status) >> 16;
2704 		else
2705 			status = le32_to_cpu(
2706 				d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2707 
2708 		if (status == 0)
2709 			/* this packet hasn't been sent yet*/
2710 			break;
2711 
2712 #ifdef OHCI1394_DEBUG
2713 		if (datasize)
2714 			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2715 				DBGMSG(ohci->id,
2716 				       "Stream packet sent to channel %d tcode=0x%X "
2717 				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
2718 				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2719 				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2720 				       status&0x1f, (status>>5)&0x3,
2721 				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2722 				       d->ctx);
2723 			else
2724 				DBGMSG(ohci->id,
2725 				       "Packet sent to node %d tcode=0x%X tLabel="
2726 				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2727 				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2728                                         	>>16)&0x3f,
2729 				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2730                                         	>>4)&0xf,
2731 				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2732                                         	>>10)&0x3f,
2733 				       status&0x1f, (status>>5)&0x3,
2734 				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])
2735 				       		>>16,
2736 				       d->ctx);
2737 		else
2738 			DBGMSG(ohci->id,
2739 			       "Packet sent to node %d tcode=0x%X tLabel="
2740 			       "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
2741                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2742                                         >>16)&0x3f,
2743                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2744                                         >>4)&0xf,
2745                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2746                                         >>10)&0x3f,
2747                                 status&0x1f, (status>>5)&0x3,
2748                                 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2749                                 d->ctx);
2750 #endif
2751 
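		/* 5-bit event codes 0x10 and up carry an IEEE 1394 ack code
		 * in the low four bits; anything below 0x10 is an OHCI
		 * evt_* error, mapped to an ack code below */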
2752 		if (status & 0x10) {
2753 			ack = status & 0xf;
2754 		} else {
2755 			switch (status & 0x1f) {
2756 			case EVT_NO_STATUS: /* that should never happen */
2757 			case EVT_RESERVED_A: /* that should never happen */
2758 			case EVT_LONG_PACKET: /* that should never happen */
2759 				PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
2760 				ack = ACKX_SEND_ERROR;
2761 				break;
2762 			case EVT_MISSING_ACK:
2763 				ack = ACKX_TIMEOUT;
2764 				break;
2765 			case EVT_UNDERRUN:
2766 				ack = ACKX_SEND_ERROR;
2767 				break;
2768 			case EVT_OVERRUN: /* that should never happen */
2769 				PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
2770 				ack = ACKX_SEND_ERROR;
2771 				break;
2772 			case EVT_DESCRIPTOR_READ:
2773 			case EVT_DATA_READ:
2774 			case EVT_DATA_WRITE:
2775 				ack = ACKX_SEND_ERROR;
2776 				break;
2777 			case EVT_BUS_RESET: /* that should never happen */
2778 				PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
2779 				ack = ACKX_SEND_ERROR;
2780 				break;
2781 			case EVT_TIMEOUT:
2782 				ack = ACKX_TIMEOUT;
2783 				break;
2784 			case EVT_TCODE_ERR:
2785 				ack = ACKX_SEND_ERROR;
2786 				break;
2787 			case EVT_RESERVED_B: /* that should never happen */
2788 			case EVT_RESERVED_C: /* that should never happen */
2789 				PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
2790 				ack = ACKX_SEND_ERROR;
2791 				break;
2792 			case EVT_UNKNOWN:
2793 			case EVT_FLUSHED:
2794 				ack = ACKX_SEND_ERROR;
2795 				break;
2796 			default:
2797 				PRINT(KERN_ERR, ohci->id, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2798 				ack = ACKX_SEND_ERROR;
2799 				BUG();
2800 			}
2801 		}
2802 
2803                 list_del(&packet->driver_list);
2804 		hpsb_packet_sent(ohci->host, packet, ack);
2805 
2806 		if (datasize) {
2807 			pci_unmap_single(ohci->dev,
2808 					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2809 					 datasize, PCI_DMA_TODEVICE);
2810 			OHCI_DMA_FREE("single Xmit data packet");
2811 		}
2812 
2813 		d->sent_ind = (d->sent_ind+1)%d->num_desc;
2814 		d->free_prgs++;
2815 	}
2816 
2817 	dma_trm_flush(ohci, d);
2818 
2819 	spin_unlock_irqrestore(&d->lock, flags);
2820 }
2821 
2822 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2823 {
2824 	int i;
2825 
2826 	if (d->ohci == NULL)
2827 		return;
2828 
2829 	DBGMSG(d->ohci->id, "Freeing dma_rcv_ctx %d", d->ctx);
2830 
2831 	if (d->ctrlClear) {
2832 		ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2833 
2834 		if (d->type == DMA_CTX_ISO) {
2835 			/* disable interrupts */
2836 			reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2837 			ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2838 		} else {
2839 			tasklet_kill(&d->task);
2840 		}
2841 	}
2842 
2843 	if (d->buf_cpu) {
2844 		for (i=0; i<d->num_desc; i++)
2845 			if (d->buf_cpu[i] && d->buf_bus[i]) {
2846 				pci_free_consistent(
2847 					d->ohci->dev, d->buf_size,
2848 					d->buf_cpu[i], d->buf_bus[i]);
2849 				OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2850 			}
2851 		kfree(d->buf_cpu);
2852 		kfree(d->buf_bus);
2853 	}
2854 	if (d->prg_cpu) {
2855 		for (i=0; i<d->num_desc; i++)
2856 			if (d->prg_cpu[i] && d->prg_bus[i]) {
2857 				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2858 				OHCI_DMA_FREE("pool dma_rcv prg[%d]", i);
2859 			}
2860 		pci_pool_destroy(d->prg_pool);
2861 		OHCI_DMA_FREE("dma_rcv prg pool");
2862 		kfree(d->prg_cpu);
2863 		kfree(d->prg_bus);
2864 	}
2865 	if (d->spb) kfree(d->spb);
2866 
2867 	/* Mark this context as freed. */
2868 	d->ohci = NULL;
2869 }
2870 
2871 static int
2872 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2873 		  enum context_type type, int ctx, int num_desc,
2874 		  int buf_size, int split_buf_size, int context_base)
2875 {
2876 	int i;
2877 
2878 	d->ohci = ohci;
2879 	d->type = type;
2880 	d->ctx = ctx;
2881 
2882 	d->num_desc = num_desc;
2883 	d->buf_size = buf_size;
2884 	d->split_buf_size = split_buf_size;
2885 
2886 	d->ctrlSet = 0;
2887 	d->ctrlClear = 0;
2888 	d->cmdPtr = 0;
2889 
2890 	d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_KERNEL);
2891 	d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2892 
2893 	if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2894 		PRINT(KERN_ERR, ohci->id, "Failed to allocate dma buffer");
2895 		free_dma_rcv_ctx(d);
2896 		return -ENOMEM;
2897 	}
2898 	memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2899 	memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2900 
2901 	d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2902 			     GFP_KERNEL);
2903 	d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2904 
2905 	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2906 		PRINT(KERN_ERR, ohci->id, "Failed to allocate dma prg");
2907 		free_dma_rcv_ctx(d);
2908 		return -ENOMEM;
2909 	}
2910 	memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2911 	memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2912 
2913 	d->spb = kmalloc(d->split_buf_size, GFP_KERNEL);
2914 
2915 	if (d->spb == NULL) {
2916 		PRINT(KERN_ERR, ohci->id, "Failed to allocate split buffer");
2917 		free_dma_rcv_ctx(d);
2918 		return -ENOMEM;
2919 	}
2920 
2921 	d->prg_pool = pci_pool_create("ohci1394 rcv prg", ohci->dev,
2922 				      sizeof(struct dma_cmd), 4, 0, SLAB_KERNEL);
2923 	OHCI_DMA_ALLOC("dma_rcv prg pool");
2924 
2925 	for (i=0; i<d->num_desc; i++) {
2926 		d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2927 						     d->buf_size,
2928 						     d->buf_bus+i);
2929 		OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
2930 
2931 		if (d->buf_cpu[i] != NULL) {
2932 			memset(d->buf_cpu[i], 0, d->buf_size);
2933 		} else {
2934 			PRINT(KERN_ERR, ohci->id,
2935 			      "Failed to allocate dma buffer");
2936 			free_dma_rcv_ctx(d);
2937 			return -ENOMEM;
2938 		}
2939 
2940 		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
2941 		OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
2942 
2943                 if (d->prg_cpu[i] != NULL) {
2944                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2945 		} else {
2946 			PRINT(KERN_ERR, ohci->id,
2947 			      "Failed to allocate dma prg");
2948 			free_dma_rcv_ctx(d);
2949 			return -ENOMEM;
2950 		}
2951 	}
2952 
2953         spin_lock_init(&d->lock);
2954 
2955 	if (type == DMA_CTX_ISO) {
2956 		ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
2957 					  OHCI_ISO_MULTICHANNEL_RECEIVE,
2958 					  dma_rcv_tasklet, (unsigned long) d);
2959 		if (ohci1394_register_iso_tasklet(ohci,
2960 						  &ohci->ir_legacy_tasklet) < 0) {
2961 			PRINT(KERN_ERR, ohci->id, "No IR DMA context available");
2962 			free_dma_rcv_ctx(d);
2963 			return -EBUSY;
2964 		}
2965 
2966 		/* the IR context can be assigned to any DMA context
2967 		 * by ohci1394_register_iso_tasklet */
2968 		d->ctx = ohci->ir_legacy_tasklet.context;
2969 		d->ctrlSet = OHCI1394_IsoRcvContextControlSet + 32*d->ctx;
2970 		d->ctrlClear = OHCI1394_IsoRcvContextControlClear + 32*d->ctx;
2971 		d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
2972 		d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
2973 	} else {
2974 		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2975 		d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2976 		d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2977 
2978 		tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
2979 	}
2980 
2981 	return 0;
2982 }
2983 
2984 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
2985 {
2986 	int i;
2987 
2988 	if (d->ohci == NULL)
2989 		return;
2990 
2991 	DBGMSG(d->ohci->id, "Freeing dma_trm_ctx %d", d->ctx);
2992 
2993 	if (d->ctrlClear) {
2994 		ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2995 
2996 		if (d->type == DMA_CTX_ISO) {
2997 			/* disable interrupts */
2998 			reg_write(d->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << d->ctx);
2999 			ohci1394_unregister_iso_tasklet(d->ohci,
3000 							&d->ohci->it_legacy_tasklet);
3001 		} else {
3002 			tasklet_kill(&d->task);
3003 		}
3004 	}
3005 
3006 	if (d->prg_cpu) {
3007 		for (i=0; i<d->num_desc; i++)
3008 			if (d->prg_cpu[i] && d->prg_bus[i]) {
3009 				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3010 				OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3011 			}
3012 		pci_pool_destroy(d->prg_pool);
3013 		OHCI_DMA_FREE("dma_trm prg pool");
3014 		kfree(d->prg_cpu);
3015 		kfree(d->prg_bus);
3016 	}
3017 
3018 	/* Mark this context as freed. */
3019 	d->ohci = NULL;
3020 }
3021 
3022 static int
3023 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3024 		  enum context_type type, int ctx, int num_desc,
3025 		  int context_base)
3026 {
3027 	int i;
3028 
3029 	d->ohci = ohci;
3030 	d->type = type;
3031 	d->ctx = ctx;
3032 	d->num_desc = num_desc;
3033 	d->ctrlSet = 0;
3034 	d->ctrlClear = 0;
3035 	d->cmdPtr = 0;
3036 
3037 	d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3038 			     GFP_KERNEL);
3039 	d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3040 
3041 	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3042 		PRINT(KERN_ERR, ohci->id, "Failed to allocate at dma prg");
3043 		free_dma_trm_ctx(d);
3044 		return -ENOMEM;
3045 	}
3046 	memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3047 	memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3048 
3049 	d->prg_pool = pci_pool_create("ohci1394 trm prg", ohci->dev,
3050 				      sizeof(struct at_dma_prg), 4, 0, SLAB_KERNEL);
3051 	OHCI_DMA_ALLOC("dma_trm prg pool");
3052 
3053 	for (i = 0; i < d->num_desc; i++) {
3054 		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3055 		OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3056 
3057                 if (d->prg_cpu[i] != NULL) {
3058                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3059 		} else {
3060 			PRINT(KERN_ERR, ohci->id,
3061 			      "Failed to allocate at dma prg");
3062 			free_dma_trm_ctx(d);
3063 			return -ENOMEM;
3064 		}
3065 	}
3066 
3067         spin_lock_init(&d->lock);
3068 
3069 	/* initialize tasklet */
3070 	if (type == DMA_CTX_ISO) {
3071 		ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3072 					  dma_trm_tasklet, (unsigned long) d);
3073 		if (ohci1394_register_iso_tasklet(ohci,
3074 						  &ohci->it_legacy_tasklet) < 0) {
3075 			PRINT(KERN_ERR, ohci->id, "No IT DMA context available");
3076 			free_dma_trm_ctx(d);
3077 			return -EBUSY;
3078 		}
3079 
3080 		/* IT can be assigned to any context by register_iso_tasklet */
3081 		d->ctx = ohci->it_legacy_tasklet.context;
3082 		d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3083 		d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3084 		d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3085 	} else {
3086 		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3087 		d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3088 		d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3089 		tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3090 	}
3091 
3092 	return 0;
3093 }
3094 
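/* 16-bit CRC defined by IEEE 1212 (polynomial x^16 + x^12 + x^5 + 1)
 * over 'length' big-endian quadlets, processed four bits at a time */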
3095 static u16 ohci_crc16 (u32 *ptr, int length)
3096 {
3097 	int shift;
3098 	u32 crc, sum, data;
3099 
3100 	crc = 0;
3101 	for (; length > 0; length--) {
3102 		data = be32_to_cpu(*ptr++);
3103 		for (shift = 28; shift >= 0; shift -= 4) {
3104 			sum = ((crc >> 12) ^ (data >> shift)) & 0x000f;
3105 			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
3106 		}
3107 		crc &= 0xffff;
3108 	}
3109 	return crc;
3110 }
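
/* For reference only - not called anywhere: a bit-serial equivalent of
 * the nibble-at-a-time loop above, assuming the same CRC-CCITT
 * polynomial (x^16 + x^12 + x^5 + 1). It merely documents what
 * ohci_crc16() computes; the driver always uses the faster version. */
static inline u16 ohci_crc16_bitwise(u32 *ptr, int length)
{
	u32 crc = 0, data;
	int bit;

	for (; length > 0; length--) {
		data = be32_to_cpu(*ptr++);
		for (bit = 31; bit >= 0; bit--) {
			/* feedback bit = MSB of crc XOR next message bit */
			int fb = ((crc >> 15) ^ (data >> bit)) & 1;
			crc = (crc << 1) & 0xffff;
			if (fb)
				crc ^= 0x1021;
		}
	}
	return crc;
}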
3111 
3112 /* Config ROM macro implementation influenced by NetBSD OHCI driver */
3113 
3114 struct config_rom_unit {
3115 	u32 *start;
3116 	u32 *refer;
3117 	int length;
3118 	int refunit;
3119 };
3120 
3121 struct config_rom_ptr {
3122 	u32 *data;
3123 	int unitnum;
3124 	struct config_rom_unit unitdir[10];
3125 };
3126 
3127 #define cf_put_1quad(cr, q) (((cr)->data++)[0] = cpu_to_be32(q))
3128 
3129 #define cf_put_4bytes(cr, b1, b2, b3, b4) \
3130 	(((cr)->data++)[0] = cpu_to_be32(((b1) << 24) | ((b2) << 16) | ((b3) << 8) | (b4)))
3131 
3132 #define cf_put_keyval(cr, key, val) (((cr)->data++)[0] = cpu_to_be32(((key) << 24) | (val)))
3133 
3134 static inline void cf_put_str(struct config_rom_ptr *cr, const char *str)
3135 {
3136 	int t;
3137 	char fourb[4];
3138 
3139 	while (str[0]) {
3140 		memset(fourb, 0, 4);
3141 		for (t = 0; t < 4 && str[t]; t++)
3142 			fourb[t] = str[t];
3143 		cf_put_4bytes(cr, fourb[0], fourb[1], fourb[2], fourb[3]);
3144 		str += strlen(str) < 4 ? strlen(str) : 4;
3145 	}
3146 	return;
3147 }
3148 
3149 static inline void cf_put_crc16(struct config_rom_ptr *cr, int unit)
3150 {
3151 	*cr->unitdir[unit].start =
3152 		cpu_to_be32((cr->unitdir[unit].length << 16) |
3153 			    ohci_crc16(cr->unitdir[unit].start + 1,
3154 				       cr->unitdir[unit].length));
3155 }
3156 
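/* cf_put_refer() emits a directory entry whose offset is still zero;
 * when the referenced unit is later opened, cf_unit_begin() patches the
 * quadlet offset into that entry and re-runs the CRC over the referring
 * unit */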
3157 static inline void cf_unit_begin(struct config_rom_ptr *cr, int unit)
3158 {
3159 	if (cr->unitdir[unit].refer != NULL) {
3160 		*cr->unitdir[unit].refer |=
3161 			cpu_to_be32 (cr->data - cr->unitdir[unit].refer);
3162 		cf_put_crc16(cr, cr->unitdir[unit].refunit);
3163 	}
3164 	cr->unitnum = unit;
3165 	cr->unitdir[unit].start = cr->data++;
3166 }
3167 
3168 static inline void cf_put_refer(struct config_rom_ptr *cr, char key, int unit)
3169 {
3170 	cr->unitdir[unit].refer = cr->data;
3171 	cr->unitdir[unit].refunit = cr->unitnum;
3172 	(cr->data++)[0] = cpu_to_be32(key << 24);
3173 }
3174 
3175 static inline void cf_unit_end(struct config_rom_ptr *cr)
3176 {
3177 	cr->unitdir[cr->unitnum].length = cr->data -
3178 		(cr->unitdir[cr->unitnum].start + 1);
3179 	cf_put_crc16(cr, cr->unitnum);
3180 }
3181 
3182 /* End of NetBSD derived code.  */
3183 
3184 static void ohci_init_config_rom(struct ti_ohci *ohci)
3185 {
3186 	struct config_rom_ptr cr;
3187 
3188 	memset(&cr, 0, sizeof(cr));
3189 	memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3190 
3191 	cr.data = ohci->csr_config_rom_cpu;
3192 
3193 	/* Bus info block */
3194 	cf_unit_begin(&cr, 0);
3195 	cf_put_1quad(&cr, reg_read(ohci, OHCI1394_BusID));
3196 	cf_put_1quad(&cr, reg_read(ohci, OHCI1394_BusOptions));
3197 	cf_put_1quad(&cr, reg_read(ohci, OHCI1394_GUIDHi));
3198 	cf_put_1quad(&cr, reg_read(ohci, OHCI1394_GUIDLo));
3199 	cf_unit_end(&cr);
3200 
3201 	DBGMSG(ohci->id, "GUID: %08x:%08x", reg_read(ohci, OHCI1394_GUIDHi),
3202 		reg_read(ohci, OHCI1394_GUIDLo));
3203 
3204 	/* IEEE P1212 suggests the initial ROM header CRC should only
3205 	 * cover the header itself (and not the entire ROM). Since we do
3206 	 * this, we can make our bus_info_len the same as the CRC
3207 	 * length.  */
3208 	ohci->csr_config_rom_cpu[0] |= cpu_to_be32(
3209 		(be32_to_cpu(ohci->csr_config_rom_cpu[0]) & 0x00ff0000) << 8);
3210 	reg_write(ohci, OHCI1394_ConfigROMhdr,
3211 		  be32_to_cpu(ohci->csr_config_rom_cpu[0]));
3212 
3213 	/* Root directory */
3214 	cf_unit_begin(&cr, 1);
3215 	/* Vendor ID */
3216 	cf_put_keyval(&cr, 0x03, reg_read(ohci,OHCI1394_VendorID) & 0xFFFFFF);
3217 	cf_put_refer(&cr, 0x81, 2);		/* Textual description unit */
3218 	cf_put_keyval(&cr, 0x0c, 0x0083c0);	/* Node capabilities */
3219 	/* NOTE: Add other unit referers here, and append at bottom */
3220 	cf_unit_end(&cr);
3221 
3222 	/* Textual description - "Linux 1394" */
3223 	cf_unit_begin(&cr, 2);
3224 	cf_put_keyval(&cr, 0, 0);
3225 	cf_put_1quad(&cr, 0);
3226 	cf_put_str(&cr, "Linux OHCI-1394");
3227 	cf_unit_end(&cr);
3228 
3229 	ohci->csr_config_rom_length = cr.data - ohci->csr_config_rom_cpu;
3230 }
3231 
3232 static size_t ohci_get_rom(struct hpsb_host *host, quadlet_t **ptr)
3233 {
3234 	struct ti_ohci *ohci=host->hostdata;
3235 
3236 	DBGMSG(ohci->id, "request csr_rom address: %p",
3237 		ohci->csr_config_rom_cpu);
3238 
3239 	*ptr = ohci->csr_config_rom_cpu;
3240 
3241 	return ohci->csr_config_rom_length * 4;
3242 }
3243 
3244 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3245                                  quadlet_t data, quadlet_t compare)
3246 {
3247 	struct ti_ohci *ohci = host->hostdata;
3248 	int i;
3249 
3250 	reg_write(ohci, OHCI1394_CSRData, data);
3251 	reg_write(ohci, OHCI1394_CSRCompareData, compare);
3252 	reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3253 
3254 	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3255 		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3256 			break;
3257 
3258 		mdelay(1);
3259 	}
3260 
3261 	return reg_read(ohci, OHCI1394_CSRData);
3262 }
3263 
static struct hpsb_host_driver ohci1394_driver = {
	.name =			OHCI1394_DRIVER_NAME,
	.get_rom =		ohci_get_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};



/***********************************
 * PCI Driver Interface functions  *
 ***********************************/

#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)

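/* FAIL() backs out of a half-finished probe: ohci1394_pci_remove() keys
 * its teardown off ohci->init_state, so it only undoes the steps that
 * have actually completed so far (see the switch fall-through below). */
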
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	static unsigned int card_id_counter = 0;
	static int version_printed = 0;

	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to the currently handled device */
	unsigned long ohci_base;

	if (version_printed++ == 0)
		PRINT_G(KERN_INFO, "%s", version);

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware %d",
		     card_id_counter++);
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci));
	if (!host)
		FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->id = card_id_counter++;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not byte-order the self-ID
	 * data properly, so we make up for it here.  */
#ifndef __LITTLE_ENDIAN
	/* XXX: We need a better way to check this.  Perhaps we could read
	 * back the OHCI1394_PCI_HCI_Control and noByteSwapData registers
	 * to see whether they were cleared to zero.  Would that work?
	 * Obviously it's not defined what these registers read as when
	 * they aren't supported.  Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif

#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a bus reset.  */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adapters fail
	 * to report the right length.  The OHCI spec clearly says it's
	 * 2KB, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, ohci->id, "Unexpected PCI resource length of %lx!",
		      pci_resource_len(dev, 0));

	/* Seems PCMCIA handles this internally.  Not sure why, and it
	 * seems pretty bogus to force a driver to special case this.  */
#ifndef PCMCIA
	if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
		     ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
#endif
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG(ohci->id, "Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate config rom buffer");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				     &ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, ohci->id, "SelfID buffer %p is not aligned on "
		      "an 8KB boundary... may cause problems on some CXD3222 chips",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * the full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	DBGMSG(ohci->id, "%d iso receive contexts available",
	       ohci->nb_iso_rcv_ctx);

	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
	DBGMSG(ohci->id, "%d iso transmit contexts available",
	       ohci->nb_iso_xmit_ctx);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
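	/* For example (illustrative values): with 4 receive contexts,
	 * ~0 << 4 == 0xfffffff0, so bits 0-3 remain clear and are the
	 * only ones ohci1394_register_iso_tasklet() can ever claim. */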

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* the IR DMA context is allocated on-demand; mark it inactive */
	ohci->ir_legacy_context.ohci = NULL;

	/* same for the IT DMA context */
	ohci->it_legacy_context.ohci = NULL;

	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
			OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Tell the highlevel this host is ready */
	hpsb_add_host(host);
	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}

static void ohci1394_pci_remove(struct pci_dev *pdev)
{
	struct ti_ohci *ohci;

	ohci = pci_get_drvdata(pdev);
	if (!ohci)
		return;

	/* The cases below intentionally fall through: starting from the
	 * state the probe reached, each one undoes its own step and then
	 * continues with everything that was set up before it. */
	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);

	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we leave - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(ohci->dev->irq, ohci);

	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* Free AR dma */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);

		/* Free AT dma */
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);

		/* Free IR dma */
		free_dma_rcv_ctx(&ohci->ir_legacy_context);

		/* Free IT dma */
		free_dma_trm_ctx(&ohci->it_legacy_context);

	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);
		OHCI_DMA_FREE("consistent selfid_buf");

	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);
		OHCI_DMA_FREE("consistent csr_config_rom");

	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);

	case OHCI_INIT_HAVE_MEM_REGION:
#ifndef PCMCIA
		release_mem_region(pci_resource_start(ohci->dev, 0),
				   OHCI1394_REGISTER_SIZE);
#endif

#ifdef CONFIG_ALL_PPC
	/* On UniNorth, power down the cable and turn off the chip clock
	 * when the module is removed to save power on laptops.  Turning
	 * it back on is done by the arch code when pci_enable_device()
	 * is called. */
	{
		struct device_node* of_node;

		of_node = pci_device_to_OF_node(ohci->dev);
		if (of_node) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
		}
	}
#endif /* CONFIG_ALL_PPC */

	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
		hpsb_unref_host(ohci->host);
	}
}


#ifdef CONFIG_PM
static int ohci1394_pci_resume(struct pci_dev *dev)
{
	pci_enable_device(dev);
	return 0;
}
#endif


#define PCI_CLASS_FIREWIRE_OHCI     ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

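/* With PCI_CLASS_SERIAL_FIREWIRE == 0x0c00, this works out to class code
 * 0x0c0010: base class 0x0c (serial bus), subclass 0x00 (FireWire),
 * programming interface 0x10 (OHCI).  Matching on the class code is what
 * lets the single table entry below cover every OHCI controller. */
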
static struct pci_device_id ohci1394_pci_tbl[] __devinitdata = {
	{
		.class =	PCI_CLASS_FIREWIRE_OHCI,
		.class_mask =	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);

static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,

#ifdef CONFIG_PM
	.resume =	ohci1394_pci_resume,
#endif /* PM */
};



/***********************************
 * OHCI1394 Video Interface        *
 ***********************************/

/* Essentially the only purpose of this code is to allow another module
 * to hook into ohci1394's interrupt handler: a client claims an iso DMA
 * context here and has its tasklet scheduled whenever that context
 * raises an interrupt.  See the usage sketch after these functions. */

int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
{
	int i = 0;

	/* stop the channel program if it's still running; callers pass
	 * the ContextControlClear register, so writing 0x8000 clears
	 * the run bit */
	reg_write(ohci, reg, 0x8000);

	/* Wait until it effectively stops, i.e. the active bit drops.
	 * 5000 polls at 10us each bounds this at roughly 50ms. */
	while (reg_read(ohci, reg) & 0x400) {
		i++;
		if (i > 5000) {
			PRINT(KERN_ERR, ohci->id,
			      "Runaway loop while stopping context: %s...", msg ? msg : "");
			return 1;
		}

		mb();
		udelay(10);
	}
	if (msg)
		PRINT(KERN_ERR, ohci->id, "%s: dma prg stopped", msg);
	return 0;
}

void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
			       void (*func)(unsigned long), unsigned long data)
{
	tasklet_init(&tasklet->tasklet, func, data);
	tasklet->type = type;
	/* We init the tasklet->link field, so we can list_del() it
	 * without worrying whether it was added to the list or not. */
	INIT_LIST_HEAD(&tasklet->link);
}

int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
				  struct ohci1394_iso_tasklet *tasklet)
{
	unsigned long flags, *usage;
	int n, i, r = -EBUSY;

	if (tasklet->type == OHCI_ISO_TRANSMIT) {
		n = ohci->nb_iso_xmit_ctx;
		usage = &ohci->it_ctx_usage;
	} else {
		n = ohci->nb_iso_rcv_ctx;
		usage = &ohci->ir_ctx_usage;

		/* only one receive context can be multichannel (OHCI sec 10.4.1) */
		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
			if (test_and_set_bit(0, &ohci->ir_multichannel_used))
				return r;
		}
	}

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);

	/* claim the first free context, if any */
	for (i = 0; i < n; i++)
		if (!test_and_set_bit(i, usage)) {
			tasklet->context = i;
			list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
			r = 0;
			break;
		}

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);

	return r;
}

void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
				     struct ohci1394_iso_tasklet *tasklet)
{
	unsigned long flags;

	tasklet_kill(&tasklet->tasklet);

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);

	if (tasklet->type == OHCI_ISO_TRANSMIT)
		clear_bit(tasklet->context, &ohci->it_ctx_usage);
	else {
		clear_bit(tasklet->context, &ohci->ir_ctx_usage);

		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE)
			clear_bit(0, &ohci->ir_multichannel_used);
	}

	list_del(&tasklet->link);

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}

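/* A usage sketch for the API above (hypothetical client module; the
 * handler name and priv pointer are made up for illustration):
 *
 *	static void my_iso_handler(unsigned long data)
 *	{
 *		// runs in tasklet context after an iso interrupt
 *	}
 *
 *	struct ohci1394_iso_tasklet t;
 *
 *	ohci1394_init_iso_tasklet(&t, OHCI_ISO_RECEIVE,
 *				  my_iso_handler, (unsigned long)priv);
 *	if (ohci1394_register_iso_tasklet(ohci, &t) < 0)
 *		return -EBUSY;	// all contexts in use
 *	// t.context now names the DMA context the client may program
 *	...
 *	ohci1394_unregister_iso_tasklet(ohci, &t);
 */
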
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);


/***********************************
 * General module initialization   *
 ***********************************/

MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
MODULE_LICENSE("GPL");

static void __exit ohci1394_cleanup(void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}

static int __init ohci1394_init(void)
{
	return pci_module_init(&ohci1394_pci_driver);
}

module_init(ohci1394_init);
module_exit(ohci1394_cleanup);