/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_ca91cx42.h"

static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

/* Module parameters */
static int geoid;

static const char driver_name[] = "vme_ca91cx42";

static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};

static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->dma_queue);

	return CA91CX42_LINT_DMA;
}

static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			bridge->lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}

/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
	wake_up(&bridge->mbox_queue);

	return CA91CX42_LINT_MBOX;
}

static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return CA91CX42_LINT_SW_IACK;
}

static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
	int val;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	val = ioread32(bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
			"Read Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}

static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
	int val;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	val = ioread32(bridge->base + DGCS);

	if (!(val & 0x00000800))
		dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
			"Read Error DGCS=%08X\n", val);

	return CA91CX42_LINT_LERR;
}

static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
	int stat)
{
	int vec, i, serviced = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			vme_irq_handler(ca91cx42_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}

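/*
 * Top level interrupt handler. Reads the local interrupt status, masks it
 * against the currently enabled sources and dispatches each pending source
 * to the handlers above. The serviced bits are then written back to
 * LINT_STAT to acknowledge them.
 */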
static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32(serviced, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}

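/*
 * Initialise the interrupt hardware. All sources are disabled and any stale
 * status cleared before the (shared) PCI interrupt is requested, so nothing
 * can fire before the handler is in place.
 */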
static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);

	mutex_init(&ca91cx42_bridge->irq_mtx);

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
		       pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}

static void ca91cx42_irq_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* The dev_id must match the one passed to request_irq() */
	free_irq(pdev->irq, ca91cx42_bridge);
}

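/*
 * Wait condition for ca91cx42_irq_generate(): the generated interrupt has
 * been acknowledged once its level is no longer pending in LINT_STAT.
 */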
static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
{
	u32 tmp;

	tmp = ioread32(bridge->base + LINT_STAT);

	if (tmp & (1 << level))
		return 0;
	else
		return 1;
}

/*
 * Set up a VME interrupt
 */
static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Enable IRQ level */
	tmp = ioread32(bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}
}

static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&bridge->vme_int);

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(bridge->iack_queue,
				 ca91cx42_iack_received(bridge, level));

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&bridge->vme_int);

	return 0;
}

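/*
 * Configure a slave (VME to PCI) window. The VSI registers take an absolute
 * base and bound address on the VME bus plus a translation offset onto the
 * PCI bus; images 0 and 4 have a 4K granularity, the remaining images 64K.
 */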
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, u32 aspace, u32 cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME base "
			"alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
			"alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}

static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)*vme_base + pci_offset;
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}

/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *ca91cx42_bridge;

	ca91cx42_bridge = image->parent;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(struct resource));
	}

	if (image->bus_resource.name == NULL) {
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (image->bus_resource.name == NULL) {
			dev_err(ca91cx42_bridge->parent, "Unable to allocate "
				"memory for resource name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
	return retval;
}

/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}

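/*
 * Configure a master (PCI to VME) window. PCI bus space is allocated with
 * ca91cx42_alloc_resource() and the LSI registers are then programmed with
 * the PCI base and bound addresses plus the offset that translates accesses
 * onto the requested VME address. As with the slave windows, images 0 and 4
 * have a 4K granularity, the others 64K.
 */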
static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
			"for resource name\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&image->lock);
	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}

static int __ca91cx42_master_get(struct vme_master_resource *image,
	int *enabled, unsigned long long *vme_base, unsigned long long *size,
	u32 *aspace, u32 *cycle, u32 *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (unsigned long long)(pci_bound - pci_base);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

	return 0;
}

static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	int retval;

	spin_lock(&image->lock);

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&image->lock);

	return retval;
}

static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* The following code handles VME address alignment in order to
	 * assure the maximal data width cycle. We cannot use memcpy_xxx
	 * directly here, as it may cut the data transfer into 8-bit cycles
	 * and so make a D16 cycle impossible. The bridge itself, on the
	 * other hand, ensures that the maximal configured data cycle is
	 * used and splits it automatically for non-aligned addresses.
	 */
	if ((uintptr_t)addr & 0x1) {
		*(u8 *)buf = ioread8(addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)addr & 0x2) {
		if ((count - done) < 2) {
			*(u8 *)(buf + done) = ioread8(addr + done);
			done += 1;
			goto out;
		} else {
			*(u16 *)(buf + done) = ioread16(addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	if (count32 > 0) {
		memcpy_fromio(buf + done, addr + done, count32);
		done += count32;
	}

	if ((count - done) & 0x2) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		*(u8 *)(buf + done) = ioread8(addr + done);
		done += 1;
	}
out:
	retval = count;
	spin_unlock(&image->lock);

	return retval;
}

static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* Here we apply the same strategy as in the master_read function in
	 * order to assure a D16 cycle when required.
	 */
	if ((uintptr_t)addr & 0x1) {
		iowrite8(*(u8 *)buf, addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)addr & 0x2) {
		if ((count - done) < 2) {
			iowrite8(*(u8 *)(buf + done), addr + done);
			done += 1;
			goto out;
		} else {
			iowrite16(*(u16 *)(buf + done), addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	if (count32 > 0) {
		memcpy_toio(addr + done, buf + done, count32);
		done += count32;
	}

	if ((count - done) & 0x2) {
		iowrite16(*(u16 *)(buf + done), addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		iowrite8(*(u8 *)(buf + done), addr + done);
		done += 1;
	}
out:
	retval = count;

	spin_unlock(&image->lock);

	return retval;
}

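/*
 * Perform a read-modify-write cycle using the special cycle generator: the
 * mask, compare and swap values are programmed into the SCYC registers and
 * the cycle itself is triggered by a read of the target address through the
 * master window.
 */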
static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	u32 result;
	uintptr_t pci_addr;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = image->parent->driver_priv;
	dev = image->parent->parent;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	pci_addr = (uintptr_t)image->kern_base + offset;

	/* Address must be 4-byte aligned */
	if (pci_addr & 0x3) {
		dev_err(dev, "RMW Address not 4-byte aligned\n");
		result = -EINVAL;
		goto out;
	}

	/* Ensure RMW Disabled whilst configuring */
	iowrite32(0, bridge->base + SCYC_CTL);

	/* Configure registers */
	iowrite32(mask, bridge->base + SCYC_EN);
	iowrite32(compare, bridge->base + SCYC_CMP);
	iowrite32(swap, bridge->base + SCYC_SWP);
	iowrite32(pci_addr, bridge->base + SCYC_ADDR);

	/* Enable RMW */
	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

	/* Kick process off with a read to the required address. */
	result = ioread32(image->kern_base + offset);

	/* Disable RMW */
	iowrite32(0, bridge->base + SCYC_CTL);

out:
	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}

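/*
 * Add a transfer to a DMA list. Each entry embeds a hardware descriptor;
 * descriptors are chained by writing the bus address of the new descriptor
 * into the next-pointer (DCPP) field of the previous one, with the final
 * entry marked as the end of the chain.
 */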
static int ca91cx42_dma_list_add(struct vme_dma_list *list,
	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
	struct ca91cx42_dma_entry *entry, *prev;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	dma_addr_t desc_ptr;
	int retval = 0;
	struct device *dev;

	dev = list->parent->parent->parent;

	/* XXX descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
	if (entry == NULL) {
		dev_err(dev, "Failed to allocate memory for dma resource "
			"structure\n");
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment */
	if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
		dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
			"required: %p\n", &entry->descriptor);
		retval = -EINVAL;
		goto err_align;
	}

	memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));

	if (dest->type == VME_DMA_VME) {
		entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
		vme_attr = dest->private;
		pci_attr = src->private;
	} else {
		vme_attr = src->private;
		pci_attr = dest->private;
	}

	/* Check that we can fulfill the required attributes */
	if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
		VME_USER2)) != 0) {

		dev_err(dev, "Unsupported address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
		VME_PROG | VME_DATA)) != 0) {

		dev_err(dev, "Unsupported cycle type\n");
		retval = -EINVAL;
		goto err_cycle;
	}

	/* Check to see if we can fulfill source and destination */
	if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
		((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {

		dev_err(dev, "Cannot perform transfer with this "
			"source-destination combination\n");
		retval = -EINVAL;
		goto err_direct;
	}

	/* Setup cycle types */
	if (vme_attr->cycle & VME_BLT)
		entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;

	/* Setup data width */
	switch (vme_attr->dwidth) {
	case VME_D8:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
		break;
	case VME_D16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
		break;
	case VME_D32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
		break;
	case VME_D64:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	switch (vme_attr->aspace) {
	case VME_A16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
		break;
	case VME_A24:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
		break;
	case VME_A32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
		break;
	case VME_USER1:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
		break;
	case VME_USER2:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	if (vme_attr->cycle & VME_SUPER)
		entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
	if (vme_attr->cycle & VME_PROG)
		entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;

	entry->descriptor.dtbc = count;
	entry->descriptor.dla = pci_attr->address;
	entry->descriptor.dva = vme_attr->address;
	entry->descriptor.dcpp = CA91CX42_DCPP_NULL;

	/* Add to list */
	list_add_tail(&entry->list, &list->entries);

	/* Fill out previous descriptor's "Next Address" */
	if (entry->list.prev != &list->entries) {
		prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
			list);
		/* We need the bus address for the pointer */
		desc_ptr = virt_to_bus(&entry->descriptor);
		prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
	}

	return 0;

err_dwidth:
err_cycle:
err_aspace:
err_direct:
err_align:
	kfree(entry);
err_mem:
	return retval;
}

static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	tmp = ioread32(bridge->base + DGCS);

	if (tmp & CA91CX42_DGCS_ACT)
		return 0;
	else
		return 1;
}

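/*
 * Execute a DMA list: the bus address of the first descriptor is written to
 * DCPP, chained mode is selected in DGCS and the GO bit kicks off the
 * transfer. We then sleep on the DMA queue until the interrupt handler
 * reports the engine is no longer active.
 */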
static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	struct ca91cx42_dma_entry *entry;
	int retval = 0;
	dma_addr_t bus_addr;
	u32 val;
	struct device *dev;
	struct ca91cx42_driver *bridge;

	ctrlr = list->parent;

	bridge = ctrlr->parent->driver_priv;
	dev = ctrlr->parent->parent;

	mutex_lock(&ctrlr->mtx);

	if (!list_empty(&ctrlr->running)) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 *     sorted out the mechanism for "pending" DMA transfers.
		 *     Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
		list);

	bus_addr = virt_to_bus(&entry->descriptor);

	mutex_unlock(&ctrlr->mtx);

	iowrite32(0, bridge->base + DTBC);
	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);

	/* Start the operation */
	val = ioread32(bridge->base + DGCS);

	/* XXX Could set VMEbus On and Off Counters here */
	val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);

	val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
		CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR);

	iowrite32(val, bridge->base + DGCS);

	val |= CA91CX42_DGCS_GO;

	iowrite32(val, bridge->base + DGCS);

	wait_event_interruptible(bridge->dma_queue,
		ca91cx42_dma_busy(ctrlr->parent));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32(bridge->base + DGCS);

	if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR)) {

		dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
		val = ioread32(bridge->base + DCTL);
	}

	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}

static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
{
	struct list_head *pos, *temp;
	struct ca91cx42_dma_entry *entry;

	/* detach and free each entry */
	list_for_each_safe(pos, temp, &list->entries) {
		list_del(pos);
		entry = list_entry(pos, struct ca91cx42_dma_entry, list);
		kfree(entry);
	}

	return 0;
}

/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the location monitor - that should be done when the
 * first callback is attached and disabled when the last callback is removed.
 */
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
	unsigned long long lm_base, u32 aspace, u32 cycle)
{
	u32 temp_base, lm_ctl = 0;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	/* Check the alignment of the location monitor */
	temp_base = (u32)lm_base;
	if (temp_base & 0xffff) {
		dev_err(dev, "Location monitor must be aligned to 64KB "
			"boundary");
		return -EINVAL;
	}

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i] != NULL) {
			mutex_unlock(&lm->mtx);
			dev_err(dev, "Location monitor callback attached, "
				"can't reset\n");
			return -EBUSY;
		}
	}

	switch (aspace) {
	case VME_A16:
		lm_ctl |= CA91CX42_LM_CTL_AS_A16;
		break;
	case VME_A24:
		lm_ctl |= CA91CX42_LM_CTL_AS_A24;
		break;
	case VME_A32:
		lm_ctl |= CA91CX42_LM_CTL_AS_A32;
		break;
	default:
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		lm_ctl |= CA91CX42_LM_CTL_SUPR;
	if (cycle & VME_USER)
		lm_ctl |= CA91CX42_LM_CTL_NPRIV;
	if (cycle & VME_PROG)
		lm_ctl |= CA91CX42_LM_CTL_PGM;
	if (cycle & VME_DATA)
		lm_ctl |= CA91CX42_LM_CTL_DATA;

	iowrite32(lm_base, bridge->base + LM_BS);
	iowrite32(lm_ctl, bridge->base + LM_CTL);

	mutex_unlock(&lm->mtx);

	return 0;
}

/* Get the configuration of the location monitor and return whether it is
 * enabled or disabled.
 */
static int ca91cx42_lm_get(struct vme_lm_resource *lm,
	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
	u32 lm_ctl, enabled = 0;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
	lm_ctl = ioread32(bridge->base + LM_CTL);

	if (lm_ctl & CA91CX42_LM_CTL_EN)
		enabled = 1;

	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
		*aspace = VME_A16;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
		*aspace = VME_A24;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
		*aspace = VME_A32;

	*cycle = 0;
	if (lm_ctl & CA91CX42_LM_CTL_SUPR)
		*cycle |= VME_SUPER;
	if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
		*cycle |= VME_USER;
	if (lm_ctl & CA91CX42_LM_CTL_PGM)
		*cycle |= VME_PROG;
	if (lm_ctl & CA91CX42_LM_CTL_DATA)
		*cycle |= VME_DATA;

	mutex_unlock(&lm->mtx);

	return enabled;
}

/*
 * Attach a callback to a specific location monitor.
 *
 * Callback will be passed the monitor triggered.
 */
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32(bridge->base + LM_CTL);
	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
		lm_ctl |= CA91CX42_LM_CTL_EN;
		iowrite32(lm_ctl, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}

/*
 * Detach a callback function from a specific location monitor.
 */
static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp &= ~CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	iowrite32(CA91CX42_LINT_LM[monitor],
		 bridge->base + LINT_STAT);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3)) == 0) {
		tmp = ioread32(bridge->base + LM_CTL);
		tmp &= ~CA91CX42_LM_CTL_EN;
		iowrite32(tmp, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}

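/*
 * Determine the slot the card occupies, either from the slot ID latched in
 * VCSR_BS or, if set, from the "geoid" module parameter override.
 */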
static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
{
	u32 slot = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	if (!geoid) {
		slot = ioread32(bridge->base + VCSR_BS);
		slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
	} else
		slot = geoid;

	return (int)slot;
}

static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
	dma_addr_t *dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = container_of(parent, struct pci_dev, dev);

	return pci_alloc_consistent(pdev, size, dma);
}

static void ca91cx42_free_consistent(struct device *parent, size_t size,
	void *vaddr, dma_addr_t dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = container_of(parent, struct pci_dev, dev);

	pci_free_consistent(pdev, size, vaddr, dma);
}

static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}

/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&bridge->crcsr_bus);
	if (bridge->crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}

static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off CR/CSR space */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
		bridge->crcsr_bus);
}

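/*
 * Probe: enable and map the device, verify the expected Tundra ID, set up
 * interrupts, then register the master, slave, DMA and location monitor
 * resources this bridge provides with the VME core.
 */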
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL, *n;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *ca91cx42_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);

	if (ca91cx42_device == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_driver;
	}

	ca91cx42_bridge->driver_priv = ca91cx42_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&ca91cx42_device->dma_queue);
	init_waitqueue_head(&ca91cx42_device->iack_queue);
	mutex_init(&ca91cx42_device->vme_int);
	mutex_init(&ca91cx42_device->vme_rmw);

	ca91cx42_bridge->parent = &pdev->dev;
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
			"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&master_image->bus_resource, 0,
			sizeof(struct resource));
		master_image->kern_base  = NULL;
		list_add_tail(&master_image->list,
			&ca91cx42_bridge->master_resources);
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
			"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			&ca91cx42_bridge->slave_resources);
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
			"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&dma_ctrlr->mtx);
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME;
		INIT_LIST_HEAD(&dma_ctrlr->pending);
		INIT_LIST_HEAD(&dma_ctrlr->running);
		list_add_tail(&dma_ctrlr->list,
			&ca91cx42_bridge->dma_resources);
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
		"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;
	ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
	ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;

	data = ioread32(ca91cx42_device->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n",
		ca91cx42_slot_get(ca91cx42_bridge));

	if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");

	/* Register the bridge with the VME core; ca91cx42_remove() retrieves
	 * it again via pci_get_drvdata()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, ca91cx42_bridge);

	return 0;

err_reg:
	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_bridge, pdev);
err_irq:
err_test:
	iounmap(ca91cx42_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_device);
err_driver:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}

static void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL, *n;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;
	struct ca91cx42_driver *bridge;
	struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off Ints */
	iowrite32(0, bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, bridge->base + LSI0_CTL);
	iowrite32(0x00800000, bridge->base + LSI1_CTL);
	iowrite32(0x00800000, bridge->base + LSI2_CTL);
	iowrite32(0x00800000, bridge->base + LSI3_CTL);
	iowrite32(0x00800000, bridge->base + LSI4_CTL);
	iowrite32(0x00800000, bridge->base + LSI5_CTL);
	iowrite32(0x00800000, bridge->base + LSI6_CTL);
	iowrite32(0x00800000, bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);

	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_bridge, pdev);

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	/* bridge (driver_priv) was allocated separately in ca91cx42_probe() */
	kfree(bridge);
	kfree(ca91cx42_bridge);
}

static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);
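/* For example, "modprobe vme_ca91cx42 geoid=3" forces the slot number to 3. */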

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);