/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

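/* Map the DVMA controller registers and classify the chip revision
 * from the device ID field of DMA_CSR.
 */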
static int __devinit esp_sbus_setup_dma(struct esp *esp,
					struct platform_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}

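/* Map the ESP chip's register bank; SBUS_ESP_REG_SIZE covers the
 * whole longword-spaced register window.
 */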
static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct platform_device *op = esp->dev;
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &op->resource[1];
	else
		res = &op->resource[0];

	esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

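/* The 16-byte DMA-coherent command block is handed to the esp_scsi
 * core, which uses it for command and autosense transfers.
 */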
static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
	struct platform_device *op = esp->dev;

	esp->command_block = dma_alloc_coherent(&op->dev, 16,
						&esp->command_block_dma,
						GFP_ATOMIC);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

static int __devinit esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct platform_device *op = esp->dev;

	host->irq = op->archdata.irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

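/* Our initiator ID comes from the ESP node's "initiator-id"
 * property if present, then its "scsi-initiator-id", then the DVMA
 * node's "scsi-initiator-id", defaulting to the traditional 7.
 */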
static void __devinit esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
	struct platform_device *op = esp->dev;
	struct device_node *dp;

	dp = op->dev.of_node;
	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void __devinit esp_get_differential(struct esp *esp)
{
	struct platform_device *op = esp->dev;
	struct device_node *dp;

	dp = op->dev.of_node;
	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

static void __devinit esp_get_clock_params(struct esp *esp)
{
	struct platform_device *op = esp->dev;
	struct device_node *bus_dp, *dp;
	int fmhz;

	dp = op->dev.of_node;
	bus_dp = dp->parent;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

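/* The usable burst sizes are the intersection of the "burst-sizes"
 * masks on the ESP node, the DVMA node, and the DVMA's parent bus.
 * If the result is unusable (no property anywhere, or 16-byte or
 * 32-byte bursts unsupported), fall back to DMA_BURST32 - 1.
 */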
static void __devinit esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
	struct device_node *dma_dp = dma_of->dev.of_node;
	struct platform_device *op = esp->dev;
	struct device_node *dp;
	u8 bursts, val;

	dp = op->dev.of_node;
	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void __devinit esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

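/* The byte-wide ESP registers appear longword-spaced on SBUS,
 * hence the reg * 4 addressing.
 */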
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
				      size_t sz, int dir)
{
	struct platform_device *op = esp->dev;

	return dma_map_single(&op->dev, buf, sz, dir);
}

static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			   int num_sg, int dir)
{
	struct platform_device *op = esp->dev;

	return dma_map_sg(&op->dev, sg, num_sg, dir);
}

static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				  size_t sz, int dir)
{
	struct platform_device *op = esp->dev;

	dma_unmap_single(&op->dev, addr, sz, dir);
}

static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			      int num_sg, int dir)
{
	struct platform_device *op = esp->dev;

	dma_unmap_sg(&op->dev, sg, num_sg, dir);
}

static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

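/* Put the DVMA engine into a known state.  HME (FAS366) parts need
 * a full DMA_RESET_FAS366 cycle and a freshly built CSR; earlier
 * revisions just get per-revision clock and burst tweaks after a
 * SCSI reset pulse.
 */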
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	op = esp->dev;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

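/* Wait for data buffered in the DVMA FIFO to drain to memory.  HME
 * needs no drain; rev3 and ESC1 drain on their own, so for those we
 * only poll DMA_FIFO_ISDRAIN instead of forcing DMA_FIFO_STDRAIN.
 */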
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

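/* Throw away any state the DVMA engine is holding between commands.
 * HME takes a SCSI reset plus CSR rewrite; other revisions wait out
 * pending reads and then pulse DMA_FIFO_INV.
 */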
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

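/* Program a DMA transfer: load the transfer count into the ESP,
 * point the DVMA at the buffer, set the direction, and issue the
 * ESP command.  FASHME writes a 24-bit count via FAS_RLO/FAS_RHI
 * and must start the ESP before the DVMA; ESC1 wants its byte
 * count padded out to a page boundary.
 */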
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.map_single	=	sbus_esp_map_single,
	.map_sg		=	sbus_esp_map_sg,
	.unmap_single	=	sbus_esp_unmap_single,
	.unmap_sg	=	sbus_esp_unmap_sg,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};

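/* Allocate and initialize a host for one ESP: map the DVMA and ESP
 * registers, grab the IRQ and firmware properties, then register
 * with the esp_scsi core.
 */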
static int __devinit esp_sbus_probe_one(struct platform_device *op,
					struct platform_device *espdma,
					int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = op;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&op->dev, esp);

	err = scsi_esp_register(esp, &op->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

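/* An ESP either sits under an "espdma"/"dma" node that provides its
 * DVMA engine, or, on HME ("SUNW,fas"), carries the DVMA registers
 * in its own node.
 */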
static int __devinit esp_sbus_probe(struct platform_device *op)
{
	struct device_node *dma_node = NULL;
	struct device_node *dp = op->dev.of_node;
	struct platform_device *dma_of = NULL;
	int hme = 0;

	if (dp->parent &&
	    (!strcmp(dp->parent->name, "espdma") ||
	     !strcmp(dp->parent->name, "dma")))
		dma_node = dp->parent;
	else if (!strcmp(dp->name, "SUNW,fas")) {
		dma_node = op->dev.of_node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	return esp_sbus_probe_one(op, dma_of, hme);
}

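/* Tear down in reverse probe order: unregister from the esp_scsi
 * core, disable DVMA interrupts, then release the IRQ, command
 * block and register mappings.
 */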
static int __devexit esp_sbus_remove(struct platform_device *op)
{
	struct esp *esp = dev_get_drvdata(&op->dev);
	struct platform_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	bool is_hme;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	is_hme = (esp->dmarev == dvmahme);

	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}

static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.owner = THIS_MODULE,
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= __devexit_p(esp_sbus_remove),
};

static int __init sunesp_init(void)
{
	return platform_driver_register(&esp_sbus_driver);
}

static void __exit sunesp_exit(void)
{
	platform_driver_unregister(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);