/*
 *  carmel.c: Driver for Promise SATA SX8 looks-like-I2O hardware
 *
 *  Copyright 2004 Red Hat, Inc.
 *
 *  Author/maintainer:  Jeff Garzik <jgarzik@pobox.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file "COPYING" in the main directory of this archive
 *  for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blk.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

MODULE_AUTHOR("Jeff Garzik");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Promise SATA SX8 (carmel) block driver");

#if 0
#define CARM_DEBUG
#define CARM_VERBOSE_DEBUG
#else
#undef CARM_DEBUG
#undef CARM_VERBOSE_DEBUG
#endif
#undef CARM_NDEBUG

#define DRV_NAME "carmel"
#define DRV_VERSION "0.8-24.1"
#define PFX DRV_NAME ": "

#define NEXT_RESP(idx)	((idx + 1) % RMSG_Q_LEN)

/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
#define TAG_ENCODE(tag)	(((tag) << 16) | 0xf)
#define TAG_DECODE(tag)	(((tag) >> 16) & 0x1f)
#define TAG_VALID(tag)	((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
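/*
 * A command tag rides in the upper bits of the 32-bit handle, with the
 * low nibble set to the 0xf poison pattern above.  Worked example:
 * TAG_ENCODE(5) == (5 << 16) | 0xf == 0x0005000f, and
 * TAG_DECODE(0x0005000f) == 5.  TAG_VALID() accepts a handle only if
 * the poison nibble survived the round trip through the hardware and
 * the decoded tag fits the 32-entry (5-bit) tag space.
 */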

/* note: prints function name for you */
#ifdef CARM_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#ifdef CARM_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif	/* CARM_VERBOSE_DEBUG */
#else
#define DPRINTK(fmt, args...)
#define VPRINTK(fmt, args...)
#endif	/* CARM_DEBUG */

#ifdef CARM_NDEBUG
#define assert(expr)
#else
#define assert(expr) \
        if(unlikely(!(expr))) {                                   \
        printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
        #expr,__FILE__,__FUNCTION__,__LINE__);          \
        }
#endif

/* defines only for the constants which don't work well as enums */
struct carm_host;

enum {
	/* adapter-wide limits */
	CARM_MAX_PORTS		= 8,
	CARM_SHM_SIZE		= (4096 << 7),
	CARM_PART_SHIFT		= 5,
	CARM_MINORS_PER_MAJOR	= (1 << CARM_PART_SHIFT),
	CARM_MAX_WAIT_Q		= CARM_MAX_PORTS + 1,
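	/*
	 * Minor-number arithmetic: 8 ports x 32 minors per port (5
	 * partition bits) = 256 minors per major, which is why the
	 * per-major tables in struct carm_host below are 256 entries.
	 */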

	/* command message queue limits */
	CARM_MAX_REQ		= 64,	       /* max command msgs per host */
	CARM_MAX_Q		= 1,		   /* one command at a time */
	CARM_MSG_LOW_WATER	= (CARM_MAX_REQ / 4),	     /* refill mark */

	/* S/G limits, host-wide and per-request */
	CARM_MAX_REQ_SG		= 32,	     /* max s/g entries per request */
	CARM_SG_BOUNDARY	= 0xffffUL,	    /* s/g segment boundary */
	CARM_MAX_HOST_SG	= 600,		/* max s/g entries per host */
	CARM_SG_LOW_WATER	= (CARM_MAX_HOST_SG / 4),   /* re-fill mark */

	/* hardware registers */
	CARM_IHQP		= 0x1c,
	CARM_INT_STAT		= 0x10, /* interrupt status */
	CARM_INT_MASK		= 0x14, /* interrupt mask */
	CARM_HMUC		= 0x18, /* host message unit control */
	RBUF_ADDR_LO		= 0x20, /* response msg DMA buf low 32 bits */
	RBUF_ADDR_HI		= 0x24, /* response msg DMA buf high 32 bits */
	RBUF_BYTE_SZ		= 0x28,
	CARM_RESP_IDX		= 0x2c,
	CARM_CMS0		= 0x30, /* command message size reg 0 */
	CARM_LMUC		= 0x48,
	CARM_HMPHA		= 0x6c,
	CARM_INITC		= 0xb5,

	/* bits in CARM_INT_{STAT,MASK} */
	INT_RESERVED		= 0xfffffff0,
	INT_WATCHDOG		= (1 << 3),	/* watchdog timer */
	INT_Q_OVERFLOW		= (1 << 2),	/* cmd msg q overflow */
	INT_Q_AVAILABLE		= (1 << 1),	/* cmd msg q has free space */
	INT_RESPONSE		= (1 << 0),	/* response msg available */
	INT_ACK_MASK		= INT_WATCHDOG | INT_Q_OVERFLOW,
	INT_DEF_MASK		= INT_RESERVED | INT_Q_OVERFLOW |
				  INT_RESPONSE,

	/* command messages, and related register bits */
	CARM_HAVE_RESP		= 0x01,
	CARM_MSG_READ		= 1,
	CARM_MSG_WRITE		= 2,
	CARM_MSG_VERIFY		= 3,
	CARM_MSG_GET_CAPACITY	= 4,
	CARM_MSG_FLUSH		= 5,
	CARM_MSG_IOCTL		= 6,
	CARM_MSG_ARRAY		= 8,
	CARM_MSG_MISC		= 9,
	CARM_CME		= (1 << 2),
	CARM_RME		= (1 << 1),
	CARM_WZBC		= (1 << 0),
	CARM_RMI		= (1 << 0),
	CARM_Q_FULL		= (1 << 3),
	CARM_MSG_SIZE		= 288,
	CARM_Q_LEN		= 48,

	/* CARM_MSG_IOCTL messages */
	CARM_IOC_SCAN_CHAN	= 5,	/* scan channels for devices */
	CARM_IOC_GET_TCQ	= 13,	/* get tcq/ncq depth */
	CARM_IOC_SET_TCQ	= 14,	/* set tcq/ncq depth */

	IOC_SCAN_CHAN_NODEV	= 0x1f,
	IOC_SCAN_CHAN_OFFSET	= 0x40,

	/* CARM_MSG_ARRAY messages */
	CARM_ARRAY_INFO		= 0,

	ARRAY_NO_EXIST		= (1 << 31),

	/* response messages */
	RMSG_SZ			= 8,	/* sizeof(struct carm_response) */
	RMSG_Q_LEN		= 48,	/* resp. msg list length */
	RMSG_OK			= 1,	/* bit indicating msg was successful */
					/* length of entire resp. msg buffer */
	RBUF_LEN		= RMSG_SZ * RMSG_Q_LEN,

	PDC_SHM_SIZE		= (4096 << 7), /* length of entire h/w buffer */

	/* CARM_MSG_MISC messages */
	MISC_GET_FW_VER		= 2,
	MISC_ALLOC_MEM		= 3,
	MISC_SET_TIME		= 5,

	/* MISC_GET_FW_VER feature bits */
	FW_VER_4PORT		= (1 << 2), /* 1=4 ports, 0=8 ports */
	FW_VER_NON_RAID		= (1 << 1), /* 1=non-RAID firmware, 0=RAID */
	FW_VER_ZCR		= (1 << 0), /* zero channel RAID (whatever that is) */

	/* carm_host flags */
	FL_NON_RAID		= FW_VER_NON_RAID,
	FL_4PORT		= FW_VER_4PORT,
	FL_FW_VER_MASK		= (FW_VER_NON_RAID | FW_VER_4PORT),
	FL_DAC			= (1 << 16),
	FL_DYN_MAJOR		= (1 << 17),
};

enum carm_magic_numbers {
	CARM_MAGIC_HOST		= 0xdeadbeefUL,
	CARM_MAGIC_PORT		= 0xbedac0edUL,
};

enum scatter_gather_types {
	SGT_32BIT		= 0,
	SGT_64BIT		= 1,
};

enum host_states {
	HST_INVALID,		/* invalid state; never used */
	HST_ALLOC_BUF,		/* setting up master SHM area */
	HST_ERROR,		/* we never leave here */
	HST_PORT_SCAN,		/* start dev scan */
	HST_DEV_SCAN_START,	/* start per-device probe */
	HST_DEV_SCAN,		/* continue per-device probe */
	HST_DEV_ACTIVATE,	/* activate devices we found */
	HST_PROBE_FINISHED,	/* probe is complete */
	HST_PROBE_START,	/* initiate probe */
	HST_SYNC_TIME,		/* tell firmware what time it is */
	HST_GET_FW_VER,		/* get firmware version, adapter port cnt */
};

#ifdef CARM_DEBUG
static const char *state_name[] = {
	"HST_INVALID",
	"HST_ALLOC_BUF",
	"HST_ERROR",
	"HST_PORT_SCAN",
	"HST_DEV_SCAN_START",
	"HST_DEV_SCAN",
	"HST_DEV_ACTIVATE",
	"HST_PROBE_FINISHED",
	"HST_PROBE_START",
	"HST_SYNC_TIME",
	"HST_GET_FW_VER",
};
#endif

struct carm_port {
	unsigned long			magic;
	unsigned int			port_no;
	unsigned int			n_queued;
	struct carm_host		*host;
	struct tasklet_struct		tasklet;
	request_queue_t			q;

	/* attached device characteristics */
	u64				capacity;
	char				name[41];
	u16				dev_geom_head;
	u16				dev_geom_sect;
	u16				dev_geom_cyl;
};

struct carm_request {
	unsigned int			tag;
	int				n_elem;
	unsigned int			msg_type;
	unsigned int			msg_subtype;
	unsigned int			msg_bucket;
	struct request			*rq;
	struct carm_port		*port;
	struct request			special_rq;
	struct scatterlist		sg[CARM_MAX_REQ_SG];
};

struct carm_host {
	unsigned long			magic;
	unsigned long			flags;
	void				*mmio;
	void				*shm;
	dma_addr_t			shm_dma;

	int				major;
	int				id;
	char				name[32];

	struct pci_dev			*pdev;
	unsigned int			state;
	u32				fw_ver;

	request_queue_t			oob_q;
	unsigned int			n_oob;
	struct tasklet_struct		oob_tasklet;

	unsigned int			hw_sg_used;

	unsigned int			resp_idx;

	unsigned int			wait_q_prod;
	unsigned int			wait_q_cons;
	request_queue_t			*wait_q[CARM_MAX_WAIT_Q];

	unsigned int			n_msgs;
	u64				msg_alloc;
	struct carm_request		req[CARM_MAX_REQ];
	void				*msg_base;
	dma_addr_t			msg_dma;

	int				cur_scan_dev;
	unsigned long			dev_active;
	unsigned long			dev_present;
	struct carm_port		port[CARM_MAX_PORTS];

	struct tq_struct		fsm_task;

	struct semaphore		probe_sem;

	struct gendisk			gendisk;
	struct hd_struct		gendisk_hd[256];
	int				blk_sizes[256];
	int				blk_block_sizes[256];
	int				blk_sect_sizes[256];

	struct list_head		host_list_node;
};

struct carm_response {
	u32 ret_handle;
	u32 status;
}  __attribute__((packed));

struct carm_msg_sg {
	u32 start;
	u32 len;
}  __attribute__((packed));

struct carm_msg_rw {
	u8 type;
	u8 id;
	u8 sg_count;
	u8 sg_type;
	u32 handle;
	u32 lba;
	u16 lba_count;
	u16 lba_high;
	struct carm_msg_sg sg[32];
}  __attribute__((packed));

struct carm_msg_allocbuf {
	u8 type;
	u8 subtype;
	u8 n_sg;
	u8 sg_type;
	u32 handle;
	u32 addr;
	u32 len;
	u32 evt_pool;
	u32 n_evt;
	u32 rbuf_pool;
	u32 n_rbuf;
	u32 msg_pool;
	u32 n_msg;
	struct carm_msg_sg sg[8];
}  __attribute__((packed));

struct carm_msg_ioctl {
	u8 type;
	u8 subtype;
	u8 array_id;
	u8 reserved1;
	u32 handle;
	u32 data_addr;
	u32 reserved2;
}  __attribute__((packed));

struct carm_msg_sync_time {
	u8 type;
	u8 subtype;
	u16 reserved1;
	u32 handle;
	u32 reserved2;
	u32 timestamp;
}  __attribute__((packed));

struct carm_msg_get_fw_ver {
	u8 type;
	u8 subtype;
	u16 reserved1;
	u32 handle;
	u32 data_addr;
	u32 reserved2;
}  __attribute__((packed));

struct carm_fw_ver {
	u32 version;
	u8 features;
	u8 reserved1;
	u16 reserved2;
}  __attribute__((packed));

struct carm_array_info {
	u32 size;

	u16 size_hi;
	u16 stripe_size;

	u32 mode;

	u16 stripe_blk_sz;
	u16 reserved1;

	u16 cyl;
	u16 head;

	u16 sect;
	u8 array_id;
	u8 reserved2;

	char name[40];

	u32 array_status;

	/* device list continues beyond this point? */
}  __attribute__((packed));

static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void carm_remove_one (struct pci_dev *pdev);
static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
			   unsigned int cmd, unsigned long arg);
static request_queue_t *carm_find_queue(kdev_t device);
static int carm_revalidate_disk(kdev_t dev);

static struct pci_device_id carm_pci_tbl[] = {
	{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, carm_pci_tbl);

static struct pci_driver carm_driver = {
	.name		= DRV_NAME,
	.id_table	= carm_pci_tbl,
	.probe		= carm_init_one,
	.remove		= carm_remove_one,
};

static struct block_device_operations carm_bd_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= carm_bdev_ioctl,
};

static unsigned int carm_host_id;
static unsigned long carm_major_alloc;


static struct carm_host *carm_from_dev(kdev_t dev, struct carm_port **port_out)
{
	struct carm_host *host;
	struct carm_port *port;
	request_queue_t *q;

	q = carm_find_queue(dev);
	if (!q || !q->queuedata) {
		printk(KERN_ERR PFX "queue not found for major %d minor %d\n",
			MAJOR(dev), MINOR(dev));
		return NULL;
	}

	port = q->queuedata;
	if (unlikely(port->magic != CARM_MAGIC_PORT)) {
		printk(KERN_ERR PFX "bad port magic number for major %d minor %d\n",
			MAJOR(dev), MINOR(dev));
		return NULL;
	}

	host = port->host;
	if (unlikely(host->magic != CARM_MAGIC_HOST)) {
		printk(KERN_ERR PFX "bad host magic number for major %d minor %d\n",
			MAJOR(dev), MINOR(dev));
		return NULL;
	}

	if (port_out)
		*port_out = port;
	return host;
}

static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
			   unsigned int cmd, unsigned long arg)
{
	void *usermem = (void *) arg;
	struct carm_port *port = NULL;
	struct carm_host *host;

	host = carm_from_dev(ino->i_rdev, &port);
	if (!host)
		return -EINVAL;

	switch (cmd) {
	case HDIO_GETGEO: {
		struct hd_geometry geom;

		if (!usermem)
			return -EINVAL;

		if (port->dev_geom_cyl) {
			geom.heads = port->dev_geom_head;
			geom.sectors = port->dev_geom_sect;
			geom.cylinders = port->dev_geom_cyl;
		} else {
			u32 tmp = ((u32)port->capacity) / (0xff * 0x3f);
			geom.heads = 0xff;
			geom.sectors = 0x3f;
			if (tmp > 65536)
				geom.cylinders = 0xffff;
			else
				geom.cylinders = tmp;
		}
		geom.start = host->gendisk_hd[MINOR(ino->i_rdev)].start_sect;

		if (copy_to_user(usermem, &geom, sizeof(geom)))
			return -EFAULT;
		return 0;
	}

	case HDIO_GETGEO_BIG: {
		struct hd_big_geometry geom;

		if (!usermem)
			return -EINVAL;

		if (port->dev_geom_cyl) {
			geom.heads = port->dev_geom_head;
			geom.sectors = port->dev_geom_sect;
			geom.cylinders = port->dev_geom_cyl;
		} else {
			geom.heads = 0xff;
			geom.sectors = 0x3f;
			geom.cylinders = ((u32)port->capacity) / (0xff * 0x3f);
		}
		geom.start = host->gendisk_hd[MINOR(ino->i_rdev)].start_sect;

		if (copy_to_user(usermem, &geom, sizeof(geom)))
			return -EFAULT;
		return 0;
	}

	case BLKRRPART:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return carm_revalidate_disk(ino->i_rdev);

	case BLKGETSIZE:
	case BLKGETSIZE64:
	case BLKFLSBUF:
	case BLKBSZSET:
	case BLKBSZGET:
	case BLKROSET:
	case BLKROGET:
	case BLKRASET:
	case BLKRAGET:
	case BLKPG:
	case BLKELVGET:
	case BLKELVSET:
		return blk_ioctl(ino->i_rdev, cmd, arg);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };

static inline int carm_lookup_bucket(u32 msg_size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		if (msg_size <= msg_sizes[i])
			return i;

	return -ENOENT;
}

static void carm_init_buckets(void *mmio)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
}

static inline void *carm_ref_msg(struct carm_host *host,
				 unsigned int msg_idx)
{
	return host->msg_base + (msg_idx * CARM_MSG_SIZE);
}

static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
					  unsigned int msg_idx)
{
	return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
}
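/*
 * Command messages live in a flat array of fixed CARM_MSG_SIZE (288
 * byte) slots immediately after the response buffer in the shared
 * memory area; the two helpers above return the CPU and DMA views of
 * slot N.  Example: tag 3 refers to the bytes at
 * msg_base + 3 * 288 = msg_base + 864, and the same offset from
 * msg_dma is what the hardware is handed.
 */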

static int carm_send_msg(struct carm_host *host,
			 struct carm_request *crq)
{
	void *mmio = host->mmio;
	u32 msg = (u32) carm_ref_msg_dma(host, crq->tag);
	u32 cm_bucket = crq->msg_bucket;
	u32 tmp;
	int rc = 0;

	VPRINTK("ENTER\n");

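	/*
	 * Submission is a single doorbell write: the message-size
	 * bucket, shifted left by one, is OR'd into the low bits of the
	 * message's DMA address.  This evidently relies on slot
	 * alignment keeping those bits free (RBUF_LEN == 384 and
	 * CARM_MSG_SIZE == 288 are both multiples of 32, so the low
	 * five address bits are always zero).  If the queue-full bit is
	 * set, back off with -EBUSY and let the caller retry later.
	 */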
	tmp = readl(mmio + CARM_HMUC);
	if (tmp & CARM_Q_FULL) {
#if 0
		tmp = readl(mmio + CARM_INT_MASK);
		tmp |= INT_Q_AVAILABLE;
		writel(tmp, mmio + CARM_INT_MASK);
		readl(mmio + CARM_INT_MASK);	/* flush */
#endif
		DPRINTK("host msg queue full\n");
		rc = -EBUSY;
	} else {
		writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
		readl(mmio + CARM_IHQP);	/* flush */
	}

	return rc;
}

static struct carm_request *carm_get_request(struct carm_host *host)
{
	unsigned int i;

	/* obey global hardware limit on S/G entries */
	if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG))
		return NULL;

	for (i = 0; i < CARM_MAX_Q; i++)
		if ((host->msg_alloc & (1ULL << i)) == 0) {
			struct carm_request *crq = &host->req[i];
			crq->port = NULL;
			crq->n_elem = 0;

			host->msg_alloc |= (1ULL << i);
			host->n_msgs++;

			assert(host->n_msgs <= CARM_MAX_REQ);
			return crq;
		}

	DPRINTK("no request available, returning NULL\n");
	return NULL;
}

static int carm_put_request(struct carm_host *host, struct carm_request *crq)
{
	assert(crq->tag < CARM_MAX_Q);

	if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0))
		return -EINVAL; /* tried to clear a tag that was not active */

	assert(host->hw_sg_used >= crq->n_elem);

	host->msg_alloc &= ~(1ULL << crq->tag);
	host->hw_sg_used -= crq->n_elem;
	host->n_msgs--;

	return 0;
}

static void carm_insert_special(request_queue_t *q, struct request *rq,
				void *data, int at_head)
{
	unsigned long flags;

	rq->cmd = SPECIAL;
	rq->special = data;
	rq->q = NULL;
	rq->nr_segments = 0;
	rq->elevator_sequence = 0;

	spin_lock_irqsave(&io_request_lock, flags);
	if (at_head)
		list_add(&rq->queue, &q->queue_head);
	else
		list_add_tail(&rq->queue, &q->queue_head);
	q->request_fn(q);
	spin_unlock_irqrestore(&io_request_lock, flags);
}

static struct carm_request *carm_get_special(struct carm_host *host)
{
	unsigned long flags;
	struct carm_request *crq = NULL;
	int tries = 5000;

	while (tries-- > 0) {
		spin_lock_irqsave(&io_request_lock, flags);
		crq = carm_get_request(host);
		spin_unlock_irqrestore(&io_request_lock, flags);

		if (crq)
			break;
		msleep(10);
	}

	if (!crq)
		return NULL;

	crq->rq = &crq->special_rq;
	return crq;
}

static int carm_array_info (struct carm_host *host, unsigned int array_idx)
{
	struct carm_msg_ioctl *ioc;
	unsigned int idx;
	u32 msg_data;
	dma_addr_t msg_dma;
	struct carm_request *crq;
	int rc;
	unsigned long flags;

	crq = carm_get_special(host);
	if (!crq) {
		rc = -ENOMEM;
		goto err_out;
	}

	idx = crq->tag;

	ioc = carm_ref_msg(host, idx);
	msg_dma = carm_ref_msg_dma(host, idx);
	msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));

	crq->msg_type = CARM_MSG_ARRAY;
	crq->msg_subtype = CARM_ARRAY_INFO;
	rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
				sizeof(struct carm_array_info));
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_ARRAY;
	ioc->subtype	= CARM_ARRAY_INFO;
	ioc->array_id	= (u8) array_idx;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	assert(host->state == HST_DEV_SCAN_START ||
	       host->state == HST_DEV_SCAN);

	DPRINTK("blk_insert_request, tag == %u\n", idx);
	carm_insert_special(&host->oob_q, crq->rq, crq, 1);

	return 0;

err_out:
	spin_lock_irqsave(&io_request_lock, flags);
	host->state = HST_ERROR;
	spin_unlock_irqrestore(&io_request_lock, flags);
	return rc;
}

typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);

static int carm_send_special (struct carm_host *host, carm_sspc_t func)
{
	struct carm_request *crq;
	struct carm_msg_ioctl *ioc;
	void *mem;
	unsigned int idx, msg_size;
	int rc;

	crq = carm_get_special(host);
	if (!crq)
		return -ENOMEM;

	idx = crq->tag;

	mem = carm_ref_msg(host, idx);

	msg_size = func(host, idx, mem);

	ioc = mem;
	crq->msg_type = ioc->type;
	crq->msg_subtype = ioc->subtype;
	rc = carm_lookup_bucket(msg_size);
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	DPRINTK("blk_insert_request, tag == %u\n", idx);
	carm_insert_special(&host->oob_q, crq->rq, crq, 1);

	return 0;
}

static unsigned int carm_fill_sync_time(struct carm_host *host,
					unsigned int idx, void *mem)
{
	struct timeval tv;
	struct carm_msg_sync_time *st = mem;

	do_gettimeofday(&tv);

	memset(st, 0, sizeof(*st));
	st->type	= CARM_MSG_MISC;
	st->subtype	= MISC_SET_TIME;
	st->handle	= cpu_to_le32(TAG_ENCODE(idx));
	st->timestamp	= cpu_to_le32(tv.tv_sec);

	return sizeof(struct carm_msg_sync_time);
}

static unsigned int carm_fill_alloc_buf(struct carm_host *host,
					unsigned int idx, void *mem)
{
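	/*
	 * Hand the firmware its view of the shared memory area.  The
	 * layout programmed below: response buffers at shm_dma (48 of
	 * them, RBUF_LEN == 384 bytes total), command message slots
	 * right after, an event pool at shm_dma + 16K, and the entire
	 * second half of the (4096 << 7)-byte region donated to the
	 * firmware as general-purpose buffer space.
	 */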
	struct carm_msg_allocbuf *ab = mem;

	memset(ab, 0, sizeof(*ab));
	ab->type	= CARM_MSG_MISC;
	ab->subtype	= MISC_ALLOC_MEM;
	ab->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ab->n_sg	= 1;
	ab->sg_type	= SGT_32BIT;
	ab->addr	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->len		= cpu_to_le32(PDC_SHM_SIZE >> 1);
	ab->evt_pool	= cpu_to_le32(host->shm_dma + (16 * 1024));
	ab->n_evt	= cpu_to_le32(1024);
	ab->rbuf_pool	= cpu_to_le32(host->shm_dma);
	ab->n_rbuf	= cpu_to_le32(RMSG_Q_LEN);
	ab->msg_pool	= cpu_to_le32(host->shm_dma + RBUF_LEN);
	ab->n_msg	= cpu_to_le32(CARM_Q_LEN);
	ab->sg[0].start	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->sg[0].len	= cpu_to_le32(65536);

	return sizeof(struct carm_msg_allocbuf);
}

static unsigned int carm_fill_scan_channels(struct carm_host *host,
					    unsigned int idx, void *mem)
{
	struct carm_msg_ioctl *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
			      IOC_SCAN_CHAN_OFFSET);

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_IOCTL;
	ioc->subtype	= CARM_IOC_SCAN_CHAN;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	/* fill output data area with "no device" default values */
	mem += IOC_SCAN_CHAN_OFFSET;
	memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);

	return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
}

static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
					 unsigned int idx, void *mem)
{
	struct carm_msg_get_fw_ver *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_MISC;
	ioc->subtype	= MISC_GET_FW_VER;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	return sizeof(struct carm_msg_get_fw_ver) +
	       sizeof(struct carm_fw_ver);
}

static void carm_activate_disk(struct carm_host *host,
			       struct carm_port *port)
{
	int minor_start = port->port_no << CARM_PART_SHIFT;
	int start, end, i;

	host->gendisk_hd[minor_start].nr_sects = port->capacity;
	host->blk_sizes[minor_start] = port->capacity;

	start = minor_start;
	end = minor_start + CARM_MINORS_PER_MAJOR;
	for (i = start; i < end; i++) {
		invalidate_device(MKDEV(host->major, i), 1);
		host->gendisk.part[i].start_sect = 0;
		host->gendisk.part[i].nr_sects = 0;
		host->blk_block_sizes[i] = 512;
		host->blk_sect_sizes[i] = 512;
	}

	grok_partitions(&host->gendisk, port->port_no,
			CARM_MINORS_PER_MAJOR,
			port->capacity);
}

static int carm_revalidate_disk(kdev_t dev)
{
	struct carm_host *host;
	struct carm_port *port = NULL;

	host = carm_from_dev(dev, &port);
	if (!host)
		return -EINVAL;

	carm_activate_disk(host, port);

	return 0;
}

static inline void complete_buffers(struct buffer_head *bh, int status)
{
	struct buffer_head *xbh;

	while (bh) {
		xbh = bh->b_reqnext;
		bh->b_reqnext = NULL;
		blk_finished_io(bh->b_size >> 9);
		bh->b_end_io(bh, status);
		bh = xbh;
	}
}

static inline void carm_end_request_queued(struct carm_host *host,
					   struct carm_request *crq,
					   int uptodate)
{
	struct request *req = crq->rq;
	int rc;

	complete_buffers(req->bh, uptodate);
	end_that_request_last(req);

	rc = carm_put_request(host, crq);
	assert(rc == 0);
}

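/*
 * wait_q is a tiny FIFO ring of stalled request queues.  prod/cons are
 * free-running unsigned counters, reduced mod CARM_MAX_WAIT_Q (9 == 8
 * ports + 1 OOB queue) only at indexing time, so prod == cons means
 * empty and the BUG_ON below catches a wrapped-into-full overrun.
 */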
static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
{
	unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;

	VPRINTK("STOPPED QUEUE %p\n", q);

	host->wait_q[idx] = q;
	host->wait_q_prod++;
	BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
}

static inline request_queue_t *carm_pop_q(struct carm_host *host)
{
	unsigned int idx;

	if (host->wait_q_prod == host->wait_q_cons)
		return NULL;

	idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
	host->wait_q_cons++;

	return host->wait_q[idx];
}

static inline void carm_round_robin(struct carm_host *host)
{
	request_queue_t *q = carm_pop_q(host);
	if (q) {
		struct tasklet_struct *tasklet;
		if (q == &host->oob_q)
			tasklet = &host->oob_tasklet;
		else {
			struct carm_port *port = q->queuedata;
			tasklet = &port->tasklet;
		}
		tasklet_schedule(tasklet);
		VPRINTK("STARTED QUEUE %p\n", q);
	}
}

static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
			int is_ok)
{
	carm_end_request_queued(host, crq, is_ok);
	if (CARM_MAX_Q == 1)
		carm_round_robin(host);
	else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
		 (host->hw_sg_used <= CARM_SG_LOW_WATER)) {
		carm_round_robin(host);
	}
}

static inline int carm_new_segment(request_queue_t *q, struct request *rq)
{
	if (rq->nr_segments < CARM_MAX_REQ_SG) {
		rq->nr_segments++;
		return 1;
	}
	return 0;
}

static int carm_back_merge_fn(request_queue_t *q, struct request *rq,
			      struct buffer_head *bh, int max_segments)
{
	if (blk_seg_merge_ok(rq->bhtail, bh))
		return 1;
	return carm_new_segment(q, rq);
}

static int carm_front_merge_fn(request_queue_t *q, struct request *rq,
			       struct buffer_head *bh, int max_segments)
{
	if (blk_seg_merge_ok(bh, rq->bh))
		return 1;
	return carm_new_segment(q, rq);
}

static int carm_merge_requests_fn(request_queue_t *q, struct request *rq,
				  struct request *nxt, int max_segments)
{
	int total_segments = rq->nr_segments + nxt->nr_segments;

	if (blk_seg_merge_ok(rq->bhtail, nxt->bh))
		total_segments--;

	if (total_segments > CARM_MAX_REQ_SG)
		return 0;

	rq->nr_segments = total_segments;
	return 1;
}

static void carm_oob_rq_fn(request_queue_t *q)
{
	struct carm_host *host = q->queuedata;

	tasklet_schedule(&host->oob_tasklet);
}

static void carm_rq_fn(request_queue_t *q)
{
	struct carm_port *port = q->queuedata;

	tasklet_schedule(&port->tasklet);
}

static void carm_oob_tasklet(unsigned long _data)
{
	struct carm_host *host = (void *) _data;
	request_queue_t *q = &host->oob_q;
	struct carm_request *crq;
	struct request *rq;
	int rc, have_work = 1;
	struct list_head *queue_head = &q->queue_head;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock, flags);
	if (q->plugged || list_empty(queue_head))
		have_work = 0;

	if (!have_work)
		goto out;

	while (1) {
		DPRINTK("get req\n");
		if (list_empty(queue_head))
			break;

		rq = blkdev_entry_next_request(queue_head);

		crq = rq->special;
		assert(crq != NULL);
		assert(crq->rq == rq);

		crq->n_elem = 0;

		DPRINTK("send req\n");
		rc = carm_send_msg(host, crq);
		if (rc) {
			carm_push_q(host, q);
			break;		/* call us again later, eventually */
		} else
			blkdev_dequeue_request(rq);
	}

out:
	spin_unlock_irqrestore(&io_request_lock, flags);
}

static int blk_rq_map_sg(request_queue_t *q, struct request *rq,
			 struct scatterlist *sg)
{
	int n_elem = 0;
	struct buffer_head *bh = rq->bh;
	u64 last_phys = ~0ULL;

	while (bh) {
		if (bh_phys(bh) == last_phys) {
			sg[n_elem - 1].length += bh->b_size;
			last_phys += bh->b_size;
		} else {
			if (unlikely(n_elem == CARM_MAX_REQ_SG))
				BUG();
			sg[n_elem].page = bh->b_page;
			sg[n_elem].length = bh->b_size;
			sg[n_elem].offset = bh_offset(bh);
			last_phys = bh_phys(bh) + bh->b_size;
			n_elem++;
		}

		bh = bh->b_reqnext;
	}

	return n_elem;
}
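/*
 * Local scatterlist builder: walks the request's buffer_head chain and
 * coalesces physically adjacent buffers into single entries, much like
 * the generic blk_rq_map_sg() that 2.6 later grew.  The merge
 * callbacks above cap nr_segments at CARM_MAX_REQ_SG, which is what is
 * intended to keep the BUG() here from ever firing.
 */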

static void carm_rw_tasklet(unsigned long _data)
{
	struct carm_port *port = (void *) _data;
	struct carm_host *host = port->host;
	request_queue_t *q = &port->q;
	struct carm_msg_rw *msg;
	struct carm_request *crq;
	struct request *rq;
	struct scatterlist *sg;
	int writing = 0, pci_dir, i, n_elem, rc, have_work = 1;
	u32 tmp;
	unsigned int msg_size;
	unsigned long flags;
	struct list_head *queue_head = &q->queue_head;
	unsigned long start_sector;

	spin_lock_irqsave(&io_request_lock, flags);
	if (q->plugged || list_empty(queue_head))
		have_work = 0;

	if (!have_work)
		goto out;

queue_one_request:
	VPRINTK("get req\n");
	if (list_empty(queue_head))
		goto out;

	rq = blkdev_entry_next_request(queue_head);

	crq = carm_get_request(host);
	if (!crq) {
		carm_push_q(host, q);
		goto out;	/* call us again later, eventually */
	}
	crq->rq = rq;

	if (rq_data_dir(rq) == WRITE) {
		writing = 1;
		pci_dir = PCI_DMA_TODEVICE;
	} else {
		pci_dir = PCI_DMA_FROMDEVICE;
	}

	/* get scatterlist from block layer */
	sg = &crq->sg[0];
	n_elem = blk_rq_map_sg(q, rq, sg);
	if (n_elem <= 0) {
		carm_end_rq(host, crq, 0);
		goto out;	/* request with no s/g entries? */
	}

	/* map scatterlist to PCI bus addresses */
	n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
	if (n_elem <= 0) {
		carm_end_rq(host, crq, 0);
		goto out;	/* request with no s/g entries? */
	}
	crq->n_elem = n_elem;
	crq->port = port;
	host->hw_sg_used += n_elem;

	/*
	 * build read/write message
	 */

	VPRINTK("build msg\n");
	msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag);

	if (writing) {
		msg->type = CARM_MSG_WRITE;
		crq->msg_type = CARM_MSG_WRITE;
	} else {
		msg->type = CARM_MSG_READ;
		crq->msg_type = CARM_MSG_READ;
	}

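	/*
	 * In this 2.4 driver the request carries partition-relative
	 * sectors, so the partition's start_sect is added by hand to
	 * get a whole-disk LBA.  That LBA is then split across the
	 * message: low 32 bits in ->lba, bits 32-47 in ->lba_high.
	 * The double 16-bit shift below avoids an undefined shift by
	 * 32 when unsigned long is 32 bits wide.
	 */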
	start_sector = rq->sector;
	start_sector += host->gendisk_hd[MINOR(rq->rq_dev)].start_sect;

	msg->id		= port->port_no;
	msg->sg_count	= n_elem;
	msg->sg_type	= SGT_32BIT;
	msg->handle	= cpu_to_le32(TAG_ENCODE(crq->tag));
	msg->lba	= cpu_to_le32(start_sector & 0xffffffff);
	tmp		= (start_sector >> 16) >> 16;
	msg->lba_high	= cpu_to_le16( (u16) tmp );
	msg->lba_count	= cpu_to_le16(rq->nr_sectors);

	msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
	for (i = 0; i < n_elem; i++) {
		struct carm_msg_sg *carm_sg = &msg->sg[i];
		carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
		carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
		msg_size += sizeof(struct carm_msg_sg);
	}

	rc = carm_lookup_bucket(msg_size);
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	/*
	 * queue read/write message to hardware
	 */

	VPRINTK("send msg, tag == %u\n", crq->tag);
	rc = carm_send_msg(host, crq);
	if (rc) {
		carm_put_request(host, crq);
		carm_push_q(host, q);
		goto out;	/* call us again later, eventually */
	} else
		blkdev_dequeue_request(rq);

	goto queue_one_request;

out:
	spin_unlock_irqrestore(&io_request_lock, flags);
}

static void carm_handle_array_info(struct carm_host *host,
				   struct carm_request *crq, u8 *mem,
				   int is_ok)
{
	struct carm_port *port;
	u8 *msg_data = mem + sizeof(struct carm_array_info);
	struct carm_array_info *desc = (struct carm_array_info *) msg_data;
	u64 lo, hi;
	int cur_port;
	size_t slen;

	DPRINTK("ENTER\n");

	carm_end_rq(host, crq, is_ok);

	if (!is_ok)
		goto out;
	if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
		goto out;

	cur_port = host->cur_scan_dev;

	/* should never occur */
	if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
		printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
		       cur_port, (int) desc->array_id);
		goto out;
	}

	port = &host->port[cur_port];

	lo = (u64) le32_to_cpu(desc->size);
	hi = (u64) le32_to_cpu(desc->size_hi);

	port->capacity = lo | (hi << 32);
	port->dev_geom_head = le16_to_cpu(desc->head);
	port->dev_geom_sect = le16_to_cpu(desc->sect);
	port->dev_geom_cyl = le16_to_cpu(desc->cyl);

	host->dev_active |= (1 << cur_port);

	strncpy(port->name, desc->name, sizeof(port->name));
	port->name[sizeof(port->name) - 1] = 0;
	slen = strlen(port->name);
	while (slen && (port->name[slen - 1] == ' ')) {
		port->name[slen - 1] = 0;
		slen--;
	}

	printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
	       pci_name(host->pdev), port->port_no, port->capacity);
	printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
	       pci_name(host->pdev), port->port_no, port->name);

out:
	assert(host->state == HST_DEV_SCAN);
	schedule_task(&host->fsm_task);
}

static void carm_handle_scan_chan(struct carm_host *host,
				  struct carm_request *crq, u8 *mem,
				  int is_ok)
{
	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
	unsigned int i, dev_count = 0;
	int new_state = HST_DEV_SCAN_START;

	DPRINTK("ENTER\n");

	carm_end_rq(host, crq, is_ok);

	if (!is_ok) {
		new_state = HST_ERROR;
		goto out;
	}

	/* TODO: scan and support non-disk devices */
	for (i = 0; i < 8; i++)
		if (msg_data[i] == 0) { /* direct-access device (disk) */
			host->dev_present |= (1 << i);
			dev_count++;
		}

	printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
	       pci_name(host->pdev), dev_count);

out:
	assert(host->state == HST_PORT_SCAN);
	host->state = new_state;
	schedule_task(&host->fsm_task);
}

static void carm_handle_generic(struct carm_host *host,
				struct carm_request *crq, int is_ok,
				int cur_state, int next_state)
{
	DPRINTK("ENTER\n");

	carm_end_rq(host, crq, is_ok);

	assert(host->state == cur_state);
	if (is_ok)
		host->state = next_state;
	else
		host->state = HST_ERROR;
	schedule_task(&host->fsm_task);
}

static inline void carm_handle_rw(struct carm_host *host,
				  struct carm_request *crq, int is_ok)
{
	int pci_dir;

	VPRINTK("ENTER\n");

	if (rq_data_dir(crq->rq) == WRITE)
		pci_dir = PCI_DMA_TODEVICE;
	else
		pci_dir = PCI_DMA_FROMDEVICE;

	pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);

	carm_end_rq(host, crq, is_ok);
}

static inline void carm_handle_resp(struct carm_host *host,
				    u32 ret_handle_le, u32 status)
{
	u32 handle = le32_to_cpu(ret_handle_le);
	unsigned int msg_idx;
	struct carm_request *crq;
	int is_ok = (status == RMSG_OK);
	u8 *mem;

	VPRINTK("ENTER, handle == 0x%x\n", handle);

	if (unlikely(!TAG_VALID(handle))) {
		printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
		       pci_name(host->pdev), handle);
		return;
	}

	msg_idx = TAG_DECODE(handle);
	VPRINTK("tag == %u\n", msg_idx);

	crq = &host->req[msg_idx];

	/* fast path */
	if (likely(crq->msg_type == CARM_MSG_READ ||
		   crq->msg_type == CARM_MSG_WRITE)) {
		carm_handle_rw(host, crq, is_ok);
		return;
	}

	mem = carm_ref_msg(host, msg_idx);

	switch (crq->msg_type) {
	case CARM_MSG_IOCTL: {
		switch (crq->msg_subtype) {
		case CARM_IOC_SCAN_CHAN:
			carm_handle_scan_chan(host, crq, mem, is_ok);
			break;
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	case CARM_MSG_MISC: {
		switch (crq->msg_subtype) {
		case MISC_ALLOC_MEM:
			carm_handle_generic(host, crq, is_ok,
					    HST_ALLOC_BUF, HST_SYNC_TIME);
			break;
		case MISC_SET_TIME:
			carm_handle_generic(host, crq, is_ok,
					    HST_SYNC_TIME, HST_GET_FW_VER);
			break;
		case MISC_GET_FW_VER: {
			struct carm_fw_ver *ver = (struct carm_fw_ver *)
				(mem + sizeof(struct carm_msg_get_fw_ver));
			if (is_ok) {
				host->fw_ver = le32_to_cpu(ver->version);
				host->flags |= (ver->features & FL_FW_VER_MASK);
			}
			carm_handle_generic(host, crq, is_ok,
					    HST_GET_FW_VER, HST_PORT_SCAN);
			break;
		}
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	case CARM_MSG_ARRAY: {
		switch (crq->msg_subtype) {
		case CARM_ARRAY_INFO:
			carm_handle_array_info(host, crq, mem, is_ok);
			break;
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	default:
		/* unknown / invalid response */
		goto err_out;
	}

	return;

err_out:
	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
	carm_end_rq(host, crq, 0);
}

static inline void carm_handle_responses(struct carm_host *host)
{
	void *mmio = host->mmio;
	struct carm_response *resp = (struct carm_response *) host->shm;
	unsigned int work = 0;
	unsigned int idx = host->resp_idx % RMSG_Q_LEN;

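	/*
	 * Response ring protocol: a status of 0xffffffff marks an empty
	 * slot (both carm_init_shm() and carm_init_responses() seed the
	 * ring with that pattern, and each consumed slot is reset to
	 * it).  Bit 31 clear means a reply to one of our messages; bit
	 * 31 set flags an asynchronous event.  When we stop, the
	 * consumer index is written back shifted left by 3 because each
	 * slot is RMSG_SZ == 8 bytes.
	 */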
	while (1) {
		u32 status = le32_to_cpu(resp[idx].status);

		if (status == 0xffffffff) {
			VPRINTK("ending response on index %u\n", idx);
			writel(idx << 3, mmio + CARM_RESP_IDX);
			break;
		}

		/* response to a message we sent */
		else if ((status & (1 << 31)) == 0) {
			VPRINTK("handling msg response on index %u\n", idx);
			carm_handle_resp(host, resp[idx].ret_handle, status);
			resp[idx].status = 0xffffffff;
		}

		/* asynchronous events the hardware throws our way */
		else if ((status & 0xff000000) == (1 << 31)) {
			u8 *evt_type_ptr = (u8 *) &resp[idx];
			u8 evt_type = *evt_type_ptr;
			printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
			       pci_name(host->pdev), (int) evt_type);
			resp[idx].status = 0xffffffff;
		}

		idx = NEXT_RESP(idx);
		work++;
	}

	VPRINTK("EXIT, work==%u\n", work);
	host->resp_idx += work;
}

static irqreturn_t carm_interrupt(int irq, void *__host, struct pt_regs *regs)
{
	struct carm_host *host = __host;
	void *mmio;
	u32 mask;
	int handled = 0;
	unsigned long flags;

	if (!host) {
		VPRINTK("no host\n");
		return IRQ_NONE;
	}

	spin_lock_irqsave(&io_request_lock, flags);

	mmio = host->mmio;

	/* reading should also clear interrupts */
	mask = readl(mmio + CARM_INT_STAT);

	if (mask == 0 || mask == 0xffffffff) {
		VPRINTK("no work, mask == 0x%x\n", mask);
		goto out;
	}

	if (mask & INT_ACK_MASK)
		writel(mask, mmio + CARM_INT_STAT);

	if (unlikely(host->state == HST_INVALID)) {
		VPRINTK("not initialized yet, mask = 0x%x\n", mask);
		goto out;
	}

	if (mask & CARM_HAVE_RESP) {
		handled = 1;
		carm_handle_responses(host);
	}

out:
	spin_unlock_irqrestore(&io_request_lock, flags);
	VPRINTK("EXIT\n");
	return IRQ_RETVAL(handled);
}

static void carm_fsm_task (void *_data)
{
	struct carm_host *host = _data;
	unsigned long flags;
	unsigned int state;
	int rc, i, next_dev;
	int reschedule = 0;
	int new_state = HST_INVALID;

	spin_lock_irqsave(&io_request_lock, flags);
	state = host->state;
	spin_unlock_irqrestore(&io_request_lock, flags);

	DPRINTK("ENTER, state == %s\n", state_name[state]);

	switch (state) {
	case HST_PROBE_START:
		new_state = HST_ALLOC_BUF;
		reschedule = 1;
		break;

	case HST_ALLOC_BUF:
		rc = carm_send_special(host, carm_fill_alloc_buf);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_SYNC_TIME:
		rc = carm_send_special(host, carm_fill_sync_time);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_GET_FW_VER:
		rc = carm_send_special(host, carm_fill_get_fw_ver);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_PORT_SCAN:
		rc = carm_send_special(host, carm_fill_scan_channels);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_DEV_SCAN_START:
		host->cur_scan_dev = -1;
		new_state = HST_DEV_SCAN;
		reschedule = 1;
		break;

	case HST_DEV_SCAN:
		next_dev = -1;
		for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
			if (host->dev_present & (1 << i)) {
				next_dev = i;
				break;
			}

		if (next_dev >= 0) {
			host->cur_scan_dev = next_dev;
			rc = carm_array_info(host, next_dev);
			if (rc) {
				new_state = HST_ERROR;
				reschedule = 1;
			}
		} else {
			new_state = HST_DEV_ACTIVATE;
			reschedule = 1;
		}
		break;

	case HST_DEV_ACTIVATE: {
		int activated = 0;
		for (i = 0; i < CARM_MAX_PORTS; i++)
			if (host->dev_active & (1 << i)) {
				carm_activate_disk(host, &host->port[i]);
				activated++;
			}

		printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
		       pci_name(host->pdev), activated);

		new_state = HST_PROBE_FINISHED;
		reschedule = 1;
		break;
	}

	case HST_PROBE_FINISHED:
		up(&host->probe_sem);
		break;

	case HST_ERROR:
		/* FIXME: TODO */
		break;

	default:
		/* should never occur */
		printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
		assert(0);
		break;
	}

	if (new_state != HST_INVALID) {
		spin_lock_irqsave(&io_request_lock, flags);
		host->state = new_state;
		spin_unlock_irqrestore(&io_request_lock, flags);
	}
	if (reschedule)
		schedule_task(&host->fsm_task);
}

static int carm_init_wait(void *mmio, u32 bits, unsigned int test_bit)
{
	unsigned int i;

	for (i = 0; i < 50000; i++) {
		u32 tmp = readl(mmio + CARM_LMUC);
		udelay(100);

		if (test_bit) {
			if ((tmp & bits) == bits)
				return 0;
		} else {
			if ((tmp & bits) == 0)
				return 0;
		}

		cond_resched();
	}

	printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
	       bits, test_bit ? "yes" : "no");
	return -EBUSY;
}

static void carm_init_responses(struct carm_host *host)
{
	void *mmio = host->mmio;
	unsigned int i;
	struct carm_response *resp = (struct carm_response *) host->shm;

	for (i = 0; i < RMSG_Q_LEN; i++)
		resp[i].status = 0xffffffff;

	writel(0, mmio + CARM_RESP_IDX);
}

static int carm_init_host(struct carm_host *host)
{
	void *mmio = host->mmio;
	u32 tmp;
	u8 tmp8;
	int rc;
	unsigned long flags;

	DPRINTK("ENTER\n");

	writel(0, mmio + CARM_INT_MASK);

	tmp8 = readb(mmio + CARM_INITC);
	if (tmp8 & 0x01) {
		tmp8 &= ~0x01;
		writeb(tmp8, mmio + CARM_INITC);
		readb(mmio + CARM_INITC);	/* flush */

		DPRINTK("snooze...\n");
		msleep(5000);
	}

	tmp = readl(mmio + CARM_HMUC);
	if (tmp & CARM_CME) {
		DPRINTK("CME bit present, waiting\n");
		rc = carm_init_wait(mmio, CARM_CME, 1);
		if (rc) {
			DPRINTK("EXIT, carm_init_wait 1 failed\n");
			return rc;
		}
	}
	if (tmp & CARM_RME) {
		DPRINTK("RME bit present, waiting\n");
		rc = carm_init_wait(mmio, CARM_RME, 1);
		if (rc) {
			DPRINTK("EXIT, carm_init_wait 2 failed\n");
			return rc;
		}
	}

	tmp &= ~(CARM_RME | CARM_CME);
	writel(tmp, mmio + CARM_HMUC);
	readl(mmio + CARM_HMUC);	/* flush */

	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
	if (rc) {
		DPRINTK("EXIT, carm_init_wait 3 failed\n");
		return rc;
	}

	carm_init_buckets(mmio);

	writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
	writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
	writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);

	tmp = readl(mmio + CARM_HMUC);
	tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
	writel(tmp, mmio + CARM_HMUC);
	readl(mmio + CARM_HMUC);	/* flush */

	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
	if (rc) {
		DPRINTK("EXIT, carm_init_wait 4 failed\n");
		return rc;
	}

	writel(0, mmio + CARM_HMPHA);
	writel(INT_DEF_MASK, mmio + CARM_INT_MASK);

	carm_init_responses(host);

	/* start initialization, probing state machine */
	spin_lock_irqsave(&io_request_lock, flags);
	assert(host->state == HST_INVALID);
	host->state = HST_PROBE_START;
	spin_unlock_irqrestore(&io_request_lock, flags);

	schedule_task(&host->fsm_task);

	DPRINTK("EXIT\n");
	return 0;
}

static void carm_init_ports (struct carm_host *host)
{
	struct carm_port *port;
	request_queue_t *q;
	unsigned int i;

	for (i = 0; i < CARM_MAX_PORTS; i++) {
		port = &host->port[i];
		port->magic = CARM_MAGIC_PORT;
		port->host = host;
		port->port_no = i;
		tasklet_init(&port->tasklet, carm_rw_tasklet,
			     (unsigned long) port);

		q = &port->q;

		blk_init_queue(q, carm_rq_fn);
		q->queuedata = port;
		blk_queue_bounce_limit(q, host->pdev->dma_mask);
		blk_queue_headactive(q, 0);

		q->back_merge_fn = carm_back_merge_fn;
		q->front_merge_fn = carm_front_merge_fn;
		q->merge_requests_fn = carm_merge_requests_fn;
	}
}

static request_queue_t *carm_find_queue(kdev_t device)
{
	struct carm_host *host;

	host = blk_dev[MAJOR(device)].data;
	if (!host)
		return NULL;
	if (host->magic != CARM_MAGIC_HOST)
		return NULL;

	DPRINTK("match: major %d, minor %d\n",
		MAJOR(device), MINOR(device));
	return &host->port[MINOR(device) >> CARM_PART_SHIFT].q;
}

static int carm_init_disks(struct carm_host *host)
{
	host->gendisk.major = host->major;
	host->gendisk.major_name = host->name;
	host->gendisk.minor_shift = CARM_PART_SHIFT;
	host->gendisk.max_p = CARM_MINORS_PER_MAJOR;
	host->gendisk.part = host->gendisk_hd;
	host->gendisk.sizes = host->blk_sizes;
	host->gendisk.nr_real = CARM_MAX_PORTS;
	host->gendisk.fops = &carm_bd_ops;

	blk_dev[host->major].queue = carm_find_queue;
	blk_dev[host->major].data = host;
	blk_size[host->major] = host->blk_sizes;
	blksize_size[host->major] = host->blk_block_sizes;
	hardsect_size[host->major] = host->blk_sect_sizes;

	add_gendisk(&host->gendisk);

	return 0;
}

static void carm_free_disks(struct carm_host *host)
{
	unsigned int i;

	del_gendisk(&host->gendisk);

	for (i = 0; i < CARM_MAX_PORTS; i++) {
		struct carm_port *port = &host->port[i];

		blk_cleanup_queue(&port->q);
	}

	blk_dev[host->major].queue = NULL;
	blk_dev[host->major].data = NULL;
	blk_size[host->major] = NULL;
	blksize_size[host->major] = NULL;
	hardsect_size[host->major] = NULL;
}

static void carm_stop_tasklets(struct carm_host *host)
{
	unsigned int i;

	tasklet_kill(&host->oob_tasklet);

	for (i = 0; i < CARM_MAX_PORTS; i++) {
		struct carm_port *port = &host->port[i];
		tasklet_kill(&port->tasklet);
	}
}

static int carm_init_shm(struct carm_host *host)
{
	host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE,
					 &host->shm_dma);
	if (!host->shm)
		return -ENOMEM;

	host->msg_base = host->shm + RBUF_LEN;
	host->msg_dma = host->shm_dma + RBUF_LEN;

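	/*
	 * Seed the response ring with the 0xffffffff "empty" sentinel
	 * that carm_handle_responses() polls for, and zero the rest of
	 * the shared area (message slots and the firmware's portion).
	 */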
	memset(host->shm, 0xff, RBUF_LEN);
	memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);

	return 0;
}

static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static unsigned int printed_version;
	struct carm_host *host;
	unsigned int pci_dac;
	int rc;
	unsigned int i;

	if (!printed_version++)
		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
	rc = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
	if (!rc) {
		rc = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
		if (rc) {
			printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
				pci_name(pdev));
			goto err_out_regions;
		}
		pci_dac = 1;
	} else {
#endif
		rc = pci_set_dma_mask(pdev, 0xffffffffULL);
		if (rc) {
			printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
				pci_name(pdev));
			goto err_out_regions;
		}
		pci_dac = 0;
#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
	}
#endif

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
		       pci_name(pdev));
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(host, 0, sizeof(*host));
	host->magic = CARM_MAGIC_HOST;
	host->pdev = pdev;
	host->flags = pci_dac ? FL_DAC : 0;
	INIT_TQUEUE(&host->fsm_task, carm_fsm_task, host);
	INIT_LIST_HEAD(&host->host_list_node);
	init_MUTEX_LOCKED(&host->probe_sem);
	tasklet_init(&host->oob_tasklet, carm_oob_tasklet,
		     (unsigned long) host);
	carm_init_ports(host);

	for (i = 0; i < ARRAY_SIZE(host->req); i++)
		host->req[i].tag = i;

	host->mmio = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!host->mmio) {
		printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
		       pci_name(pdev));
		rc = -ENOMEM;
		goto err_out_kfree;
	}

	rc = carm_init_shm(host);
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	blk_init_queue(&host->oob_q, carm_oob_rq_fn);
	host->oob_q.queuedata = host;
	blk_queue_bounce_limit(&host->oob_q, pdev->dma_mask);
	blk_queue_headactive(&host->oob_q, 0);

	/*
	 * Figure out which major to use: 160, 161, or dynamic
	 */
	if (!test_and_set_bit(0, &carm_major_alloc))
		host->major = 160;
	else if (!test_and_set_bit(1, &carm_major_alloc))
		host->major = 161;
	else
		host->flags |= FL_DYN_MAJOR;

	host->id = carm_host_id;
	sprintf(host->name, DRV_NAME "%d", carm_host_id);

	rc = register_blkdev(host->major, host->name, &carm_bd_ops);
	if (rc < 0)
		goto err_out_free_majors;
	if (host->flags & FL_DYN_MAJOR)
		host->major = rc;

	rc = carm_init_disks(host);
	if (rc)
		goto err_out_blkdev_disks;

	pci_set_master(pdev);

	rc = request_irq(pdev->irq, carm_interrupt, SA_SHIRQ, DRV_NAME, host);
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
		       pci_name(pdev));
		goto err_out_blkdev_disks;
	}

	rc = carm_init_host(host);
	if (rc)
		goto err_out_free_irq;

	DPRINTK("waiting for probe_sem\n");
	down(&host->probe_sem);

	printk(KERN_INFO "%s: pci %s, ports %d, io %lx, irq %u, major %d\n",
	       host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
	       pci_resource_start(pdev, 0), pdev->irq, host->major);

	carm_host_id++;
	pci_set_drvdata(pdev, host);
	return 0;

err_out_free_irq:
	free_irq(pdev->irq, host);
err_out_blkdev_disks:
	carm_free_disks(host);
	unregister_blkdev(host->major, host->name);
err_out_free_majors:
	if (host->major == 160)
		clear_bit(0, &carm_major_alloc);
	else if (host->major == 161)
		clear_bit(1, &carm_major_alloc);
	blk_cleanup_queue(&host->oob_q);
	pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
err_out_iounmap:
	iounmap(host->mmio);
err_out_kfree:
	kfree(host);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	pci_disable_device(pdev);
	return rc;
}

static void carm_remove_one (struct pci_dev *pdev)
{
	struct carm_host *host = pci_get_drvdata(pdev);

	if (!host) {
		printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
		       pci_name(pdev));
		return;
	}

	free_irq(pdev->irq, host);
	carm_stop_tasklets(host);
	carm_free_disks(host);
	unregister_blkdev(host->major, host->name);
	if (host->major == 160)
		clear_bit(0, &carm_major_alloc);
	else if (host->major == 161)
		clear_bit(1, &carm_major_alloc);
	blk_cleanup_queue(&host->oob_q);
	pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
	iounmap(host->mmio);
	kfree(host);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int __init carm_init(void)
{
	return pci_module_init(&carm_driver);
}

static void __exit carm_exit(void)
{
	pci_unregister_driver(&carm_driver);
}

module_init(carm_init);
module_exit(carm_exit);