/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004 LSI Logic Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * FILE		: megaraid_mm.c
 * Version	: v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include "megaraid_mm.h"


// Entry points for char node driver
static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);


// Routines to convert to and from the old (MIMD) ioctl format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);


// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(unsigned long);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

#ifdef CONFIG_COMPAT
static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

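/*
 * Driver version reported back to management applications through the
 * MEGAIOC_QDRVRVER driver command handled below; 0x02200207 appears to
 * encode the v2.20.2.7 version string from the file header above.
 */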
static uint32_t drvr_ver	= 0x02200207;

static int adapters_count_g;
static struct list_head adapters_list_g;

static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
	.open		= mraid_mm_open,
	.unlocked_ioctl	= mraid_mm_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mraid_mm_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice megaraid_mm_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "megadev0",
	.fops	= &lsi_fops,
};
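
/*
 * Illustrative only: management applications reach this module through the
 * /dev/megadev0 misc node registered above, roughly as sketched below. The
 * field names follow the mimd_t layout used throughout this file; the exact
 * ioctl command number and packet contents depend on the application, so
 * treat this as a hypothetical sketch rather than a tested example.
 *
 *	int fd = open("/dev/megadev0", O_RDONLY);	// needs CAP_SYS_ADMIN
 *
 *	uint32_t nadap;
 *	mimd_t mimd = { 0 };
 *
 *	mimd.ui.fcs.opcode    = 0x82;			// driver command
 *	mimd.ui.fcs.subopcode = MEGAIOC_QNADAP;		// query adapter count
 *	mimd.data             = (caddr_t)&nadap;
 *
 *	// any cmd whose _IOC_TYPE() is MEGAIOC_MAGIC is accepted below
 *	ioctl(fd, cmd, &mimd);
 */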

/**
 * mraid_mm_open - open routine for char node interface
 * @inode	: unused
 * @filep	: unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
	/*
	 * Only allow superuser to access private ioctl interface
	 */
	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);

	return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep	: file operations pointer (ignored)
 * @cmd	: ioctl command
 * @arg	: user ioctl packet
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t		*kioc;
	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
	int		rval;
	mraid_mmadp_t	*adp;
	uint8_t		old_ioctl;
	int		drvrcmd_rval;
	void __user	*argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD are issued through this interface.
	 * MIMD applications would still fire a different command.
	 */
	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl)
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timed out on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n"));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available
	 */
	kioc = mraid_mm_alloc_kioc(adp);

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timed out. If it was
	 * timed out, that means that resources are still with the low level
	 * driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}

static long
mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
			unsigned long arg)
{
	int err;

	/* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
	mutex_lock(&mraid_mm_mutex);
	err = mraid_mm_ioctl(filep, cmd, arg);
	mutex_unlock(&mraid_mm_mutex);

	return err;
}

/**
 * mraid_mm_get_adapter - Return the adapter a mimd packet refers to
 * @umimd	: User space mimd_t ioctl packet
 * @rval	: returned success/error status
 *
 * The function return value is a pointer to the located adapter, or NULL
 * with @rval set to an error code if it could not be found.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
	mraid_mmadp_t	*adapter;
	mimd_t		mimd;
	uint32_t	adapno;
	int		iterator;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
		*rval = -EFAULT;
		return NULL;
	}

	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g) {
		*rval = -ENODEV;
		return NULL;
	}

	adapter = NULL;
	iterator = 0;

	list_for_each_entry(adapter, &adapters_list_g, list) {
		if (iterator++ == adapno) break;
	}

	if (!adapter) {
		*rval = -ENODEV;
		return NULL;
	}

	return adapter;
}

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg		: packet sent by the user app
 * @old_ioctl	: mimd if 1; uioc otherwise
 * @rval	: pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
	mimd_t __user	*umimd;
	mimd_t		kmimd;
	uint8_t		opcode;
	uint8_t		subopcode;

	if (old_ioctl)
		goto old_packet;
	else
		goto new_packet;

new_packet:
	return (-ENOTSUPP);

old_packet:
	*rval = 0;
	umimd = arg;

	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	/*
	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
	 * GET_NUMADP, then we can handle it. Otherwise we should return 1 to
	 * indicate that we cannot handle this.
	 */
	if (opcode != 0x82)
		return 1;

	switch (subopcode) {

	case MEGAIOC_QDRVRVER:

		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	case MEGAIOC_QNADAP:

		*rval = adapters_count_g;

		if (copy_to_user(kmimd.data, &adapters_count_g,
				sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	default:
		/* cannot handle */
		return 1;
	}

	return 0;
}


/**
 * mimd_to_kioc - Converter from old to new ioctl format
 * @umimd	: user space old MIMD IOCTL
 * @adp		: adapter softstate
 * @kioc	: kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */
static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	mraid_passthru_t	*pthru32;
	uint32_t		adapno;
	uint8_t			opcode;
	uint8_t			subopcode;
	mimd_t			mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode		= mimd.ui.fcs.opcode;
	subopcode	= mimd.ui.fcs.subopcode;
	adapno		= GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno	= adapno;
	kioc->mb_type	= MBOX_LEGACY;
	kioc->app_type	= APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode	= GET_ADAP_INFO;
			kioc->data_dir	= UIOC_RD;
			kioc->xferlen	= sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= mimd.ui.fcs.length;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
						mimd.outlen : mimd.inlen;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox	= &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
	 * Just like in the above case, the beginning of the memblk is treated
	 * as a mailbox. The passthru will begin at the next 1K boundary, and
	 * the data will start 1K after that.
	 */
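	/*
	 * Pointer plumbing done below, summarized for clarity:
	 *   kioc->user_pthru      -> &umimd->pthru   (user copy of the pthru)
	 *   mbox->xferaddr        -> kioc->pthru32_h (DMA address of kernel pthru)
	 *   pthru32->dataxferaddr -> kioc->buf_paddr (DMA address of data buffer)
	 */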
	pthru32			= kioc->pthru32;
	kioc->user_pthru	= &umimd->pthru;
	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr	= kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
						pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp		: Adapter softstate
 * @kioc	: kioc that the buffer needs to be attached to
 * @xferlen	: required length for buffer
 *
 * First we search for a pool with the smallest buffer that is >= @xferlen. If
 * that pool has no free buffer, we will try for the next bigger size. If none
 * is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it to the pool.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t	*pool;
	int		right_pool = -1;
	unsigned long	flags;
	int		i;

	kioc->pool_index	= -1;
	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->free_buf		= 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get the exact size, we will try the next
	 * bigger buffer.
	 */
	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (xferlen > pool->buf_size)
			continue;

		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use		= 1;
			kioc->pool_index	= i;
			kioc->buf_vaddr		= pool->vaddr;
			kioc->buf_paddr		= pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Let us try
	 * to allocate one new buffer. NOTE: This is a blocking call.
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	kioc->pool_index	= right_pool;
	kioc->free_buf		= 1;
	kioc->buf_vaddr		= pci_pool_alloc(pool->handle, GFP_KERNEL,
							&kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}

/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp	: Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with the number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t			*kioc;
	struct list_head	*head;
	unsigned long		flags;

	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));

	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->pool_index	= -1;
	kioc->free_buf		= 0;
	kioc->user_data		= NULL;
	kioc->user_data_len	= 0;
	kioc->user_pthru	= NULL;
	kioc->timedout		= 0;

	return kioc;
}

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp		: Adapter softstate
 * @kioc	: uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
	mm_dmapool_t	*pool;
	unsigned long	flags;

	if (kioc->pool_index != -1) {
		pool = &adp->dma_pool_list[kioc->pool_index];

		/* This routine may be called in non-isr context also */
		spin_lock_irqsave(&pool->lock, flags);

		/*
		 * While attaching the dma buffer, if we didn't get the
		 * required buffer from the pool, we would have allocated
		 * it at run time and set the free_buf flag. We must free
		 * that buffer. Otherwise, just mark that the buffer is
		 * not in use.
		 */
		if (kioc->free_buf == 1)
			pci_pool_free(pool->handle, kioc->buf_vaddr,
							kioc->buf_paddr);
		else
			pool->in_use = 0;

		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/* Return the kioc to the free pool */
	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
	list_add(&kioc->list, &adp->kioc_pool);
	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* increment the free kioc count */
	up(&adp->kioc_semaphore);

	return;
}

/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp		: The adapter handle
 * @kioc	: The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int			rval;
	struct timer_list	timer;
	struct timer_list	*tp = NULL;

	kioc->status	= -ENODATA;
	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		tp		= &timer;
		init_timer(tp);

		tp->function	= lld_timedout;
		tp->data	= (unsigned long)kioc;
		tp->expires	= jiffies + adp->timeout * HZ;

		add_timer(tp);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl has either completed successfully or timed out.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	if (tp) {
		del_timer_sync(tp);
	}

	/*
	 * If the command had timed out, we mark the controller offline
	 * before returning.
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}
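
/*
 * Note on timeouts (added for clarity): when lld_timedout() fires, the waiter
 * in lld_ioctl() returns with -ETIME, but the kioc and its buffers remain
 * with the low level driver. They are released later by ioctl_done(), which
 * sees kioc->timedout set and calls mraid_mm_dealloc_kioc() itself.
 */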


/**
 * ioctl_done - callback from the low level driver
 * @kioc	: completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
	uint32_t	adapno;
	int		iterator;
	mraid_mmadp_t	*adapter;

	/*
	 * When the kioc returns from driver, make sure it still doesn't
	 * have ENODATA in status. Otherwise, driver will hang on wait_event
	 * forever.
	 */
	if (kioc->status == -ENODATA) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: lld didn't change status!\n"));

		kioc->status = -EINVAL;
	}

	/*
	 * Check if this kioc was timed out before. If so, nobody is waiting
	 * on this kioc. We don't have to wake up anybody. Instead, we just
	 * have to free the kioc.
	 */
	if (kioc->timedout) {
		iterator	= 0;
		adapter		= NULL;
		adapno		= kioc->adapno;

		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
					"ioctl that had timed out before\n"));

		list_for_each_entry(adapter, &adapters_list_g, list) {
			if (iterator++ == adapno) break;
		}

		kioc->timedout = 0;

		if (adapter) {
			mraid_mm_dealloc_kioc(adapter, kioc);
		}
	}
	else {
		wake_up(&wait_q);
	}
}


/**
 * lld_timedout - callback from the expired timer
 * @ptr	: ioctl packet that timed out
 */
static void
lld_timedout(unsigned long ptr)
{
	uioc_t *kioc	= (uioc_t *)ptr;

	kioc->status	= -ETIME;
	kioc->timedout	= 1;

	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

	wake_up(&wait_q);
}


/**
 * kioc_to_mimd - Converter from new back to old format
 * @kioc	: Kernel space IOCTL packet (successfully issued)
 * @mimd	: User space MIMD packet
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
	mimd_t			kmimd;
	uint8_t			opcode;
	uint8_t			subopcode;

	mbox64_t		*mbox64;
	mraid_passthru_t	__user *upthru32;
	mraid_passthru_t	*kpthru32;
	mcontroller_t		cinfo;
	mraid_hba_info_t	*hinfo;

	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	if (opcode == 0x82) {
		switch (subopcode) {

		case MEGAIOC_QADAPINFO:

			hinfo = (mraid_hba_info_t *)(unsigned long)
					kioc->buf_vaddr;

			hinfo_to_cinfo(hinfo, &cinfo);

			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
				return (-EFAULT);

			return 0;

		default:
			return (-EINVAL);
		}

		return 0;
	}

	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

	if (kioc->user_pthru) {

		upthru32 = kioc->user_pthru;
		kpthru32 = kioc->pthru32;

		if (copy_to_user(&upthru32->scsistatus,
					&kpthru32->scsistatus,
					sizeof(uint8_t))) {
			return (-EFAULT);
		}
	}

	if (kioc->user_data) {
		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
					kioc->user_data_len)) {
			return (-EFAULT);
		}
	}

	if (copy_to_user(&mimd->mbox[17],
			&mbox64->mbox32.status, sizeof(uint8_t))) {
		return (-EFAULT);
	}

	return 0;
}


/**
 * hinfo_to_cinfo - Convert new format hba info into old format
 * @hinfo	: New format, more comprehensive adapter info
 * @cinfo	: Old format adapter info to support mimd_t apps
 */
static void
hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
{
	if (!hinfo || !cinfo)
		return;

	cinfo->base		= hinfo->baseport;
	cinfo->irq		= hinfo->irq;
	cinfo->numldrv		= hinfo->num_ldrv;
	cinfo->pcibus		= hinfo->pci_bus;
	cinfo->pcidev		= hinfo->pci_slot;
	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
	cinfo->pciid		= hinfo->pci_device_id;
	cinfo->pcivendor	= hinfo->pci_vendor_id;
	cinfo->pcislot		= hinfo->pci_slot;
	cinfo->uid		= hinfo->unique_id;
}


/**
 * mraid_mm_register_adp - Registration routine for low level drivers
 * @lld_adp	: Adapter object
 */
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
{
	mraid_mmadp_t	*adapter;
	mbox64_t	*mbox_list;
	uioc_t		*kioc;
	uint32_t	rval;
	int		i;

	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
		return (-EINVAL);

	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);

	if (!adapter)
		return -ENOMEM;

	adapter->unique_id	= lld_adp->unique_id;
	adapter->drvr_type	= lld_adp->drvr_type;
	adapter->drvr_data	= lld_adp->drvr_data;
	adapter->pdev		= lld_adp->pdev;
	adapter->issue_uioc	= lld_adp->issue_uioc;
	adapter->timeout	= lld_adp->timeout;
	adapter->max_kioc	= lld_adp->max_kioc;
	adapter->quiescent	= 1;

	/*
	 * Allocate single blocks of memory for all required kiocs,
	 * mailboxes and passthru structures.
	 */
	adapter->kioc_list	= kmalloc(sizeof(uioc_t) * lld_adp->max_kioc,
						GFP_KERNEL);
	adapter->mbox_list	= kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
						GFP_KERNEL);
	adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
						adapter->pdev,
						sizeof(mraid_passthru_t),
						16, 0);

	if (!adapter->kioc_list || !adapter->mbox_list ||
			!adapter->pthru_dma_pool) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: out of memory, %s %d\n", __func__,
			__LINE__));

		rval = (-ENOMEM);

		goto memalloc_error;
	}

	/*
	 * Slice kioc_list and make a kioc_pool with the individual kiocs
	 */
	INIT_LIST_HEAD(&adapter->kioc_pool);
	spin_lock_init(&adapter->kioc_pool_lock);
	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);

	mbox_list = (mbox64_t *)adapter->mbox_list;

	for (i = 0; i < lld_adp->max_kioc; i++) {

		kioc		= adapter->kioc_list + i;
		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
		kioc->pthru32	= pci_pool_alloc(adapter->pthru_dma_pool,
						GFP_KERNEL, &kioc->pthru32_h);

		if (!kioc->pthru32) {

			con_log(CL_ANN, (KERN_WARNING
				"megaraid cmm: out of memory, %s %d\n",
					__func__, __LINE__));

			rval = (-ENOMEM);

			goto pthru_dma_pool_error;
		}

		list_add_tail(&kioc->list, &adapter->kioc_pool);
	}

	// Setup the dma pools for data buffers
	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
		goto dma_pool_error;
	}

	list_add_tail(&adapter->list, &adapters_list_g);

	adapters_count_g++;

	return 0;

dma_pool_error:
	/* Do nothing */

pthru_dma_pool_error:

	for (i = 0; i < lld_adp->max_kioc; i++) {
		kioc = adapter->kioc_list + i;
		if (kioc->pthru32) {
			pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
		}
	}

memalloc_error:

	kfree(adapter->kioc_list);
	kfree(adapter->mbox_list);

	if (adapter->pthru_dma_pool)
		pci_pool_destroy(adapter->pthru_dma_pool);

	kfree(adapter);

	return rval;
}
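
/*
 * Illustrative only: a mailbox-type low level driver would register with
 * this module roughly as below. The LLD-side names and values are made up
 * for the example; only the mraid_mmadp_t fields and the call itself come
 * from this file.
 *
 *	mraid_mmadp_t adp;
 *
 *	adp.unique_id	= my_unique_id;		// stable per-HBA identifier
 *	adp.drvr_type	= DRVRTYPE_MBOX;	// only mailbox LLDs are accepted
 *	adp.drvr_data	= my_drvr_data;		// handed back to issue_uioc
 *	adp.pdev	= pdev;			// used to create the DMA pools
 *	adp.issue_uioc	= my_issue_uioc;	// called as (drvr_data, kioc, IOCTL_ISSUE)
 *	adp.timeout	= 300;			// seconds; 0 disables the ioctl timer
 *	adp.max_kioc	= 32;			// max concurrent management commands
 *
 *	rval = mraid_mm_register_adp(&adp);
 */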


/**
 * mraid_mm_adapter_app_handle - return the application handle for this adapter
 * @unique_id	: adapter unique identifier
 *
 * For the given adapter unique identifier, locate the adapter in our global
 * list and return the corresponding handle, which applications use to
 * uniquely identify an adapter.
 *
 * Return adapter handle if found in the list.
 * Return 0 if adapter could not be located; this should never happen though.
 */
uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;
	int		index = 0;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			return MKADAP(index);
		}

		index++;
	}

	return 0;
}


/**
 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
 * @adp	: Adapter softstate
 *
 * We maintain a pool of dma buffers for each adapter. Each pool has one
 * buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k
 * buffers. We have just one 4k buffer in the 4k pool, one 8k buffer in the
 * 8k pool etc. We don't want to waste too much memory by allocating more
 * buffers per pool.
 */
static int
mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
{
	mm_dmapool_t	*pool;
	int		bufsize;
	int		i;

	/*
	 * Create MAX_DMA_POOLS number of pools
	 */
	bufsize = MRAID_MM_INIT_BUFF_SIZE;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		pool->buf_size = bufsize;
		spin_lock_init(&pool->lock);

		pool->handle = pci_pool_create("megaraid mm data buffer",
						adp->pdev, bufsize, 16, 0);

		if (!pool->handle) {
			goto dma_pool_setup_error;
		}

		pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
							&pool->paddr);

		if (!pool->vaddr)
			goto dma_pool_setup_error;

		bufsize = bufsize * 2;
	}

	return 0;

dma_pool_setup_error:

	mraid_mm_teardown_dma_pools(adp);
	return (-ENOMEM);
}


/**
 * mraid_mm_unregister_adp - Unregister routine for low level drivers
 * @unique_id	: UID of the adapter
 *
 * Assumes no outstanding ioctls to llds.
 */
int
mraid_mm_unregister_adp(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			adapters_count_g--;

			list_del_init(&adapter->list);

			mraid_mm_free_adp_resources(adapter);

			kfree(adapter);

			con_log(CL_ANN, (
				"megaraid cmm: Unregistered one adapter:%#x\n",
				unique_id));

			return 0;
		}
	}

	return (-ENODEV);
}

/**
 * mraid_mm_free_adp_resources - Free adapter softstate
 * @adp	: Adapter softstate
 */
static void
mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
{
	uioc_t	*kioc;
	int	i;

	mraid_mm_teardown_dma_pools(adp);

	for (i = 0; i < adp->max_kioc; i++) {

		kioc = adp->kioc_list + i;

		pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
	}

	kfree(adp->kioc_list);
	kfree(adp->mbox_list);

	pci_pool_destroy(adp->pthru_dma_pool);

	return;
}


/**
 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
 * @adp	: Adapter softstate
 */
static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
{
	int		i;
	mm_dmapool_t	*pool;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (pool->handle) {

			if (pool->vaddr)
				pci_pool_free(pool->handle, pool->vaddr,
							pool->paddr);

			pci_pool_destroy(pool->handle);
			pool->handle = NULL;
		}
	}

	return;
}

/**
 * mraid_mm_init - Module entry point
 */
static int __init
mraid_mm_init(void)
{
	int err;

	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));

	err = misc_register(&megaraid_mm_dev);
	if (err < 0) {
		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
		return err;
	}

	init_waitqueue_head(&wait_q);

	INIT_LIST_HEAD(&adapters_list_g);

	return 0;
}


#ifdef CONFIG_COMPAT
/**
 * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine
 * @filep	: file operations pointer (ignored)
 * @cmd	: ioctl command
 * @arg	: user ioctl packet
 */
static long
mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
		      unsigned long arg)
{
	int err;

	err = mraid_mm_ioctl(filep, cmd, arg);

	return err;
}
#endif

/**
 * mraid_mm_exit - Module exit point
 */
static void __exit
mraid_mm_exit(void)
{
	con_log(CL_DLEVEL1, ("exiting common mod\n"));

	misc_deregister(&megaraid_mm_dev);
}

module_init(mraid_mm_init);
module_exit(mraid_mm_exit);

/* vi: set ts=8 sw=8 tw=78: */