/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/vmalloc.h>
36 #include <linux/mm.h>
37 #include <linux/errno.h>
38 #include <asm/pgtable.h>
39
40 #include "qib_verbs.h"
41
42 /**
43 * qib_release_mmap_info - free mmap info structure
44 * @ref: a pointer to the kref within struct qib_mmap_info
45 */
qib_release_mmap_info(struct kref * ref)46 void qib_release_mmap_info(struct kref *ref)
47 {
48 struct qib_mmap_info *ip =
49 container_of(ref, struct qib_mmap_info, ref);
50 struct qib_ibdev *dev = to_idev(ip->context->device);
51
52 spin_lock_irq(&dev->pending_lock);
53 list_del(&ip->pending_mmaps);
54 spin_unlock_irq(&dev->pending_lock);
55
56 vfree(ip->obj);
57 kfree(ip);
58 }
59
60 /*
61 * open and close keep track of how many times the CQ is mapped,
62 * to avoid releasing it.
63 */
qib_vma_open(struct vm_area_struct * vma)64 static void qib_vma_open(struct vm_area_struct *vma)
65 {
66 struct qib_mmap_info *ip = vma->vm_private_data;
67
68 kref_get(&ip->ref);
69 }
70
qib_vma_close(struct vm_area_struct * vma)71 static void qib_vma_close(struct vm_area_struct *vma)
72 {
73 struct qib_mmap_info *ip = vma->vm_private_data;
74
75 kref_put(&ip->ref, qib_release_mmap_info);
76 }
77
78 static struct vm_operations_struct qib_vm_ops = {
79 .open = qib_vma_open,
80 .close = qib_vma_close,
81 };
82
83 /**
84 * qib_mmap - create a new mmap region
85 * @context: the IB user context of the process making the mmap() call
86 * @vma: the VMA to be initialized
87 * Return zero if the mmap is OK. Otherwise, return an errno.
88 */
qib_mmap(struct ib_ucontext * context,struct vm_area_struct * vma)89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
90 {
91 struct qib_ibdev *dev = to_idev(context->device);
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
93 unsigned long size = vma->vm_end - vma->vm_start;
94 struct qib_mmap_info *ip, *pp;
95 int ret = -EINVAL;
96
97 /*
98 * Search the device's list of objects waiting for a mmap call.
99 * Normally, this list is very short since a call to create a
100 * CQ, QP, or SRQ is soon followed by a call to mmap().
101 */
102 spin_lock_irq(&dev->pending_lock);
103 list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
104 pending_mmaps) {
105 /* Only the creator is allowed to mmap the object */
106 if (context != ip->context || (__u64) offset != ip->offset)
107 continue;
108 /* Don't allow a mmap larger than the object. */
109 if (size > ip->size)
110 break;
111
112 list_del_init(&ip->pending_mmaps);
113 spin_unlock_irq(&dev->pending_lock);
114
115 ret = remap_vmalloc_range(vma, ip->obj, 0);
116 if (ret)
117 goto done;
118 vma->vm_ops = &qib_vm_ops;
119 vma->vm_private_data = ip;
120 qib_vma_open(vma);
121 goto done;
122 }
123 spin_unlock_irq(&dev->pending_lock);
124 done:
125 return ret;
126 }
127
128 /*
129 * Allocate information for qib_mmap
130 */
qib_create_mmap_info(struct qib_ibdev * dev,u32 size,struct ib_ucontext * context,void * obj)131 struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
132 u32 size,
133 struct ib_ucontext *context,
134 void *obj) {
135 struct qib_mmap_info *ip;
136
137 ip = kmalloc(sizeof *ip, GFP_KERNEL);
138 if (!ip)
139 goto bail;
140
141 size = PAGE_ALIGN(size);
142
143 spin_lock_irq(&dev->mmap_offset_lock);
144 if (dev->mmap_offset == 0)
145 dev->mmap_offset = PAGE_SIZE;
146 ip->offset = dev->mmap_offset;
147 dev->mmap_offset += size;
148 spin_unlock_irq(&dev->mmap_offset_lock);
149
150 INIT_LIST_HEAD(&ip->pending_mmaps);
151 ip->size = size;
152 ip->context = context;
153 ip->obj = obj;
154 kref_init(&ip->ref);
155
156 bail:
157 return ip;
158 }
159
/**
 * qib_update_mmap_info - point an existing mmap info at a new object
 * @dev: the device whose mmap offset counter is used
 * @ip: the entry to update
 * @size: size of the new object (rounded up to a page multiple)
 * @obj: the new vmalloc'ed object to be mapped
 *
 * Assigns a fresh offset (same scheme as qib_create_mmap_info()) and
 * records the new object and size; the reference count and context of
 * @ip are left untouched.
 */
void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj)
{
	u32 aligned = PAGE_ALIGN(size);

	spin_lock_irq(&dev->mmap_offset_lock);
	if (dev->mmap_offset == 0)
		dev->mmap_offset = PAGE_SIZE;
	ip->offset = dev->mmap_offset;
	dev->mmap_offset += aligned;
	spin_unlock_irq(&dev->mmap_offset_lock);

	ip->size = aligned;
	ip->obj = obj;
}