/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/device.h>

#include "qib.h"

static void __qib_release_user_pages(struct page **p, size_t num_pages,
				     int dirty)
{
	size_t i;

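	/*
	 * Drop the reference taken on each page by get_user_pages(),
	 * marking it dirty first if the caller may have written to it.
	 */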
	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(p[i]);
		put_page(p[i]);
	}
}

/*
 * Call with current->mm->mmap_sem held.
 */
static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
				struct page **p, struct vm_area_struct **vma)
{
	unsigned long lock_limit;
	size_t got;
	int ret;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

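	/*
	 * Refuse to pin more pages than RLIMIT_MEMLOCK allows, unless
	 * the task is privileged with CAP_IPC_LOCK.
	 */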
	if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto bail;
	}

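	/*
	 * get_user_pages() may pin fewer pages than requested, so keep
	 * calling it until the whole range is pinned or it fails.
	 */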
	for (got = 0; got < num_pages; got += ret) {
		ret = get_user_pages(current, current->mm,
				     start_page + got * PAGE_SIZE,
				     num_pages - got, 1, 1,
				     p + got, vma);
		if (ret < 0)
			goto bail_release;
	}

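	/* Account the pinned pages against the mm for RLIMIT_MEMLOCK. */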
	current->mm->pinned_vm += num_pages;

	ret = 0;
	goto bail;

bail_release:
	__qib_release_user_pages(p, got, 0);
bail:
	return ret;
}

/**
 * qib_map_page - a safety wrapper around pci_map_page()
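 * @hwdev: the PCI device doing the DMA
 * @page: the page to map
 * @offset: byte offset into @page
 * @size: number of bytes to map
 * @direction: DMA direction (e.g. PCI_DMA_TODEVICE or PCI_DMA_FROMDEVICE)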
 *
 * A dma_addr of all 0's is interpreted by the chip as "disabled".
 * Unfortunately, it can also be a valid dma_addr returned on some
 * architectures.
 *
 * The powerpc iommu assigns dma_addrs in ascending order, so we don't
 * have to bother with retries or mapping a dummy page to ensure we
 * don't just get the same mapping again.
 *
 * We are unlikely to be so lucky with other IOMMUs, so FIXME.
 */
dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
			unsigned long offset, size_t size, int direction)
{
	dma_addr_t phys;

	phys = pci_map_page(hwdev, page, offset, size, direction);

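	/*
	 * A zero dma_addr would be read by the chip as "disabled";
	 * unmap it and try once more in the hope of a non-zero address.
	 */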
	if (phys == 0) {
		pci_unmap_page(hwdev, phys, size, direction);
		phys = pci_map_page(hwdev, page, offset, size, direction);
		/*
		 * FIXME: If we get 0 again, we should keep this page,
		 * map another, then free the 0 page.
		 */
	}

	return phys;
}

/**
 * qib_get_user_pages - lock user pages into memory
 * @start_page: the start page
 * @num_pages: the number of pages
 * @p: the output page structures
 *
 * This function takes a given start page (a page-aligned user virtual
 * address) and pins it and the following specified number of pages.  For
 * now, num_pages is always 1, but that will probably change at some point
 * (because the caller is doing expected sends on a single virtually
 * contiguous buffer, so we can pin all of its pages at once).
 */
int qib_get_user_pages(unsigned long start_page, size_t num_pages,
		       struct page **p)
{
	int ret;

	down_write(&current->mm->mmap_sem);

	ret = __qib_get_user_pages(start_page, num_pages, p, NULL);

	up_write(&current->mm->mmap_sem);

	return ret;
}
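
/*
 * Sketch of a hypothetical caller, for illustration only (the real
 * callers live elsewhere in the qib driver; "uaddr" is an assumed
 * user virtual address, not a name from this file):
 *
 *	struct page *page;
 *	int ret;
 *
 *	ret = qib_get_user_pages(uaddr & PAGE_MASK, 1, &page);
 *	if (!ret) {
 *		... DMA to or from the pinned page ...
 *		qib_release_user_pages(&page, 1);
 *	}
 */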

void qib_release_user_pages(struct page **p, size_t num_pages)
{
	if (current->mm) /* during close after signal, mm can be NULL */
		down_write(&current->mm->mmap_sem);

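	/* Release the pages unconditionally; only the accounting needs mm. */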
	__qib_release_user_pages(p, num_pages, 1);

	if (current->mm) {
		current->mm->pinned_vm -= num_pages;
		up_write(&current->mm->mmap_sem);
	}
}