/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "dma.h"

/* dma_prog_region */

void dma_prog_region_init(struct dma_prog_region *prog)
{
	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}

int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = round_up_to_page(n_bytes);

	prog->n_pages = n_bytes / PAGE_SIZE;

	prog->kvirt = pci_alloc_consistent(dev, prog->n_pages * PAGE_SIZE, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}
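
/*
 * Example: the typical lifecycle of a dma_prog_region.  A minimal,
 * hypothetical sketch rather than code from a real driver; "pdev" and
 * the 4 KB size are assumed for illustration.
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, 4096, pdev))
 *		return -ENOMEM;
 *
 *	(write the DMA program through prog.kvirt; hand prog.bus_addr
 *	to the device -- the buffer is physically contiguous, so
 *	prog.bus_addr + offset is valid for any offset in range)
 *
 *	dma_prog_region_free(&prog);
 */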

void dma_prog_region_free(struct dma_prog_region *prog)
{
	if (prog->kvirt) {
		pci_free_consistent(prog->dev, prog->n_pages * PAGE_SIZE, prog->kvirt, prog->bus_addr);
	}

	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}

/* dma_region */

void dma_region_init(struct dma_region *dma)
{
	dma->kvirt = NULL;
	dma->dev = NULL;
	dma->n_pages = 0;
	dma->n_dma_pages = 0;
	dma->sglist = NULL;
}

int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
{
	unsigned int i, n_pages;

	/* round up to page size */
	n_bytes = round_up_to_page(n_bytes);

	n_pages = n_bytes / PAGE_SIZE;

	dma->kvirt = vmalloc_32(n_pages * PAGE_SIZE);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	dma->n_pages = n_pages;
	/* clear the buffer so no stale kernel data leaks to userspace */
	memset(dma->kvirt, 0, n_pages * PAGE_SIZE);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(struct scatterlist));
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
		goto err;
	}

	/* just to be safe - this will become unnecessary once sglist->address goes away */
	memset(dma->sglist, 0, dma->n_pages * sizeof(struct scatterlist));

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va = (unsigned long) dma->kvirt + i * PAGE_SIZE;

		dma->sglist[i].page = vmalloc_to_page((void *)va);
		dma->sglist[i].length = PAGE_SIZE;
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages = pci_map_sg(dev, &dma->sglist[0], dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

err:
	dma_region_free(dma);
	return -ENOMEM;
}
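
/*
 * Example: allocating a streaming region for device-to-memory transfers.
 * A minimal sketch with assumed values; "pdev" and the 64 KB size are
 * placeholders, not from a real driver.
 *
 *	struct dma_region dma;
 *
 *	dma_region_init(&dma);
 *	if (dma_region_alloc(&dma, 65536, pdev, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *
 *	(device fills the buffer; CPU reads it after dma_region_sync())
 *
 *	dma_region_free(&dma);
 */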

void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
		dma->n_dma_pages = 0;
		dma->dev = NULL;
	}

	if (dma->sglist) {
		vfree(dma->sglist);
		dma->sglist = NULL;
	}

	if (dma->kvirt) {
		vfree(dma->kvirt);
		dma->kvirt = NULL;
		dma->n_pages = 0;
	}
}

/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
{
	int i;
	unsigned long off = offset;

	for (i = 0; i < dma->n_dma_pages; i++) {
		if (off < sg_dma_len(&dma->sglist[i])) {
			*rem = off;
			break;
		}

		off -= sg_dma_len(&dma->sglist[i]);
	}

	BUG_ON(i >= dma->n_dma_pages);

	return i;
}
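
/*
 * Worked example (hypothetical numbers): suppose the IOMMU coalesced the
 * region into two DMA entries of 8192 and 4096 bytes.  For offset 9000,
 * the loop subtracts 8192 at entry 0, then finds 808 < 4096 at entry 1,
 * so it returns index 1 with *rem = 808.
 */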

dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
{
	unsigned long rem;

	struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
	return sg_dma_address(sg) + rem;
}
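
/*
 * Example: a DMA program typically stores the bus address of a buffer
 * chunk in a descriptor.  A sketch only; "desc" and its "buf_addr"
 * field are hypothetical names for illustration.
 *
 *	desc->buf_addr = cpu_to_le32(dma_region_offset_to_bus(&dma, offset));
 *
 * Continuing the worked example above, offset 9000 would yield the bus
 * address of the second DMA entry plus 808.
 */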

void dma_region_sync(struct dma_region *dma, unsigned long offset, unsigned long len)
{
	int first, last;
	unsigned long rem;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, &rem);
	last = dma_region_find(dma, offset + len - 1, &rem);

	pci_dma_sync_sg(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
}
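
/*
 * Example: after the device has written into a PCI_DMA_FROMDEVICE region,
 * the CPU must sync the affected range before reading it.  A sketch with
 * assumed offsets; "process_data" is a hypothetical consumer.
 *
 *	dma_region_sync(&dma, 0, 2048);
 *	process_data(dma.kvirt);
 */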

/* nopage() handler for mmap access */

static struct page *
dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int write_access)
{
	unsigned long offset;
	unsigned long kernel_virt_addr;
	struct page *ret = NOPAGE_SIGBUS;

	struct dma_region *dma = (struct dma_region *) area->vm_private_data;

	if (!dma->kvirt)
		goto out;

	if ((address < (unsigned long) area->vm_start) ||
	    (address >= (unsigned long) area->vm_start + (PAGE_SIZE * dma->n_pages)))
		goto out;

	offset = address - area->vm_start;
	kernel_virt_addr = (unsigned long) dma->kvirt + offset;
	ret = vmalloc_to_page((void *) kernel_virt_addr);
	get_page(ret);
out:
	return ret;
}

static struct vm_operations_struct dma_region_vm_ops = {
	.nopage	= dma_region_pagefault,
};

int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
{
	unsigned long size;

	if (!dma->kvirt)
		return -EINVAL;

	/* the mapping must start at the beginning of the region */
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* check the length */
	size = vma->vm_end - vma->vm_start;
	if (size > (PAGE_SIZE * dma->n_pages))
		return -EINVAL;

	vma->vm_ops = &dma_region_vm_ops;
	vma->vm_private_data = dma;
	vma->vm_file = file;
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
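
/*
 * Example: hooking the region into a character device's mmap() file
 * operation.  A sketch; "my_dev" and its embedded dma_region are
 * hypothetical names for illustration.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *d = file->private_data;
 *
 *		return dma_region_mmap(&d->dma, file, vma);
 *	}
 */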