/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, iSeries-specific parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/iommu.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/hv_call_event.h>
#include <asm/iseries/iommu.h>

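/*
 * Build TCE entries for npages pages starting at uaddr: for each page,
 * compute the real page number, set the access bits appropriate to the
 * DMA direction and bus type (virtual bus or PCI), and hand the entry
 * to the hypervisor via HvCallXm_setTce().
 */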
static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	u64 rc;
	u64 tce, rpn;

	while (npages--) {
		rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
		tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		if (tbl->it_type == TCE_VB) {
			/* Virtual Bus */
			tce |= TCE_VALID|TCE_ALLIO;
			if (direction != DMA_TO_DEVICE)
				tce |= TCE_VB_WRITE;
		} else {
			/* PCI Bus */
			tce |= TCE_PCI_READ; /* Read allowed */
			if (direction != DMA_TO_DEVICE)
				tce |= TCE_PCI_WRITE;
		}

		rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
		if (rc)
			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
					rc);
		index++;
		uaddr += TCE_PAGE_SIZE;
	}
	return 0;
}

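/*
 * Invalidate npages TCE entries starting at index by writing a zero
 * entry for each one through the hypervisor.
 */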
static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
{
	u64 rc;

	while (npages--) {
		rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
		if (rc)
			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
					rc);
		index++;
	}
}

/*
 * Structure passed to HvCallXm_getTceTableParms
 */
struct iommu_table_cb {
	unsigned long	itc_busno;	/* Bus number for this tce table */
	unsigned long	itc_start;	/* Will be NULL for secondary */
	unsigned long	itc_totalsize;	/* Size (in pages) of whole table */
	unsigned long	itc_offset;	/* Index into real tce table of the
					   start of our section */
	unsigned long	itc_size;	/* Size (in pages) of our section */
	unsigned long	itc_index;	/* Index of this tce table */
	unsigned short	itc_maxtables;	/* Max num of tables for partition */
	unsigned char	itc_virtbus;	/* Flag to indicate virtual bus */
	unsigned char	itc_slotno;	/* IOA Tce Slot Index */
	unsigned char	itc_rsvd[4];
};

/*
 * Call the hypervisor with the architected data structure to get TCE
 * table info, and put the returned data into the Linux representation
 * of the TCE table data.
 * The hardware TCE table comes in three flavors:
 * 1. TCE table shared between buses.
 * 2. TCE table per bus.
 * 3. TCE table per IOA.
 */
void iommu_table_getparms_iSeries(unsigned long busno,
				  unsigned char slotno,
				  unsigned char virtbus,
				  struct iommu_table *tbl)
{
	struct iommu_table_cb *parms;

	parms = kzalloc(sizeof(*parms), GFP_KERNEL);
	if (parms == NULL)
		panic("PCI_DMA: TCE Table Allocation failed.");

	parms->itc_busno = busno;
	parms->itc_slotno = slotno;
	parms->itc_virtbus = virtbus;

	HvCallXm_getTceTableParms(iseries_hv_addr(parms));

	if (parms->itc_size == 0)
		panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);

	/* itc_size is in pages worth of table, it_size is in # of entries */
	tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
	tbl->it_busno = parms->itc_busno;
	tbl->it_offset = parms->itc_offset;
	tbl->it_index = parms->itc_index;
	tbl->it_blocksize = 1;
	tbl->it_type = virtbus ? TCE_VB : TCE_PCI;

	kfree(parms);
}

#ifdef CONFIG_PCI
/*
 * This function compares the known tables to find an iommu_table
 * that has already been built for hardware TCEs.
 */
static struct iommu_table *iommu_table_find(struct iommu_table *tbl)
{
	struct device_node *node;

	for (node = NULL; (node = of_find_all_nodes(node)); ) {
		struct pci_dn *pdn = PCI_DN(node);
		struct iommu_table *it;

		if (pdn == NULL)
			continue;
		it = pdn->iommu_table;
		if ((it != NULL) &&
		    (it->it_type == TCE_PCI) &&
		    (it->it_offset == tbl->it_offset) &&
		    (it->it_index == tbl->it_index) &&
		    (it->it_size == tbl->it_size)) {
			of_node_put(node);
			return it;
		}
	}
	return NULL;
}

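/*
 * Set up DMA for a PCI device: fetch the TCE table parameters for its
 * bus/slot and reuse an already-initialised table with matching
 * parameters if one exists, otherwise initialise a new one.
 */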
static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
{
	struct iommu_table *tbl;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	struct pci_dn *pdn = PCI_DN(dn);
	const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);

	BUG_ON(lsn == NULL);

	tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);

	iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);

	/* Look for existing tce table */
	pdn->iommu_table = iommu_table_find(tbl);
	if (pdn->iommu_table == NULL)
		pdn->iommu_table = iommu_init_table(tbl, -1);
	else
		kfree(tbl);
	set_iommu_table_base(&pdev->dev, pdn->iommu_table);
}
#else
#define pci_dma_dev_setup_iseries	NULL
#endif

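/*
 * The virtual bus is backed by a single hypervisor TCE table; the first
 * half is dedicated to virtual ethernet (veth) and the second half to
 * all other VIO devices (see iommu_vio_init() below).
 */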
static struct iommu_table veth_iommu_table;
static struct iommu_table vio_iommu_table;

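/*
 * Allocate coherent DMA memory for hypervisor/VIO use, mapped through
 * the VIO TCE table.
 */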
void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
				DMA_BIT_MASK(32), flag, -1);
}
EXPORT_SYMBOL_GPL(iseries_hv_alloc);

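/* Free coherent memory previously obtained from iseries_hv_alloc(). */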
void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
}
EXPORT_SYMBOL_GPL(iseries_hv_free);

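/* Map a kernel buffer for DMA through the VIO TCE table. */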
dma_addr_t iseries_hv_map(void *vaddr, size_t size,
			enum dma_data_direction direction)
{
	return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
			      (unsigned long)vaddr % PAGE_SIZE, size,
			      DMA_BIT_MASK(32), direction, NULL);
}

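/* Undo a mapping created by iseries_hv_map(). */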
void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
}

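/*
 * Set up the virtual bus TCE tables: query the hypervisor for the
 * virtual bus table parameters, split the table in half between the
 * veth and vio tables, then initialise both halves.
 */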
void __init iommu_vio_init(void)
{
	iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
	veth_iommu_table.it_size /= 2;
	vio_iommu_table = veth_iommu_table;
	vio_iommu_table.it_offset += veth_iommu_table.it_size;

	if (!iommu_init_table(&veth_iommu_table, -1))
		printk("Virtual Bus VETH TCE table failed.\n");
	if (!iommu_init_table(&vio_iommu_table, -1))
		printk("Virtual Bus VIO TCE table failed.\n");
}

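/*
 * Pick the TCE table for a VIO device: network devices use the
 * dedicated veth table, everything else shares the vio table.
 */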
struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
{
	if (strcmp(dev->type, "network") == 0)
		return &veth_iommu_table;
	return &vio_iommu_table;
}

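/*
 * Hook the iSeries TCE build/free routines and PCI device setup into
 * ppc_md, and select the IOMMU-based DMA operations for PCI.
 */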
void iommu_init_early_iSeries(void)
{
	ppc_md.tce_build = tce_build_iSeries;
	ppc_md.tce_free  = tce_free_iSeries;

	ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries;
	set_pci_dma_ops(&dma_iommu_ops);
}