1 /*
2  * drivers/misc/spear13xx_pcie_gadget.c
3  *
4  * Copyright (C) 2010 ST Microelectronics
5  * Pratyush Anand<pratyush.anand@st.com>
6  *
7  * This file is licensed under the terms of the GNU General Public
8  * License version 2. This program is licensed "as is" without any
9  * warranty of any kind, whether express or implied.
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/slab.h>
14 #include <linux/delay.h>
15 #include <linux/io.h>
16 #include <linux/interrupt.h>
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/platform_device.h>
21 #include <linux/pci_regs.h>
22 #include <linux/configfs.h>
23 #include <mach/pcie.h>
24 #include <mach/misc_regs.h>
25 
/* Outbound window sizes (inclusive limits, hence the "- 1"). */
#define IN0_MEM_SIZE	(200 * 1024 * 1024 - 1)
/*
 * In the current implementation address translation is done using IN0
 * only, so the IN1 start address and the IN0 end address are kept the
 * same (the IN1 window size is zero).
 */
#define IN1_MEM_SIZE	(0 * 1024 * 1024 - 1)
#define IN_IO_SIZE	(20 * 1024 * 1024 - 1)
#define IN_CFG0_SIZE	(12 * 1024 * 1024 - 1)
#define IN_CFG1_SIZE	(12 * 1024 * 1024 - 1)
#define IN_MSG_SIZE	(12 * 1024 * 1024 - 1)
/*
 * Default inbound BAR0 mask covers SYSRAM1 (AORAM), which is mapped at
 * BAR0 by default.  NOTE(review): original comment said "4K" but the
 * mask is derived from SPEAR13XX_SYSRAM1_SIZE — confirm actual size.
 */
#define INBOUND_ADDR_MASK	(SPEAR13XX_SYSRAM1_SIZE - 1)

/* Interrupt-type selectors (not currently referenced in this file). */
#define INT_TYPE_NO_INT	0
#define INT_TYPE_INTX	1
#define INT_TYPE_MSI	2
/* Per-controller state shared by all configfs attribute handlers. */
struct spear_pcie_gadget_config {
	void __iomem *base;		/* cookie: physical start of the DBI resource */
	void __iomem *va_app_base;	/* mapped application register block */
	void __iomem *va_dbi_base;	/* mapped DBI (local config space) registers */
	char int_type[10];		/* last interrupt mode stored ("INTA"/"MSI") */
	ulong requested_msi;		/* MSI vector count requested via configfs */
	ulong configured_msi;	/* MSI vector count the host actually enabled */
	ulong bar0_size;		/* current BAR0 window size in bytes */
	ulong bar0_rw_offset;	/* offset used by the bar0_data accessors */
	void __iomem *va_bar0_address;	/* mapping of the BAR0 target memory */
};

/*
 * configfs subsystem wrapper; lets attribute callbacks recover the
 * per-controller config from a config_item via container_of().
 */
struct pcie_gadget_target {
	struct configfs_subsystem subsys;
	struct spear_pcie_gadget_config config;
};

/* configfs attribute with typed show/store hooks taking the gadget config. */
struct pcie_gadget_target_attr {
	struct configfs_attribute	attr;
	ssize_t		(*show)(struct spear_pcie_gadget_config *config,
						char *buf);
	ssize_t		(*store)(struct spear_pcie_gadget_config *config,
						 const char *buf,
						 size_t count);
};
67 
/* Route AXI read/write transactions to the DBI (config) space. */
static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
	u32 misc;

	misc = readl(&app_reg->slv_armisc);
	misc |= 1 << AXI_OP_DBI_ACCESS_ID;
	writel(misc, &app_reg->slv_armisc);

	misc = readl(&app_reg->slv_awmisc);
	misc |= 1 << AXI_OP_DBI_ACCESS_ID;
	writel(misc, &app_reg->slv_awmisc);
}
77 
/* Route AXI read/write transactions back to normal (memory) space. */
static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
	u32 misc;

	misc = readl(&app_reg->slv_armisc);
	misc &= ~(1 << AXI_OP_DBI_ACCESS_ID);
	writel(misc, &app_reg->slv_armisc);

	misc = readl(&app_reg->slv_awmisc);
	misc &= ~(1 << AXI_OP_DBI_ACCESS_ID);
	writel(misc, &app_reg->slv_awmisc);
}
87 
/*
 * Read @size bytes (1, 2 or 4) at config-space offset @where into *val.
 * The DBI window is enabled around the access and disabled afterwards.
 */
static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
		int where, int size, u32 *val)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong addr = (ulong)config->va_dbi_base + (where & ~0x3);
	u32 word;

	enable_dbi_access(app_reg);

	/* always read the aligned 32-bit word, then extract the field */
	word = readl(addr);
	switch (size) {
	case 1:
		word = (word >> (8 * (where & 3))) & 0xff;
		break;
	case 2:
		word = (word >> (8 * (where & 3))) & 0xffff;
		break;
	default:
		break;
	}
	*val = word;

	disable_dbi_access(app_reg);
}
109 
/*
 * Write @size bytes (1, 2 or 4) of @val at config-space offset @where.
 * The DBI window is enabled around the access and disabled afterwards.
 */
static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
		int where, int size, u32 val)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong addr = (ulong)config->va_dbi_base + (where & ~0x3);

	enable_dbi_access(app_reg);

	switch (size) {
	case 4:
		writel(val, addr);
		break;
	case 2:
		/* sub-word writes keep the byte offset within the word */
		writew(val, addr + (where & 2));
		break;
	case 1:
		writeb(val, addr + (where & 3));
		break;
	default:
		break;
	}

	disable_dbi_access(app_reg);
}
131 
132 #define PCI_FIND_CAP_TTL	48
133 
/*
 * Walk the local capability list starting at pointer offset @pos,
 * looking for capability id @cap.  *ttl bounds the walk so a corrupt
 * (looping) list terminates.  Returns the capability offset, or 0.
 */
static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
		u32 pos, int cap, int *ttl)
{
	u32 id;

	for (; (*ttl)-- > 0; pos += PCI_CAP_LIST_NEXT) {
		/* follow the "next capability" pointer stored at @pos */
		spear_dbi_read_reg(config, pos, 1, &pos);
		if (pos < 0x40)
			break;	/* pointers below 0x40 are invalid */
		pos &= ~3;
		spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;	/* end of list */
		if (id == cap)
			return pos;
	}
	return 0;
}
153 
/* Bounded capability-list walk starting at @pos; see the _ttl variant. */
static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
			u32 pos, int cap)
{
	int budget = PCI_FIND_CAP_TTL;

	return pci_find_own_next_cap_ttl(config, pos, cap, &budget);
}
161 
/*
 * Return the config-space offset of the first capability pointer for
 * the given header type, or 0 when the device advertises no capability
 * list.  (The unreachable trailing "return 0" after the exhaustive
 * switch has been removed.)
 */
static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
				u8 hdr_type)
{
	u32 status;

	/* no list unless the status register advertises one */
	spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}
}
183 
184 /*
185  * Tell if a device supports a given PCI capability.
186  * Returns the address of the requested capability structure within the
187  * device's PCI configuration space or 0 in case the device does not
188  * support it. Possible values for @cap:
189  *
190  * %PCI_CAP_ID_PM	Power Management
191  * %PCI_CAP_ID_AGP	Accelerated Graphics Port
192  * %PCI_CAP_ID_VPD	Vital Product Data
193  * %PCI_CAP_ID_SLOTID	Slot Identification
194  * %PCI_CAP_ID_MSI	Message Signalled Interrupts
195  * %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
196  * %PCI_CAP_ID_PCIX	PCI-X
197  * %PCI_CAP_ID_EXP	PCI Express
198  */
pci_find_own_capability(struct spear_pcie_gadget_config * config,int cap)199 static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
200 		int cap)
201 {
202 	u32 pos;
203 	u32 hdr_type;
204 
205 	spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
206 
207 	pos = pci_find_own_cap_start(config, hdr_type);
208 	if (pos)
209 		pos = pci_find_own_next_cap(config, pos, cap);
210 
211 	return pos;
212 }
213 
/*
 * Interrupt handler registered in probe.  No events are serviced yet.
 * NOTE(review): returning 0 (IRQ_NONE) tells the core the interrupt was
 * not ours, which can trip spurious-IRQ detection if the line fires —
 * confirm whether IRQ_HANDLED was intended.
 */
static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
{
	return 0;
}
218 
219 /*
220  * configfs interfaces show/store functions
221  */
pcie_gadget_show_link(struct spear_pcie_gadget_config * config,char * buf)222 static ssize_t pcie_gadget_show_link(
223 		struct spear_pcie_gadget_config *config,
224 		char *buf)
225 {
226 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
227 
228 	if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
229 		return sprintf(buf, "UP");
230 	else
231 		return sprintf(buf, "DOWN");
232 }
233 
pcie_gadget_store_link(struct spear_pcie_gadget_config * config,const char * buf,size_t count)234 static ssize_t pcie_gadget_store_link(
235 		struct spear_pcie_gadget_config *config,
236 		const char *buf, size_t count)
237 {
238 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
239 
240 	if (sysfs_streq(buf, "UP"))
241 		writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
242 			&app_reg->app_ctrl_0);
243 	else if (sysfs_streq(buf, "DOWN"))
244 		writel(readl(&app_reg->app_ctrl_0)
245 				& ~(1 << APP_LTSSM_ENABLE_ID),
246 				&app_reg->app_ctrl_0);
247 	else
248 		return -EINVAL;
249 	return count;
250 }
251 
pcie_gadget_show_int_type(struct spear_pcie_gadget_config * config,char * buf)252 static ssize_t pcie_gadget_show_int_type(
253 		struct spear_pcie_gadget_config *config,
254 		char *buf)
255 {
256 	return sprintf(buf, "%s", config->int_type);
257 }
258 
pcie_gadget_store_int_type(struct spear_pcie_gadget_config * config,const char * buf,size_t count)259 static ssize_t pcie_gadget_store_int_type(
260 		struct spear_pcie_gadget_config *config,
261 		const char *buf, size_t count)
262 {
263 	u32 cap, vec, flags;
264 	ulong vector;
265 
266 	if (sysfs_streq(buf, "INTA"))
267 		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
268 
269 	else if (sysfs_streq(buf, "MSI")) {
270 		vector = config->requested_msi;
271 		vec = 0;
272 		while (vector > 1) {
273 			vector /= 2;
274 			vec++;
275 		}
276 		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
277 		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
278 		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
279 		flags &= ~PCI_MSI_FLAGS_QMASK;
280 		flags |= vec << 1;
281 		spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
282 	} else
283 		return -EINVAL;
284 
285 	strcpy(config->int_type, buf);
286 
287 	return count;
288 }
289 
pcie_gadget_show_no_of_msi(struct spear_pcie_gadget_config * config,char * buf)290 static ssize_t pcie_gadget_show_no_of_msi(
291 		struct spear_pcie_gadget_config *config,
292 		char *buf)
293 {
294 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
295 	u32 cap, vec, flags;
296 	ulong vector;
297 
298 	if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
299 			!= (1 << CFG_MSI_EN_ID))
300 		vector = 0;
301 	else {
302 		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
303 		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
304 		flags &= ~PCI_MSI_FLAGS_QSIZE;
305 		vec = flags >> 4;
306 		vector = 1;
307 		while (vec--)
308 			vector *= 2;
309 	}
310 	config->configured_msi = vector;
311 
312 	return sprintf(buf, "%lu", vector);
313 }
314 
pcie_gadget_store_no_of_msi(struct spear_pcie_gadget_config * config,const char * buf,size_t count)315 static ssize_t pcie_gadget_store_no_of_msi(
316 		struct spear_pcie_gadget_config *config,
317 		const char *buf, size_t count)
318 {
319 	if (strict_strtoul(buf, 0, &config->requested_msi))
320 		return -EINVAL;
321 	if (config->requested_msi > 32)
322 		config->requested_msi = 32;
323 
324 	return count;
325 }
326 
pcie_gadget_store_inta(struct spear_pcie_gadget_config * config,const char * buf,size_t count)327 static ssize_t pcie_gadget_store_inta(
328 		struct spear_pcie_gadget_config *config,
329 		const char *buf, size_t count)
330 {
331 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
332 	ulong en;
333 
334 	if (strict_strtoul(buf, 0, &en))
335 		return -EINVAL;
336 
337 	if (en)
338 		writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
339 				&app_reg->app_ctrl_0);
340 	else
341 		writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
342 				&app_reg->app_ctrl_0);
343 
344 	return count;
345 }
346 
pcie_gadget_store_send_msi(struct spear_pcie_gadget_config * config,const char * buf,size_t count)347 static ssize_t pcie_gadget_store_send_msi(
348 		struct spear_pcie_gadget_config *config,
349 		const char *buf, size_t count)
350 {
351 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
352 	ulong vector;
353 	u32 ven_msi;
354 
355 	if (strict_strtoul(buf, 0, &vector))
356 		return -EINVAL;
357 
358 	if (!config->configured_msi)
359 		return -EINVAL;
360 
361 	if (vector >= config->configured_msi)
362 		return -EINVAL;
363 
364 	ven_msi = readl(&app_reg->ven_msi_1);
365 	ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
366 	ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
367 	ven_msi &= ~VEN_MSI_TC_MASK;
368 	ven_msi |= 0 << VEN_MSI_TC_ID;
369 	ven_msi &= ~VEN_MSI_VECTOR_MASK;
370 	ven_msi |= vector << VEN_MSI_VECTOR_ID;
371 
372 	/* generating interrupt for msi vector */
373 	ven_msi |= VEN_MSI_REQ_EN;
374 	writel(ven_msi, &app_reg->ven_msi_1);
375 	udelay(1);
376 	ven_msi &= ~VEN_MSI_REQ_EN;
377 	writel(ven_msi, &app_reg->ven_msi_1);
378 
379 	return count;
380 }
381 
pcie_gadget_show_vendor_id(struct spear_pcie_gadget_config * config,char * buf)382 static ssize_t pcie_gadget_show_vendor_id(
383 		struct spear_pcie_gadget_config *config,
384 		char *buf)
385 {
386 	u32 id;
387 
388 	spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id);
389 
390 	return sprintf(buf, "%x", id);
391 }
392 
pcie_gadget_store_vendor_id(struct spear_pcie_gadget_config * config,const char * buf,size_t count)393 static ssize_t pcie_gadget_store_vendor_id(
394 		struct spear_pcie_gadget_config *config,
395 		const char *buf, size_t count)
396 {
397 	ulong id;
398 
399 	if (strict_strtoul(buf, 0, &id))
400 		return -EINVAL;
401 
402 	spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id);
403 
404 	return count;
405 }
406 
pcie_gadget_show_device_id(struct spear_pcie_gadget_config * config,char * buf)407 static ssize_t pcie_gadget_show_device_id(
408 		struct spear_pcie_gadget_config *config,
409 		char *buf)
410 {
411 	u32 id;
412 
413 	spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id);
414 
415 	return sprintf(buf, "%x", id);
416 }
417 
pcie_gadget_store_device_id(struct spear_pcie_gadget_config * config,const char * buf,size_t count)418 static ssize_t pcie_gadget_store_device_id(
419 		struct spear_pcie_gadget_config *config,
420 		const char *buf, size_t count)
421 {
422 	ulong id;
423 
424 	if (strict_strtoul(buf, 0, &id))
425 		return -EINVAL;
426 
427 	spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id);
428 
429 	return count;
430 }
431 
pcie_gadget_show_bar0_size(struct spear_pcie_gadget_config * config,char * buf)432 static ssize_t pcie_gadget_show_bar0_size(
433 		struct spear_pcie_gadget_config *config,
434 		char *buf)
435 {
436 	return sprintf(buf, "%lx", config->bar0_size);
437 }
438 
pcie_gadget_store_bar0_size(struct spear_pcie_gadget_config * config,const char * buf,size_t count)439 static ssize_t pcie_gadget_store_bar0_size(
440 		struct spear_pcie_gadget_config *config,
441 		const char *buf, size_t count)
442 {
443 	ulong size;
444 	u32 pos, pos1;
445 	u32 no_of_bit = 0;
446 
447 	if (strict_strtoul(buf, 0, &size))
448 		return -EINVAL;
449 	/* min bar size is 256 */
450 	if (size <= 0x100)
451 		size = 0x100;
452 	/* max bar size is 1MB*/
453 	else if (size >= 0x100000)
454 		size = 0x100000;
455 	else {
456 		pos = 0;
457 		pos1 = 0;
458 		while (pos < 21) {
459 			pos = find_next_bit((ulong *)&size, 21, pos);
460 			if (pos != 21)
461 				pos1 = pos + 1;
462 			pos++;
463 			no_of_bit++;
464 		}
465 		if (no_of_bit == 2)
466 			pos1--;
467 
468 		size = 1 << pos1;
469 	}
470 	config->bar0_size = size;
471 	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
472 
473 	return count;
474 }
475 
pcie_gadget_show_bar0_address(struct spear_pcie_gadget_config * config,char * buf)476 static ssize_t pcie_gadget_show_bar0_address(
477 		struct spear_pcie_gadget_config *config,
478 		char *buf)
479 {
480 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
481 
482 	u32 address = readl(&app_reg->pim0_mem_addr_start);
483 
484 	return sprintf(buf, "%x", address);
485 }
486 
pcie_gadget_store_bar0_address(struct spear_pcie_gadget_config * config,const char * buf,size_t count)487 static ssize_t pcie_gadget_store_bar0_address(
488 		struct spear_pcie_gadget_config *config,
489 		const char *buf, size_t count)
490 {
491 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
492 	ulong address;
493 
494 	if (strict_strtoul(buf, 0, &address))
495 		return -EINVAL;
496 
497 	address &= ~(config->bar0_size - 1);
498 	if (config->va_bar0_address)
499 		iounmap(config->va_bar0_address);
500 	config->va_bar0_address = ioremap(address, config->bar0_size);
501 	if (!config->va_bar0_address)
502 		return -ENOMEM;
503 
504 	writel(address, &app_reg->pim0_mem_addr_start);
505 
506 	return count;
507 }
508 
pcie_gadget_show_bar0_rw_offset(struct spear_pcie_gadget_config * config,char * buf)509 static ssize_t pcie_gadget_show_bar0_rw_offset(
510 		struct spear_pcie_gadget_config *config,
511 		char *buf)
512 {
513 	return sprintf(buf, "%lx", config->bar0_rw_offset);
514 }
515 
pcie_gadget_store_bar0_rw_offset(struct spear_pcie_gadget_config * config,const char * buf,size_t count)516 static ssize_t pcie_gadget_store_bar0_rw_offset(
517 		struct spear_pcie_gadget_config *config,
518 		const char *buf, size_t count)
519 {
520 	ulong offset;
521 
522 	if (strict_strtoul(buf, 0, &offset))
523 		return -EINVAL;
524 
525 	if (offset % 4)
526 		return -EINVAL;
527 
528 	config->bar0_rw_offset = offset;
529 
530 	return count;
531 }
532 
pcie_gadget_show_bar0_data(struct spear_pcie_gadget_config * config,char * buf)533 static ssize_t pcie_gadget_show_bar0_data(
534 		struct spear_pcie_gadget_config *config,
535 		char *buf)
536 {
537 	ulong data;
538 
539 	if (!config->va_bar0_address)
540 		return -ENOMEM;
541 
542 	data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
543 
544 	return sprintf(buf, "%lx", data);
545 }
546 
pcie_gadget_store_bar0_data(struct spear_pcie_gadget_config * config,const char * buf,size_t count)547 static ssize_t pcie_gadget_store_bar0_data(
548 		struct spear_pcie_gadget_config *config,
549 		const char *buf, size_t count)
550 {
551 	ulong data;
552 
553 	if (strict_strtoul(buf, 0, &data))
554 		return -EINVAL;
555 
556 	if (!config->va_bar0_address)
557 		return -ENOMEM;
558 
559 	writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
560 
561 	return count;
562 }
563 
564 /*
565  * Attribute definitions.
566  */
567 
/* Declare a read-only configfs attribute wired to pcie_gadget_show_<name>. */
#define PCIE_GADGET_TARGET_ATTR_RO(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL)

/* Declare a write-only configfs attribute wired to pcie_gadget_store_<name>. */
#define PCIE_GADGET_TARGET_ATTR_WO(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name)

/* Declare a read-write configfs attribute wired to both hooks. */
#define PCIE_GADGET_TARGET_ATTR_RW(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \
			pcie_gadget_store_##_name)
PCIE_GADGET_TARGET_ATTR_RW(link);
PCIE_GADGET_TARGET_ATTR_RW(int_type);
PCIE_GADGET_TARGET_ATTR_RW(no_of_msi);
PCIE_GADGET_TARGET_ATTR_WO(inta);
PCIE_GADGET_TARGET_ATTR_WO(send_msi);
PCIE_GADGET_TARGET_ATTR_RW(vendor_id);
PCIE_GADGET_TARGET_ATTR_RW(device_id);
PCIE_GADGET_TARGET_ATTR_RW(bar0_size);
PCIE_GADGET_TARGET_ATTR_RW(bar0_address);
PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset);
PCIE_GADGET_TARGET_ATTR_RW(bar0_data);

/* NULL-terminated list handed to the configfs item type below. */
static struct configfs_attribute *pcie_gadget_target_attrs[] = {
	&pcie_gadget_target_link.attr,
	&pcie_gadget_target_int_type.attr,
	&pcie_gadget_target_no_of_msi.attr,
	&pcie_gadget_target_inta.attr,
	&pcie_gadget_target_send_msi.attr,
	&pcie_gadget_target_vendor_id.attr,
	&pcie_gadget_target_device_id.attr,
	&pcie_gadget_target_bar0_size.attr,
	&pcie_gadget_target_bar0_address.attr,
	&pcie_gadget_target_bar0_rw_offset.attr,
	&pcie_gadget_target_bar0_data.attr,
	NULL,
};
606 
to_target(struct config_item * item)607 static struct pcie_gadget_target *to_target(struct config_item *item)
608 {
609 	return item ?
610 		container_of(to_configfs_subsystem(to_config_group(item)),
611 				struct pcie_gadget_target, subsys) : NULL;
612 }
613 
614 /*
615  * Item operations and type for pcie_gadget_target.
616  */
617 
pcie_gadget_target_attr_show(struct config_item * item,struct configfs_attribute * attr,char * buf)618 static ssize_t pcie_gadget_target_attr_show(struct config_item *item,
619 					   struct configfs_attribute *attr,
620 					   char *buf)
621 {
622 	ssize_t ret = -EINVAL;
623 	struct pcie_gadget_target *target = to_target(item);
624 	struct pcie_gadget_target_attr *t_attr =
625 		container_of(attr, struct pcie_gadget_target_attr, attr);
626 
627 	if (t_attr->show)
628 		ret = t_attr->show(&target->config, buf);
629 	return ret;
630 }
631 
pcie_gadget_target_attr_store(struct config_item * item,struct configfs_attribute * attr,const char * buf,size_t count)632 static ssize_t pcie_gadget_target_attr_store(struct config_item *item,
633 					struct configfs_attribute *attr,
634 					const char *buf,
635 					size_t count)
636 {
637 	ssize_t ret = -EINVAL;
638 	struct pcie_gadget_target *target = to_target(item);
639 	struct pcie_gadget_target_attr *t_attr =
640 		container_of(attr, struct pcie_gadget_target_attr, attr);
641 
642 	if (t_attr->store)
643 		ret = t_attr->store(&target->config, buf, count);
644 	return ret;
645 }
646 
/* configfs item operations: route all attribute I/O through the
 * typed dispatchers above. */
static struct configfs_item_operations pcie_gadget_target_item_ops = {
	.show_attribute		= pcie_gadget_target_attr_show,
	.store_attribute	= pcie_gadget_target_attr_store,
};

/* Item type for the gadget's configfs subsystem group. */
static struct config_item_type pcie_gadget_target_type = {
	.ct_attrs		= pcie_gadget_target_attrs,
	.ct_item_ops		= &pcie_gadget_target_item_ops,
	.ct_owner		= THIS_MODULE,
};
657 
spear13xx_pcie_device_init(struct spear_pcie_gadget_config * config)658 static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
659 {
660 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
661 
662 	/*setup registers for outbound translation */
663 
664 	writel(config->base, &app_reg->in0_mem_addr_start);
665 	writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
666 			&app_reg->in0_mem_addr_limit);
667 	writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
668 	writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
669 			&app_reg->in1_mem_addr_limit);
670 	writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
671 	writel(app_reg->in_io_addr_start + IN_IO_SIZE,
672 			&app_reg->in_io_addr_limit);
673 	writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
674 	writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
675 			&app_reg->in_cfg0_addr_limit);
676 	writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
677 	writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
678 			&app_reg->in_cfg1_addr_limit);
679 	writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
680 	writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
681 			&app_reg->in_msg_addr_limit);
682 
683 	writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
684 	writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
685 	writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);
686 
687 	/*setup registers for inbound translation */
688 
689 	/* Keep AORAM mapped at BAR0 as default */
690 	config->bar0_size = INBOUND_ADDR_MASK + 1;
691 	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
692 	spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
693 	config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
694 			config->bar0_size);
695 
696 	writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
697 	writel(0, &app_reg->pim1_mem_addr_start);
698 	writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);
699 
700 	writel(0x0, &app_reg->pim_io_addr_start);
701 	writel(0x0, &app_reg->pim_io_addr_start);
702 	writel(0x0, &app_reg->pim_rom_addr_start);
703 
704 	writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
705 			| ((u32)1 << REG_TRANSLATION_ENABLE),
706 			&app_reg->app_ctrl_0);
707 	/* disable all rx interrupts */
708 	writel(0, &app_reg->int_mask);
709 
710 	/* Select INTA as default*/
711 	spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
712 }
713 
spear_pcie_gadget_probe(struct platform_device * pdev)714 static int __devinit spear_pcie_gadget_probe(struct platform_device *pdev)
715 {
716 	struct resource *res0, *res1;
717 	unsigned int status = 0;
718 	int irq;
719 	struct clk *clk;
720 	static struct pcie_gadget_target *target;
721 	struct spear_pcie_gadget_config *config;
722 	struct config_item		*cg_item;
723 	struct configfs_subsystem *subsys;
724 
725 	/* get resource for application registers*/
726 
727 	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
728 	if (!res0) {
729 		dev_err(&pdev->dev, "no resource defined\n");
730 		return -EBUSY;
731 	}
732 	if (!request_mem_region(res0->start, resource_size(res0),
733 				pdev->name)) {
734 		dev_err(&pdev->dev, "pcie gadget region already	claimed\n");
735 		return -EBUSY;
736 	}
737 	/* get resource for dbi registers*/
738 
739 	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
740 	if (!res1) {
741 		dev_err(&pdev->dev, "no resource defined\n");
742 		goto err_rel_res0;
743 	}
744 	if (!request_mem_region(res1->start, resource_size(res1),
745 				pdev->name)) {
746 		dev_err(&pdev->dev, "pcie gadget region already	claimed\n");
747 		goto err_rel_res0;
748 	}
749 
750 	target = kzalloc(sizeof(*target), GFP_KERNEL);
751 	if (!target) {
752 		dev_err(&pdev->dev, "out of memory\n");
753 		status = -ENOMEM;
754 		goto err_rel_res;
755 	}
756 
757 	cg_item = &target->subsys.su_group.cg_item;
758 	sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
759 	cg_item->ci_type	= &pcie_gadget_target_type;
760 	config = &target->config;
761 	config->va_app_base = (void __iomem *)ioremap(res0->start,
762 			resource_size(res0));
763 	if (!config->va_app_base) {
764 		dev_err(&pdev->dev, "ioremap fail\n");
765 		status = -ENOMEM;
766 		goto err_kzalloc;
767 	}
768 
769 	config->base = (void __iomem *)res1->start;
770 
771 	config->va_dbi_base = (void __iomem *)ioremap(res1->start,
772 			resource_size(res1));
773 	if (!config->va_dbi_base) {
774 		dev_err(&pdev->dev, "ioremap fail\n");
775 		status = -ENOMEM;
776 		goto err_iounmap_app;
777 	}
778 
779 	dev_set_drvdata(&pdev->dev, target);
780 
781 	irq = platform_get_irq(pdev, 0);
782 	if (irq < 0) {
783 		dev_err(&pdev->dev, "no update irq?\n");
784 		status = irq;
785 		goto err_iounmap;
786 	}
787 
788 	status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL);
789 	if (status) {
790 		dev_err(&pdev->dev, "pcie gadget interrupt IRQ%d already \
791 				claimed\n", irq);
792 		goto err_iounmap;
793 	}
794 
795 	/* Register configfs hooks */
796 	subsys = &target->subsys;
797 	config_group_init(&subsys->su_group);
798 	mutex_init(&subsys->su_mutex);
799 	status = configfs_register_subsystem(subsys);
800 	if (status)
801 		goto err_irq;
802 
803 	/*
804 	 * init basic pcie application registers
805 	 * do not enable clock if it is PCIE0.Ideally , all controller should
806 	 * have been independent from others with respect to clock. But PCIE1
807 	 * and 2 depends on PCIE0.So PCIE0 clk is provided during board init.
808 	 */
809 	if (pdev->id == 1) {
810 		/*
811 		 * Ideally CFG Clock should have been also enabled here. But
812 		 * it is done currently during board init routne
813 		 */
814 		clk = clk_get_sys("pcie1", NULL);
815 		if (IS_ERR(clk)) {
816 			pr_err("%s:couldn't get clk for pcie1\n", __func__);
817 			goto err_irq;
818 		}
819 		if (clk_enable(clk)) {
820 			pr_err("%s:couldn't enable clk for pcie1\n", __func__);
821 			goto err_irq;
822 		}
823 	} else if (pdev->id == 2) {
824 		/*
825 		 * Ideally CFG Clock should have been also enabled here. But
826 		 * it is done currently during board init routne
827 		 */
828 		clk = clk_get_sys("pcie2", NULL);
829 		if (IS_ERR(clk)) {
830 			pr_err("%s:couldn't get clk for pcie2\n", __func__);
831 			goto err_irq;
832 		}
833 		if (clk_enable(clk)) {
834 			pr_err("%s:couldn't enable clk for pcie2\n", __func__);
835 			goto err_irq;
836 		}
837 	}
838 	spear13xx_pcie_device_init(config);
839 
840 	return 0;
841 err_irq:
842 	free_irq(irq, NULL);
843 err_iounmap:
844 	iounmap(config->va_dbi_base);
845 err_iounmap_app:
846 	iounmap(config->va_app_base);
847 err_kzalloc:
848 	kfree(config);
849 err_rel_res:
850 	release_mem_region(res1->start, resource_size(res1));
851 err_rel_res0:
852 	release_mem_region(res0->start, resource_size(res0));
853 	return status;
854 }
855 
spear_pcie_gadget_remove(struct platform_device * pdev)856 static int __devexit spear_pcie_gadget_remove(struct platform_device *pdev)
857 {
858 	struct resource *res0, *res1;
859 	static struct pcie_gadget_target *target;
860 	struct spear_pcie_gadget_config *config;
861 	int irq;
862 
863 	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
864 	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
865 	irq = platform_get_irq(pdev, 0);
866 	target = dev_get_drvdata(&pdev->dev);
867 	config = &target->config;
868 
869 	free_irq(irq, NULL);
870 	iounmap(config->va_dbi_base);
871 	iounmap(config->va_app_base);
872 	release_mem_region(res1->start, resource_size(res1));
873 	release_mem_region(res0->start, resource_size(res0));
874 	configfs_unregister_subsystem(&target->subsys);
875 	kfree(target);
876 
877 	return 0;
878 }
879 
/* Nothing to quiesce on shutdown; the hardware state is left as-is. */
static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
{
}
883 
/*
 * Platform driver glue.  NOTE(review): setting .bus explicitly to
 * &platform_bus_type is redundant (the platform core does this), but
 * it is kept here to avoid any behavior change.
 */
static struct platform_driver spear_pcie_gadget_driver = {
	.probe = spear_pcie_gadget_probe,
	.remove = spear_pcie_gadget_remove,
	.shutdown = spear_pcie_gadget_shutdown,
	.driver = {
		.name = "pcie-gadget-spear",
		.bus = &platform_bus_type
	},
};
893 
/* Module entry point: register the platform driver with the core. */
static int __init spear_pcie_gadget_init(void)
{
	return platform_driver_register(&spear_pcie_gadget_driver);
}
module_init(spear_pcie_gadget_init);
899 
/* Module exit point: unregister the platform driver. */
static void __exit spear_pcie_gadget_exit(void)
{
	platform_driver_unregister(&spear_pcie_gadget_driver);
}
module_exit(spear_pcie_gadget_exit);
905 
/* Module metadata. */
MODULE_ALIAS("pcie-gadget-spear");
MODULE_AUTHOR("Pratyush Anand");
MODULE_LICENSE("GPL");
909