#ifndef __LIBATA_COMPAT_H__
#define __LIBATA_COMPAT_H__

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/slab.h>

typedef u32 __le32;
typedef u64 __le64;

#define DMA_64BIT_MASK 0xffffffffffffffffULL
#define DMA_32BIT_MASK 0x00000000ffffffffULL

/* These definitions mirror those in pci.h, so they can be used
 * interchangeably with their PCI_DMA_* counterparts */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define MODULE_VERSION(ver_str)

/* Remaps usage of KM_IRQ0 onto KM_SOFTIRQ0. KM_IRQ0 only exists on ia64 in
 * 2.4. Warning: this will also remap KM_IRQ0 on ia64, so be careful about
 * the files included after this file. */

#define KM_IRQ0	KM_SOFTIRQ0

struct device {
	struct pci_dev pdev;
};

static inline struct pci_dev *to_pci_dev(struct device *dev)
{
	return (struct pci_dev *) dev;
}
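
/*
 * Illustrative sketch (not part of the original header): the cast above only
 * works because the embedded pci_dev is the first and only member of the
 * compat struct device, so both structures share the same address.  Assuming
 * a hypothetical compat 'struct device *dev' aliasing a probed PCI device:
 *
 *	struct pci_dev *pdev = to_pci_dev(dev);
 *	printk(KERN_DEBUG "compat device is %s\n", pci_name(pdev));
 */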

#define pdev_printk(lvl, pdev, fmt, args...)			\
	do {							\
		printk("%s%s(%s): ", lvl,			\
			(pdev)->driver && (pdev)->driver->name ? \
				(pdev)->driver->name : "PCI",	\
			pci_name(pdev));			\
		printk(fmt, ## args);				\
	} while (0)
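
/*
 * Example usage (illustrative, not part of the original header), assuming a
 * hypothetical driver with a probed 'struct pci_dev *pdev':
 *
 *	pdev_printk(KERN_INFO, pdev, "probing\n");
 *
 * The message is prefixed with the bound driver's name (or "PCI" if no
 * driver is bound) and the device's PCI address from pci_name().
 */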

/* MSI is not available through this compat layer; always report failure so
 * callers fall back to legacy INTx interrupts. */
static inline int pci_enable_msi(struct pci_dev *dev) { return -1; }
static inline void pci_disable_msi(struct pci_dev *dev) {}

static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (mask == (u64)dev->dma_mask)
		return 0;
	return -EIO;
}
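
/*
 * Example (illustrative, not part of the original header): a typical
 * probe-time pattern sets the streaming and consistent DMA masks together,
 * using the DMA_*BIT_MASK constants defined above; error handling elided:
 *
 *	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0)
 *		pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 */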

/* NOTE: dangerous! we ignore the 'gfp' argument */
#define dma_alloc_coherent(dev,sz,dma,gfp) \
	pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
#define dma_free_coherent(dev,sz,addr,dma_addr) \
	pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))

#define dma_map_sg(dev,a,b,c) \
	pci_map_sg(to_pci_dev(dev),(a),(b),(c))
#define dma_unmap_sg(dev,a,b,c) \
	pci_unmap_sg(to_pci_dev(dev),(a),(b),(c))

#define dma_map_single(dev,a,b,c) \
	pci_map_single(to_pci_dev(dev),(a),(b),(c))
#define dma_unmap_single(dev,a,b,c) \
	pci_unmap_single(to_pci_dev(dev),(a),(b),(c))

#define dma_mapping_error(addr) (0)
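
/*
 * Example (illustrative, not part of the original header): a single-buffer
 * streaming mapping through the wrappers above, assuming a compat
 * 'struct device *dev' and a kernel-virtual buffer 'buf' of 'len' bytes.
 * Note that this compat dma_mapping_error() takes only the address and
 * always reports success:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */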

#define dev_get_drvdata(dev) \
	pci_get_drvdata(to_pci_dev(dev))
#define dev_set_drvdata(dev,ptr) \
	pci_set_drvdata(to_pci_dev(dev),(ptr))

static inline void *kcalloc(size_t nmemb, size_t size, int flags)
{
	size_t total;
	void *mem;

	/* guard against nmemb * size overflowing, as the real kcalloc() does */
	if (size && nmemb > (size_t)-1 / size)
		return NULL;

	total = nmemb * size;
	mem = kmalloc(total, flags);
	if (mem)
		memset(mem, 0, total);
	return mem;
}

static inline void *kzalloc(size_t size, int flags)
{
	return kcalloc(1, size, flags);
}
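
/*
 * Example (illustrative, not part of the original header): allocating a
 * zeroed per-host private structure with the compat kzalloc(), assuming a
 * hypothetical 'struct my_host_priv':
 *
 *	struct my_host_priv *hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
 *	if (!hpriv)
 *		return -ENOMEM;
 */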

static inline void pci_iounmap(struct pci_dev *pdev, void *mem)
{
	iounmap(mem);
}

/**
 * pci_intx - enables/disables PCI INTx for device @pdev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 */
static inline void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		pci_write_config_word(pdev, PCI_COMMAND, new);
	}
}
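
/*
 * Example (illustrative, not part of the original header): since
 * pci_enable_msi() always fails in this compat layer, a driver would keep
 * legacy interrupts enabled rather than masking them:
 *
 *	pci_intx(pdev, 1);
 *
 * which clears PCI_COMMAND_INTX_DISABLE so the device can raise INTx.
 */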

static inline void __iomem *
pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	/* port I/O BARs cannot be remapped by this compat helper */
	if (flags & IORESOURCE_IO) {
		BUG();
	}
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);
		return ioremap_nocache(start, len);
	}
	/* What? */
	return NULL;
}
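
/*
 * Example (illustrative, not part of the original header): mapping a memory
 * BAR at probe time and releasing it on remove with the helpers above; BAR 5
 * is just an assumed example (e.g. an AHCI ABAR):
 *
 *	void __iomem *mmio = pci_iomap(pdev, 5, 0);
 *	if (!mmio)
 *		return -ENOMEM;
 *	...
 *	pci_iounmap(pdev, mmio);
 */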

static inline void sg_set_buf(struct scatterlist *sg, void *buf,
			      unsigned int buflen)
{
	sg->page = virt_to_page(buf);
	sg->offset = offset_in_page(buf);
	sg->length = buflen;
}

static inline void sg_init_one(struct scatterlist *sg, void *buf,
			       unsigned int buflen)
{
	memset(sg, 0, sizeof(*sg));
	sg_set_buf(sg, buf, buflen);
}
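
/*
 * Example (illustrative, not part of the original header): building a
 * one-entry scatterlist around a kmalloc'd buffer 'buf' of 'len' bytes and
 * mapping it through the dma_map_sg() wrapper defined earlier, assuming a
 * compat 'struct device *dev':
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE) == 0)
 *		return -ENOMEM;
 */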

#endif /* __LIBATA_COMPAT_H__ */