/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  internal queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __IPZ_PT_FN_H__
#define __IPZ_PT_FN_H__

#define EHCA_PAGESHIFT   12
#define EHCA_PAGESIZE   4096UL
#define EHCA_PAGEMASK   (~(EHCA_PAGESIZE-1))
#define EHCA_PT_ENTRIES 512UL

#include "ehca_tools.h"
#include "ehca_qes.h"

struct ehca_pd;
struct ipz_small_queue_page;

extern struct kmem_cache *small_qp_cache;

/* struct generic ehca page */
struct ipz_page {
	u8 entries[EHCA_PAGESIZE];
};

#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)

struct ipz_small_queue_page {
	unsigned long page;
	unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
	int fill;
	void *mapped_addr;
	u32 mmap_count;
	struct list_head list;
};

/* struct generic queue in linux kernel virtual memory (kv) */
struct ipz_queue {
	u64 current_q_offset;	/* current queue entry */

	struct ipz_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;		/* queue entry size */
	u32 act_nr_of_sg;
	u32 queue_length;	/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;	/* toggle flag - per page */
	u32 offset; /* save offset within page for small_qp */
	struct ipz_small_queue_page *small_page;
};

/*
 * return current Queue Entry for a certain q_offset
 * returns address (kv) of Queue Entry
 */
static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
{
	struct ipz_page *current_page;
	if (q_offset >= queue->queue_length)
		return NULL;
	current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
}
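
/*
 * Illustrative sketch, not part of the eHCA API: a queue offset maps to page
 * index q_offset >> EHCA_PAGESHIFT and in-page byte offset
 * q_offset & (EHCA_PAGESIZE - 1).  The hypothetical helper below just
 * converts an entry index into a byte offset before calling ipz_qeit_calc().
 */
static inline void *example_ipz_qeit_by_index(struct ipz_queue *queue, u64 idx)
{
	/* the idx-th entry starts idx * qe_size bytes into the queue */
	return ipz_qeit_calc(queue, idx * queue->qe_size);
}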

/*
 * return current Queue Entry
 * returns address (kv) of Queue Entry
 */
static inline void *ipz_qeit_get(struct ipz_queue *queue)
{
	return ipz_qeit_calc(queue, queue->current_q_offset);
}

/*
 * return current Queue Page, increment Queue Page iterator from
 * page to page in struct ipz_queue; the last increment returns NULL and
 * does NOT wrap
 * returns address (kv) of Queue Page
 * warning don't use in parallel with ipz_qeit_get_inc()
 */
void *ipz_qpageit_get_inc(struct ipz_queue *queue);

/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * warning don't use in parallel with ipz_qpageit_get_inc()
 */
static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}

	return ret;
}
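
/*
 * Illustrative sketch, not part of the eHCA API: advance the iterator over
 * nr_wqes entries of the ring buffer.  The loop body is a hypothetical
 * stand-in for real work queue entry setup.
 */
static inline void example_ipz_fill_wqes(struct ipz_queue *queue, u32 nr_wqes)
{
	u32 i;

	for (i = 0; i < nr_wqes; i++) {
		void *wqe = ipz_qeit_get_inc(queue);

		/* a real caller would build a work queue entry at wqe here;
		 * on wrap-around the iterator also flips queue->toggle_state
		 */
		(void)wqe;
	}
}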

/*
 * return a bool indicating whether current Queue Entry is valid
 */
static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
{
	struct ehca_cqe *cqe = ipz_qeit_get(queue);
	return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
}

/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * returns NULL and does not increment if the valid state is wrong
 * warning don't use in parallel with ipz_qpageit_get_inc()
 */
static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
{
	return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
}
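
/*
 * Illustrative sketch, not part of the eHCA API: consume every completion
 * queue entry that is currently valid and return how many were seen.
 */
static inline u32 example_ipz_count_valid_cqes(struct ipz_queue *queue)
{
	u32 nr = 0;

	/* ipz_qeit_get_inc_valid() returns NULL as soon as the next entry's
	 * toggle bit no longer matches queue->toggle_state, i.e. the hardware
	 * has not written it yet
	 */
	while (ipz_qeit_get_inc_valid(queue))
		nr++;

	return nr;
}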

/*
 * returns and resets Queue Entry iterator
 * returns address (kv) of first Queue Entry
 */
static inline void *ipz_qeit_reset(struct ipz_queue *queue)
{
	queue->current_q_offset = 0;
	return ipz_qeit_get(queue);
}

/*
 * return the q_offset corresponding to an absolute address
 */
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);

/*
 * return the next queue offset. don't modify the queue.
 */
static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
{
	offset += queue->qe_size;
	if (offset >= queue->queue_length)
		offset = 0;
	return offset;
}
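
/*
 * Illustrative sketch, not part of the eHCA API: peek at the entry that
 * follows a given offset without touching the queue's iterator state.
 */
static inline void *example_ipz_peek_next(struct ipz_queue *queue, u64 offset)
{
	/* compute the following offset without moving current_q_offset */
	return ipz_qeit_calc(queue, ipz_queue_advance_offset(queue, offset));
}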

/* struct generic page table */
struct ipz_pt {
	u64 entries[EHCA_PT_ENTRIES];
};

/* struct page table for a queue, only to be used in pf */
struct ipz_qpt {
	/* queue page tables (kv), use u64 because we know the element length */
	u64 *qpts;
	u32 n_qpts;
	u32 n_ptes;       /*  number of page table entries */
	u64 *current_pte_addr;
};

/*
 * constructor for a struct ipz_queue: placement new for the queue itself,
 * plus allocation of all dependent data structures
 * all QP Tables are the same
 * flow:
 *    allocate+pin queue
 * see ipz_qpt_ctor()
 * returns true if ok, false if out of memory
 */
int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
		   const u32 nr_of_pages, const u32 pagesize,
		   const u32 qe_size, const u32 nr_of_sg,
		   int is_small);

/*
 * destructor for a struct ipz_queue
 *  -# free queue
 *  see ipz_queue_ctor()
 *  returns true if ok, false if queue was NULL-ptr or free failed
 */
int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
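
/*
 * Illustrative sketch, not part of the eHCA API: allocate a queue of
 * nr_of_pages regular-sized pages holding 64-byte entries, then free it
 * again.  The entry size and the "no scatter/gather, not small" arguments
 * are hypothetical example values.
 */
static inline int example_ipz_queue_setup_teardown(struct ehca_pd *pd,
						   struct ipz_queue *queue,
						   u32 nr_of_pages)
{
	/* ctor returns true on success, false if out of memory */
	if (!ipz_queue_ctor(pd, queue, nr_of_pages, EHCA_PAGESIZE, 64, 0, 0))
		return 0;

	/* ... a real caller would post and poll entries here ... */

	/* dtor returns true on success */
	return ipz_queue_dtor(pd, queue);
}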

/*
 * constructor for a struct ipz_qpt:
 * placement new for struct ipz_queue, plus allocation of all dependent
 * data structures
 * all QP Tables are the same,
 * flow:
 * -# allocate+pin queue
 * -# initialise ptcb
 * -# allocate+pin PTs
 * -# link PTs to a ring, according to HCA Arch, set bit 62 if needed
 * -# the ring must have room for exactly nr_of_PTEs
 * see ipz_qpt_ctor()
 */
void ipz_qpt_ctor(struct ipz_qpt *qpt,
		  const u32 nr_of_qes,
		  const u32 pagesize,
		  const u32 qe_size,
		  const u8 lowbyte, const u8 toggle,
		  u32 *act_nr_of_QEs, u32 *act_nr_of_pages);
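
/*
 * Illustrative sketch, not part of the eHCA API: a hypothetical call that
 * builds page tables for nr_of_qes 64-byte entries on regular-sized pages
 * and reports how many entries and pages were actually set up; the lowbyte
 * and toggle arguments are example values only.
 */
static inline void example_ipz_qpt_setup(struct ipz_qpt *qpt, u32 nr_of_qes)
{
	u32 act_nr_of_qes = 0, act_nr_of_pages = 0;

	ipz_qpt_ctor(qpt, nr_of_qes, EHCA_PAGESIZE, 64, 0, 0,
		     &act_nr_of_qes, &act_nr_of_pages);
}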

/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * warning don't use in parallel with ipz_qpageit_get_inc()
 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
 * fix EQ page problems
 */
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);

/*
 * return current Event Queue Entry, increment Queue Entry iterator
 * by one step in struct ipz_queue if valid, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * returns NULL and does not increment if the valid state is wrong
 * warning don't use in parallel with ipz_qpageit_get_inc()
 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
 */
static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	u32 qe = *(u8 *)ret;
	if ((qe >> 7) != (queue->toggle_state & 1))
		return NULL;
	ipz_qeit_eq_get_inc(queue); /* this is a good one */
	return ret;
}
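
/*
 * Illustrative sketch, not part of the eHCA API: hand every event queue
 * entry the hardware has marked valid to a caller-supplied handler.  The
 * handler callback is a hypothetical stand-in for real event dispatch code.
 */
static inline void example_ipz_drain_eq(struct ipz_queue *queue,
					void (*handler)(void *eqe))
{
	void *eqe;

	/* consume every entry that is valid so far, stop at the first
	 * entry whose toggle bit does not match queue->toggle_state
	 */
	while ((eqe = ipz_eqit_eq_get_inc_valid(queue)))
		handler(eqe);
}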

static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	u32 qe = *(u8 *)ret;
	if ((qe >> 7) != (queue->toggle_state & 1))
		return NULL;
	return ret;
}

/* returns address (GX) of first queue entry */
static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
{
	return be64_to_cpu(qpt->qpts[0]);
}

/* returns address (kv) of first page of queue page table */
static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
{
	return qpt->qpts;
}

#endif				/* __IPZ_PT_FN_H__ */