// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"

#define RXE_POOL_ALIGN		(16)
10
/* Static per-type parameters for each rxe object pool: the object size,
 * the offset of the embedded struct rxe_pool_elem inside it, an optional
 * cleanup callback invoked when the last reference is dropped, and the
 * index range handed to the pool's xarray allocator.
 */
static const struct rxe_type_info {
	const char *name;	/* pool name, used for debugging/printing */
	size_t size;		/* sizeof the containing object */
	size_t elem_offset;	/* offsetof(obj, elem) within that object */
	void (*cleanup)(struct rxe_pool_elem *elem); /* may be NULL */
	u32 min_index;		/* lowest index the xarray may assign */
	u32 max_index;		/* highest index the xarray may assign */
	u32 max_elem;		/* cap on simultaneously live objects */
} rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "uc",
		.size		= sizeof(struct rxe_ucontext),
		.elem_offset	= offsetof(struct rxe_ucontext, elem),
		.min_index	= 1,
		.max_index	= UINT_MAX,
		.max_elem	= UINT_MAX,
	},
	[RXE_TYPE_PD] = {
		.name		= "pd",
		.size		= sizeof(struct rxe_pd),
		.elem_offset	= offsetof(struct rxe_pd, elem),
		.min_index	= 1,
		.max_index	= UINT_MAX,
		.max_elem	= UINT_MAX,
	},
	[RXE_TYPE_AH] = {
		.name		= "ah",
		.size		= sizeof(struct rxe_ah),
		.elem_offset	= offsetof(struct rxe_ah, elem),
		.min_index	= RXE_MIN_AH_INDEX,
		.max_index	= RXE_MAX_AH_INDEX,
		.max_elem	= RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "srq",
		.size		= sizeof(struct rxe_srq),
		.elem_offset	= offsetof(struct rxe_srq, elem),
		.cleanup	= rxe_srq_cleanup,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
		.max_elem	= RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1,
	},
	[RXE_TYPE_QP] = {
		.name		= "qp",
		.size		= sizeof(struct rxe_qp),
		.elem_offset	= offsetof(struct rxe_qp, elem),
		.cleanup	= rxe_qp_cleanup,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
		.max_elem	= RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1,
	},
	[RXE_TYPE_CQ] = {
		.name		= "cq",
		.size		= sizeof(struct rxe_cq),
		.elem_offset	= offsetof(struct rxe_cq, elem),
		.cleanup	= rxe_cq_cleanup,
		.min_index	= 1,
		.max_index	= UINT_MAX,
		.max_elem	= UINT_MAX,
	},
	[RXE_TYPE_MR] = {
		.name		= "mr",
		.size		= sizeof(struct rxe_mr),
		.elem_offset	= offsetof(struct rxe_mr, elem),
		.cleanup	= rxe_mr_cleanup,
		.min_index	= RXE_MIN_MR_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.max_elem	= RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1,
	},
	[RXE_TYPE_MW] = {
		.name		= "mw",
		.size		= sizeof(struct rxe_mw),
		.elem_offset	= offsetof(struct rxe_mw, elem),
		.cleanup	= rxe_mw_cleanup,
		.min_index	= RXE_MIN_MW_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.max_elem	= RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1,
	},
};
90
rxe_pool_init(struct rxe_dev * rxe,struct rxe_pool * pool,enum rxe_elem_type type)91 void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
92 enum rxe_elem_type type)
93 {
94 const struct rxe_type_info *info = &rxe_type_info[type];
95
96 memset(pool, 0, sizeof(*pool));
97
98 pool->rxe = rxe;
99 pool->name = info->name;
100 pool->type = type;
101 pool->max_elem = info->max_elem;
102 pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
103 pool->elem_offset = info->elem_offset;
104 pool->cleanup = info->cleanup;
105
106 atomic_set(&pool->num_elem, 0);
107
108 xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
109 pool->limit.min = info->min_index;
110 pool->limit.max = info->max_index;
111 }
112
/* Tear-down check for @pool: by the time this runs every element must
 * already have been released, so a non-empty xarray indicates a leaked
 * object (or a missing rxe_put()) — warn loudly rather than free.
 */
void rxe_pool_cleanup(struct rxe_pool *pool)
{
	WARN_ON(!xa_empty(&pool->xa));
}
117
rxe_alloc(struct rxe_pool * pool)118 void *rxe_alloc(struct rxe_pool *pool)
119 {
120 struct rxe_pool_elem *elem;
121 void *obj;
122 int err;
123
124 if (WARN_ON(!(pool->type == RXE_TYPE_MR)))
125 return NULL;
126
127 if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
128 goto err_cnt;
129
130 obj = kzalloc(pool->elem_size, GFP_KERNEL);
131 if (!obj)
132 goto err_cnt;
133
134 elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
135
136 elem->pool = pool;
137 elem->obj = obj;
138 kref_init(&elem->ref_cnt);
139
140 err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
141 &pool->next, GFP_KERNEL);
142 if (err < 0)
143 goto err_free;
144
145 return obj;
146
147 err_free:
148 kfree(obj);
149 err_cnt:
150 atomic_dec(&pool->num_elem);
151 return NULL;
152 }
153
__rxe_add_to_pool(struct rxe_pool * pool,struct rxe_pool_elem * elem)154 int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
155 {
156 int err;
157
158 if (WARN_ON(pool->type == RXE_TYPE_MR))
159 return -EINVAL;
160
161 if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
162 goto err_cnt;
163
164 elem->pool = pool;
165 elem->obj = (u8 *)elem - pool->elem_offset;
166 kref_init(&elem->ref_cnt);
167
168 err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
169 &pool->next, GFP_KERNEL);
170 if (err < 0)
171 goto err_cnt;
172
173 return 0;
174
175 err_cnt:
176 atomic_dec(&pool->num_elem);
177 return -EINVAL;
178 }
179
/* Look up the object with @index in @pool and take a reference on it.
 * The lookup and the kref_get_unless_zero() happen under the xarray
 * lock so a concurrent release cannot free the element in between.
 *
 * Returns the referenced object, or NULL if no live element exists at
 * that index.
 */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct xarray *xa = &pool->xa;
	struct rxe_pool_elem *elem;
	unsigned long flags;
	void *obj = NULL;

	xa_lock_irqsave(xa, flags);
	elem = xa_load(xa, index);
	if (elem && kref_get_unless_zero(&elem->ref_cnt))
		obj = elem->obj;
	xa_unlock_irqrestore(xa, flags);

	return obj;
}
197
/* kref release callback: runs when the last reference to an element is
 * dropped. Ordering matters: the element is erased from the xarray
 * first so rxe_pool_get_index() can no longer find it, then the
 * type-specific cleanup runs, and only then is MR memory freed (MR is
 * the sole type whose object rxe_alloc() kzalloc'ed itself).
 */
static void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
	struct rxe_pool *pool = elem->pool;

	xa_erase(&pool->xa, elem->index);

	if (pool->cleanup)
		pool->cleanup(elem);

	if (pool->type == RXE_TYPE_MR)
		kfree(elem->obj);

	atomic_dec(&pool->num_elem);
}
213
/* Take a reference on @elem unless it is already being released.
 * Returns nonzero on success, 0 if the refcount had dropped to zero.
 */
int __rxe_get(struct rxe_pool_elem *elem)
{
	return kref_get_unless_zero(&elem->ref_cnt);
}
218
/* Drop a reference on @elem; rxe_elem_release() runs if it was the
 * last one. Returns 1 if the element was released, 0 otherwise.
 */
int __rxe_put(struct rxe_pool_elem *elem)
{
	return kref_put(&elem->ref_cnt, rxe_elem_release);
}
223