1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd. */
3
4 #include <linux/iova.h>
5 #include <linux/mlx5/driver.h>
6 #include "mlx5_vdpa.h"
7
/* Allocate a protection domain owned by @uid; on success store its number
 * in @pdn.  Returns 0 or a negative errno from the command interface.
 */
static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	int ret;

	MLX5_SET(alloc_pd_in, in, uid, uid);
	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);

	ret = mlx5_cmd_exec_inout(dev->mdev, alloc_pd, in, out);
	if (ret)
		return ret;

	*pdn = MLX5_GET(alloc_pd_out, out, pd);
	return 0;
}
25
/* Release protection domain @pdn previously allocated under @uid. */
static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};

	MLX5_SET(dealloc_pd_in, in, uid, uid);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);

	return mlx5_cmd_exec_in(dev->mdev, dealloc_pd, in);
}
36
/* Query the device's special contexts and return the null mkey in
 * @null_mkey.  Returns 0 or a negative errno.
 */
static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
{
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	int ret;

	MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);

	ret = mlx5_cmd_exec_inout(dev->mdev, query_special_contexts, in, out);
	if (ret)
		return ret;

	*null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
	return 0;
}
50
/* Create a user context for this vdpa instance and return its id in @uid.
 * If the device accepts uid 0 for umem objects, no context is needed and
 * *uid is not written.  Returns 0, -EOPNOTSUPP if user contexts are not
 * supported, -ENOMEM, or a command-interface errno.
 */
static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
{
	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
	void *cmd;
	int cmd_sz;
	int ret;

	/* Devices that accept uid 0 for umem objects need no user context. */
	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
		return 0;

	/* A zero log_max_uctx means user contexts are unsupported. */
	if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
		return -EOPNOTSUPP;

	cmd_sz = MLX5_ST_SZ_BYTES(create_uctx_in);
	cmd = kzalloc(cmd_sz, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	MLX5_SET(create_uctx_in, cmd, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(create_uctx_in, cmd, uctx.cap, MLX5_UCTX_CAP_RAW_TX);

	ret = mlx5_cmd_exec(mvdev->mdev, cmd, cmd_sz, out, sizeof(out));
	kfree(cmd);
	if (ret)
		return ret;

	*uid = MLX5_GET(create_uctx_out, out, uid);
	return 0;
}
80
/* Destroy the user context @uid; a zero uid means none was created. */
static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};

	if (!uid)
		return;

	MLX5_SET(destroy_uctx_in, in, uid, uid);
	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);

	mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
}
94
/* Issue CREATE_TIS with the caller-prepared mailbox @in; on success store
 * the new TIS number in @tisn.  The opcode and uid fields of @in are set
 * here.  Returns 0 or a negative errno.
 */
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
{
	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
	int ret;

	MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);

	ret = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
	if (ret)
		return ret;

	*tisn = MLX5_GET(create_tis_out, out, tisn);
	return 0;
}
108
/* Destroy TIS @tisn under this instance's uid. */
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);

	mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
}
118
/* Issue CREATE_RQT with the caller-prepared mailbox @in of @inlen bytes;
 * on success store the new RQT number in @rqtn.  Only the opcode is set
 * here; remaining fields (including uid) come from the caller.
 */
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
{
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
	int ret;

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);

	ret = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	if (ret)
		return ret;

	*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
	return 0;
}
131
/* Issue MODIFY_RQT on @rqtn with the caller-prepared mailbox @in of
 * @inlen bytes.  Opcode, uid and rqtn are filled in here.  Returns 0 or a
 * negative errno.
 */
int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn)
{
	/* Size the output with the modify_rqt layout; the original used
	 * create_rqt_out, a copy-paste slip that happened to work only
	 * because both outputs are bare command headers.
	 */
	u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {};

	MLX5_SET(modify_rqt_in, in, uid, mvdev->res.uid);
	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	return mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
}
141
/* Destroy RQT @rqtn under this instance's uid. */
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};

	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);

	mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
}
151
/* Issue CREATE_TIR with the caller-prepared mailbox @in; on success store
 * the new TIR number in @tirn.  Only the opcode is set here.
 */
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
{
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
	int ret;

	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);

	ret = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
	if (ret)
		return ret;

	*tirn = MLX5_GET(create_tir_out, out, tirn);
	return 0;
}
164
/* Destroy TIR @tirn under this instance's uid. */
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);

	mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
}
174
/* Allocate a transport domain under this instance's uid; on success store
 * its number in @tdn.  Returns 0 or a negative errno.
 */
int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
{
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
	int ret;

	MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);
	MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);

	ret = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
	if (ret)
		return ret;

	*tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);
	return 0;
}
190
/* Release transport domain @tdn under this instance's uid. */
void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};

	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
	MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);

	mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
}
200
/* Create an mkey from the caller-prepared mailbox @in of @inlen bytes;
 * on success store the resulting key (index converted to mkey form) in
 * @mkey.  Opcode and uid are filled in here.  Returns 0 or a negative
 * errno.
 */
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
			  int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)] = {};
	int ret;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);

	ret = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	if (!ret)
		*mkey = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, out, mkey_index));

	return ret;
}
219
/* Destroy @mkey (converted back to its index) under this instance's uid. */
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey)
{
	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};

	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
	MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);

	return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}
229
init_ctrl_vq(struct mlx5_vdpa_dev * mvdev)230 static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
231 {
232 mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
233 if (!mvdev->cvq.iotlb)
234 return -ENOMEM;
235
236 spin_lock_init(&mvdev->cvq.iommu_lock);
237 vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);
238
239 return 0;
240 }
241
/* Free the control virtqueue's iotlb allocated by init_ctrl_vq(). */
static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
	vhost_iotlb_free(mvdev->cvq.iotlb);
}
246
/* Allocate the per-device resources this vdpa instance needs before any
 * queue objects can be created: a UAR page, a user context (uid), a PD,
 * the device null mkey, an ioremap of the doorbell (kick) page, and the
 * control VQ iotlb.  Returns 0 on success or a negative errno; on failure
 * everything acquired so far is released in reverse order via the goto
 * ladder below.
 */
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
	u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
	struct mlx5_vdpa_resources *res = &mvdev->res;
	struct mlx5_core_dev *mdev = mvdev->mdev;
	u64 kick_addr;
	int err;

	/* Double allocation would leak; treat it as a driver logic error. */
	if (res->valid) {
		mlx5_vdpa_warn(mvdev, "resources already allocated\n");
		return -EINVAL;
	}
	mutex_init(&mvdev->mr.mkey_mtx);
	res->uar = mlx5_get_uars_page(mdev);
	if (IS_ERR(res->uar)) {
		err = PTR_ERR(res->uar);
		goto err_uars;
	}

	err = create_uctx(mvdev, &res->uid);
	if (err)
		goto err_uctx;

	err = alloc_pd(mvdev, &res->pdn, res->uid);
	if (err)
		goto err_pd;

	err = get_null_mkey(mvdev, &res->null_mkey);
	if (err)
		goto err_key;

	/* Doorbell page lives at a capability-reported offset into the BAR. */
	kick_addr = mdev->bar_addr + offset;
	res->phys_kick_addr = kick_addr;

	res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
	if (!res->kick_addr) {
		err = -ENOMEM;
		goto err_key;
	}

	err = init_ctrl_vq(mvdev);
	if (err)
		goto err_ctrl;

	res->valid = true;

	return 0;

err_ctrl:
	iounmap(res->kick_addr);
err_key:
	dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
	destroy_uctx(mvdev, res->uid);
err_uctx:
	mlx5_put_uars_page(mdev, res->uar);
err_uars:
	mutex_destroy(&mvdev->mr.mkey_mtx);
	return err;
}
307
/* Tear down everything set up by mlx5_vdpa_alloc_resources(), in reverse
 * acquisition order.  A no-op when the resources were never marked valid.
 */
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_resources *res = &mvdev->res;

	if (!res->valid)
		return;

	cleanup_ctrl_vq(mvdev);
	iounmap(res->kick_addr);
	res->kick_addr = NULL;	/* defend against stale use after unmap */
	dealloc_pd(mvdev, res->pdn, res->uid);
	destroy_uctx(mvdev, res->uid);
	mlx5_put_uars_page(mvdev->mdev, res->uar);
	mutex_destroy(&mvdev->mr.mkey_mtx);
	res->valid = false;
}
324