// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2018 Mellanox Technologies

#include <linux/hyperv.h>
#include "mlx5_core.h"
#include "lib/hv.h"
#include "lib/hv_vhca.h"

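/*
 * Hyper-V vhca transport: consumers register per-type agents that exchange
 * control and data blocks with the host via mlx5_hv_read_config() and
 * mlx5_hv_write_config().  The control agent (type 0) owns the first config
 * block; every other agent writes its data blocks at an offset derived from
 * its type.  Invalidations arriving from the host are deferred to a
 * single-threaded workqueue and dispatched to the affected agents.
 */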
struct mlx5_hv_vhca {
	struct mlx5_core_dev       *dev;
	struct workqueue_struct    *work_queue;
	struct mlx5_hv_vhca_agent  *agents[MLX5_HV_VHCA_AGENT_MAX];
	struct mutex                agents_lock; /* Protect agents array */
};

struct mlx5_hv_vhca_work {
	struct work_struct     invalidate_work;
	struct mlx5_hv_vhca   *hv_vhca;
	u64                    block_mask;
};

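/*
 * Layout of a single data block as written to the host: a 16-bit message
 * sequence number, the block's index within the message, and up to 120
 * bytes of payload (see mlx5_hv_vhca_data_block_prepare()).
 */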
struct mlx5_hv_vhca_data_block {
	u16     sequence;
	u16     offset;
	u8      reserved[4];
	u64     data[15];
};

struct mlx5_hv_vhca_agent {
	enum mlx5_hv_vhca_agent_type	 type;
	struct mlx5_hv_vhca		*hv_vhca;
	void				*priv;
	u16				 seq;
	void (*control)(struct mlx5_hv_vhca_agent *agent,
			struct mlx5_hv_vhca_control_block *block);
	void (*invalidate)(struct mlx5_hv_vhca_agent *agent,
			   u64 block_mask);
	void (*cleanup)(struct mlx5_hv_vhca_agent *agent);
};

struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
{
	struct mlx5_hv_vhca *hv_vhca = NULL;

	hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL);
	if (!hv_vhca)
		return ERR_PTR(-ENOMEM);

	hv_vhca->work_queue = create_singlethread_workqueue("mlx5_hv_vhca");
	if (!hv_vhca->work_queue) {
		kfree(hv_vhca);
		return ERR_PTR(-ENOMEM);
	}

	hv_vhca->dev = dev;
	mutex_init(&hv_vhca->agents_lock);

	return hv_vhca;
}

void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca)
{
	if (IS_ERR_OR_NULL(hv_vhca))
		return;

	destroy_workqueue(hv_vhca->work_queue);
	kfree(hv_vhca);
}

static void mlx5_hv_vhca_invalidate_work(struct work_struct *work)
{
	struct mlx5_hv_vhca_work *hwork;
	struct mlx5_hv_vhca *hv_vhca;
	int i;

	hwork = container_of(work, struct mlx5_hv_vhca_work, invalidate_work);
	hv_vhca = hwork->hv_vhca;

	mutex_lock(&hv_vhca->agents_lock);
	for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
		struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];

		if (!agent || !agent->invalidate)
			continue;

		if (!(BIT(agent->type) & hwork->block_mask))
			continue;

		agent->invalidate(agent, hwork->block_mask);
	}
	mutex_unlock(&hv_vhca->agents_lock);

	kfree(hwork);
}

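/*
 * Invalidation callback registered with mlx5_hv_register_invalidate().  It
 * allocates with GFP_ATOMIC since it may run in a context that cannot
 * sleep; the block_mask is only captured here and the agent callbacks run
 * later from the workqueue in mlx5_hv_vhca_invalidate_work().
 */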
void mlx5_hv_vhca_invalidate(void *context, u64 block_mask)
{
	struct mlx5_hv_vhca *hv_vhca = (struct mlx5_hv_vhca *)context;
	struct mlx5_hv_vhca_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	INIT_WORK(&work->invalidate_work, mlx5_hv_vhca_invalidate_work);
	work->hv_vhca    = hv_vhca;
	work->block_mask = block_mask;

	queue_work(hv_vhca->work_queue, &work->invalidate_work);
}

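/*
 * Map an agent type to its bit in the control block's capabilities/control
 * words: the control agent (type 0) has no bit of its own, every other
 * agent type N maps to BIT(N - 1).
 */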
#define AGENT_MASK(type) (type ? BIT(type - 1) : 0 /* control */)

static void mlx5_hv_vhca_agents_control(struct mlx5_hv_vhca *hv_vhca,
					struct mlx5_hv_vhca_control_block *block)
{
	int i;

	for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
		struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];

		if (!agent || !agent->control)
			continue;

		if (!(AGENT_MASK(agent->type) & block->control))
			continue;

		agent->control(agent, block);
	}
}

static void mlx5_hv_vhca_capabilities(struct mlx5_hv_vhca *hv_vhca,
				      u32 *capabilities)
{
	int i;

	for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
		struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];

		if (agent)
			*capabilities |= AGENT_MASK(agent->type);
	}
}

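/*
 * Invalidate handler of the control agent: re-read the control block from
 * the host, publish the current capability mask, dispatch any requested
 * commands to the matching agents and acknowledge them back to the host.
 */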
static void
mlx5_hv_vhca_control_agent_invalidate(struct mlx5_hv_vhca_agent *agent,
				      u64 block_mask)
{
	struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;
	struct mlx5_core_dev *dev = hv_vhca->dev;
	struct mlx5_hv_vhca_control_block *block;
	u32 capabilities = 0;
	int err;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return;

	err = mlx5_hv_read_config(dev, block, sizeof(*block), 0);
	if (err)
		goto free_block;

	mlx5_hv_vhca_capabilities(hv_vhca, &capabilities);

	/* If there are no capabilities, write back an empty block */
	if (!capabilities) {
		memset(block, 0, sizeof(*block));
		goto write;
	}

	if (block->capabilities != capabilities)
		block->capabilities = capabilities;

	if (block->control & ~capabilities)
		goto free_block;

	mlx5_hv_vhca_agents_control(hv_vhca, block);
	block->command_ack = block->command;

write:
	mlx5_hv_write_config(dev, block, sizeof(*block), 0);

free_block:
	kfree(block);
}

static struct mlx5_hv_vhca_agent *
mlx5_hv_vhca_control_agent_create(struct mlx5_hv_vhca *hv_vhca)
{
	return mlx5_hv_vhca_agent_create(hv_vhca, MLX5_HV_VHCA_AGENT_CONTROL,
					 NULL,
					 mlx5_hv_vhca_control_agent_invalidate,
					 NULL, NULL);
}

static void mlx5_hv_vhca_control_agent_destroy(struct mlx5_hv_vhca_agent *agent)
{
	mlx5_hv_vhca_agent_destroy(agent);
}

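/*
 * Register the invalidation callback with the Hyper-V layer and create the
 * control agent that services host requests.
 */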
int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca)
{
	struct mlx5_hv_vhca_agent *agent;
	int err;

	if (IS_ERR_OR_NULL(hv_vhca))
		return IS_ERR_OR_NULL(hv_vhca);

	err = mlx5_hv_register_invalidate(hv_vhca->dev, hv_vhca,
					  mlx5_hv_vhca_invalidate);
	if (err)
		return err;

	agent = mlx5_hv_vhca_control_agent_create(hv_vhca);
	if (IS_ERR_OR_NULL(agent)) {
		mlx5_hv_unregister_invalidate(hv_vhca->dev);
		return IS_ERR_OR_NULL(agent);
	}

	hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL] = agent;

	return 0;
}

void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca)
{
	struct mlx5_hv_vhca_agent *agent;
	int i;

	if (IS_ERR_OR_NULL(hv_vhca))
		return;

	agent = hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL];
	if (agent)
		mlx5_hv_vhca_control_agent_destroy(agent);

	mutex_lock(&hv_vhca->agents_lock);
	for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++)
		WARN_ON(hv_vhca->agents[i]);

	mutex_unlock(&hv_vhca->agents_lock);

	mlx5_hv_unregister_invalidate(hv_vhca->dev);
}

static void mlx5_hv_vhca_agents_update(struct mlx5_hv_vhca *hv_vhca)
{
	mlx5_hv_vhca_invalidate(hv_vhca, BIT(MLX5_HV_VHCA_AGENT_CONTROL));
}

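/*
 * Register an agent of the given type.  The new agent is published under
 * agents_lock and the control agent is kicked via
 * mlx5_hv_vhca_agents_update() so the host sees the updated capability mask.
 *
 * A minimal usage sketch from a hypothetical consumer; the agent type and
 * callback names below are illustrative, not definitions from this file:
 *
 *	agent = mlx5_hv_vhca_agent_create(hv_vhca, MLX5_HV_VHCA_AGENT_STATS,
 *					  my_control_cb, my_invalidate_cb,
 *					  my_cleanup_cb, my_priv);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	mlx5_hv_vhca_agent_write(agent, buf, len);
 *	...
 *	mlx5_hv_vhca_agent_destroy(agent);
 */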
struct mlx5_hv_vhca_agent *
mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
			  enum mlx5_hv_vhca_agent_type type,
			  void (*control)(struct mlx5_hv_vhca_agent*,
					  struct mlx5_hv_vhca_control_block *block),
			  void (*invalidate)(struct mlx5_hv_vhca_agent*,
					     u64 block_mask),
			  void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
			  void *priv)
{
	struct mlx5_hv_vhca_agent *agent;

	if (IS_ERR_OR_NULL(hv_vhca))
		return ERR_PTR(-ENOMEM);

	if (type >= MLX5_HV_VHCA_AGENT_MAX)
		return ERR_PTR(-EINVAL);

	mutex_lock(&hv_vhca->agents_lock);
	if (hv_vhca->agents[type]) {
		mutex_unlock(&hv_vhca->agents_lock);
		return ERR_PTR(-EINVAL);
	}
	mutex_unlock(&hv_vhca->agents_lock);

	agent = kzalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	agent->type       = type;
	agent->hv_vhca    = hv_vhca;
	agent->priv       = priv;
	agent->control    = control;
	agent->invalidate = invalidate;
	agent->cleanup    = cleanup;

	mutex_lock(&hv_vhca->agents_lock);
	hv_vhca->agents[type] = agent;
	mutex_unlock(&hv_vhca->agents_lock);

	mlx5_hv_vhca_agents_update(hv_vhca);

	return agent;
}

void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
{
	struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;

	mutex_lock(&hv_vhca->agents_lock);

	if (WARN_ON(agent != hv_vhca->agents[agent->type])) {
		mutex_unlock(&hv_vhca->agents_lock);
		return;
	}

	hv_vhca->agents[agent->type] = NULL;
	mutex_unlock(&hv_vhca->agents_lock);

	if (agent->cleanup)
		agent->cleanup(agent);

	kfree(agent);

	mlx5_hv_vhca_agents_update(hv_vhca);
}

static int mlx5_hv_vhca_data_block_prepare(struct mlx5_hv_vhca_agent *agent,
					   struct mlx5_hv_vhca_data_block *data_block,
					   void *src, int len, int *offset)
{
	int bytes = min_t(int, (int)sizeof(data_block->data), len);

	data_block->sequence = agent->seq;
	data_block->offset   = (*offset)++;
	memcpy(data_block->data, src, bytes);

	return bytes;
}

static void mlx5_hv_vhca_agent_seq_update(struct mlx5_hv_vhca_agent *agent)
{
	agent->seq++;
}

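/*
 * Write a message to the host on behalf of an agent: the buffer is split
 * into 120-byte data blocks, each stamped with the current sequence number
 * and its index within the message, and written at the agent's slot in the
 * config space (type * HV_CONFIG_BLOCK_SIZE_MAX).  The sequence number is
 * bumped only after the whole message has been written, allowing the remote
 * reader to group blocks that belong to the same message.
 */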
int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
			     void *buf, int len)
{
	int offset = agent->type * HV_CONFIG_BLOCK_SIZE_MAX;
	int block_offset = 0;
	int total = 0;
	int err;

	while (len) {
		struct mlx5_hv_vhca_data_block data_block = {0};
		int bytes;

		bytes = mlx5_hv_vhca_data_block_prepare(agent, &data_block,
							buf + total,
							len, &block_offset);
		if (!bytes)
			return -ENOMEM;

		err = mlx5_hv_write_config(agent->hv_vhca->dev, &data_block,
					   sizeof(data_block), offset);
		if (err)
			return err;

		total += bytes;
		len   -= bytes;
	}

	mlx5_hv_vhca_agent_seq_update(agent);

	return 0;
}

void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent)
{
	return agent->priv;
}