1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44
45 #include "mlx4.h"
46 #include "fw.h"
47
48 #define MLX4_MAC_VALID (1ull << 63)
49 #define MLX4_MAC_MASK 0x7fffffffffffffffULL
50 #define ETH_ALEN 6
51
52 struct mac_res {
53 struct list_head list;
54 u64 mac;
55 u8 port;
56 };
57
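/*
 * Common header for every tracked resource.  Each entry is kept both in a
 * per-type radix tree (keyed by res_id) and on the owning slave's list,
 * and carries the busy/idle state machine used by get_res()/put_res().
 */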
58 struct res_common {
59 struct list_head list;
60 u32 res_id;
61 int owner;
62 int state;
63 int from_state;
64 int to_state;
65 int removing;
66 };
67
68 enum {
69 RES_ANY_BUSY = 1
70 };
71
72 struct res_gid {
73 struct list_head list;
74 u8 gid[16];
75 enum mlx4_protocol prot;
76 enum mlx4_steer_type steer;
77 };
78
79 enum res_qp_states {
80 RES_QP_BUSY = RES_ANY_BUSY,
81
82 /* QP number was allocated */
83 RES_QP_RESERVED,
84
85 /* ICM memory for QP context was mapped */
86 RES_QP_MAPPED,
87
88 /* QP is in hw ownership */
89 RES_QP_HW
90 };
91
92 static inline const char *qp_states_str(enum res_qp_states state)
93 {
94 switch (state) {
95 case RES_QP_BUSY: return "RES_QP_BUSY";
96 case RES_QP_RESERVED: return "RES_QP_RESERVED";
97 case RES_QP_MAPPED: return "RES_QP_MAPPED";
98 case RES_QP_HW: return "RES_QP_HW";
99 default: return "Unknown";
100 }
101 }
102
103 struct res_qp {
104 struct res_common com;
105 struct res_mtt *mtt;
106 struct res_cq *rcq;
107 struct res_cq *scq;
108 struct res_srq *srq;
109 struct list_head mcg_list;
110 spinlock_t mcg_spl;
111 int local_qpn;
112 };
113
114 enum res_mtt_states {
115 RES_MTT_BUSY = RES_ANY_BUSY,
116 RES_MTT_ALLOCATED,
117 };
118
119 static inline const char *mtt_states_str(enum res_mtt_states state)
120 {
121 switch (state) {
122 case RES_MTT_BUSY: return "RES_MTT_BUSY";
123 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
124 default: return "Unknown";
125 }
126 }
127
128 struct res_mtt {
129 struct res_common com;
130 int order;
131 atomic_t ref_count;
132 };
133
134 enum res_mpt_states {
135 RES_MPT_BUSY = RES_ANY_BUSY,
136 RES_MPT_RESERVED,
137 RES_MPT_MAPPED,
138 RES_MPT_HW,
139 };
140
141 struct res_mpt {
142 struct res_common com;
143 struct res_mtt *mtt;
144 int key;
145 };
146
147 enum res_eq_states {
148 RES_EQ_BUSY = RES_ANY_BUSY,
149 RES_EQ_RESERVED,
150 RES_EQ_HW,
151 };
152
153 struct res_eq {
154 struct res_common com;
155 struct res_mtt *mtt;
156 };
157
158 enum res_cq_states {
159 RES_CQ_BUSY = RES_ANY_BUSY,
160 RES_CQ_ALLOCATED,
161 RES_CQ_HW,
162 };
163
164 struct res_cq {
165 struct res_common com;
166 struct res_mtt *mtt;
167 atomic_t ref_count;
168 };
169
170 enum res_srq_states {
171 RES_SRQ_BUSY = RES_ANY_BUSY,
172 RES_SRQ_ALLOCATED,
173 RES_SRQ_HW,
174 };
175
176 static inline const char *srq_states_str(enum res_srq_states state)
177 {
178 switch (state) {
179 case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
180 case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
181 case RES_SRQ_HW: return "RES_SRQ_HW";
182 default: return "Unknown";
183 }
184 }
185
186 struct res_srq {
187 struct res_common com;
188 struct res_mtt *mtt;
189 struct res_cq *cq;
190 atomic_t ref_count;
191 };
192
193 enum res_counter_states {
194 RES_COUNTER_BUSY = RES_ANY_BUSY,
195 RES_COUNTER_ALLOCATED,
196 };
197
198 static inline const char *counter_states_str(enum res_counter_states state)
199 {
200 switch (state) {
201 case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
202 case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
203 default: return "Unknown";
204 }
205 }
206
207 struct res_counter {
208 struct res_common com;
209 int port;
210 };
211
212 /* For debug purposes */
213 static const char *ResourceType(enum mlx4_resource rt)
214 {
215 switch (rt) {
216 case RES_QP: return "RES_QP";
217 case RES_CQ: return "RES_CQ";
218 case RES_SRQ: return "RES_SRQ";
219 case RES_MPT: return "RES_MPT";
220 case RES_MTT: return "RES_MTT";
221 case RES_MAC: return "RES_MAC";
222 case RES_EQ: return "RES_EQ";
223 case RES_COUNTER: return "RES_COUNTER";
224 default: return "Unknown resource type !!!";
225 }
226 }
227
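/*
 * Set up the master's resource tracker: one list head per resource type
 * for every slave, plus a per-type radix tree for lookup by resource id.
 */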
228 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
229 {
230 struct mlx4_priv *priv = mlx4_priv(dev);
231 int i;
232 int t;
233
234 priv->mfunc.master.res_tracker.slave_list =
235 kzalloc(dev->num_slaves * sizeof(struct slave_list),
236 GFP_KERNEL);
237 if (!priv->mfunc.master.res_tracker.slave_list)
238 return -ENOMEM;
239
240 for (i = 0 ; i < dev->num_slaves; i++) {
241 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
242 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
243 slave_list[i].res_list[t]);
244 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
245 }
246
247 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
248 dev->num_slaves);
249 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
250 INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
251 GFP_ATOMIC|__GFP_NOWARN);
252
253 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
254 return 0 ;
255 }
256
257 void mlx4_free_resource_tracker(struct mlx4_dev *dev)
258 {
259 struct mlx4_priv *priv = mlx4_priv(dev);
260 int i;
261
262 if (priv->mfunc.master.res_tracker.slave_list) {
263 for (i = 0 ; i < dev->num_slaves; i++)
264 mlx4_delete_all_resources_for_slave(dev, i);
265
266 kfree(priv->mfunc.master.res_tracker.slave_list);
267 }
268 }
269
270 static void update_ud_gid(struct mlx4_dev *dev,
271 struct mlx4_qp_context *qp_ctx, u8 slave)
272 {
273 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
274
275 if (MLX4_QP_ST_UD == ts)
276 qp_ctx->pri_path.mgid_index = 0x80 | slave;
277
278 mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
279 slave, qp_ctx->pri_path.mgid_index);
280 }
281
282 static int mpt_mask(struct mlx4_dev *dev)
283 {
284 return dev->caps.num_mpts - 1;
285 }
286
287 static void *find_res(struct mlx4_dev *dev, int res_id,
288 enum mlx4_resource type)
289 {
290 struct mlx4_priv *priv = mlx4_priv(dev);
291
292 return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
293 res_id);
294 }
295
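/*
 * Look up a resource, check that it is owned by @slave and not already
 * busy, then mark it busy (saving the previous state in from_state).
 * The caller releases it again with put_res().
 */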
296 static int get_res(struct mlx4_dev *dev, int slave, int res_id,
297 enum mlx4_resource type,
298 void *res)
299 {
300 struct res_common *r;
301 int err = 0;
302
303 spin_lock_irq(mlx4_tlock(dev));
304 r = find_res(dev, res_id, type);
305 if (!r) {
306 err = -ENOENT;
307 goto exit;
308 }
309
310 if (r->state == RES_ANY_BUSY) {
311 err = -EBUSY;
312 goto exit;
313 }
314
315 if (r->owner != slave) {
316 err = -EPERM;
317 goto exit;
318 }
319
320 r->from_state = r->state;
321 r->state = RES_ANY_BUSY;
322 mlx4_dbg(dev, "res %s id 0x%x to busy\n",
323 ResourceType(type), r->res_id);
324
325 if (res)
326 *((struct res_common **)res) = r;
327
328 exit:
329 spin_unlock_irq(mlx4_tlock(dev));
330 return err;
331 }
332
333 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
334 enum mlx4_resource type,
335 int res_id, int *slave)
336 {
337
338 struct res_common *r;
339 int err = -ENOENT;
340 int id = res_id;
341
342 if (type == RES_QP)
343 id &= 0x7fffff;
344 spin_lock(mlx4_tlock(dev));
345
346 r = find_res(dev, id, type);
347 if (r) {
348 *slave = r->owner;
349 err = 0;
350 }
351 spin_unlock(mlx4_tlock(dev));
352
353 return err;
354 }
355
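/* Drop the busy state taken by get_res() and restore the saved state. */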
356 static void put_res(struct mlx4_dev *dev, int slave, int res_id,
357 enum mlx4_resource type)
358 {
359 struct res_common *r;
360
361 spin_lock_irq(mlx4_tlock(dev));
362 r = find_res(dev, res_id, type);
363 if (r)
364 r->state = r->from_state;
365 spin_unlock_irq(mlx4_tlock(dev));
366 }
367
368 static struct res_common *alloc_qp_tr(int id)
369 {
370 struct res_qp *ret;
371
372 ret = kzalloc(sizeof *ret, GFP_KERNEL);
373 if (!ret)
374 return NULL;
375
376 ret->com.res_id = id;
377 ret->com.state = RES_QP_RESERVED;
378 ret->local_qpn = id;
379 INIT_LIST_HEAD(&ret->mcg_list);
380 spin_lock_init(&ret->mcg_spl);
381
382 return &ret->com;
383 }
384
385 static struct res_common *alloc_mtt_tr(int id, int order)
386 {
387 struct res_mtt *ret;
388
389 ret = kzalloc(sizeof *ret, GFP_KERNEL);
390 if (!ret)
391 return NULL;
392
393 ret->com.res_id = id;
394 ret->order = order;
395 ret->com.state = RES_MTT_ALLOCATED;
396 atomic_set(&ret->ref_count, 0);
397
398 return &ret->com;
399 }
400
401 static struct res_common *alloc_mpt_tr(int id, int key)
402 {
403 struct res_mpt *ret;
404
405 ret = kzalloc(sizeof *ret, GFP_KERNEL);
406 if (!ret)
407 return NULL;
408
409 ret->com.res_id = id;
410 ret->com.state = RES_MPT_RESERVED;
411 ret->key = key;
412
413 return &ret->com;
414 }
415
416 static struct res_common *alloc_eq_tr(int id)
417 {
418 struct res_eq *ret;
419
420 ret = kzalloc(sizeof *ret, GFP_KERNEL);
421 if (!ret)
422 return NULL;
423
424 ret->com.res_id = id;
425 ret->com.state = RES_EQ_RESERVED;
426
427 return &ret->com;
428 }
429
430 static struct res_common *alloc_cq_tr(int id)
431 {
432 struct res_cq *ret;
433
434 ret = kzalloc(sizeof *ret, GFP_KERNEL);
435 if (!ret)
436 return NULL;
437
438 ret->com.res_id = id;
439 ret->com.state = RES_CQ_ALLOCATED;
440 atomic_set(&ret->ref_count, 0);
441
442 return &ret->com;
443 }
444
445 static struct res_common *alloc_srq_tr(int id)
446 {
447 struct res_srq *ret;
448
449 ret = kzalloc(sizeof *ret, GFP_KERNEL);
450 if (!ret)
451 return NULL;
452
453 ret->com.res_id = id;
454 ret->com.state = RES_SRQ_ALLOCATED;
455 atomic_set(&ret->ref_count, 0);
456
457 return &ret->com;
458 }
459
460 static struct res_common *alloc_counter_tr(int id)
461 {
462 struct res_counter *ret;
463
464 ret = kzalloc(sizeof *ret, GFP_KERNEL);
465 if (!ret)
466 return NULL;
467
468 ret->com.res_id = id;
469 ret->com.state = RES_COUNTER_ALLOCATED;
470
471 return &ret->com;
472 }
473
474 static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
475 int extra)
476 {
477 struct res_common *ret;
478
479 switch (type) {
480 case RES_QP:
481 ret = alloc_qp_tr(id);
482 break;
483 case RES_MPT:
484 ret = alloc_mpt_tr(id, extra);
485 break;
486 case RES_MTT:
487 ret = alloc_mtt_tr(id, extra);
488 break;
489 case RES_EQ:
490 ret = alloc_eq_tr(id);
491 break;
492 case RES_CQ:
493 ret = alloc_cq_tr(id);
494 break;
495 case RES_SRQ:
496 ret = alloc_srq_tr(id);
497 break;
498 case RES_MAC:
499 printk(KERN_ERR "implementation missing\n");
500 return NULL;
501 case RES_COUNTER:
502 ret = alloc_counter_tr(id);
503 break;
504
505 default:
506 return NULL;
507 }
508 if (ret)
509 ret->owner = slave;
510
511 return ret;
512 }
513
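/*
 * Create tracking entries for @count consecutive ids starting at @base,
 * insert them into the radix tree and add them to @slave's list.  On any
 * failure the entries inserted so far are rolled back.
 */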
514 static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
515 enum mlx4_resource type, int extra)
516 {
517 int i;
518 int err;
519 struct mlx4_priv *priv = mlx4_priv(dev);
520 struct res_common **res_arr;
521 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
522 struct radix_tree_root *root = &tracker->res_tree[type];
523
524 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
525 if (!res_arr)
526 return -ENOMEM;
527
528 for (i = 0; i < count; ++i) {
529 res_arr[i] = alloc_tr(base + i, type, slave, extra);
530 if (!res_arr[i]) {
531 for (--i; i >= 0; --i)
532 kfree(res_arr[i]);
533
534 kfree(res_arr);
535 return -ENOMEM;
536 }
537 }
538
539 spin_lock_irq(mlx4_tlock(dev));
540 for (i = 0; i < count; ++i) {
541 if (find_res(dev, base + i, type)) {
542 err = -EEXIST;
543 goto undo;
544 }
545 err = radix_tree_insert(root, base + i, res_arr[i]);
546 if (err)
547 goto undo;
548 list_add_tail(&res_arr[i]->list,
549 &tracker->slave_list[slave].res_list[type]);
550 }
551 spin_unlock_irq(mlx4_tlock(dev));
552 kfree(res_arr);
553
554 return 0;
555
556 undo:
557 for (--i; i >= 0; --i)
558 radix_tree_delete(&tracker->res_tree[type], base + i);
559
560 spin_unlock_irq(mlx4_tlock(dev));
561
562 for (i = 0; i < count; ++i)
563 kfree(res_arr[i]);
564
565 kfree(res_arr);
566
567 return err;
568 }
569
570 static int remove_qp_ok(struct res_qp *res)
571 {
572 if (res->com.state == RES_QP_BUSY)
573 return -EBUSY;
574 else if (res->com.state != RES_QP_RESERVED)
575 return -EPERM;
576
577 return 0;
578 }
579
580 static int remove_mtt_ok(struct res_mtt *res, int order)
581 {
582 if (res->com.state == RES_MTT_BUSY ||
583 atomic_read(&res->ref_count)) {
584 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
585 __func__, __LINE__,
586 mtt_states_str(res->com.state),
587 atomic_read(&res->ref_count));
588 return -EBUSY;
589 } else if (res->com.state != RES_MTT_ALLOCATED)
590 return -EPERM;
591 else if (res->order != order)
592 return -EINVAL;
593
594 return 0;
595 }
596
597 static int remove_mpt_ok(struct res_mpt *res)
598 {
599 if (res->com.state == RES_MPT_BUSY)
600 return -EBUSY;
601 else if (res->com.state != RES_MPT_RESERVED)
602 return -EPERM;
603
604 return 0;
605 }
606
607 static int remove_eq_ok(struct res_eq *res)
608 {
609 if (res->com.state == RES_EQ_BUSY)
610 return -EBUSY;
611 else if (res->com.state != RES_EQ_RESERVED)
612 return -EPERM;
613
614 return 0;
615 }
616
617 static int remove_counter_ok(struct res_counter *res)
618 {
619 if (res->com.state == RES_COUNTER_BUSY)
620 return -EBUSY;
621 else if (res->com.state != RES_COUNTER_ALLOCATED)
622 return -EPERM;
623
624 return 0;
625 }
626
627 static int remove_cq_ok(struct res_cq *res)
628 {
629 if (res->com.state == RES_CQ_BUSY)
630 return -EBUSY;
631 else if (res->com.state != RES_CQ_ALLOCATED)
632 return -EPERM;
633
634 return 0;
635 }
636
637 static int remove_srq_ok(struct res_srq *res)
638 {
639 if (res->com.state == RES_SRQ_BUSY)
640 return -EBUSY;
641 else if (res->com.state != RES_SRQ_ALLOCATED)
642 return -EPERM;
643
644 return 0;
645 }
646
647 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
648 {
649 switch (type) {
650 case RES_QP:
651 return remove_qp_ok((struct res_qp *)res);
652 case RES_CQ:
653 return remove_cq_ok((struct res_cq *)res);
654 case RES_SRQ:
655 return remove_srq_ok((struct res_srq *)res);
656 case RES_MPT:
657 return remove_mpt_ok((struct res_mpt *)res);
658 case RES_MTT:
659 return remove_mtt_ok((struct res_mtt *)res, extra);
660 case RES_MAC:
661 return -ENOSYS;
662 case RES_EQ:
663 return remove_eq_ok((struct res_eq *)res);
664 case RES_COUNTER:
665 return remove_counter_ok((struct res_counter *)res);
666 default:
667 return -EINVAL;
668 }
669 }
670
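/*
 * Remove @count consecutive tracking entries starting at @base, but only
 * if every one of them is owned by @slave and in a removable state.
 */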
671 static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
672 enum mlx4_resource type, int extra)
673 {
674 int i;
675 int err;
676 struct mlx4_priv *priv = mlx4_priv(dev);
677 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
678 struct res_common *r;
679
680 spin_lock_irq(mlx4_tlock(dev));
681 for (i = base; i < base + count; ++i) {
682 r = radix_tree_lookup(&tracker->res_tree[type], i);
683 if (!r) {
684 err = -ENOENT;
685 goto out;
686 }
687 if (r->owner != slave) {
688 err = -EPERM;
689 goto out;
690 }
691 err = remove_ok(r, type, extra);
692 if (err)
693 goto out;
694 }
695
696 for (i = base; i < base + count; ++i) {
697 r = radix_tree_lookup(&tracker->res_tree[type], i);
698 radix_tree_delete(&tracker->res_tree[type], i);
699 list_del(&r->list);
700 kfree(r);
701 }
702 err = 0;
703
704 out:
705 spin_unlock_irq(mlx4_tlock(dev));
706
707 return err;
708 }
709
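/*
 * The *_res_start_move_to() helpers begin a state transition: they verify
 * the requested transition is legal, record from_state/to_state and mark
 * the resource busy.  The caller commits the move with res_end_move() or
 * rolls it back with res_abort_move().
 */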
710 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
711 enum res_qp_states state, struct res_qp **qp,
712 int alloc)
713 {
714 struct mlx4_priv *priv = mlx4_priv(dev);
715 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
716 struct res_qp *r;
717 int err = 0;
718
719 spin_lock_irq(mlx4_tlock(dev));
720 r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
721 if (!r)
722 err = -ENOENT;
723 else if (r->com.owner != slave)
724 err = -EPERM;
725 else {
726 switch (state) {
727 case RES_QP_BUSY:
728 mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
729 __func__, r->com.res_id);
730 err = -EBUSY;
731 break;
732
733 case RES_QP_RESERVED:
734 if (r->com.state == RES_QP_MAPPED && !alloc)
735 break;
736
737 mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
738 err = -EINVAL;
739 break;
740
741 case RES_QP_MAPPED:
742 if ((r->com.state == RES_QP_RESERVED && alloc) ||
743 r->com.state == RES_QP_HW)
744 break;
745 else {
746 mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
747 r->com.res_id);
748 err = -EINVAL;
749 }
750
751 break;
752
753 case RES_QP_HW:
754 if (r->com.state != RES_QP_MAPPED)
755 err = -EINVAL;
756 break;
757 default:
758 err = -EINVAL;
759 }
760
761 if (!err) {
762 r->com.from_state = r->com.state;
763 r->com.to_state = state;
764 r->com.state = RES_QP_BUSY;
765 if (qp)
766 *qp = (struct res_qp *)r;
767 }
768 }
769
770 spin_unlock_irq(mlx4_tlock(dev));
771
772 return err;
773 }
774
775 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
776 enum res_mpt_states state, struct res_mpt **mpt)
777 {
778 struct mlx4_priv *priv = mlx4_priv(dev);
779 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
780 struct res_mpt *r;
781 int err = 0;
782
783 spin_lock_irq(mlx4_tlock(dev));
784 r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
785 if (!r)
786 err = -ENOENT;
787 else if (r->com.owner != slave)
788 err = -EPERM;
789 else {
790 switch (state) {
791 case RES_MPT_BUSY:
792 err = -EINVAL;
793 break;
794
795 case RES_MPT_RESERVED:
796 if (r->com.state != RES_MPT_MAPPED)
797 err = -EINVAL;
798 break;
799
800 case RES_MPT_MAPPED:
801 if (r->com.state != RES_MPT_RESERVED &&
802 r->com.state != RES_MPT_HW)
803 err = -EINVAL;
804 break;
805
806 case RES_MPT_HW:
807 if (r->com.state != RES_MPT_MAPPED)
808 err = -EINVAL;
809 break;
810 default:
811 err = -EINVAL;
812 }
813
814 if (!err) {
815 r->com.from_state = r->com.state;
816 r->com.to_state = state;
817 r->com.state = RES_MPT_BUSY;
818 if (mpt)
819 *mpt = (struct res_mpt *)r;
820 }
821 }
822
823 spin_unlock_irq(mlx4_tlock(dev));
824
825 return err;
826 }
827
828 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
829 enum res_eq_states state, struct res_eq **eq)
830 {
831 struct mlx4_priv *priv = mlx4_priv(dev);
832 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
833 struct res_eq *r;
834 int err = 0;
835
836 spin_lock_irq(mlx4_tlock(dev));
837 r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
838 if (!r)
839 err = -ENOENT;
840 else if (r->com.owner != slave)
841 err = -EPERM;
842 else {
843 switch (state) {
844 case RES_EQ_BUSY:
845 err = -EINVAL;
846 break;
847
848 case RES_EQ_RESERVED:
849 if (r->com.state != RES_EQ_HW)
850 err = -EINVAL;
851 break;
852
853 case RES_EQ_HW:
854 if (r->com.state != RES_EQ_RESERVED)
855 err = -EINVAL;
856 break;
857
858 default:
859 err = -EINVAL;
860 }
861
862 if (!err) {
863 r->com.from_state = r->com.state;
864 r->com.to_state = state;
865 r->com.state = RES_EQ_BUSY;
866 if (eq)
867 *eq = r;
868 }
869 }
870
871 spin_unlock_irq(mlx4_tlock(dev));
872
873 return err;
874 }
875
876 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
877 enum res_cq_states state, struct res_cq **cq)
878 {
879 struct mlx4_priv *priv = mlx4_priv(dev);
880 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
881 struct res_cq *r;
882 int err;
883
884 spin_lock_irq(mlx4_tlock(dev));
885 r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
886 if (!r)
887 err = -ENOENT;
888 else if (r->com.owner != slave)
889 err = -EPERM;
890 else {
891 switch (state) {
892 case RES_CQ_BUSY:
893 err = -EBUSY;
894 break;
895
896 case RES_CQ_ALLOCATED:
897 if (r->com.state != RES_CQ_HW)
898 err = -EINVAL;
899 else if (atomic_read(&r->ref_count))
900 err = -EBUSY;
901 else
902 err = 0;
903 break;
904
905 case RES_CQ_HW:
906 if (r->com.state != RES_CQ_ALLOCATED)
907 err = -EINVAL;
908 else
909 err = 0;
910 break;
911
912 default:
913 err = -EINVAL;
914 }
915
916 if (!err) {
917 r->com.from_state = r->com.state;
918 r->com.to_state = state;
919 r->com.state = RES_CQ_BUSY;
920 if (cq)
921 *cq = r;
922 }
923 }
924
925 spin_unlock_irq(mlx4_tlock(dev));
926
927 return err;
928 }
929
930 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
931 enum res_cq_states state, struct res_srq **srq)
932 {
933 struct mlx4_priv *priv = mlx4_priv(dev);
934 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
935 struct res_srq *r;
936 int err = 0;
937
938 spin_lock_irq(mlx4_tlock(dev));
939 r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
940 if (!r)
941 err = -ENOENT;
942 else if (r->com.owner != slave)
943 err = -EPERM;
944 else {
945 switch (state) {
946 case RES_SRQ_BUSY:
947 err = -EINVAL;
948 break;
949
950 case RES_SRQ_ALLOCATED:
951 if (r->com.state != RES_SRQ_HW)
952 err = -EINVAL;
953 else if (atomic_read(&r->ref_count))
954 err = -EBUSY;
955 break;
956
957 case RES_SRQ_HW:
958 if (r->com.state != RES_SRQ_ALLOCATED)
959 err = -EINVAL;
960 break;
961
962 default:
963 err = -EINVAL;
964 }
965
966 if (!err) {
967 r->com.from_state = r->com.state;
968 r->com.to_state = state;
969 r->com.state = RES_SRQ_BUSY;
970 if (srq)
971 *srq = r;
972 }
973 }
974
975 spin_unlock_irq(mlx4_tlock(dev));
976
977 return err;
978 }
979
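/* Roll back a transition started by a *_res_start_move_to() helper. */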
980 static void res_abort_move(struct mlx4_dev *dev, int slave,
981 enum mlx4_resource type, int id)
982 {
983 struct mlx4_priv *priv = mlx4_priv(dev);
984 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
985 struct res_common *r;
986
987 spin_lock_irq(mlx4_tlock(dev));
988 r = radix_tree_lookup(&tracker->res_tree[type], id);
989 if (r && (r->owner == slave))
990 r->state = r->from_state;
991 spin_unlock_irq(mlx4_tlock(dev));
992 }
993
994 static void res_end_move(struct mlx4_dev *dev, int slave,
995 enum mlx4_resource type, int id)
996 {
997 struct mlx4_priv *priv = mlx4_priv(dev);
998 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
999 struct res_common *r;
1000
1001 spin_lock_irq(mlx4_tlock(dev));
1002 r = radix_tree_lookup(&tracker->res_tree[type], id);
1003 if (r && (r->owner == slave))
1004 r->state = r->to_state;
1005 spin_unlock_irq(mlx4_tlock(dev));
1006 }
1007
1008 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1009 {
1010 return mlx4_is_qp_reserved(dev, qpn);
1011 }
1012
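/*
 * Slave QP allocation mirrors the native two-step flow: RES_OP_RESERVE
 * reserves and tracks a range of QP numbers, then RES_OP_MAP_ICM maps ICM
 * memory for a single QP and moves it to RES_QP_MAPPED.
 */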
1013 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1014 u64 in_param, u64 *out_param)
1015 {
1016 int err;
1017 int count;
1018 int align;
1019 int base;
1020 int qpn;
1021
1022 switch (op) {
1023 case RES_OP_RESERVE:
1024 count = get_param_l(&in_param);
1025 align = get_param_h(&in_param);
1026 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1027 if (err)
1028 return err;
1029
1030 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1031 if (err) {
1032 __mlx4_qp_release_range(dev, base, count);
1033 return err;
1034 }
1035 set_param_l(out_param, base);
1036 break;
1037 case RES_OP_MAP_ICM:
1038 qpn = get_param_l(&in_param) & 0x7fffff;
1039 if (valid_reserved(dev, slave, qpn)) {
1040 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1041 if (err)
1042 return err;
1043 }
1044
1045 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1046 NULL, 1);
1047 if (err)
1048 return err;
1049
1050 if (!valid_reserved(dev, slave, qpn)) {
1051 err = __mlx4_qp_alloc_icm(dev, qpn);
1052 if (err) {
1053 res_abort_move(dev, slave, RES_QP, qpn);
1054 return err;
1055 }
1056 }
1057
1058 res_end_move(dev, slave, RES_QP, qpn);
1059 break;
1060
1061 default:
1062 err = -EINVAL;
1063 break;
1064 }
1065 return err;
1066 }
1067
1068 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1069 u64 in_param, u64 *out_param)
1070 {
1071 int err = -EINVAL;
1072 int base;
1073 int order;
1074
1075 if (op != RES_OP_RESERVE_AND_MAP)
1076 return err;
1077
1078 order = get_param_l(&in_param);
1079 base = __mlx4_alloc_mtt_range(dev, order);
1080 if (base == -1)
1081 return -ENOMEM;
1082
1083 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1084 if (err)
1085 __mlx4_free_mtt_range(dev, base, order);
1086 else
1087 set_param_l(out_param, base);
1088
1089 return err;
1090 }
1091
1092 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1093 u64 in_param, u64 *out_param)
1094 {
1095 int err = -EINVAL;
1096 int index;
1097 int id;
1098 struct res_mpt *mpt;
1099
1100 switch (op) {
1101 case RES_OP_RESERVE:
1102 index = __mlx4_mr_reserve(dev);
1103 if (index == -1)
1104 break;
1105 id = index & mpt_mask(dev);
1106
1107 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1108 if (err) {
1109 __mlx4_mr_release(dev, index);
1110 break;
1111 }
1112 set_param_l(out_param, index);
1113 break;
1114 case RES_OP_MAP_ICM:
1115 index = get_param_l(&in_param);
1116 id = index & mpt_mask(dev);
1117 err = mr_res_start_move_to(dev, slave, id,
1118 RES_MPT_MAPPED, &mpt);
1119 if (err)
1120 return err;
1121
1122 err = __mlx4_mr_alloc_icm(dev, mpt->key);
1123 if (err) {
1124 res_abort_move(dev, slave, RES_MPT, id);
1125 return err;
1126 }
1127
1128 res_end_move(dev, slave, RES_MPT, id);
1129 break;
1130 }
1131 return err;
1132 }
1133
1134 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1135 u64 in_param, u64 *out_param)
1136 {
1137 int cqn;
1138 int err;
1139
1140 switch (op) {
1141 case RES_OP_RESERVE_AND_MAP:
1142 err = __mlx4_cq_alloc_icm(dev, &cqn);
1143 if (err)
1144 break;
1145
1146 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1147 if (err) {
1148 __mlx4_cq_free_icm(dev, cqn);
1149 break;
1150 }
1151
1152 set_param_l(out_param, cqn);
1153 break;
1154
1155 default:
1156 err = -EINVAL;
1157 }
1158
1159 return err;
1160 }
1161
1162 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1163 u64 in_param, u64 *out_param)
1164 {
1165 int srqn;
1166 int err;
1167
1168 switch (op) {
1169 case RES_OP_RESERVE_AND_MAP:
1170 err = __mlx4_srq_alloc_icm(dev, &srqn);
1171 if (err)
1172 break;
1173
1174 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1175 if (err) {
1176 __mlx4_srq_free_icm(dev, srqn);
1177 break;
1178 }
1179
1180 set_param_l(out_param, srqn);
1181 break;
1182
1183 default:
1184 err = -EINVAL;
1185 }
1186
1187 return err;
1188 }
1189
1190 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1191 {
1192 struct mlx4_priv *priv = mlx4_priv(dev);
1193 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1194 struct mac_res *res;
1195
1196 res = kzalloc(sizeof *res, GFP_KERNEL);
1197 if (!res)
1198 return -ENOMEM;
1199 res->mac = mac;
1200 res->port = (u8) port;
1201 list_add_tail(&res->list,
1202 &tracker->slave_list[slave].res_list[RES_MAC]);
1203 return 0;
1204 }
1205
1206 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1207 int port)
1208 {
1209 struct mlx4_priv *priv = mlx4_priv(dev);
1210 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1211 struct list_head *mac_list =
1212 &tracker->slave_list[slave].res_list[RES_MAC];
1213 struct mac_res *res, *tmp;
1214
1215 list_for_each_entry_safe(res, tmp, mac_list, list) {
1216 if (res->mac == mac && res->port == (u8) port) {
1217 list_del(&res->list);
1218 kfree(res);
1219 break;
1220 }
1221 }
1222 }
1223
1224 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1225 {
1226 struct mlx4_priv *priv = mlx4_priv(dev);
1227 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1228 struct list_head *mac_list =
1229 &tracker->slave_list[slave].res_list[RES_MAC];
1230 struct mac_res *res, *tmp;
1231
1232 list_for_each_entry_safe(res, tmp, mac_list, list) {
1233 list_del(&res->list);
1234 __mlx4_unregister_mac(dev, res->port, res->mac);
1235 kfree(res);
1236 }
1237 }
1238
1239 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1240 u64 in_param, u64 *out_param)
1241 {
1242 int err = -EINVAL;
1243 int port;
1244 u64 mac;
1245
1246 if (op != RES_OP_RESERVE_AND_MAP)
1247 return err;
1248
1249 port = get_param_l(out_param);
1250 mac = in_param;
1251
1252 err = __mlx4_register_mac(dev, port, mac);
1253 if (err >= 0) {
1254 set_param_l(out_param, err);
1255 err = 0;
1256 }
1257
1258 if (!err) {
1259 err = mac_add_to_slave(dev, slave, mac, port);
1260 if (err)
1261 __mlx4_unregister_mac(dev, port, mac);
1262 }
1263 return err;
1264 }
1265
1266 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1267 u64 in_param, u64 *out_param)
1268 {
1269 return 0;
1270 }
1271
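/*
 * Dispatcher for the virtual ALLOC_RES command: vhcr->in_modifier selects
 * the resource type and vhcr->op_modifier selects the operation.
 */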
1272 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1273 struct mlx4_vhcr *vhcr,
1274 struct mlx4_cmd_mailbox *inbox,
1275 struct mlx4_cmd_mailbox *outbox,
1276 struct mlx4_cmd_info *cmd)
1277 {
1278 int err;
1279 int alop = vhcr->op_modifier;
1280
1281 switch (vhcr->in_modifier) {
1282 case RES_QP:
1283 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1284 vhcr->in_param, &vhcr->out_param);
1285 break;
1286
1287 case RES_MTT:
1288 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1289 vhcr->in_param, &vhcr->out_param);
1290 break;
1291
1292 case RES_MPT:
1293 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1294 vhcr->in_param, &vhcr->out_param);
1295 break;
1296
1297 case RES_CQ:
1298 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1299 vhcr->in_param, &vhcr->out_param);
1300 break;
1301
1302 case RES_SRQ:
1303 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1304 vhcr->in_param, &vhcr->out_param);
1305 break;
1306
1307 case RES_MAC:
1308 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1309 vhcr->in_param, &vhcr->out_param);
1310 break;
1311
1312 case RES_VLAN:
1313 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1314 vhcr->in_param, &vhcr->out_param);
1315 break;
1316
1317 default:
1318 err = -EINVAL;
1319 break;
1320 }
1321
1322 return err;
1323 }
1324
1325 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1326 u64 in_param)
1327 {
1328 int err;
1329 int count;
1330 int base;
1331 int qpn;
1332
1333 switch (op) {
1334 case RES_OP_RESERVE:
1335 base = get_param_l(&in_param) & 0x7fffff;
1336 count = get_param_h(&in_param);
1337 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1338 if (err)
1339 break;
1340 __mlx4_qp_release_range(dev, base, count);
1341 break;
1342 case RES_OP_MAP_ICM:
1343 qpn = get_param_l(&in_param) & 0x7fffff;
1344 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1345 NULL, 0);
1346 if (err)
1347 return err;
1348
1349 if (!valid_reserved(dev, slave, qpn))
1350 __mlx4_qp_free_icm(dev, qpn);
1351
1352 res_end_move(dev, slave, RES_QP, qpn);
1353
1354 if (valid_reserved(dev, slave, qpn))
1355 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1356 break;
1357 default:
1358 err = -EINVAL;
1359 break;
1360 }
1361 return err;
1362 }
1363
1364 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1365 u64 in_param, u64 *out_param)
1366 {
1367 int err = -EINVAL;
1368 int base;
1369 int order;
1370
1371 if (op != RES_OP_RESERVE_AND_MAP)
1372 return err;
1373
1374 base = get_param_l(&in_param);
1375 order = get_param_h(&in_param);
1376 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1377 if (!err)
1378 __mlx4_free_mtt_range(dev, base, order);
1379 return err;
1380 }
1381
1382 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1383 u64 in_param)
1384 {
1385 int err = -EINVAL;
1386 int index;
1387 int id;
1388 struct res_mpt *mpt;
1389
1390 switch (op) {
1391 case RES_OP_RESERVE:
1392 index = get_param_l(&in_param);
1393 id = index & mpt_mask(dev);
1394 err = get_res(dev, slave, id, RES_MPT, &mpt);
1395 if (err)
1396 break;
1397 index = mpt->key;
1398 put_res(dev, slave, id, RES_MPT);
1399
1400 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1401 if (err)
1402 break;
1403 __mlx4_mr_release(dev, index);
1404 break;
1405 case RES_OP_MAP_ICM:
1406 index = get_param_l(&in_param);
1407 id = index & mpt_mask(dev);
1408 err = mr_res_start_move_to(dev, slave, id,
1409 RES_MPT_RESERVED, &mpt);
1410 if (err)
1411 return err;
1412
1413 __mlx4_mr_free_icm(dev, mpt->key);
1414 res_end_move(dev, slave, RES_MPT, id);
1415 return err;
1417 default:
1418 err = -EINVAL;
1419 break;
1420 }
1421 return err;
1422 }
1423
1424 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1425 u64 in_param, u64 *out_param)
1426 {
1427 int cqn;
1428 int err;
1429
1430 switch (op) {
1431 case RES_OP_RESERVE_AND_MAP:
1432 cqn = get_param_l(&in_param);
1433 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1434 if (err)
1435 break;
1436
1437 __mlx4_cq_free_icm(dev, cqn);
1438 break;
1439
1440 default:
1441 err = -EINVAL;
1442 break;
1443 }
1444
1445 return err;
1446 }
1447
1448 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1449 u64 in_param, u64 *out_param)
1450 {
1451 int srqn;
1452 int err;
1453
1454 switch (op) {
1455 case RES_OP_RESERVE_AND_MAP:
1456 srqn = get_param_l(&in_param);
1457 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1458 if (err)
1459 break;
1460
1461 __mlx4_srq_free_icm(dev, srqn);
1462 break;
1463
1464 default:
1465 err = -EINVAL;
1466 break;
1467 }
1468
1469 return err;
1470 }
1471
1472 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1473 u64 in_param, u64 *out_param)
1474 {
1475 int port;
1476 int err = 0;
1477
1478 switch (op) {
1479 case RES_OP_RESERVE_AND_MAP:
1480 port = get_param_l(out_param);
1481 mac_del_from_slave(dev, slave, in_param, port);
1482 __mlx4_unregister_mac(dev, port, in_param);
1483 break;
1484 default:
1485 err = -EINVAL;
1486 break;
1487 }
1488
1489 return err;
1490
1491 }
1492
1493 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1494 u64 in_param, u64 *out_param)
1495 {
1496 return 0;
1497 }
1498
1499 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1500 struct mlx4_vhcr *vhcr,
1501 struct mlx4_cmd_mailbox *inbox,
1502 struct mlx4_cmd_mailbox *outbox,
1503 struct mlx4_cmd_info *cmd)
1504 {
1505 int err = -EINVAL;
1506 int alop = vhcr->op_modifier;
1507
1508 switch (vhcr->in_modifier) {
1509 case RES_QP:
1510 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1511 vhcr->in_param);
1512 break;
1513
1514 case RES_MTT:
1515 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1516 vhcr->in_param, &vhcr->out_param);
1517 break;
1518
1519 case RES_MPT:
1520 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1521 vhcr->in_param);
1522 break;
1523
1524 case RES_CQ:
1525 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1526 vhcr->in_param, &vhcr->out_param);
1527 break;
1528
1529 case RES_SRQ:
1530 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1531 vhcr->in_param, &vhcr->out_param);
1532 break;
1533
1534 case RES_MAC:
1535 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1536 vhcr->in_param, &vhcr->out_param);
1537 break;
1538
1539 case RES_VLAN:
1540 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1541 vhcr->in_param, &vhcr->out_param);
1542 break;
1543
1544 default:
1545 break;
1546 }
1547 return err;
1548 }
1549
1550 /* ugly but other choices are uglier */
1551 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1552 {
1553 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1554 }
1555
1556 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1557 {
1558 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1559 }
1560
1561 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1562 {
1563 return be32_to_cpu(mpt->mtt_sz);
1564 }
1565
1566 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1567 {
1568 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1569 }
1570
1571 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1572 {
1573 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1574 }
1575
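/*
 * Number of MTT entries a QP needs, derived from the SQ/RQ sizes and the
 * page size encoded in the context.  For example, a send queue of 2^10
 * entries with a 64-byte stride and no receive queue (SRQ in use) on 4KB
 * pages occupies 64KB and therefore needs 16 MTT entries.
 */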
1576 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1577 {
1578 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1579 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1580 int log_sq_stride = qpc->sq_size_stride & 7;
1581 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1582 int log_rq_stride = qpc->rq_size_stride & 7;
1583 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1584 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1585 int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1586 int sq_size;
1587 int rq_size;
1588 int total_pages;
1589 int total_mem;
1590 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1591
1592 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1593 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1594 total_mem = sq_size + rq_size;
1595 total_pages =
1596 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1597 page_shift);
1598
1599 return total_pages;
1600 }
1601
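/* Return 0 if [start, start + size) lies within the MTT range owned by @mtt. */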
1602 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1603 int size, struct res_mtt *mtt)
1604 {
1605 int res_start = mtt->com.res_id;
1606 int res_size = (1 << mtt->order);
1607
1608 if (start < res_start || start + size > res_start + res_size)
1609 return -EPERM;
1610 return 0;
1611 }
1612
1613 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1614 struct mlx4_vhcr *vhcr,
1615 struct mlx4_cmd_mailbox *inbox,
1616 struct mlx4_cmd_mailbox *outbox,
1617 struct mlx4_cmd_info *cmd)
1618 {
1619 int err;
1620 int index = vhcr->in_modifier;
1621 struct res_mtt *mtt;
1622 struct res_mpt *mpt;
1623 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1624 int phys;
1625 int id;
1626
1627 id = index & mpt_mask(dev);
1628 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1629 if (err)
1630 return err;
1631
1632 phys = mr_phys_mpt(inbox->buf);
1633 if (!phys) {
1634 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1635 if (err)
1636 goto ex_abort;
1637
1638 err = check_mtt_range(dev, slave, mtt_base,
1639 mr_get_mtt_size(inbox->buf), mtt);
1640 if (err)
1641 goto ex_put;
1642
1643 mpt->mtt = mtt;
1644 }
1645
1646 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1647 if (err)
1648 goto ex_put;
1649
1650 if (!phys) {
1651 atomic_inc(&mtt->ref_count);
1652 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1653 }
1654
1655 res_end_move(dev, slave, RES_MPT, id);
1656 return 0;
1657
1658 ex_put:
1659 if (!phys)
1660 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1661 ex_abort:
1662 res_abort_move(dev, slave, RES_MPT, id);
1663
1664 return err;
1665 }
1666
1667 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1668 struct mlx4_vhcr *vhcr,
1669 struct mlx4_cmd_mailbox *inbox,
1670 struct mlx4_cmd_mailbox *outbox,
1671 struct mlx4_cmd_info *cmd)
1672 {
1673 int err;
1674 int index = vhcr->in_modifier;
1675 struct res_mpt *mpt;
1676 int id;
1677
1678 id = index & mpt_mask(dev);
1679 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1680 if (err)
1681 return err;
1682
1683 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1684 if (err)
1685 goto ex_abort;
1686
1687 if (mpt->mtt)
1688 atomic_dec(&mpt->mtt->ref_count);
1689
1690 res_end_move(dev, slave, RES_MPT, id);
1691 return 0;
1692
1693 ex_abort:
1694 res_abort_move(dev, slave, RES_MPT, id);
1695
1696 return err;
1697 }
1698
1699 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1700 struct mlx4_vhcr *vhcr,
1701 struct mlx4_cmd_mailbox *inbox,
1702 struct mlx4_cmd_mailbox *outbox,
1703 struct mlx4_cmd_info *cmd)
1704 {
1705 int err;
1706 int index = vhcr->in_modifier;
1707 struct res_mpt *mpt;
1708 int id;
1709
1710 id = index & mpt_mask(dev);
1711 err = get_res(dev, slave, id, RES_MPT, &mpt);
1712 if (err)
1713 return err;
1714
1715 if (mpt->com.from_state != RES_MPT_HW) {
1716 err = -EBUSY;
1717 goto out;
1718 }
1719
1720 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1721
1722 out:
1723 put_res(dev, slave, id, RES_MPT);
1724 return err;
1725 }
1726
1727 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1728 {
1729 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1730 }
1731
1732 static int qp_get_scqn(struct mlx4_qp_context *qpc)
1733 {
1734 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1735 }
1736
1737 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1738 {
1739 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1740 }
1741
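/*
 * RST2INIT moves the QP to hardware ownership: validate the MTT range and
 * the referenced CQs/SRQ, execute the firmware command, then take
 * references on everything the QP points at so those resources cannot be
 * freed underneath it.
 */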
1742 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1743 struct mlx4_vhcr *vhcr,
1744 struct mlx4_cmd_mailbox *inbox,
1745 struct mlx4_cmd_mailbox *outbox,
1746 struct mlx4_cmd_info *cmd)
1747 {
1748 int err;
1749 int qpn = vhcr->in_modifier & 0x7fffff;
1750 struct res_mtt *mtt;
1751 struct res_qp *qp;
1752 struct mlx4_qp_context *qpc = inbox->buf + 8;
1753 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
1754 int mtt_size = qp_get_mtt_size(qpc);
1755 struct res_cq *rcq;
1756 struct res_cq *scq;
1757 int rcqn = qp_get_rcqn(qpc);
1758 int scqn = qp_get_scqn(qpc);
1759 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
1760 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
1761 struct res_srq *srq;
1762 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
1763
1764 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
1765 if (err)
1766 return err;
1767 qp->local_qpn = local_qpn;
1768
1769 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1770 if (err)
1771 goto ex_abort;
1772
1773 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1774 if (err)
1775 goto ex_put_mtt;
1776
1777 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
1778 if (err)
1779 goto ex_put_mtt;
1780
1781 if (scqn != rcqn) {
1782 err = get_res(dev, slave, scqn, RES_CQ, &scq);
1783 if (err)
1784 goto ex_put_rcq;
1785 } else
1786 scq = rcq;
1787
1788 if (use_srq) {
1789 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
1790 if (err)
1791 goto ex_put_scq;
1792 }
1793
1794 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1795 if (err)
1796 goto ex_put_srq;
1797 atomic_inc(&mtt->ref_count);
1798 qp->mtt = mtt;
1799 atomic_inc(&rcq->ref_count);
1800 qp->rcq = rcq;
1801 atomic_inc(&scq->ref_count);
1802 qp->scq = scq;
1803
1804 if (scqn != rcqn)
1805 put_res(dev, slave, scqn, RES_CQ);
1806
1807 if (use_srq) {
1808 atomic_inc(&srq->ref_count);
1809 put_res(dev, slave, srqn, RES_SRQ);
1810 qp->srq = srq;
1811 }
1812 put_res(dev, slave, rcqn, RES_CQ);
1813 put_res(dev, slave, mtt_base, RES_MTT);
1814 res_end_move(dev, slave, RES_QP, qpn);
1815
1816 return 0;
1817
1818 ex_put_srq:
1819 if (use_srq)
1820 put_res(dev, slave, srqn, RES_SRQ);
1821 ex_put_scq:
1822 if (scqn != rcqn)
1823 put_res(dev, slave, scqn, RES_CQ);
1824 ex_put_rcq:
1825 put_res(dev, slave, rcqn, RES_CQ);
1826 ex_put_mtt:
1827 put_res(dev, slave, mtt_base, RES_MTT);
1828 ex_abort:
1829 res_abort_move(dev, slave, RES_QP, qpn);
1830
1831 return err;
1832 }
1833
1834 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
1835 {
1836 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
1837 }
1838
1839 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
1840 {
1841 int log_eq_size = eqc->log_eq_size & 0x1f;
1842 int page_shift = (eqc->log_page_size & 0x3f) + 12;
1843
1844 if (log_eq_size + 5 < page_shift)
1845 return 1;
1846
1847 return 1 << (log_eq_size + 5 - page_shift);
1848 }
1849
1850 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
1851 {
1852 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
1853 }
1854
1855 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
1856 {
1857 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
1858 int page_shift = (cqc->log_page_size & 0x3f) + 12;
1859
1860 if (log_cq_size + 5 < page_shift)
1861 return 1;
1862
1863 return 1 << (log_cq_size + 5 - page_shift);
1864 }
1865
1866 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1867 struct mlx4_vhcr *vhcr,
1868 struct mlx4_cmd_mailbox *inbox,
1869 struct mlx4_cmd_mailbox *outbox,
1870 struct mlx4_cmd_info *cmd)
1871 {
1872 int err;
1873 int eqn = vhcr->in_modifier;
1874 int res_id = (slave << 8) | eqn;
1875 struct mlx4_eq_context *eqc = inbox->buf;
1876 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
1877 int mtt_size = eq_get_mtt_size(eqc);
1878 struct res_eq *eq;
1879 struct res_mtt *mtt;
1880
1881 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1882 if (err)
1883 return err;
1884 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
1885 if (err)
1886 goto out_add;
1887
1888 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1889 if (err)
1890 goto out_move;
1891
1892 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1893 if (err)
1894 goto out_put;
1895
1896 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1897 if (err)
1898 goto out_put;
1899
1900 atomic_inc(&mtt->ref_count);
1901 eq->mtt = mtt;
1902 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1903 res_end_move(dev, slave, RES_EQ, res_id);
1904 return 0;
1905
1906 out_put:
1907 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1908 out_move:
1909 res_abort_move(dev, slave, RES_EQ, res_id);
1910 out_add:
1911 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1912 return err;
1913 }
1914
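/*
 * Find the MTT range owned by @slave that contains [start, start + len)
 * and mark it busy; the caller releases it with put_res().
 */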
1915 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
1916 int len, struct res_mtt **res)
1917 {
1918 struct mlx4_priv *priv = mlx4_priv(dev);
1919 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1920 struct res_mtt *mtt;
1921 int err = -EINVAL;
1922
1923 spin_lock_irq(mlx4_tlock(dev));
1924 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
1925 com.list) {
1926 if (!check_mtt_range(dev, slave, start, len, mtt)) {
1927 *res = mtt;
1928 mtt->com.from_state = mtt->com.state;
1929 mtt->com.state = RES_MTT_BUSY;
1930 err = 0;
1931 break;
1932 }
1933 }
1934 spin_unlock_irq(mlx4_tlock(dev));
1935
1936 return err;
1937 }
1938
1939 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
1940 struct mlx4_vhcr *vhcr,
1941 struct mlx4_cmd_mailbox *inbox,
1942 struct mlx4_cmd_mailbox *outbox,
1943 struct mlx4_cmd_info *cmd)
1944 {
1945 struct mlx4_mtt mtt;
1946 __be64 *page_list = inbox->buf;
1947 u64 *pg_list = (u64 *)page_list;
1948 int i;
1949 struct res_mtt *rmtt = NULL;
1950 int start = be64_to_cpu(page_list[0]);
1951 int npages = vhcr->in_modifier;
1952 int err;
1953
1954 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
1955 if (err)
1956 return err;
1957
1958 /* Call the SW implementation of write_mtt:
1959 * - Prepare a dummy mtt struct
1960 * - Translate inbox contents to simple addresses in host endianness */
1961 mtt.offset = 0; /* TBD this is broken but I don't handle it since
1962 we don't really use it */
1963 mtt.order = 0;
1964 mtt.page_shift = 0;
1965 for (i = 0; i < npages; ++i)
1966 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
1967
1968 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
1969 ((u64 *)page_list + 2));
1970
1971 if (rmtt)
1972 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
1973
1974 return err;
1975 }
1976
1977 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1978 struct mlx4_vhcr *vhcr,
1979 struct mlx4_cmd_mailbox *inbox,
1980 struct mlx4_cmd_mailbox *outbox,
1981 struct mlx4_cmd_info *cmd)
1982 {
1983 int eqn = vhcr->in_modifier;
1984 int res_id = eqn | (slave << 8);
1985 struct res_eq *eq;
1986 int err;
1987
1988 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
1989 if (err)
1990 return err;
1991
1992 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
1993 if (err)
1994 goto ex_abort;
1995
1996 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1997 if (err)
1998 goto ex_put;
1999
2000 atomic_dec(&eq->mtt->ref_count);
2001 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2002 res_end_move(dev, slave, RES_EQ, res_id);
2003 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2004
2005 return 0;
2006
2007 ex_put:
2008 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2009 ex_abort:
2010 res_abort_move(dev, slave, RES_EQ, res_id);
2011
2012 return err;
2013 }
2014
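/*
 * Forward an event to a slave by injecting an EQE into the EQ the slave
 * registered for this event type, using the GEN_EQE firmware command.
 */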
2015 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2016 {
2017 struct mlx4_priv *priv = mlx4_priv(dev);
2018 struct mlx4_slave_event_eq_info *event_eq;
2019 struct mlx4_cmd_mailbox *mailbox;
2020 u32 in_modifier = 0;
2021 int err;
2022 int res_id;
2023 struct res_eq *req;
2024
2025 if (!priv->mfunc.master.slave_state)
2026 return -EINVAL;
2027
2028 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2029
2030 /* Create the event only if the slave is registered */
2031 if (event_eq->eqn < 0)
2032 return 0;
2033
2034 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2035 res_id = (slave << 8) | event_eq->eqn;
2036 err = get_res(dev, slave, res_id, RES_EQ, &req);
2037 if (err)
2038 goto unlock;
2039
2040 if (req->com.from_state != RES_EQ_HW) {
2041 err = -EINVAL;
2042 goto put;
2043 }
2044
2045 mailbox = mlx4_alloc_cmd_mailbox(dev);
2046 if (IS_ERR(mailbox)) {
2047 err = PTR_ERR(mailbox);
2048 goto put;
2049 }
2050
2051 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2052 ++event_eq->token;
2053 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2054 }
2055
2056 memcpy(mailbox->buf, (u8 *) eqe, 28);
2057
2058 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2059
2060 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2061 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2062 MLX4_CMD_NATIVE);
2063
2064 put_res(dev, slave, res_id, RES_EQ);
2065 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2066 mlx4_free_cmd_mailbox(dev, mailbox);
2067 return err;
2068
2069 put:
2070 put_res(dev, slave, res_id, RES_EQ);
2071
2072 unlock:
2073 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2074 return err;
2075 }
2076
2077 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2078 struct mlx4_vhcr *vhcr,
2079 struct mlx4_cmd_mailbox *inbox,
2080 struct mlx4_cmd_mailbox *outbox,
2081 struct mlx4_cmd_info *cmd)
2082 {
2083 int eqn = vhcr->in_modifier;
2084 int res_id = eqn | (slave << 8);
2085 struct res_eq *eq;
2086 int err;
2087
2088 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2089 if (err)
2090 return err;
2091
2092 if (eq->com.from_state != RES_EQ_HW) {
2093 err = -EINVAL;
2094 goto ex_put;
2095 }
2096
2097 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2098
2099 ex_put:
2100 put_res(dev, slave, res_id, RES_EQ);
2101 return err;
2102 }
2103
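/*
 * Hand a slave CQ to the hardware: verify that the MTT range referenced
 * by the CQ context belongs to this slave, forward the command and pin
 * the MTT with a reference for as long as the CQ uses it.
 */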
2104 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2105 struct mlx4_vhcr *vhcr,
2106 struct mlx4_cmd_mailbox *inbox,
2107 struct mlx4_cmd_mailbox *outbox,
2108 struct mlx4_cmd_info *cmd)
2109 {
2110 int err;
2111 int cqn = vhcr->in_modifier;
2112 struct mlx4_cq_context *cqc = inbox->buf;
2113 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2114 struct res_cq *cq;
2115 struct res_mtt *mtt;
2116
2117 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2118 if (err)
2119 return err;
2120 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2121 if (err)
2122 goto out_move;
2123 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2124 if (err)
2125 goto out_put;
2126 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2127 if (err)
2128 goto out_put;
2129 atomic_inc(&mtt->ref_count);
2130 cq->mtt = mtt;
2131 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2132 res_end_move(dev, slave, RES_CQ, cqn);
2133 return 0;
2134
2135 out_put:
2136 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2137 out_move:
2138 res_abort_move(dev, slave, RES_CQ, cqn);
2139 return err;
2140 }
2141
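/*
 * Return a slave CQ to software ownership and drop the MTT reference
 * taken when the CQ was handed to hardware.
 */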
2142 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2143 struct mlx4_vhcr *vhcr,
2144 struct mlx4_cmd_mailbox *inbox,
2145 struct mlx4_cmd_mailbox *outbox,
2146 struct mlx4_cmd_info *cmd)
2147 {
2148 int err;
2149 int cqn = vhcr->in_modifier;
2150 struct res_cq *cq;
2151
2152 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2153 if (err)
2154 return err;
2155 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2156 if (err)
2157 goto out_move;
2158 atomic_dec(&cq->mtt->ref_count);
2159 res_end_move(dev, slave, RES_CQ, cqn);
2160 return 0;
2161
2162 out_move:
2163 res_abort_move(dev, slave, RES_CQ, cqn);
2164 return err;
2165 }
2166
2167 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2168 struct mlx4_vhcr *vhcr,
2169 struct mlx4_cmd_mailbox *inbox,
2170 struct mlx4_cmd_mailbox *outbox,
2171 struct mlx4_cmd_info *cmd)
2172 {
2173 int cqn = vhcr->in_modifier;
2174 struct res_cq *cq;
2175 int err;
2176
2177 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2178 if (err)
2179 return err;
2180
2181 if (cq->com.from_state != RES_CQ_HW)
2182 goto ex_put;
2183
2184 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2185 ex_put:
2186 put_res(dev, slave, cqn, RES_CQ);
2187
2188 return err;
2189 }
2190
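/*
 * Resize path of MODIFY_CQ: validate the new MTT range, forward the
 * command and move the CQ's MTT reference from the old buffer to the
 * new one.
 */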
2191 static int handle_resize(struct mlx4_dev *dev, int slave,
2192 struct mlx4_vhcr *vhcr,
2193 struct mlx4_cmd_mailbox *inbox,
2194 struct mlx4_cmd_mailbox *outbox,
2195 struct mlx4_cmd_info *cmd,
2196 struct res_cq *cq)
2197 {
2198 int err;
2199 struct res_mtt *orig_mtt;
2200 struct res_mtt *mtt;
2201 struct mlx4_cq_context *cqc = inbox->buf;
2202 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2203
2204 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2205 if (err)
2206 return err;
2207
2208 if (orig_mtt != cq->mtt) {
2209 err = -EINVAL;
2210 goto ex_put;
2211 }
2212
2213 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2214 if (err)
2215 goto ex_put;
2216
2217 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2218 if (err)
2219 goto ex_put1;
2220 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2221 if (err)
2222 goto ex_put1;
2223 atomic_dec(&orig_mtt->ref_count);
2224 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2225 atomic_inc(&mtt->ref_count);
2226 cq->mtt = mtt;
2227 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2228 return 0;
2229
2230 ex_put1:
2231 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2232 ex_put:
2233 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2234
2235 return err;
2236
2237 }
2238
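/*
 * MODIFY_CQ wrapper: op_modifier 0 is a resize and needs MTT
 * accounting; other modify operations are passed straight through.
 */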
2239 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2240 struct mlx4_vhcr *vhcr,
2241 struct mlx4_cmd_mailbox *inbox,
2242 struct mlx4_cmd_mailbox *outbox,
2243 struct mlx4_cmd_info *cmd)
2244 {
2245 int cqn = vhcr->in_modifier;
2246 struct res_cq *cq;
2247 int err;
2248
2249 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2250 if (err)
2251 return err;
2252
2253 if (cq->com.from_state != RES_CQ_HW)
2254 goto ex_put;
2255
2256 if (vhcr->op_modifier == 0) {
2257 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2258 goto ex_put;
2259 }
2260
2261 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2262 ex_put:
2263 put_res(dev, slave, cqn, RES_CQ);
2264
2265 return err;
2266 }
2267
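/*
 * Number of MTT entries implied by the SRQ context: the buffer is
 * 2^(log_srq_size + log_rq_stride + 4) bytes, divided by the page size,
 * with a minimum of one entry.
 */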
2268 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2269 {
2270 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2271 int log_rq_stride = srqc->logstride & 7;
2272 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2273
2274 if (log_srq_size + log_rq_stride + 4 < page_shift)
2275 return 1;
2276
2277 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2278 }
2279
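/*
 * Hand a slave SRQ to the hardware: the SRQ number in the context must
 * match the in_modifier, and the referenced MTT range must belong to
 * this slave before the command is forwarded.
 */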
2280 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2281 struct mlx4_vhcr *vhcr,
2282 struct mlx4_cmd_mailbox *inbox,
2283 struct mlx4_cmd_mailbox *outbox,
2284 struct mlx4_cmd_info *cmd)
2285 {
2286 int err;
2287 int srqn = vhcr->in_modifier;
2288 struct res_mtt *mtt;
2289 struct res_srq *srq;
2290 struct mlx4_srq_context *srqc = inbox->buf;
2291 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2292
2293 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2294 return -EINVAL;
2295
2296 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2297 if (err)
2298 return err;
2299 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2300 if (err)
2301 goto ex_abort;
2302 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2303 mtt);
2304 if (err)
2305 goto ex_put_mtt;
2306
2307 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2308 if (err)
2309 goto ex_put_mtt;
2310
2311 atomic_inc(&mtt->ref_count);
2312 srq->mtt = mtt;
2313 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2314 res_end_move(dev, slave, RES_SRQ, srqn);
2315 return 0;
2316
2317 ex_put_mtt:
2318 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2319 ex_abort:
2320 res_abort_move(dev, slave, RES_SRQ, srqn);
2321
2322 return err;
2323 }
2324
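/*
 * Return a slave SRQ to software ownership, dropping its MTT (and CQ,
 * if any) references.
 */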
2325 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2326 struct mlx4_vhcr *vhcr,
2327 struct mlx4_cmd_mailbox *inbox,
2328 struct mlx4_cmd_mailbox *outbox,
2329 struct mlx4_cmd_info *cmd)
2330 {
2331 int err;
2332 int srqn = vhcr->in_modifier;
2333 struct res_srq *srq;
2334
2335 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2336 if (err)
2337 return err;
2338 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2339 if (err)
2340 goto ex_abort;
2341 atomic_dec(&srq->mtt->ref_count);
2342 if (srq->cq)
2343 atomic_dec(&srq->cq->ref_count);
2344 res_end_move(dev, slave, RES_SRQ, srqn);
2345
2346 return 0;
2347
2348 ex_abort:
2349 res_abort_move(dev, slave, RES_SRQ, srqn);
2350
2351 return err;
2352 }
2353
2354 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2355 struct mlx4_vhcr *vhcr,
2356 struct mlx4_cmd_mailbox *inbox,
2357 struct mlx4_cmd_mailbox *outbox,
2358 struct mlx4_cmd_info *cmd)
2359 {
2360 int err;
2361 int srqn = vhcr->in_modifier;
2362 struct res_srq *srq;
2363
2364 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2365 if (err)
2366 return err;
2367 if (srq->com.from_state != RES_SRQ_HW) {
2368 err = -EBUSY;
2369 goto out;
2370 }
2371 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2372 out:
2373 put_res(dev, slave, srqn, RES_SRQ);
2374 return err;
2375 }
2376
2377 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2378 struct mlx4_vhcr *vhcr,
2379 struct mlx4_cmd_mailbox *inbox,
2380 struct mlx4_cmd_mailbox *outbox,
2381 struct mlx4_cmd_info *cmd)
2382 {
2383 int err;
2384 int srqn = vhcr->in_modifier;
2385 struct res_srq *srq;
2386
2387 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2388 if (err)
2389 return err;
2390
2391 if (srq->com.from_state != RES_SRQ_HW) {
2392 err = -EBUSY;
2393 goto out;
2394 }
2395
2396 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2397 out:
2398 put_res(dev, slave, srqn, RES_SRQ);
2399 return err;
2400 }
2401
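/*
 * Generic wrapper for QP commands that only require the QP to be in
 * hardware ownership; the command itself is passed through unchanged.
 */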
2402 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2403 struct mlx4_vhcr *vhcr,
2404 struct mlx4_cmd_mailbox *inbox,
2405 struct mlx4_cmd_mailbox *outbox,
2406 struct mlx4_cmd_info *cmd)
2407 {
2408 int err;
2409 int qpn = vhcr->in_modifier & 0x7fffff;
2410 struct res_qp *qp;
2411
2412 err = get_res(dev, slave, qpn, RES_QP, &qp);
2413 if (err)
2414 return err;
2415 if (qp->com.from_state != RES_QP_HW) {
2416 err = -EBUSY;
2417 goto out;
2418 }
2419
2420 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2421 out:
2422 put_res(dev, slave, qpn, RES_QP);
2423 return err;
2424 }
2425
2426 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2427 struct mlx4_vhcr *vhcr,
2428 struct mlx4_cmd_mailbox *inbox,
2429 struct mlx4_cmd_mailbox *outbox,
2430 struct mlx4_cmd_info *cmd)
2431 {
2432 struct mlx4_qp_context *qpc = inbox->buf + 8;
2433
2434 update_ud_gid(dev, qpc, (u8)slave);
2435
2436 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2437 }
2438
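/*
 * Move a slave QP back to RESET: forward 2RST_QP, then drop the
 * references the QP holds on its MTT, its CQs and (optionally) its SRQ.
 */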
2439 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2440 struct mlx4_vhcr *vhcr,
2441 struct mlx4_cmd_mailbox *inbox,
2442 struct mlx4_cmd_mailbox *outbox,
2443 struct mlx4_cmd_info *cmd)
2444 {
2445 int err;
2446 int qpn = vhcr->in_modifier & 0x7fffff;
2447 struct res_qp *qp;
2448
2449 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2450 if (err)
2451 return err;
2452 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2453 if (err)
2454 goto ex_abort;
2455
2456 atomic_dec(&qp->mtt->ref_count);
2457 atomic_dec(&qp->rcq->ref_count);
2458 atomic_dec(&qp->scq->ref_count);
2459 if (qp->srq)
2460 atomic_dec(&qp->srq->ref_count);
2461 res_end_move(dev, slave, RES_QP, qpn);
2462 return 0;
2463
2464 ex_abort:
2465 res_abort_move(dev, slave, RES_QP, qpn);
2466
2467 return err;
2468 }
2469
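/* Look up a GID in the QP's multicast list; called with mcg_spl held. */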
2470 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2471 struct res_qp *rqp, u8 *gid)
2472 {
2473 struct res_gid *res;
2474
2475 list_for_each_entry(res, &rqp->mcg_list, list) {
2476 if (!memcmp(res->gid, gid, 16))
2477 return res;
2478 }
2479 return NULL;
2480 }
2481
2482 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2483 u8 *gid, enum mlx4_protocol prot,
2484 enum mlx4_steer_type steer)
2485 {
2486 struct res_gid *res;
2487 int err;
2488
2489 res = kzalloc(sizeof *res, GFP_KERNEL);
2490 if (!res)
2491 return -ENOMEM;
2492
2493 spin_lock_irq(&rqp->mcg_spl);
2494 if (find_gid(dev, slave, rqp, gid)) {
2495 kfree(res);
2496 err = -EEXIST;
2497 } else {
2498 memcpy(res->gid, gid, 16);
2499 res->prot = prot;
2500 res->steer = steer;
2501 list_add_tail(&res->list, &rqp->mcg_list);
2502 err = 0;
2503 }
2504 spin_unlock_irq(&rqp->mcg_spl);
2505
2506 return err;
2507 }
2508
2509 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2510 u8 *gid, enum mlx4_protocol prot,
2511 enum mlx4_steer_type steer)
2512 {
2513 struct res_gid *res;
2514 int err;
2515
2516 spin_lock_irq(&rqp->mcg_spl);
2517 res = find_gid(dev, slave, rqp, gid);
2518 if (!res || res->prot != prot || res->steer != steer)
2519 err = -EINVAL;
2520 else {
2521 list_del(&res->list);
2522 kfree(res);
2523 err = 0;
2524 }
2525 spin_unlock_irq(&rqp->mcg_spl);
2526
2527 return err;
2528 }
2529
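/*
 * Attach or detach (vhcr->op_modifier) a slave QP to/from a multicast
 * group. Attached GIDs are recorded on the QP so they can be detached
 * automatically when the slave's resources are cleaned up.
 */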
2530 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2531 struct mlx4_vhcr *vhcr,
2532 struct mlx4_cmd_mailbox *inbox,
2533 struct mlx4_cmd_mailbox *outbox,
2534 struct mlx4_cmd_info *cmd)
2535 {
2536 struct mlx4_qp qp; /* dummy for calling attach/detach */
2537 u8 *gid = inbox->buf;
2538 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2539 int err, err1;
2540 int qpn;
2541 struct res_qp *rqp;
2542 int attach = vhcr->op_modifier;
2543 int block_loopback = vhcr->in_modifier >> 31;
2544 u8 steer_type_mask = 2;
2545 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2546
2547 qpn = vhcr->in_modifier & 0xffffff;
2548 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2549 if (err)
2550 return err;
2551
2552 qp.qpn = qpn;
2553 if (attach) {
2554 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2555 if (err)
2556 goto ex_put;
2557
2558 err = mlx4_qp_attach_common(dev, &qp, gid,
2559 block_loopback, prot, type);
2560 if (err)
2561 goto ex_rem;
2562 } else {
2563 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2564 if (err)
2565 goto ex_put;
2566 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2567 }
2568
2569 put_res(dev, slave, qpn, RES_QP);
2570 return 0;
2571
2572 ex_rem:
2573 /* ignore the error from the cleanup below; we are already on an error path */
2574 err1 = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2575 ex_put:
2576 put_res(dev, slave, qpn, RES_QP);
2577
2578 return err;
2579 }
2580
2581 enum {
2582 BUSY_MAX_RETRIES = 10
2583 };
2584
2585 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2586 struct mlx4_vhcr *vhcr,
2587 struct mlx4_cmd_mailbox *inbox,
2588 struct mlx4_cmd_mailbox *outbox,
2589 struct mlx4_cmd_info *cmd)
2590 {
2591 int err;
2592 int index = vhcr->in_modifier & 0xffff;
2593
2594 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2595 if (err)
2596 return err;
2597
2598 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2599 put_res(dev, slave, index, RES_COUNTER);
2600 return err;
2601 }
2602
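/*
 * Detach the QP from every multicast group it is still attached to.
 * Errors from the detach are ignored; this is best-effort cleanup.
 */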
2603 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2604 {
2605 struct res_gid *rgid;
2606 struct res_gid *tmp;
2607 int err;
2608 struct mlx4_qp qp; /* dummy for calling attach/detach */
2609
2610 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2611 qp.qpn = rqp->local_qpn;
2612 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2613 rgid->steer);
2614 list_del(&rgid->list);
2615 kfree(rgid);
2616 }
2617 }
2618
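/*
 * One pass over the slave's resources of the given type: mark every
 * idle resource busy and flag it for removal. Returns the number of
 * resources that were already busy (still held by a command).
 */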
2619 static int _move_all_busy(struct mlx4_dev *dev, int slave,
2620 enum mlx4_resource type, int print)
2621 {
2622 struct mlx4_priv *priv = mlx4_priv(dev);
2623 struct mlx4_resource_tracker *tracker =
2624 &priv->mfunc.master.res_tracker;
2625 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2626 struct res_common *r;
2627 struct res_common *tmp;
2628 int busy;
2629
2630 busy = 0;
2631 spin_lock_irq(mlx4_tlock(dev));
2632 list_for_each_entry_safe(r, tmp, rlist, list) {
2633 if (r->owner == slave) {
2634 if (!r->removing) {
2635 if (r->state == RES_ANY_BUSY) {
2636 if (print)
2637 mlx4_dbg(dev,
2638 "%s id 0x%x is busy\n",
2639 ResourceType(type),
2640 r->res_id);
2641 ++busy;
2642 } else {
2643 r->from_state = r->state;
2644 r->state = RES_ANY_BUSY;
2645 r->removing = 1;
2646 }
2647 }
2648 }
2649 }
2650 spin_unlock_irq(mlx4_tlock(dev));
2651
2652 return busy;
2653 }
2654
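/*
 * Keep retrying _move_all_busy() for up to five seconds, then make a
 * final pass that logs whatever is still busy.
 */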
2655 static int move_all_busy(struct mlx4_dev *dev, int slave,
2656 enum mlx4_resource type)
2657 {
2658 unsigned long begin;
2659 int busy;
2660
2661 begin = jiffies;
2662 do {
2663 busy = _move_all_busy(dev, slave, type, 0);
2664 if (time_after(jiffies, begin + 5 * HZ))
2665 break;
2666 if (busy)
2667 cond_resched();
2668 } while (busy);
2669
2670 if (busy)
2671 busy = _move_all_busy(dev, slave, type, 1);
2672
2673 return busy;
2674 }
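
/*
 * Destroy every QP still owned by the slave, walking each one backwards
 * through its states (HW -> MAPPED -> RESERVED) and releasing the
 * references and ICM it holds along the way.
 */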
2675 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2676 {
2677 struct mlx4_priv *priv = mlx4_priv(dev);
2678 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2679 struct list_head *qp_list =
2680 &tracker->slave_list[slave].res_list[RES_QP];
2681 struct res_qp *qp;
2682 struct res_qp *tmp;
2683 int state;
2684 u64 in_param;
2685 int qpn;
2686 int err;
2687
2688 err = move_all_busy(dev, slave, RES_QP);
2689 if (err)
2690 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
2691 "for slave %d\n", slave);
2692
2693 spin_lock_irq(mlx4_tlock(dev));
2694 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2695 spin_unlock_irq(mlx4_tlock(dev));
2696 if (qp->com.owner == slave) {
2697 qpn = qp->com.res_id;
2698 detach_qp(dev, slave, qp);
2699 state = qp->com.from_state;
2700 while (state != 0) {
2701 switch (state) {
2702 case RES_QP_RESERVED:
2703 spin_lock_irq(mlx4_tlock(dev));
2704 radix_tree_delete(&tracker->res_tree[RES_QP],
2705 qp->com.res_id);
2706 list_del(&qp->com.list);
2707 spin_unlock_irq(mlx4_tlock(dev));
2708 kfree(qp);
2709 state = 0;
2710 break;
2711 case RES_QP_MAPPED:
2712 if (!valid_reserved(dev, slave, qpn))
2713 __mlx4_qp_free_icm(dev, qpn);
2714 state = RES_QP_RESERVED;
2715 break;
2716 case RES_QP_HW:
2717 in_param = slave;
2718 err = mlx4_cmd(dev, in_param,
2719 qp->local_qpn, 2,
2720 MLX4_CMD_2RST_QP,
2721 MLX4_CMD_TIME_CLASS_A,
2722 MLX4_CMD_NATIVE);
2723 if (err)
2724 mlx4_dbg(dev, "rem_slave_qps: failed"
2725 " to move slave %d qpn %d to"
2726 " reset\n", slave,
2727 qp->local_qpn);
2728 atomic_dec(&qp->rcq->ref_count);
2729 atomic_dec(&qp->scq->ref_count);
2730 atomic_dec(&qp->mtt->ref_count);
2731 if (qp->srq)
2732 atomic_dec(&qp->srq->ref_count);
2733 state = RES_QP_MAPPED;
2734 break;
2735 default:
2736 state = 0;
2737 }
2738 }
2739 }
2740 spin_lock_irq(mlx4_tlock(dev));
2741 }
2742 spin_unlock_irq(mlx4_tlock(dev));
2743 }
2744
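/* Like rem_slave_qps(): walk each SRQ back to SW ownership and free it. */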
2745 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2746 {
2747 struct mlx4_priv *priv = mlx4_priv(dev);
2748 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2749 struct list_head *srq_list =
2750 &tracker->slave_list[slave].res_list[RES_SRQ];
2751 struct res_srq *srq;
2752 struct res_srq *tmp;
2753 int state;
2754 u64 in_param;
2755 LIST_HEAD(tlist);
2756 int srqn;
2757 int err;
2758
2759 err = move_all_busy(dev, slave, RES_SRQ);
2760 if (err)
2761 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2762 "busy for slave %d\n", slave);
2763
2764 spin_lock_irq(mlx4_tlock(dev));
2765 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2766 spin_unlock_irq(mlx4_tlock(dev));
2767 if (srq->com.owner == slave) {
2768 srqn = srq->com.res_id;
2769 state = srq->com.from_state;
2770 while (state != 0) {
2771 switch (state) {
2772 case RES_SRQ_ALLOCATED:
2773 __mlx4_srq_free_icm(dev, srqn);
2774 spin_lock_irq(mlx4_tlock(dev));
2775 radix_tree_delete(&tracker->res_tree[RES_SRQ],
2776 srqn);
2777 list_del(&srq->com.list);
2778 spin_unlock_irq(mlx4_tlock(dev));
2779 kfree(srq);
2780 state = 0;
2781 break;
2782
2783 case RES_SRQ_HW:
2784 in_param = slave;
2785 err = mlx4_cmd(dev, in_param, srqn, 1,
2786 MLX4_CMD_HW2SW_SRQ,
2787 MLX4_CMD_TIME_CLASS_A,
2788 MLX4_CMD_NATIVE);
2789 if (err)
2790 mlx4_dbg(dev, "rem_slave_srqs: failed"
2791 " to move slave %d srq %d to"
2792 " SW ownership\n",
2793 slave, srqn);
2794
2795 atomic_dec(&srq->mtt->ref_count);
2796 if (srq->cq)
2797 atomic_dec(&srq->cq->ref_count);
2798 state = RES_SRQ_ALLOCATED;
2799 break;
2800
2801 default:
2802 state = 0;
2803 }
2804 }
2805 }
2806 spin_lock_irq(mlx4_tlock(dev));
2807 }
2808 spin_unlock_irq(mlx4_tlock(dev));
2809 }
2810
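/* Free the slave's CQs; a CQ is skipped while something still references it. */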
2811 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2812 {
2813 struct mlx4_priv *priv = mlx4_priv(dev);
2814 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2815 struct list_head *cq_list =
2816 &tracker->slave_list[slave].res_list[RES_CQ];
2817 struct res_cq *cq;
2818 struct res_cq *tmp;
2819 int state;
2820 u64 in_param;
2821 LIST_HEAD(tlist);
2822 int cqn;
2823 int err;
2824
2825 err = move_all_busy(dev, slave, RES_CQ);
2826 if (err)
2827 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2828 "busy for slave %d\n", slave);
2829
2830 spin_lock_irq(mlx4_tlock(dev));
2831 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2832 spin_unlock_irq(mlx4_tlock(dev));
2833 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
2834 cqn = cq->com.res_id;
2835 state = cq->com.from_state;
2836 while (state != 0) {
2837 switch (state) {
2838 case RES_CQ_ALLOCATED:
2839 __mlx4_cq_free_icm(dev, cqn);
2840 spin_lock_irq(mlx4_tlock(dev));
2841 radix_tree_delete(&tracker->res_tree[RES_CQ],
2842 cqn);
2843 list_del(&cq->com.list);
2844 spin_unlock_irq(mlx4_tlock(dev));
2845 kfree(cq);
2846 state = 0;
2847 break;
2848
2849 case RES_CQ_HW:
2850 in_param = slave;
2851 err = mlx4_cmd(dev, in_param, cqn, 1,
2852 MLX4_CMD_HW2SW_CQ,
2853 MLX4_CMD_TIME_CLASS_A,
2854 MLX4_CMD_NATIVE);
2855 if (err)
2856 mlx4_dbg(dev, "rem_slave_cqs: failed"
2857 " to move slave %d cq %d to"
2858 " SW ownership\n",
2859 slave, cqn);
2860 atomic_dec(&cq->mtt->ref_count);
2861 state = RES_CQ_ALLOCATED;
2862 break;
2863
2864 default:
2865 state = 0;
2866 }
2867 }
2868 }
2869 spin_lock_irq(mlx4_tlock(dev));
2870 }
2871 spin_unlock_irq(mlx4_tlock(dev));
2872 }
2873
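/* Free the slave's MPTs (memory regions), dropping MTT references on the way. */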
2874 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2875 {
2876 struct mlx4_priv *priv = mlx4_priv(dev);
2877 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2878 struct list_head *mpt_list =
2879 &tracker->slave_list[slave].res_list[RES_MPT];
2880 struct res_mpt *mpt;
2881 struct res_mpt *tmp;
2882 int state;
2883 u64 in_param;
2884 LIST_HEAD(tlist);
2885 int mptn;
2886 int err;
2887
2888 err = move_all_busy(dev, slave, RES_MPT);
2889 if (err)
2890 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
2891 "busy for slave %d\n", slave);
2892
2893 spin_lock_irq(mlx4_tlock(dev));
2894 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
2895 spin_unlock_irq(mlx4_tlock(dev));
2896 if (mpt->com.owner == slave) {
2897 mptn = mpt->com.res_id;
2898 state = mpt->com.from_state;
2899 while (state != 0) {
2900 switch (state) {
2901 case RES_MPT_RESERVED:
2902 __mlx4_mr_release(dev, mpt->key);
2903 spin_lock_irq(mlx4_tlock(dev));
2904 radix_tree_delete(&tracker->res_tree[RES_MPT],
2905 mptn);
2906 list_del(&mpt->com.list);
2907 spin_unlock_irq(mlx4_tlock(dev));
2908 kfree(mpt);
2909 state = 0;
2910 break;
2911
2912 case RES_MPT_MAPPED:
2913 __mlx4_mr_free_icm(dev, mpt->key);
2914 state = RES_MPT_RESERVED;
2915 break;
2916
2917 case RES_MPT_HW:
2918 in_param = slave;
2919 err = mlx4_cmd(dev, in_param, mptn, 0,
2920 MLX4_CMD_HW2SW_MPT,
2921 MLX4_CMD_TIME_CLASS_A,
2922 MLX4_CMD_NATIVE);
2923 if (err)
2924 mlx4_dbg(dev, "rem_slave_mrs: failed"
2925 " to move slave %d mpt %d to"
2926 " SW ownership\n",
2927 slave, mptn);
2928 if (mpt->mtt)
2929 atomic_dec(&mpt->mtt->ref_count);
2930 state = RES_MPT_MAPPED;
2931 break;
2932 default:
2933 state = 0;
2934 }
2935 }
2936 }
2937 spin_lock_irq(mlx4_tlock(dev));
2938 }
2939 spin_unlock_irq(mlx4_tlock(dev));
2940 }
2941
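/* Free the slave's MTT ranges; this runs last, after all users are gone. */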
2942 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
2943 {
2944 struct mlx4_priv *priv = mlx4_priv(dev);
2945 struct mlx4_resource_tracker *tracker =
2946 &priv->mfunc.master.res_tracker;
2947 struct list_head *mtt_list =
2948 &tracker->slave_list[slave].res_list[RES_MTT];
2949 struct res_mtt *mtt;
2950 struct res_mtt *tmp;
2951 int state;
2952 LIST_HEAD(tlist);
2953 int base;
2954 int err;
2955
2956 err = move_all_busy(dev, slave, RES_MTT);
2957 if (err)
2958 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
2959 "busy for slave %d\n", slave);
2960
2961 spin_lock_irq(mlx4_tlock(dev));
2962 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
2963 spin_unlock_irq(mlx4_tlock(dev));
2964 if (mtt->com.owner == slave) {
2965 base = mtt->com.res_id;
2966 state = mtt->com.from_state;
2967 while (state != 0) {
2968 switch (state) {
2969 case RES_MTT_ALLOCATED:
2970 __mlx4_free_mtt_range(dev, base,
2971 mtt->order);
2972 spin_lock_irq(mlx4_tlock(dev));
2973 radix_tree_delete(&tracker->res_tree[RES_MTT],
2974 base);
2975 list_del(&mtt->com.list);
2976 spin_unlock_irq(mlx4_tlock(dev));
2977 kfree(mtt);
2978 state = 0;
2979 break;
2980
2981 default:
2982 state = 0;
2983 }
2984 }
2985 }
2986 spin_lock_irq(mlx4_tlock(dev));
2987 }
2988 spin_unlock_irq(mlx4_tlock(dev));
2989 }
2990
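/*
 * Free the slave's EQs, issuing HW2SW_EQ for any EQ still in hardware
 * ownership before releasing its reservation.
 */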
2991 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
2992 {
2993 struct mlx4_priv *priv = mlx4_priv(dev);
2994 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2995 struct list_head *eq_list =
2996 &tracker->slave_list[slave].res_list[RES_EQ];
2997 struct res_eq *eq;
2998 struct res_eq *tmp;
2999 int err;
3000 int state;
3001 LIST_HEAD(tlist);
3002 int eqn;
3003 struct mlx4_cmd_mailbox *mailbox;
3004
3005 err = move_all_busy(dev, slave, RES_EQ);
3006 if (err)
3007 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3008 "busy for slave %d\n", slave);
3009
3010 spin_lock_irq(mlx4_tlock(dev));
3011 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3012 spin_unlock_irq(mlx4_tlock(dev));
3013 if (eq->com.owner == slave) {
3014 eqn = eq->com.res_id;
3015 state = eq->com.from_state;
3016 while (state != 0) {
3017 switch (state) {
3018 case RES_EQ_RESERVED:
3019 spin_lock_irq(mlx4_tlock(dev));
3020 radix_tree_delete(&tracker->res_tree[RES_EQ],
3021 eqn);
3022 list_del(&eq->com.list);
3023 spin_unlock_irq(mlx4_tlock(dev));
3024 kfree(eq);
3025 state = 0;
3026 break;
3027
3028 case RES_EQ_HW:
3029 mailbox = mlx4_alloc_cmd_mailbox(dev);
3030 if (IS_ERR(mailbox)) {
3031 cond_resched();
3032 continue;
3033 }
3034 err = mlx4_cmd_box(dev, slave, 0,
3035 eqn & 0xff, 0,
3036 MLX4_CMD_HW2SW_EQ,
3037 MLX4_CMD_TIME_CLASS_A,
3038 MLX4_CMD_NATIVE);
3039 if (err)
3040 mlx4_dbg(dev, "rem_slave_eqs: failed to move"
3041 " slave %d eq %d to SW ownership\n", slave, eqn);
3042 mlx4_free_cmd_mailbox(dev, mailbox);
3043 if (!err) {
3044 atomic_dec(&eq->mtt->ref_count);
3045 state = RES_EQ_RESERVED;
3046 }
3047 break;
3048
3049 default:
3050 state = 0;
3051 }
3052 }
3053 }
3054 spin_lock_irq(mlx4_tlock(dev));
3055 }
3056 spin_unlock_irq(mlx4_tlock(dev));
3057 }
3058
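/*
 * Release everything a slave still owns, e.g. when the slave resets or
 * shuts down uncleanly. The order matters: QPs (and their multicast
 * attachments) go first, MTTs last, since other resources hold
 * references on them.
 */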
3059 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3060 {
3061 struct mlx4_priv *priv = mlx4_priv(dev);
3062
3063 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3064 /* VLANs: not tracked yet, only MACs are cleaned up here */
3065 rem_slave_macs(dev, slave);
3066 rem_slave_qps(dev, slave);
3067 rem_slave_srqs(dev, slave);
3068 rem_slave_cqs(dev, slave);
3069 rem_slave_mrs(dev, slave);
3070 rem_slave_eqs(dev, slave);
3071 rem_slave_mtts(dev, slave);
3072 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3073 }
3074