/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __MLX5_VNET_H__
#define __MLX5_VNET_H__

#include "mlx5_vdpa.h"

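/*
 * Conversion helpers between the embedded vdpa device and its containing
 * structures. A minimal usage sketch (assuming a struct vdpa_device *vdev
 * handed to a vdpa_config_ops callback):
 *
 *	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 *	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 */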
#define to_mlx5_vdpa_ndev(__mvdev) \
	container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)

struct mlx5_vdpa_net_resources {
	u32 tisn;
	u32 tdn;
	u32 tirn;
	u32 rqtn;
	bool valid;
	struct dentry *tirn_dent;
};

#define MLX5V_MACVLAN_SIZE 256

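/*
 * key2vid() extracts the 12-bit VLAN ID from a macvlan hash key; the key is
 * assumed to carry the VLAN ID starting at bit 48, above the 48-bit MAC
 * address stored in the low bits (see struct macvlan_node::macvlan below).
 */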
static inline u16 key2vid(u64 key)
{
	return (u16)(key >> 48) & 0xfff;
}

#define MLX5_VDPA_IRQ_NAME_LEN 32

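/*
 * Bookkeeping for dedicated virtqueue interrupts: each entry is expected to
 * describe one MSI-X vector (struct msi_map) together with the name and
 * dev_id used when the vector is requested for a data virtqueue.
 */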
struct mlx5_vdpa_irq_pool_entry {
	struct msi_map map;
	bool used;
	char name[MLX5_VDPA_IRQ_NAME_LEN];
	void *dev_id;
};

struct mlx5_vdpa_irq_pool {
	int num_ent;
	struct mlx5_vdpa_irq_pool_entry *entries;
};

struct mlx5_vdpa_net {
	struct mlx5_vdpa_dev mvdev;
	struct mlx5_vdpa_net_resources res;
	struct virtio_net_config config;
	struct mlx5_vdpa_virtqueue *vqs;
	struct vdpa_callback *event_cbs;

	/* Serialize vq resources creation and destruction. This is required
	 * since the memory map might change and we need to destroy and create
	 * resources while the driver is operational.
	 */
	struct rw_semaphore reslock;
	struct mlx5_flow_table *rxft;
	struct dentry *rx_dent;
	struct dentry *rx_table_dent;
	bool setup;
	u32 cur_num_vqs;
	u32 rqt_size;
	bool nb_registered;
	struct notifier_block nb;
	struct vdpa_callback config_cb;
	struct mlx5_vdpa_wq_ent cvq_ent;
	struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
	struct mlx5_vdpa_irq_pool irqp;
	struct dentry *debugfs;

	u32 umem_1_buffer_param_a;
	u32 umem_1_buffer_param_b;

	u32 umem_2_buffer_param_a;
	u32 umem_2_buffer_param_b;

	u32 umem_3_buffer_param_a;
	u32 umem_3_buffer_param_b;
};

struct mlx5_vdpa_counter {
	struct mlx5_fc *counter;
	struct dentry *dent;
	struct mlx5_core_dev *mdev;
};

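/*
 * One bucket entry of mlx5_vdpa_net::macvlan_hash, keyed by the packed
 * MAC/VLAN value in @macvlan (see key2vid() above). It is assumed to own the
 * unicast and multicast steering rules installed for that MAC/VLAN pair,
 * plus optional debugfs counters.
 */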
struct macvlan_node {
	struct hlist_node hlist;
	struct mlx5_flow_handle *ucast_rule;
	struct mlx5_flow_handle *mcast_rule;
	u64 macvlan;
	struct mlx5_vdpa_net *ndev;
	bool tagged;
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
	struct dentry *dent;
	struct mlx5_vdpa_counter ucast_counter;
	struct mlx5_vdpa_counter mcast_counter;
#endif
};

void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_tirn(struct mlx5_vdpa_net *ndev);
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
void mlx5_vdpa_add_rx_counters(struct mlx5_vdpa_net *ndev,
			       struct macvlan_node *node);
void mlx5_vdpa_remove_rx_counters(struct mlx5_vdpa_net *ndev,
				  struct macvlan_node *node);
#else
static inline void mlx5_vdpa_add_rx_counters(struct mlx5_vdpa_net *ndev,
					     struct macvlan_node *node) {}
static inline void mlx5_vdpa_remove_rx_counters(struct mlx5_vdpa_net *ndev,
						struct macvlan_node *node) {}
#endif

#endif /* __MLX5_VNET_H__ */