1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4 #include <linux/debugfs.h>
5 #include "eswitch.h"
6
/* Diagnostic counters exposed per vport under debugfs vnic_diag/.
 * Each value selects one field of the QUERY_VNIC_ENV output in
 * mlx5_esw_query_vnic_diag().
 */
enum vnic_diag_counter {
	MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE,
	MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW,
	MLX5_VNIC_DIAG_COMP_EQ_OVERRUN,
	MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN,
	MLX5_VNIC_DIAG_CQ_OVERRUN,
	MLX5_VNIC_DIAG_INVALID_COMMAND,
	/* "QOUTA" typo is kept as-is: the identifier is referenced by the
	 * show handlers below; renaming it is an API-wide change.
	 */
	MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND,
};
16
/* Query one vNIC environment diagnostic counter for @vport.
 *
 * Issues a QUERY_VNIC_ENV firmware command and extracts the single field
 * selected by @counter into @*val.
 *
 * Returns 0 on success, or the error from mlx5_cmd_exec(). If @counter is
 * not a known value, *val is left untouched and 0 is returned (callers
 * pre-initialize it).
 */
static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
				    u32 *val)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *dev = vport->dev;
	u16 vport_num = vport->vport;
	void *vnic_diag_out;
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
	/* Querying a vport other than our own requires the other_vport flag */
	if (!mlx5_esw_is_manager_vport(dev->priv.eswitch, vport_num))
		MLX5_SET(query_vnic_env_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* All counters live in the vport_env sub-struct of the reply */
	vnic_diag_out = MLX5_ADDR_OF(query_vnic_env_out, out, vport_env);
	switch (counter) {
	case MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, total_error_queues);
		break;
	case MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out,
				send_queue_priority_update_flow);
		break;
	case MLX5_VNIC_DIAG_COMP_EQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, comp_eq_overrun);
		break;
	case MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, async_eq_overrun);
		break;
	case MLX5_VNIC_DIAG_CQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, cq_overrun);
		break;
	case MLX5_VNIC_DIAG_INVALID_COMMAND:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, invalid_command);
		break;
	case MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
		break;
	}

	return 0;
}
64
__show_vnic_diag(struct seq_file * file,struct mlx5_vport * vport,enum vnic_diag_counter type)65 static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
66 enum vnic_diag_counter type)
67 {
68 u32 val = 0;
69 int ret;
70
71 ret = mlx5_esw_query_vnic_diag(vport, type, &val);
72 if (ret)
73 return ret;
74
75 seq_printf(file, "%d\n", val);
76 return 0;
77 }
78
total_q_under_processor_handle_show(struct seq_file * file,void * priv)79 static int total_q_under_processor_handle_show(struct seq_file *file, void *priv)
80 {
81 return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE);
82 }
83
send_queue_priority_update_flow_show(struct seq_file * file,void * priv)84 static int send_queue_priority_update_flow_show(struct seq_file *file, void *priv)
85 {
86 return __show_vnic_diag(file, file->private,
87 MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW);
88 }
89
comp_eq_overrun_show(struct seq_file * file,void * priv)90 static int comp_eq_overrun_show(struct seq_file *file, void *priv)
91 {
92 return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_COMP_EQ_OVERRUN);
93 }
94
async_eq_overrun_show(struct seq_file * file,void * priv)95 static int async_eq_overrun_show(struct seq_file *file, void *priv)
96 {
97 return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN);
98 }
99
cq_overrun_show(struct seq_file * file,void * priv)100 static int cq_overrun_show(struct seq_file *file, void *priv)
101 {
102 return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_CQ_OVERRUN);
103 }
104
invalid_command_show(struct seq_file * file,void * priv)105 static int invalid_command_show(struct seq_file *file, void *priv)
106 {
107 return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_INVALID_COMMAND);
108 }
109
quota_exceeded_command_show(struct seq_file * file,void * priv)110 static int quota_exceeded_command_show(struct seq_file *file, void *priv)
111 {
112 return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND);
113 }
114
/* Generate <name>_fops (open/read/release via single_open) for each
 * *_show() handler above; the fops are wired up in
 * mlx5_esw_vport_debugfs_create().
 */
DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(cq_overrun);
DEFINE_SHOW_ATTRIBUTE(invalid_command);
DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
122
/* Tear down the per-vport debugfs tree created by
 * mlx5_esw_vport_debugfs_create().
 */
void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	debugfs_remove_recursive(vport->dbgfs);
	/* Clear the dangling dentry so a later create/destroy cycle starts
	 * from a known state.
	 */
	vport->dbgfs = NULL;
}
130
/* vnic diag dir name is "pf", "ecpf" or "{vf/sf}_xxxx" (including NUL).
 * NOTE(review): an 8-byte buffer holds at most a 4-digit suffix
 * ("vf_9999"); a 5-digit vf/sf number would be silently truncated by
 * snprintf() in mlx5_esw_vport_debugfs_create() — confirm the supported
 * vf/sf range.
 */
#define VNIC_DIAG_DIR_NAME_MAX_LEN 8
133
/* Create the per-vport debugfs tree:
 *   <esw->dbgfs>/<pf|ecpf|vf_N|sf_N>/vnic_diag/<counter files>
 *
 * Each counter file is created only when the corresponding firmware
 * capability bit reports the counter is supported. The top dentry is
 * stored in vport->dbgfs and released by mlx5_esw_vport_debugfs_destroy().
 */
void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	char dir_name[VNIC_DIAG_DIR_NAME_MAX_LEN];
	struct dentry *vnic_diag;
	int err;

	/* Counters are only queryable by a vport group manager */
	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return;

	if (vport_num == MLX5_VPORT_PF) {
		strcpy(dir_name, "pf");
	} else if (vport_num == MLX5_VPORT_ECPF) {
		strcpy(dir_name, "ecpf");
	} else {
		/* VFs are named by their 0-based function index, SFs by sf_num */
		err = snprintf(dir_name, VNIC_DIAG_DIR_NAME_MAX_LEN, "%s_%d", is_sf ? "sf" : "vf",
			       is_sf ? sf_num : vport_num - MLX5_VPORT_FIRST_VF);
		if (WARN_ON(err < 0))
			return;
	}

	vport->dbgfs = debugfs_create_dir(dir_name, esw->dbgfs);
	vnic_diag = debugfs_create_dir("vnic_diag", vport->dbgfs);

	if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) {
		debugfs_create_file("total_q_under_processor_handle", 0444, vnic_diag, vport,
				    &total_q_under_processor_handle_fops);
		debugfs_create_file("send_queue_priority_update_flow", 0444, vnic_diag, vport,
				    &send_queue_priority_update_flow_fops);
	}

	if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) {
		debugfs_create_file("comp_eq_overrun", 0444, vnic_diag, vport,
				    &comp_eq_overrun_fops);
		debugfs_create_file("async_eq_overrun", 0444, vnic_diag, vport,
				    &async_eq_overrun_fops);
	}

	if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun))
		debugfs_create_file("cq_overrun", 0444, vnic_diag, vport, &cq_overrun_fops);

	if (MLX5_CAP_GEN(esw->dev, invalid_command_count))
		debugfs_create_file("invalid_command", 0444, vnic_diag, vport,
				    &invalid_command_fops);

	if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
		debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
				    &quota_exceeded_command_fops);
}
183