1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/device.h>
7 #include <linux/export.h>
8 #include <linux/err.h>
9 #include <linux/if_link.h>
10 #include <linux/netdevice.h>
11 #include <linux/completion.h>
12 #include <linux/skbuff.h>
13 #include <linux/etherdevice.h>
14 #include <linux/types.h>
15 #include <linux/string.h>
16 #include <linux/gfp.h>
17 #include <linux/random.h>
18 #include <linux/jiffies.h>
19 #include <linux/mutex.h>
20 #include <linux/rcupdate.h>
21 #include <linux/slab.h>
22 #include <linux/workqueue.h>
23 #include <linux/firmware.h>
24 #include <asm/byteorder.h>
25 #include <net/devlink.h>
26 #include <trace/events/devlink.h>
27
28 #include "core.h"
29 #include "core_env.h"
30 #include "item.h"
31 #include "cmd.h"
32 #include "port.h"
33 #include "trap.h"
34 #include "emad.h"
35 #include "reg.h"
36 #include "resources.h"
37 #include "../mlxfw/mlxfw.h"
38
39 static LIST_HEAD(mlxsw_core_driver_list);
40 static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
41
42 static const char mlxsw_core_driver_name[] = "mlxsw_core";
43
44 static struct workqueue_struct *mlxsw_wq;
45 static struct workqueue_struct *mlxsw_owq;
46
/* Per-port core context. port_driver_priv is set by the specific driver
 * while the port is in use; mlxsw_core_port_check() uses it being non-NULL
 * as the "port exists" test.
 */
struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u16 local_port;
	struct mlxsw_linecard *linecard;
};
53
mlxsw_core_port_driver_priv(struct mlxsw_core_port * mlxsw_core_port)54 void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
55 {
56 return mlxsw_core_port->port_driver_priv;
57 }
58 EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
59
mlxsw_core_port_check(struct mlxsw_core_port * mlxsw_core_port)60 static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
61 {
62 return mlxsw_core_port->port_driver_priv != NULL;
63 }
64
/* Main per-device core context. Allocated with trailing storage for the
 * bus driver's private data (driver_priv flexible array member).
 */
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;	/* runs EMAD timeout work */
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;			/* transaction ID generator */
		struct list_head trans_list;	/* pending transactions; RCU-read */
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
		bool enable_string_tlv;
	} emad;
	struct {
		u16 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_linecards *linecards;
	struct mlxsw_core_port *ports;		/* array of max_ports entries */
	unsigned int max_ports;
	atomic_t active_ports_count;
	bool fw_flash_in_progress;	/* extends the EMAD timeout while set */
	struct {
		struct devlink_health_reporter *fw_fatal;
	} health;
	struct mlxsw_env *env;
	unsigned long driver_priv[];
	/* driver_priv has to be always the last item */
};
98
mlxsw_core_linecards(struct mlxsw_core * mlxsw_core)99 struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core)
100 {
101 return mlxsw_core->linecards;
102 }
103
mlxsw_core_linecards_set(struct mlxsw_core * mlxsw_core,struct mlxsw_linecards * linecards)104 void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
105 struct mlxsw_linecards *linecards)
106 {
107 mlxsw_core->linecards = linecards;
108 }
109
110 #define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
111
mlxsw_ports_occ_get(void * priv)112 static u64 mlxsw_ports_occ_get(void *priv)
113 {
114 struct mlxsw_core *mlxsw_core = priv;
115
116 return atomic_read(&mlxsw_core->active_ports_count);
117 }
118
mlxsw_core_resources_ports_register(struct mlxsw_core * mlxsw_core)119 static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
120 {
121 struct devlink *devlink = priv_to_devlink(mlxsw_core);
122 struct devlink_resource_size_params ports_num_params;
123 u32 max_ports;
124
125 max_ports = mlxsw_core->max_ports - 1;
126 devlink_resource_size_params_init(&ports_num_params, max_ports,
127 max_ports, 1,
128 DEVLINK_RESOURCE_UNIT_ENTRY);
129
130 return devlink_resource_register(devlink,
131 DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
132 max_ports, MLXSW_CORE_RESOURCE_PORTS,
133 DEVLINK_RESOURCE_ID_PARENT_TOP,
134 &ports_num_params);
135 }
136
/* Allocate the per-port array and hook up the devlink ports resource.
 * On reload the resource itself already exists; only the occupancy
 * getter is re-registered.
 */
static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	unsigned int max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT;
	int err;

	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		max_ports = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SYSTEM_PORT);
	mlxsw_core->max_ports = max_ports + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	if (!reload) {
		err = mlxsw_core_resources_ports_register(mlxsw_core);
		if (err)
			goto err_resources_ports_register;
	}
	atomic_set(&mlxsw_core->active_ports_count, 0);
	devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
					  mlxsw_ports_occ_get, mlxsw_core);

	return 0;

err_resources_ports_register:
	kfree(mlxsw_core->ports);
	return err;
}
169
/* Tear down what mlxsw_ports_init() set up: unregister the occupancy
 * getter, drop the devlink resources (unless reloading, where they
 * persist) and free the port array.
 */
static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
	if (!reload)
		/* Use the local devlink handle instead of recomputing it. */
		devlink_resources_unregister(devlink);

	kfree(mlxsw_core->ports);
}
180
mlxsw_core_max_ports(const struct mlxsw_core * mlxsw_core)181 unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
182 {
183 return mlxsw_core->max_ports;
184 }
185 EXPORT_SYMBOL(mlxsw_core_max_ports);
186
mlxsw_core_driver_priv(struct mlxsw_core * mlxsw_core)187 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
188 {
189 return mlxsw_core->driver_priv;
190 }
191 EXPORT_SYMBOL(mlxsw_core_driver_priv);
192
193 bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev * rev,const struct mlxsw_fw_rev * req_rev)194 mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
195 const struct mlxsw_fw_rev *req_rev)
196 {
197 return rev->minor > req_rev->minor ||
198 (rev->minor == req_rev->minor &&
199 rev->subminor >= req_rev->subminor);
200 }
201 EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
202
/* A registered RX listener plus its private data; linked on
 * mlxsw_core->rx_listener_list.
 */
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
	bool enabled;
};
209
/* A registered event listener plus its private data; linked on
 * mlxsw_core->event_listener_list.
 */
struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_event_listener el;
	void *priv;
};
216
/* Trap groups configured by core itself (EMAD and core events). */
static const u8 mlxsw_core_trap_groups[] = {
	MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
	MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
};
221
mlxsw_core_trap_groups_set(struct mlxsw_core * mlxsw_core)222 static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core)
223 {
224 char htgt_pl[MLXSW_REG_HTGT_LEN];
225 int err;
226 int i;
227
228 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
229 return 0;
230
231 for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) {
232 mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i],
233 MLXSW_REG_HTGT_INVALID_POLICER,
234 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
235 MLXSW_REG_HTGT_DEFAULT_TC);
236 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
237 if (err)
238 return err;
239 }
240 return 0;
241 }
242
243 /******************
244 * EMAD processing
245 ******************/
246
247 /* emad_eth_hdr_dmac
248 * Destination MAC in EMAD's Ethernet header.
249 * Must be set to 01:02:c9:00:00:01
250 */
251 MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
252
253 /* emad_eth_hdr_smac
254 * Source MAC in EMAD's Ethernet header.
255 * Must be set to 00:02:c9:01:02:03
256 */
257 MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
258
259 /* emad_eth_hdr_ethertype
260 * Ethertype in EMAD's Ethernet header.
261 * Must be set to 0x8932
262 */
263 MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
264
265 /* emad_eth_hdr_mlx_proto
266 * Mellanox protocol.
267 * Must be set to 0x0.
268 */
269 MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
270
271 /* emad_eth_hdr_ver
272 * Mellanox protocol version.
273 * Must be set to 0x0.
274 */
275 MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
276
277 /* emad_op_tlv_type
278 * Type of the TLV.
279 * Must be set to 0x1 (operation TLV).
280 */
281 MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
282
283 /* emad_op_tlv_len
284 * Length of the operation TLV in u32.
285 * Must be set to 0x4.
286 */
287 MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
288
289 /* emad_op_tlv_dr
290 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
291 * EMAD. DR TLV must follow.
292 *
293 * Note: Currently not supported and must not be set.
294 */
295 MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
296
297 /* emad_op_tlv_status
298 * Returned status in case of EMAD response. Must be set to 0 in case
299 * of EMAD request.
300 * 0x0 - success
301 * 0x1 - device is busy. Requester should retry
302 * 0x2 - Mellanox protocol version not supported
303 * 0x3 - unknown TLV
304 * 0x4 - register not supported
305 * 0x5 - operation class not supported
306 * 0x6 - EMAD method not supported
307 * 0x7 - bad parameter (e.g. port out of range)
308 * 0x8 - resource not available
309 * 0x9 - message receipt acknowledgment. Requester should retry
310 * 0x70 - internal error
311 */
312 MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
313
314 /* emad_op_tlv_register_id
315 * Register ID of register within register TLV.
316 */
317 MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
318
319 /* emad_op_tlv_r
320 * Response bit. Setting to 1 indicates Response, otherwise request.
321 */
322 MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
323
324 /* emad_op_tlv_method
325 * EMAD method type.
326 * 0x1 - query
327 * 0x2 - write
328 * 0x3 - send (currently not supported)
329 * 0x4 - event
330 */
331 MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
332
333 /* emad_op_tlv_class
334 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
335 */
336 MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
337
338 /* emad_op_tlv_tid
339 * EMAD transaction ID. Used for pairing request and response EMADs.
340 */
341 MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
342
343 /* emad_string_tlv_type
344 * Type of the TLV.
345 * Must be set to 0x2 (string TLV).
346 */
347 MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);
348
349 /* emad_string_tlv_len
350 * Length of the string TLV in u32.
351 */
352 MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
353
354 #define MLXSW_EMAD_STRING_TLV_STRING_LEN 128
355
356 /* emad_string_tlv_string
357 * String provided by the device's firmware in case of erroneous register access
358 */
359 MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
360 MLXSW_EMAD_STRING_TLV_STRING_LEN);
361
362 /* emad_reg_tlv_type
363 * Type of the TLV.
364 * Must be set to 0x3 (register TLV).
365 */
366 MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
367
368 /* emad_reg_tlv_len
369 * Length of the operation TLV in u32.
370 */
371 MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
372
373 /* emad_end_tlv_type
374 * Type of the TLV.
375 * Must be set to 0x0 (end TLV).
376 */
377 MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
378
379 /* emad_end_tlv_len
380 * Length of the end TLV in u32.
381 * Must be set to 1.
382 */
383 MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
384
/* Register access operation kind carried in the EMAD operation TLV. */
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};
389
390 static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)391 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
392 {
393 switch (type) {
394 case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
395 return "query";
396 case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
397 return "write";
398 }
399 BUG();
400 }
401
/* Fill in the fixed end TLV terminating every EMAD. */
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}
407
mlxsw_emad_pack_reg_tlv(char * reg_tlv,const struct mlxsw_reg_info * reg,char * payload)408 static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
409 const struct mlxsw_reg_info *reg,
410 char *payload)
411 {
412 mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
413 mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
414 memcpy(reg_tlv + sizeof(u32), payload, reg->len);
415 }
416
/* Fill in an (empty) string TLV; firmware writes the text on errors. */
static void mlxsw_emad_pack_string_tlv(char *string_tlv)
{
	mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
	mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
}
422
/* Fill in the operation TLV of a request EMAD for register @reg. */
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	mlxsw_emad_op_tlv_method_set(op_tlv,
				     type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY ?
				     MLXSW_EMAD_OP_TLV_METHOD_QUERY :
				     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}
444
/* Prepend the fixed EMAD Ethernet header (MACs, ethertype, Mellanox
 * protocol and version are all constants) and mark the MAC header.
 * Always returns 0.
 */
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
459
/* Assemble a request EMAD in @skb. TLVs are pushed back-to-front (end,
 * register, optional string, operation), then the Ethernet header, so
 * the push order below is significant.
 */
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid, bool enable_string_tlv)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	if (enable_string_tlv) {
		buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
		mlxsw_emad_pack_string_tlv(buf);
	}

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
484
/* Byte offsets of the TLVs inside a received EMAD, stored in skb->cb by
 * mlxsw_emad_tlv_parse(). string_tlv is 0 when no string TLV is present.
 */
struct mlxsw_emad_tlv_offsets {
	u16 op_tlv;
	u16 string_tlv;
	u16 reg_tlv;
};
490
mlxsw_emad_tlv_is_string_tlv(const char * tlv)491 static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
492 {
493 u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);
494
495 return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
496 }
497
mlxsw_emad_tlv_parse(struct sk_buff * skb)498 static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
499 {
500 struct mlxsw_emad_tlv_offsets *offsets =
501 (struct mlxsw_emad_tlv_offsets *) skb->cb;
502
503 offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
504 offsets->string_tlv = 0;
505 offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
506 MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
507
508 /* If string TLV is present, it must come after the operation TLV. */
509 if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
510 offsets->string_tlv = offsets->reg_tlv;
511 offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
512 }
513 }
514
mlxsw_emad_op_tlv(const struct sk_buff * skb)515 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
516 {
517 struct mlxsw_emad_tlv_offsets *offsets =
518 (struct mlxsw_emad_tlv_offsets *) skb->cb;
519
520 return ((char *) (skb->data + offsets->op_tlv));
521 }
522
mlxsw_emad_string_tlv(const struct sk_buff * skb)523 static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
524 {
525 struct mlxsw_emad_tlv_offsets *offsets =
526 (struct mlxsw_emad_tlv_offsets *) skb->cb;
527
528 if (!offsets->string_tlv)
529 return NULL;
530
531 return ((char *) (skb->data + offsets->string_tlv));
532 }
533
mlxsw_emad_reg_tlv(const struct sk_buff * skb)534 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
535 {
536 struct mlxsw_emad_tlv_offsets *offsets =
537 (struct mlxsw_emad_tlv_offsets *) skb->cb;
538
539 return ((char *) (skb->data + offsets->reg_tlv));
540 }
541
mlxsw_emad_reg_payload(const char * reg_tlv)542 static char *mlxsw_emad_reg_payload(const char *reg_tlv)
543 {
544 return ((char *) (reg_tlv + sizeof(u32)));
545 }
546
mlxsw_emad_reg_payload_cmd(const char * mbox)547 static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
548 {
549 return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
550 }
551
mlxsw_emad_get_tid(const struct sk_buff * skb)552 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
553 {
554 char *op_tlv;
555
556 op_tlv = mlxsw_emad_op_tlv(skb);
557 return mlxsw_emad_op_tlv_tid_get(op_tlv);
558 }
559
mlxsw_emad_is_resp(const struct sk_buff * skb)560 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
561 {
562 char *op_tlv;
563
564 op_tlv = mlxsw_emad_op_tlv(skb);
565 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
566 }
567
mlxsw_emad_process_status(char * op_tlv,enum mlxsw_emad_op_tlv_status * p_status)568 static int mlxsw_emad_process_status(char *op_tlv,
569 enum mlxsw_emad_op_tlv_status *p_status)
570 {
571 *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
572
573 switch (*p_status) {
574 case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
575 return 0;
576 case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
577 case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
578 return -EAGAIN;
579 case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
580 case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
581 case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
582 case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
583 case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
584 case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
585 case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
586 case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
587 default:
588 return -EIO;
589 }
590 }
591
/* Convenience wrapper: process the status of an already-parsed skb. */
static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	char *op_tlv = mlxsw_emad_op_tlv(skb);

	return mlxsw_emad_process_status(op_tlv, p_status);
}
598
/* State of a single in-flight EMAD register transaction. */
struct mlxsw_reg_trans {
	struct list_head list;		/* node on mlxsw_core->emad.trans_list */
	struct list_head bulk_list;	/* node on caller-provided bulk list */
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;		/* pristine copy; cloned per (re)send */
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;	/* response-timeout handler */
	unsigned int retries;		/* retransmissions performed so far */
	u64 tid;			/* transaction ID to match responses */
	struct completion completion;	/* signalled when transaction finishes */
	atomic_t active;		/* 1 while a send awaits response/timeout */
	mlxsw_reg_trans_cb_t *cb;	/* optional response-payload callback */
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;			/* final status; valid after completion */
	char *emad_err_string;		/* kzalloc'ed FW error string, or NULL */
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};
619
mlxsw_emad_process_string_tlv(const struct sk_buff * skb,struct mlxsw_reg_trans * trans)620 static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
621 struct mlxsw_reg_trans *trans)
622 {
623 char *string_tlv;
624 char *string;
625
626 string_tlv = mlxsw_emad_string_tlv(skb);
627 if (!string_tlv)
628 return;
629
630 trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
631 GFP_ATOMIC);
632 if (!trans->emad_err_string)
633 return;
634
635 string = mlxsw_emad_string_tlv_string_data(string_tlv);
636 strlcpy(trans->emad_err_string, string,
637 MLXSW_EMAD_STRING_TLV_STRING_LEN);
638 }
639
640 #define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
641 #define MLXSW_EMAD_TIMEOUT_MS 200
642
mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans * trans)643 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
644 {
645 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
646
647 if (trans->core->fw_flash_in_progress)
648 timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
649
650 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
651 timeout << trans->retries);
652 }
653
mlxsw_emad_transmit(struct mlxsw_core * mlxsw_core,struct mlxsw_reg_trans * trans)654 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
655 struct mlxsw_reg_trans *trans)
656 {
657 struct sk_buff *skb;
658 int err;
659
660 skb = skb_clone(trans->tx_skb, GFP_KERNEL);
661 if (!skb)
662 return -ENOMEM;
663
664 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
665 skb->data + mlxsw_core->driver->txhdr_len,
666 skb->len - mlxsw_core->driver->txhdr_len);
667
668 atomic_set(&trans->active, 1);
669 err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
670 if (err) {
671 dev_kfree_skb(skb);
672 return err;
673 }
674 mlxsw_emad_trans_timeout_schedule(trans);
675 return 0;
676 }
677
mlxsw_emad_trans_finish(struct mlxsw_reg_trans * trans,int err)678 static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
679 {
680 struct mlxsw_core *mlxsw_core = trans->core;
681
682 dev_kfree_skb(trans->tx_skb);
683 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
684 list_del_rcu(&trans->list);
685 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
686 trans->err = err;
687 complete(&trans->completion);
688 }
689
/* Retransmit an EMAD that timed out or asked for a retry, up to
 * MLXSW_EMAD_MAX_RETRY times; past that, fail the transaction with -EIO.
 */
static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;

		/* Transmit failed after 'active' was set; only the context
		 * that drops 'active' to zero may finish the transaction.
		 */
		if (!atomic_dec_and_test(&trans->active))
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
708
mlxsw_emad_trans_timeout_work(struct work_struct * work)709 static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
710 {
711 struct mlxsw_reg_trans *trans = container_of(work,
712 struct mlxsw_reg_trans,
713 timeout_dw.work);
714
715 if (!atomic_dec_and_test(&trans->active))
716 return;
717
718 mlxsw_emad_transmit_retry(trans->core, trans);
719 }
720
/* Handle an EMAD response matched to a pending transaction. Runs from
 * the RX path; atomic_dec_and_test() resolves the race against the
 * timeout work so only one of the two consumes the transaction.
 */
static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		/* Device asked for a retry (busy / receipt ack). */
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *reg_tlv = mlxsw_emad_reg_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(reg_tlv),
					  trans->reg->len, trans->cb_priv);
		} else {
			/* Hard failure - save the FW error string, if any. */
			mlxsw_emad_process_string_tlv(skb, trans);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}
747
748 /* called with rcu read lock held */
mlxsw_emad_rx_listener_func(struct sk_buff * skb,u16 local_port,void * priv)749 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
750 void *priv)
751 {
752 struct mlxsw_core *mlxsw_core = priv;
753 struct mlxsw_reg_trans *trans;
754
755 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
756 skb->data, skb->len);
757
758 mlxsw_emad_tlv_parse(skb);
759
760 if (!mlxsw_emad_is_resp(skb))
761 goto free_skb;
762
763 list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
764 if (mlxsw_emad_get_tid(skb) == trans->tid) {
765 mlxsw_emad_process_response(mlxsw_core, trans, skb);
766 break;
767 }
768 }
769
770 free_skb:
771 dev_kfree_skb(skb);
772 }
773
/* RX listener catching EMAD packets trapped to the CPU. */
static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
777
mlxsw_emad_init(struct mlxsw_core * mlxsw_core)778 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
779 {
780 struct workqueue_struct *emad_wq;
781 u64 tid;
782 int err;
783
784 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
785 return 0;
786
787 emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
788 if (!emad_wq)
789 return -ENOMEM;
790 mlxsw_core->emad_wq = emad_wq;
791
792 /* Set the upper 32 bits of the transaction ID field to a random
793 * number. This allows us to discard EMADs addressed to other
794 * devices.
795 */
796 get_random_bytes(&tid, 4);
797 tid <<= 32;
798 atomic64_set(&mlxsw_core->emad.tid, tid);
799
800 INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
801 spin_lock_init(&mlxsw_core->emad.trans_list_lock);
802
803 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
804 mlxsw_core);
805 if (err)
806 goto err_trap_register;
807
808 mlxsw_core->emad.use_emad = true;
809
810 return 0;
811
812 err_trap_register:
813 destroy_workqueue(mlxsw_core->emad_wq);
814 return err;
815 }
816
mlxsw_emad_fini(struct mlxsw_core * mlxsw_core)817 static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
818 {
819
820 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
821 return;
822
823 mlxsw_core->emad.use_emad = false;
824 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
825 mlxsw_core);
826 destroy_workqueue(mlxsw_core->emad_wq);
827 }
828
/* Allocate a zeroed skb large enough for the whole EMAD frame (TX
 * header, Ethernet header, all TLVs and the register payload). Returns
 * NULL if the frame would exceed the maximum EMAD size or on OOM.
 */
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len, bool enable_string_tlv)
{
	struct sk_buff *skb;
	u16 emad_len = reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		       (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		       sizeof(u32) + mlxsw_core->driver->txhdr_len;

	if (enable_string_tlv)
		emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	/* Zero the buffer and move tail to the end; the frame is then
	 * built front-to-back with skb_push().
	 */
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
851
/* Build and transmit an EMAD register-access transaction.
 *
 * On success the transaction is linked on the EMAD transaction list and
 * the caller's bulk_list; completion is signalled asynchronously from
 * the RX/timeout paths. On error all linkage is undone and the skb is
 * freed before returning.
 */
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	bool enable_string_tlv;
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* Since this can be changed during emad_reg_access, read it once and
	 * use the value all the way.
	 */
	enable_string_tlv = mlxsw_core->emad.enable_string_tlv;

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
			     enable_string_tlv);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	/* Make the transaction visible to the RX path before sending. */
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
911
912 /*****************
913 * Core functions
914 *****************/
915
mlxsw_core_driver_register(struct mlxsw_driver * mlxsw_driver)916 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
917 {
918 spin_lock(&mlxsw_core_driver_list_lock);
919 list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
920 spin_unlock(&mlxsw_core_driver_list_lock);
921 return 0;
922 }
923 EXPORT_SYMBOL(mlxsw_core_driver_register);
924
mlxsw_core_driver_unregister(struct mlxsw_driver * mlxsw_driver)925 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
926 {
927 spin_lock(&mlxsw_core_driver_list_lock);
928 list_del(&mlxsw_driver->list);
929 spin_unlock(&mlxsw_core_driver_list_lock);
930 }
931 EXPORT_SYMBOL(mlxsw_core_driver_unregister);
932
__driver_find(const char * kind)933 static struct mlxsw_driver *__driver_find(const char *kind)
934 {
935 struct mlxsw_driver *mlxsw_driver;
936
937 list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
938 if (strcmp(mlxsw_driver->kind, kind) == 0)
939 return mlxsw_driver;
940 }
941 return NULL;
942 }
943
mlxsw_core_driver_get(const char * kind)944 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
945 {
946 struct mlxsw_driver *mlxsw_driver;
947
948 spin_lock(&mlxsw_core_driver_list_lock);
949 mlxsw_driver = __driver_find(kind);
950 spin_unlock(&mlxsw_core_driver_list_lock);
951 return mlxsw_driver;
952 }
953
/* Glue between the mlxfw flashing framework and a core instance; the
 * mlxfw_dev member is handed to mlxfw and mapped back via container_of.
 */
struct mlxsw_core_fw_info {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_core *mlxsw_core;
};
958
/* mlxfw callback: query a firmware component's size/alignment limits
 * via the MCQI register, clamped to what the MCDA data path supports.
 */
static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
					 u16 component_index, u32 *p_max_size,
					 u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(fw_info->mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);

	return 0;
}
979
/* mlxfw callback: obtain the firmware update handle. The FSM must be
 * idle; otherwise another update is in flight and -EBUSY is returned.
 */
static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1001
/* mlxfw callback: announce an upcoming component download of
 * @component_size bytes to the FSM via MCC.
 */
static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					      u16 component_index, u32 component_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1014
/* mlxfw callback: download one data block of the component image at the
 * given offset via the MCDA register.
 */
static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					    u8 *data, u16 size, u32 offset)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
}
1026
/* mlxfw callback: ask the FSM to verify a fully downloaded component. */
static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					      u16 component_index)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1039
/* mlxfw callback: activate the newly flashed firmware image. */
static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1050
/* mlxfw callback: read the FSM's current state and error code via MCC. */
static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					 enum mlxfw_fsm_state *fsm_state,
					 enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	/* Clamp unknown hardware error codes to the enum's max value. */
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}
1073
/* mlxfw callback: cancel an in-progress firmware update. Best effort --
 * the callback is void, so the MCC write result is intentionally ignored.
 */
static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1084
/* mlxfw callback: release the firmware-update handle acquired by
 * mlxsw_core_fw_fsm_lock(). Best effort; write result ignored.
 */
static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1095
/* mlxfw callback table wiring the generic flash FSM to MCQI/MCC/MCDA
 * register accesses on this device.
 */
static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
	.component_query = mlxsw_core_fw_component_query,
	.fsm_lock = mlxsw_core_fw_fsm_lock,
	.fsm_component_update = mlxsw_core_fw_fsm_component_update,
	.fsm_block_download = mlxsw_core_fw_fsm_block_download,
	.fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
	.fsm_activate = mlxsw_core_fw_fsm_activate,
	.fsm_query_state = mlxsw_core_fw_fsm_query_state,
	.fsm_cancel = mlxsw_core_fw_fsm_cancel,
	.fsm_release = mlxsw_core_fw_fsm_release,
};
1107
/* Flash a firmware image through the mlxfw library.
 * fw_flash_in_progress is set for the duration of the flash; presumably
 * consulted elsewhere to relax command timeouts while flashing -- verify
 * against the transaction code before relying on that.
 */
static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_core_fw_info mlxsw_core_fw_info = {
		.mlxfw_dev = {
			.ops = &mlxsw_core_fw_mlxsw_dev_ops,
			.psid = mlxsw_core->bus_info->psid,
			.psid_size = strlen(mlxsw_core->bus_info->psid),
			.devlink = priv_to_devlink(mlxsw_core),
		},
		.mlxsw_core = mlxsw_core
	};
	int err;

	mlxsw_core->fw_flash_in_progress = true;
	err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
	mlxsw_core->fw_flash_in_progress = false;

	return err;
}
1128
/* Validate that the running firmware is compatible with the driver and,
 * if it is too old, flash the required firmware file.
 *
 * Returns 0 when no action is needed (or the flashed FW cannot be reset
 * in place), -EAGAIN when the flash succeeded and the caller should
 * trigger an FW reset, or a negative errno on failure.
 */
static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_bus_info *mlxsw_bus_info,
				      const struct mlxsw_fw_rev *req_rev,
				      const char *filename)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);

	err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
	if (err) {
		dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
		return err;
	}

	err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}
1185
/* Handle a devlink-initiated firmware flash request. */
static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
}
1192
/* devlink param validator: only the 'driver' and 'flash' firmware load
 * policies are supported by this driver.
 */
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
							    union devlink_param_value val,
							    struct netlink_ext_ack *extack)
{
	switch (val.vu8) {
	case DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER:
	case DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH:
		return 0;
	default:
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}
}
1205
/* Firmware-related devlink parameters exposed by the core (driverinit
 * cmode only).
 */
static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			      mlxsw_core_devlink_param_fw_load_policy_validate),
};
1210
/* Register the firmware devlink parameters and initialize fw_load_policy
 * to the 'driver' default.
 */
static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
				      ARRAY_SIZE(mlxsw_core_fw_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
	return 0;
}
1226
mlxsw_core_fw_params_unregister(struct mlxsw_core * mlxsw_core)1227 static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
1228 {
1229 devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
1230 ARRAY_SIZE(mlxsw_core_fw_devlink_params));
1231 }
1232
/* Map a devlink_port back to the mlxsw_core_port embedding it. */
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
1237
mlxsw_devlink_port_split(struct devlink * devlink,struct devlink_port * port,unsigned int count,struct netlink_ext_ack * extack)1238 static int mlxsw_devlink_port_split(struct devlink *devlink,
1239 struct devlink_port *port,
1240 unsigned int count,
1241 struct netlink_ext_ack *extack)
1242 {
1243 struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
1244 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1245
1246 if (!mlxsw_core->driver->port_split)
1247 return -EOPNOTSUPP;
1248 return mlxsw_core->driver->port_split(mlxsw_core,
1249 mlxsw_core_port->local_port,
1250 count, extack);
1251 }
1252
mlxsw_devlink_port_unsplit(struct devlink * devlink,struct devlink_port * port,struct netlink_ext_ack * extack)1253 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
1254 struct devlink_port *port,
1255 struct netlink_ext_ack *extack)
1256 {
1257 struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
1258 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1259
1260 if (!mlxsw_core->driver->port_unsplit)
1261 return -EOPNOTSUPP;
1262 return mlxsw_core->driver->port_unsplit(mlxsw_core,
1263 mlxsw_core_port->local_port,
1264 extack);
1265 }
1266
1267 static int
mlxsw_devlink_sb_pool_get(struct devlink * devlink,unsigned int sb_index,u16 pool_index,struct devlink_sb_pool_info * pool_info)1268 mlxsw_devlink_sb_pool_get(struct devlink *devlink,
1269 unsigned int sb_index, u16 pool_index,
1270 struct devlink_sb_pool_info *pool_info)
1271 {
1272 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1273 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1274
1275 if (!mlxsw_driver->sb_pool_get)
1276 return -EOPNOTSUPP;
1277 return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
1278 pool_index, pool_info);
1279 }
1280
1281 static int
mlxsw_devlink_sb_pool_set(struct devlink * devlink,unsigned int sb_index,u16 pool_index,u32 size,enum devlink_sb_threshold_type threshold_type,struct netlink_ext_ack * extack)1282 mlxsw_devlink_sb_pool_set(struct devlink *devlink,
1283 unsigned int sb_index, u16 pool_index, u32 size,
1284 enum devlink_sb_threshold_type threshold_type,
1285 struct netlink_ext_ack *extack)
1286 {
1287 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1288 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1289
1290 if (!mlxsw_driver->sb_pool_set)
1291 return -EOPNOTSUPP;
1292 return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
1293 pool_index, size, threshold_type,
1294 extack);
1295 }
1296
mlxsw_devlink_port_type_set(struct devlink_port * devlink_port,enum devlink_port_type port_type)1297 static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
1298 enum devlink_port_type port_type)
1299 {
1300 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1301 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1302 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1303
1304 if (!mlxsw_driver->port_type_set)
1305 return -EOPNOTSUPP;
1306
1307 return mlxsw_driver->port_type_set(mlxsw_core,
1308 mlxsw_core_port->local_port,
1309 port_type);
1310 }
1311
/* devlink op: report a port's shared-buffer pool threshold. Requires a
 * fully initialized core port.
 */
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core_port *core_port = __dl_port(devlink_port);
	struct mlxsw_core *core = devlink_priv(devlink_port->devlink);

	if (!mlxsw_core_port_check(core_port) ||
	    !core->driver->sb_port_pool_get)
		return -EOPNOTSUPP;

	return core->driver->sb_port_pool_get(core_port, sb_index, pool_index,
					      p_threshold);
}
1326
/* devlink op: set a port's shared-buffer pool threshold. Requires a
 * fully initialized core port.
 */
static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core_port *core_port = __dl_port(devlink_port);
	struct mlxsw_core *core = devlink_priv(devlink_port->devlink);

	if (!mlxsw_core_port_check(core_port) ||
	    !core->driver->sb_port_pool_set)
		return -EOPNOTSUPP;

	return core->driver->sb_port_pool_set(core_port, sb_index, pool_index,
					      threshold, extack);
}
1342
1343 static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port * devlink_port,unsigned int sb_index,u16 tc_index,enum devlink_sb_pool_type pool_type,u16 * p_pool_index,u32 * p_threshold)1344 mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
1345 unsigned int sb_index, u16 tc_index,
1346 enum devlink_sb_pool_type pool_type,
1347 u16 *p_pool_index, u32 *p_threshold)
1348 {
1349 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1350 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1351 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1352
1353 if (!mlxsw_driver->sb_tc_pool_bind_get ||
1354 !mlxsw_core_port_check(mlxsw_core_port))
1355 return -EOPNOTSUPP;
1356 return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
1357 tc_index, pool_type,
1358 p_pool_index, p_threshold);
1359 }
1360
1361 static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port * devlink_port,unsigned int sb_index,u16 tc_index,enum devlink_sb_pool_type pool_type,u16 pool_index,u32 threshold,struct netlink_ext_ack * extack)1362 mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
1363 unsigned int sb_index, u16 tc_index,
1364 enum devlink_sb_pool_type pool_type,
1365 u16 pool_index, u32 threshold,
1366 struct netlink_ext_ack *extack)
1367 {
1368 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1369 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1370 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1371
1372 if (!mlxsw_driver->sb_tc_pool_bind_set ||
1373 !mlxsw_core_port_check(mlxsw_core_port))
1374 return -EOPNOTSUPP;
1375 return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
1376 tc_index, pool_type,
1377 pool_index, threshold, extack);
1378 }
1379
mlxsw_devlink_sb_occ_snapshot(struct devlink * devlink,unsigned int sb_index)1380 static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
1381 unsigned int sb_index)
1382 {
1383 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1384 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1385
1386 if (!mlxsw_driver->sb_occ_snapshot)
1387 return -EOPNOTSUPP;
1388 return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
1389 }
1390
mlxsw_devlink_sb_occ_max_clear(struct devlink * devlink,unsigned int sb_index)1391 static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
1392 unsigned int sb_index)
1393 {
1394 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1395 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1396
1397 if (!mlxsw_driver->sb_occ_max_clear)
1398 return -EOPNOTSUPP;
1399 return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
1400 }
1401
1402 static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port * devlink_port,unsigned int sb_index,u16 pool_index,u32 * p_cur,u32 * p_max)1403 mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
1404 unsigned int sb_index, u16 pool_index,
1405 u32 *p_cur, u32 *p_max)
1406 {
1407 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1408 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1409 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1410
1411 if (!mlxsw_driver->sb_occ_port_pool_get ||
1412 !mlxsw_core_port_check(mlxsw_core_port))
1413 return -EOPNOTSUPP;
1414 return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
1415 pool_index, p_cur, p_max);
1416 }
1417
1418 static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port * devlink_port,unsigned int sb_index,u16 tc_index,enum devlink_sb_pool_type pool_type,u32 * p_cur,u32 * p_max)1419 mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
1420 unsigned int sb_index, u16 tc_index,
1421 enum devlink_sb_pool_type pool_type,
1422 u32 *p_cur, u32 *p_max)
1423 {
1424 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1425 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1426 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1427
1428 if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
1429 !mlxsw_core_port_check(mlxsw_core_port))
1430 return -EOPNOTSUPP;
1431 return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
1432 sb_index, tc_index,
1433 pool_type, p_cur, p_max);
1434 }
1435
1436 static int
mlxsw_devlink_info_get(struct devlink * devlink,struct devlink_info_req * req,struct netlink_ext_ack * extack)1437 mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
1438 struct netlink_ext_ack *extack)
1439 {
1440 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1441 char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
1442 u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
1443 char mgir_pl[MLXSW_REG_MGIR_LEN];
1444 char buf[32];
1445 int err;
1446
1447 err = devlink_info_driver_name_put(req,
1448 mlxsw_core->bus_info->device_kind);
1449 if (err)
1450 return err;
1451
1452 mlxsw_reg_mgir_pack(mgir_pl);
1453 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
1454 if (err)
1455 return err;
1456 mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
1457 &fw_minor, &fw_sub_minor);
1458
1459 sprintf(buf, "%X", hw_rev);
1460 err = devlink_info_version_fixed_put(req, "hw.revision", buf);
1461 if (err)
1462 return err;
1463
1464 err = devlink_info_version_fixed_put(req,
1465 DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
1466 fw_info_psid);
1467 if (err)
1468 return err;
1469
1470 sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
1471 err = devlink_info_version_running_put(req, "fw.version", buf);
1472 if (err)
1473 return err;
1474
1475 return devlink_info_version_running_put(req,
1476 DEVLINK_INFO_VERSION_GENERIC_FW,
1477 buf);
1478 }
1479
/* devlink reload_down op: tear the device down in preparation for a
 * reload. Only supported on buses that can reset the device.
 */
static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
					  bool netns_change, enum devlink_reload_action action,
					  enum devlink_reload_limit limit,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	/* 'true' marks this unregister as part of a reload sequence. */
	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	return 0;
}
1494
/* devlink reload_up op: re-register the device after reload_down and
 * report which reload actions were performed.
 */
static int
mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
					enum devlink_reload_limit limit, u32 *actions_performed,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			     BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
	/* 'true' marks this register as the second half of a reload. */
	return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					      mlxsw_core->bus,
					      mlxsw_core->bus_priv, true,
					      devlink, extack);
}
1509
/* devlink op: flash device firmware. */
static int mlxsw_devlink_flash_update(struct devlink *devlink,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	return mlxsw_core_fw_flash_update(devlink_priv(devlink), params,
					  extack);
}
1518
mlxsw_devlink_trap_init(struct devlink * devlink,const struct devlink_trap * trap,void * trap_ctx)1519 static int mlxsw_devlink_trap_init(struct devlink *devlink,
1520 const struct devlink_trap *trap,
1521 void *trap_ctx)
1522 {
1523 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1524 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1525
1526 if (!mlxsw_driver->trap_init)
1527 return -EOPNOTSUPP;
1528 return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
1529 }
1530
mlxsw_devlink_trap_fini(struct devlink * devlink,const struct devlink_trap * trap,void * trap_ctx)1531 static void mlxsw_devlink_trap_fini(struct devlink *devlink,
1532 const struct devlink_trap *trap,
1533 void *trap_ctx)
1534 {
1535 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1536 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1537
1538 if (!mlxsw_driver->trap_fini)
1539 return;
1540 mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
1541 }
1542
mlxsw_devlink_trap_action_set(struct devlink * devlink,const struct devlink_trap * trap,enum devlink_trap_action action,struct netlink_ext_ack * extack)1543 static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
1544 const struct devlink_trap *trap,
1545 enum devlink_trap_action action,
1546 struct netlink_ext_ack *extack)
1547 {
1548 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1549 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1550
1551 if (!mlxsw_driver->trap_action_set)
1552 return -EOPNOTSUPP;
1553 return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
1554 }
1555
1556 static int
mlxsw_devlink_trap_group_init(struct devlink * devlink,const struct devlink_trap_group * group)1557 mlxsw_devlink_trap_group_init(struct devlink *devlink,
1558 const struct devlink_trap_group *group)
1559 {
1560 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1561 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1562
1563 if (!mlxsw_driver->trap_group_init)
1564 return -EOPNOTSUPP;
1565 return mlxsw_driver->trap_group_init(mlxsw_core, group);
1566 }
1567
1568 static int
mlxsw_devlink_trap_group_set(struct devlink * devlink,const struct devlink_trap_group * group,const struct devlink_trap_policer * policer,struct netlink_ext_ack * extack)1569 mlxsw_devlink_trap_group_set(struct devlink *devlink,
1570 const struct devlink_trap_group *group,
1571 const struct devlink_trap_policer *policer,
1572 struct netlink_ext_ack *extack)
1573 {
1574 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1575 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1576
1577 if (!mlxsw_driver->trap_group_set)
1578 return -EOPNOTSUPP;
1579 return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
1580 }
1581
1582 static int
mlxsw_devlink_trap_policer_init(struct devlink * devlink,const struct devlink_trap_policer * policer)1583 mlxsw_devlink_trap_policer_init(struct devlink *devlink,
1584 const struct devlink_trap_policer *policer)
1585 {
1586 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1587 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1588
1589 if (!mlxsw_driver->trap_policer_init)
1590 return -EOPNOTSUPP;
1591 return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
1592 }
1593
1594 static void
mlxsw_devlink_trap_policer_fini(struct devlink * devlink,const struct devlink_trap_policer * policer)1595 mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
1596 const struct devlink_trap_policer *policer)
1597 {
1598 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1599 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1600
1601 if (!mlxsw_driver->trap_policer_fini)
1602 return;
1603 mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
1604 }
1605
1606 static int
mlxsw_devlink_trap_policer_set(struct devlink * devlink,const struct devlink_trap_policer * policer,u64 rate,u64 burst,struct netlink_ext_ack * extack)1607 mlxsw_devlink_trap_policer_set(struct devlink *devlink,
1608 const struct devlink_trap_policer *policer,
1609 u64 rate, u64 burst,
1610 struct netlink_ext_ack *extack)
1611 {
1612 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1613 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1614
1615 if (!mlxsw_driver->trap_policer_set)
1616 return -EOPNOTSUPP;
1617 return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
1618 extack);
1619 }
1620
1621 static int
mlxsw_devlink_trap_policer_counter_get(struct devlink * devlink,const struct devlink_trap_policer * policer,u64 * p_drops)1622 mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
1623 const struct devlink_trap_policer *policer,
1624 u64 *p_drops)
1625 {
1626 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1627 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1628
1629 if (!mlxsw_driver->trap_policer_counter_get)
1630 return -EOPNOTSUPP;
1631 return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
1632 p_drops);
1633 }
1634
/* devlink operations table: most ops are thin dispatchers into the
 * per-device driver's callbacks, returning -EOPNOTSUPP when the driver
 * does not implement one.
 */
static const struct devlink_ops mlxsw_devlink_ops = {
	.reload_actions		= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
				  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down		= mlxsw_devlink_core_bus_device_reload_down,
	.reload_up		= mlxsw_devlink_core_bus_device_reload_up,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
	.info_get			= mlxsw_devlink_info_get,
	.flash_update			= mlxsw_devlink_flash_update,
	.trap_init			= mlxsw_devlink_trap_init,
	.trap_fini			= mlxsw_devlink_trap_fini,
	.trap_action_set		= mlxsw_devlink_trap_action_set,
	.trap_group_init		= mlxsw_devlink_trap_group_init,
	.trap_group_set			= mlxsw_devlink_trap_group_set,
	.trap_policer_init		= mlxsw_devlink_trap_policer_init,
	.trap_policer_fini		= mlxsw_devlink_trap_policer_fini,
	.trap_policer_set		= mlxsw_devlink_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_devlink_trap_policer_counter_get,
};
1665
/* Register core (firmware) devlink params, then the device driver's own
 * params. On driver-param failure, the core params are rolled back.
 */
static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_core_fw_params_register(mlxsw_core);
	if (err)
		return err;

	if (mlxsw_core->driver->params_register) {
		err = mlxsw_core->driver->params_register(mlxsw_core);
		if (err)
			goto err_params_register;
	}
	return 0;

err_params_register:
	mlxsw_core_fw_params_unregister(mlxsw_core);
	return err;
}
1685
/* Unregister core and driver devlink params registered by
 * mlxsw_core_params_register(). NOTE(review): core params are removed
 * before driver params here, i.e. not in strict reverse registration
 * order -- presumably intentional; confirm before reordering.
 */
static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core_fw_params_unregister(mlxsw_core);
	if (mlxsw_core->driver->params_register)
		mlxsw_core->driver->params_unregister(mlxsw_core);
}
1692
/* Deferred FW fatal event: a copy of the MFDE payload plus the work item
 * that reports it to the devlink health infrastructure.
 */
struct mlxsw_core_health_event {
	struct mlxsw_core *mlxsw_core;
	char mfde_pl[MLXSW_REG_MFDE_LEN];
	struct work_struct work;
};
1698
/* Workqueue handler: report a queued FW fatal event to the fw_fatal
 * health reporter and free the event allocated by the listener.
 */
static void mlxsw_core_health_event_work(struct work_struct *work)
{
	struct mlxsw_core_health_event *event;
	struct mlxsw_core *mlxsw_core;

	event = container_of(work, struct mlxsw_core_health_event, work);
	mlxsw_core = event->mlxsw_core;
	devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
			      event->mfde_pl);
	kfree(event);
}
1710
/* MFDE event listener: copy the payload and defer the health report to
 * a workqueue. GFP_ATOMIC because this presumably runs in a context
 * that must not sleep -- confirm against the event dispatch path. On
 * allocation failure the event is silently dropped (best effort).
 */
static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
					    char *mfde_pl, void *priv)
{
	struct mlxsw_core_health_event *event;
	struct mlxsw_core *mlxsw_core = priv;

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->mlxsw_core = mlxsw_core;
	memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
	INIT_WORK(&event->work, mlxsw_core_health_event_work);
	mlxsw_core_schedule_work(&event->work);
}
1725
/* Event listener registration entry for MFDE (FW fatal) events. */
static const struct mlxsw_listener mlxsw_core_health_listener =
	MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);
1728
/* Format the MFDE fatal-cause fields (cause ID and, when valid, the tile
 * index) into the health reporter's fmsg.
 */
static int
mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
					    struct devlink_fmsg *fmsg)
{
	u32 val, tile_v;
	int err;

	val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val);
	if (err)
		return err;
	tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl);
	/* tile_index is only meaningful when the tile-valid bit is set. */
	if (tile_v) {
		val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl);
		err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
		if (err)
			return err;
	}

	return 0;
}
1750
1751 static int
mlxsw_core_health_fw_fatal_dump_fw_assert(const char * mfde_pl,struct devlink_fmsg * fmsg)1752 mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
1753 struct devlink_fmsg *fmsg)
1754 {
1755 u32 val, tile_v;
1756 int err;
1757
1758 val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
1759 err = devlink_fmsg_u32_pair_put(fmsg, "var0", val);
1760 if (err)
1761 return err;
1762 val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
1763 err = devlink_fmsg_u32_pair_put(fmsg, "var1", val);
1764 if (err)
1765 return err;
1766 val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
1767 err = devlink_fmsg_u32_pair_put(fmsg, "var2", val);
1768 if (err)
1769 return err;
1770 val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
1771 err = devlink_fmsg_u32_pair_put(fmsg, "var3", val);
1772 if (err)
1773 return err;
1774 val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
1775 err = devlink_fmsg_u32_pair_put(fmsg, "var4", val);
1776 if (err)
1777 return err;
1778 val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
1779 err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
1780 if (err)
1781 return err;
1782 val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
1783 err = devlink_fmsg_u32_pair_put(fmsg, "callra", val);
1784 if (err)
1785 return err;
1786 val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
1787 err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
1788 if (err)
1789 return err;
1790 tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
1791 if (tile_v) {
1792 val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
1793 err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
1794 if (err)
1795 return err;
1796 }
1797 val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
1798 err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
1799 if (err)
1800 return err;
1801
1802 return 0;
1803 }
1804
/* Dump the kvd_im_stop-specific part of an MFDE event into @fmsg. */
static int
mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
					    struct devlink_fmsg *fmsg)
{
	int err;

	err = devlink_fmsg_bool_pair_put(fmsg, "old_event",
					 mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl));
	if (err)
		return err;

	return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask",
					 mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl));
}
1819
1820 static int
mlxsw_core_health_fw_fatal_dump_crspace_to(const char * mfde_pl,struct devlink_fmsg * fmsg)1821 mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
1822 struct devlink_fmsg *fmsg)
1823 {
1824 u32 val;
1825 int err;
1826
1827 val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
1828 err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
1829 if (err)
1830 return err;
1831 val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
1832 err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
1833 if (err)
1834 return err;
1835 val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
1836 err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
1837 if (err)
1838 return err;
1839 val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
1840 err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
1841 if (err)
1842 return err;
1843
1844 return 0;
1845 }
1846
mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * priv_ctx,struct netlink_ext_ack * extack)1847 static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
1848 struct devlink_fmsg *fmsg, void *priv_ctx,
1849 struct netlink_ext_ack *extack)
1850 {
1851 char *mfde_pl = priv_ctx;
1852 char *val_str;
1853 u8 event_id;
1854 u32 val;
1855 int err;
1856
1857 if (!priv_ctx)
1858 /* User-triggered dumps are not possible */
1859 return -EOPNOTSUPP;
1860
1861 val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
1862 err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
1863 if (err)
1864 return err;
1865 err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
1866 if (err)
1867 return err;
1868
1869 event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
1870 err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
1871 if (err)
1872 return err;
1873 switch (event_id) {
1874 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
1875 val_str = "CR space timeout";
1876 break;
1877 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
1878 val_str = "KVD insertion machine stopped";
1879 break;
1880 case MLXSW_REG_MFDE_EVENT_ID_TEST:
1881 val_str = "Test";
1882 break;
1883 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
1884 val_str = "FW assert";
1885 break;
1886 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
1887 val_str = "Fatal cause";
1888 break;
1889 default:
1890 val_str = NULL;
1891 }
1892 if (val_str) {
1893 err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
1894 if (err)
1895 return err;
1896 }
1897
1898 err = devlink_fmsg_arr_pair_nest_end(fmsg);
1899 if (err)
1900 return err;
1901
1902 err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity");
1903 if (err)
1904 return err;
1905
1906 val = mlxsw_reg_mfde_severity_get(mfde_pl);
1907 err = devlink_fmsg_u8_pair_put(fmsg, "id", val);
1908 if (err)
1909 return err;
1910 switch (val) {
1911 case MLXSW_REG_MFDE_SEVERITY_FATL:
1912 val_str = "Fatal";
1913 break;
1914 case MLXSW_REG_MFDE_SEVERITY_NRML:
1915 val_str = "Normal";
1916 break;
1917 case MLXSW_REG_MFDE_SEVERITY_INTR:
1918 val_str = "Debug";
1919 break;
1920 default:
1921 val_str = NULL;
1922 }
1923 if (val_str) {
1924 err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
1925 if (err)
1926 return err;
1927 }
1928
1929 err = devlink_fmsg_arr_pair_nest_end(fmsg);
1930 if (err)
1931 return err;
1932
1933 val = mlxsw_reg_mfde_method_get(mfde_pl);
1934 switch (val) {
1935 case MLXSW_REG_MFDE_METHOD_QUERY:
1936 val_str = "query";
1937 break;
1938 case MLXSW_REG_MFDE_METHOD_WRITE:
1939 val_str = "write";
1940 break;
1941 default:
1942 val_str = NULL;
1943 }
1944 if (val_str) {
1945 err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
1946 if (err)
1947 return err;
1948 }
1949
1950 val = mlxsw_reg_mfde_long_process_get(mfde_pl);
1951 err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
1952 if (err)
1953 return err;
1954
1955 val = mlxsw_reg_mfde_command_type_get(mfde_pl);
1956 switch (val) {
1957 case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
1958 val_str = "mad";
1959 break;
1960 case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
1961 val_str = "emad";
1962 break;
1963 case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
1964 val_str = "cmdif";
1965 break;
1966 default:
1967 val_str = NULL;
1968 }
1969 if (val_str) {
1970 err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
1971 if (err)
1972 return err;
1973 }
1974
1975 val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
1976 err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
1977 if (err)
1978 return err;
1979
1980 switch (event_id) {
1981 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
1982 return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl,
1983 fmsg);
1984 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
1985 return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl,
1986 fmsg);
1987 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
1988 return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
1989 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
1990 return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl,
1991 fmsg);
1992 }
1993
1994 return 0;
1995 }
1996
1997 static int
mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter * reporter,struct netlink_ext_ack * extack)1998 mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
1999 struct netlink_ext_ack *extack)
2000 {
2001 struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
2002 char mfgd_pl[MLXSW_REG_MFGD_LEN];
2003 int err;
2004
2005 /* Read the register first to make sure no other bits are changed. */
2006 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
2007 if (err)
2008 return err;
2009 mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
2010 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
2011 }
2012
/* devlink health reporter ops for firmware fatal events. Dump only works
 * from an event context (priv_ctx carries the MFDE payload); test triggers
 * a synthetic event via the MFGD register.
 */
static const struct devlink_health_reporter_ops
mlxsw_core_health_fw_fatal_ops = {
	.name = "fw_fatal",
	.dump = mlxsw_core_health_fw_fatal_dump,
	.test = mlxsw_core_health_fw_fatal_test,
};
2019
mlxsw_core_health_fw_fatal_config(struct mlxsw_core * mlxsw_core,bool enable)2020 static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
2021 bool enable)
2022 {
2023 char mfgd_pl[MLXSW_REG_MFGD_LEN];
2024 int err;
2025
2026 /* Read the register first to make sure no other bits are changed. */
2027 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
2028 if (err)
2029 return err;
2030 mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
2031 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
2032 }
2033
mlxsw_core_health_init(struct mlxsw_core * mlxsw_core)2034 static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
2035 {
2036 struct devlink *devlink = priv_to_devlink(mlxsw_core);
2037 struct devlink_health_reporter *fw_fatal;
2038 int err;
2039
2040 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2041 return 0;
2042
2043 fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
2044 0, mlxsw_core);
2045 if (IS_ERR(fw_fatal)) {
2046 dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
2047 return PTR_ERR(fw_fatal);
2048 }
2049 mlxsw_core->health.fw_fatal = fw_fatal;
2050
2051 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
2052 if (err)
2053 goto err_trap_register;
2054
2055 err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
2056 if (err)
2057 goto err_fw_fatal_config;
2058
2059 return 0;
2060
2061 err_fw_fatal_config:
2062 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
2063 err_trap_register:
2064 devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
2065 return err;
2066 }
2067
mlxsw_core_health_fini(struct mlxsw_core * mlxsw_core)2068 static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
2069 {
2070 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2071 return;
2072
2073 mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
2074 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
2075 /* Make sure there is no more event work scheduled */
2076 mlxsw_core_flush_owq();
2077 devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
2078 }
2079
/* Common device bring-up, used both for initial probe and for devlink
 * reload. When @reload is true, the devlink instance and driver
 * resources already exist and are reused. May return -EAGAIN if the FW
 * was just flashed and the device needs a reset (retried by the caller).
 */
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				 const struct mlxsw_bus *mlxsw_bus,
				 void *bus_priv, bool reload,
				 struct devlink *devlink,
				 struct netlink_ext_ack *extack)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		/* The core struct plus the driver private area live in the
		 * devlink priv allocation.
		 */
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size,
					mlxsw_bus_info->dev);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core, reload);
	if (err)
		goto err_ports_init;

	/* LAG mapping table: MAX_LAG x MAX_LAG_MEMBERS entries. */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(*mlxsw_core->lag.mapping) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_core_trap_groups_set(mlxsw_core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = mlxsw_core_params_register(mlxsw_core);
		if (err)
			goto err_register_params;
	}

	/* May flash newer FW and return -EAGAIN to request a reset. */
	err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
					 mlxsw_driver->fw_filename);
	if (err)
		goto err_fw_rev_validate;

	err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info);
	if (err)
		goto err_linecards_init;

	err = mlxsw_core_health_init(mlxsw_core);
	if (err)
		goto err_health_init;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env);
	if (err)
		goto err_env_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
		if (err)
			goto err_driver_init;
	}

	if (!reload) {
		devlink_set_features(devlink, DEVLINK_F_RELOAD);
		devlink_register(devlink);
	}
	return 0;

	/* Error unwind - mirror image of the init sequence above. */
err_driver_init:
	mlxsw_env_fini(mlxsw_core->env);
err_env_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	mlxsw_core_health_fini(mlxsw_core);
err_health_init:
	mlxsw_linecards_fini(mlxsw_core);
err_linecards_init:
err_fw_rev_validate:
	if (!reload)
		mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
err_trap_groups_set:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}
2226
mlxsw_core_bus_device_register(const struct mlxsw_bus_info * mlxsw_bus_info,const struct mlxsw_bus * mlxsw_bus,void * bus_priv,bool reload,struct devlink * devlink,struct netlink_ext_ack * extack)2227 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
2228 const struct mlxsw_bus *mlxsw_bus,
2229 void *bus_priv, bool reload,
2230 struct devlink *devlink,
2231 struct netlink_ext_ack *extack)
2232 {
2233 bool called_again = false;
2234 int err;
2235
2236 again:
2237 err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
2238 bus_priv, reload,
2239 devlink, extack);
2240 /* -EAGAIN is returned in case the FW was updated. FW needs
2241 * a reset, so lets try to call __mlxsw_core_bus_device_register()
2242 * again.
2243 */
2244 if (err == -EAGAIN && !called_again) {
2245 called_again = true;
2246 goto again;
2247 }
2248
2249 return err;
2250 }
2251 EXPORT_SYMBOL(mlxsw_core_bus_device_register);
2252
mlxsw_core_bus_device_unregister(struct mlxsw_core * mlxsw_core,bool reload)2253 void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
2254 bool reload)
2255 {
2256 struct devlink *devlink = priv_to_devlink(mlxsw_core);
2257
2258 if (!reload)
2259 devlink_unregister(devlink);
2260
2261 if (devlink_is_reload_failed(devlink)) {
2262 if (!reload)
2263 /* Only the parts that were not de-initialized in the
2264 * failed reload attempt need to be de-initialized.
2265 */
2266 goto reload_fail_deinit;
2267 else
2268 return;
2269 }
2270
2271 if (mlxsw_core->driver->fini)
2272 mlxsw_core->driver->fini(mlxsw_core);
2273 mlxsw_env_fini(mlxsw_core->env);
2274 mlxsw_thermal_fini(mlxsw_core->thermal);
2275 mlxsw_hwmon_fini(mlxsw_core->hwmon);
2276 mlxsw_core_health_fini(mlxsw_core);
2277 mlxsw_linecards_fini(mlxsw_core);
2278 if (!reload)
2279 mlxsw_core_params_unregister(mlxsw_core);
2280 mlxsw_emad_fini(mlxsw_core);
2281 kfree(mlxsw_core->lag.mapping);
2282 mlxsw_ports_fini(mlxsw_core, reload);
2283 if (!reload)
2284 devlink_resources_unregister(devlink);
2285 mlxsw_core->bus->fini(mlxsw_core->bus_priv);
2286 if (!reload)
2287 devlink_free(devlink);
2288
2289 return;
2290
2291 reload_fail_deinit:
2292 mlxsw_core_params_unregister(mlxsw_core);
2293 devlink_resources_unregister(devlink);
2294 devlink_free(devlink);
2295 }
2296 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
2297
mlxsw_core_skb_transmit_busy(struct mlxsw_core * mlxsw_core,const struct mlxsw_tx_info * tx_info)2298 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
2299 const struct mlxsw_tx_info *tx_info)
2300 {
2301 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
2302 tx_info);
2303 }
2304 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
2305
mlxsw_core_skb_transmit(struct mlxsw_core * mlxsw_core,struct sk_buff * skb,const struct mlxsw_tx_info * tx_info)2306 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
2307 const struct mlxsw_tx_info *tx_info)
2308 {
2309 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
2310 tx_info);
2311 }
2312 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
2313
mlxsw_core_ptp_transmitted(struct mlxsw_core * mlxsw_core,struct sk_buff * skb,u16 local_port)2314 void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
2315 struct sk_buff *skb, u16 local_port)
2316 {
2317 if (mlxsw_core->driver->ptp_transmitted)
2318 mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
2319 local_port);
2320 }
2321 EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
2322
__is_rx_listener_equal(const struct mlxsw_rx_listener * rxl_a,const struct mlxsw_rx_listener * rxl_b)2323 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
2324 const struct mlxsw_rx_listener *rxl_b)
2325 {
2326 return (rxl_a->func == rxl_b->func &&
2327 rxl_a->local_port == rxl_b->local_port &&
2328 rxl_a->trap_id == rxl_b->trap_id &&
2329 rxl_a->mirror_reason == rxl_b->mirror_reason);
2330 }
2331
2332 static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core * mlxsw_core,const struct mlxsw_rx_listener * rxl)2333 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
2334 const struct mlxsw_rx_listener *rxl)
2335 {
2336 struct mlxsw_rx_listener_item *rxl_item;
2337
2338 list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
2339 if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
2340 return rxl_item;
2341 }
2342 return NULL;
2343 }
2344
mlxsw_core_rx_listener_register(struct mlxsw_core * mlxsw_core,const struct mlxsw_rx_listener * rxl,void * priv,bool enabled)2345 int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
2346 const struct mlxsw_rx_listener *rxl,
2347 void *priv, bool enabled)
2348 {
2349 struct mlxsw_rx_listener_item *rxl_item;
2350
2351 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2352 if (rxl_item)
2353 return -EEXIST;
2354 rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
2355 if (!rxl_item)
2356 return -ENOMEM;
2357 rxl_item->rxl = *rxl;
2358 rxl_item->priv = priv;
2359 rxl_item->enabled = enabled;
2360
2361 list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
2362 return 0;
2363 }
2364 EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
2365
mlxsw_core_rx_listener_unregister(struct mlxsw_core * mlxsw_core,const struct mlxsw_rx_listener * rxl)2366 void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
2367 const struct mlxsw_rx_listener *rxl)
2368 {
2369 struct mlxsw_rx_listener_item *rxl_item;
2370
2371 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2372 if (!rxl_item)
2373 return;
2374 list_del_rcu(&rxl_item->list);
2375 synchronize_rcu();
2376 kfree(rxl_item);
2377 }
2378 EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
2379
2380 static void
mlxsw_core_rx_listener_state_set(struct mlxsw_core * mlxsw_core,const struct mlxsw_rx_listener * rxl,bool enabled)2381 mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
2382 const struct mlxsw_rx_listener *rxl,
2383 bool enabled)
2384 {
2385 struct mlxsw_rx_listener_item *rxl_item;
2386
2387 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2388 if (WARN_ON(!rxl_item))
2389 return;
2390 rxl_item->enabled = enabled;
2391 }
2392
mlxsw_core_event_listener_func(struct sk_buff * skb,u16 local_port,void * priv)2393 static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port,
2394 void *priv)
2395 {
2396 struct mlxsw_event_listener_item *event_listener_item = priv;
2397 struct mlxsw_core *mlxsw_core;
2398 struct mlxsw_reg_info reg;
2399 char *payload;
2400 char *reg_tlv;
2401 char *op_tlv;
2402
2403 mlxsw_core = event_listener_item->mlxsw_core;
2404 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
2405 skb->data, skb->len);
2406
2407 mlxsw_emad_tlv_parse(skb);
2408 op_tlv = mlxsw_emad_op_tlv(skb);
2409 reg_tlv = mlxsw_emad_reg_tlv(skb);
2410
2411 reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
2412 reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
2413 payload = mlxsw_emad_reg_payload(reg_tlv);
2414 event_listener_item->el.func(®, payload, event_listener_item->priv);
2415 dev_kfree_skb(skb);
2416 }
2417
__is_event_listener_equal(const struct mlxsw_event_listener * el_a,const struct mlxsw_event_listener * el_b)2418 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
2419 const struct mlxsw_event_listener *el_b)
2420 {
2421 return (el_a->func == el_b->func &&
2422 el_a->trap_id == el_b->trap_id);
2423 }
2424
2425 static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core * mlxsw_core,const struct mlxsw_event_listener * el)2426 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
2427 const struct mlxsw_event_listener *el)
2428 {
2429 struct mlxsw_event_listener_item *el_item;
2430
2431 list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
2432 if (__is_event_listener_equal(&el_item->el, el))
2433 return el_item;
2434 }
2435 return NULL;
2436 }
2437
mlxsw_core_event_listener_register(struct mlxsw_core * mlxsw_core,const struct mlxsw_event_listener * el,void * priv)2438 int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
2439 const struct mlxsw_event_listener *el,
2440 void *priv)
2441 {
2442 int err;
2443 struct mlxsw_event_listener_item *el_item;
2444 const struct mlxsw_rx_listener rxl = {
2445 .func = mlxsw_core_event_listener_func,
2446 .local_port = MLXSW_PORT_DONT_CARE,
2447 .trap_id = el->trap_id,
2448 };
2449
2450 el_item = __find_event_listener_item(mlxsw_core, el);
2451 if (el_item)
2452 return -EEXIST;
2453 el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
2454 if (!el_item)
2455 return -ENOMEM;
2456 el_item->mlxsw_core = mlxsw_core;
2457 el_item->el = *el;
2458 el_item->priv = priv;
2459
2460 err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
2461 if (err)
2462 goto err_rx_listener_register;
2463
2464 /* No reason to save item if we did not manage to register an RX
2465 * listener for it.
2466 */
2467 list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
2468
2469 return 0;
2470
2471 err_rx_listener_register:
2472 kfree(el_item);
2473 return err;
2474 }
2475 EXPORT_SYMBOL(mlxsw_core_event_listener_register);
2476
mlxsw_core_event_listener_unregister(struct mlxsw_core * mlxsw_core,const struct mlxsw_event_listener * el)2477 void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
2478 const struct mlxsw_event_listener *el)
2479 {
2480 struct mlxsw_event_listener_item *el_item;
2481 const struct mlxsw_rx_listener rxl = {
2482 .func = mlxsw_core_event_listener_func,
2483 .local_port = MLXSW_PORT_DONT_CARE,
2484 .trap_id = el->trap_id,
2485 };
2486
2487 el_item = __find_event_listener_item(mlxsw_core, el);
2488 if (!el_item)
2489 return;
2490 mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
2491 list_del(&el_item->list);
2492 kfree(el_item);
2493 }
2494 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
2495
mlxsw_core_listener_register(struct mlxsw_core * mlxsw_core,const struct mlxsw_listener * listener,void * priv,bool enabled)2496 static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
2497 const struct mlxsw_listener *listener,
2498 void *priv, bool enabled)
2499 {
2500 if (listener->is_event) {
2501 WARN_ON(!enabled);
2502 return mlxsw_core_event_listener_register(mlxsw_core,
2503 &listener->event_listener,
2504 priv);
2505 } else {
2506 return mlxsw_core_rx_listener_register(mlxsw_core,
2507 &listener->rx_listener,
2508 priv, enabled);
2509 }
2510 }
2511
mlxsw_core_listener_unregister(struct mlxsw_core * mlxsw_core,const struct mlxsw_listener * listener,void * priv)2512 static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
2513 const struct mlxsw_listener *listener,
2514 void *priv)
2515 {
2516 if (listener->is_event)
2517 mlxsw_core_event_listener_unregister(mlxsw_core,
2518 &listener->event_listener);
2519 else
2520 mlxsw_core_rx_listener_unregister(mlxsw_core,
2521 &listener->rx_listener);
2522 }
2523
mlxsw_core_trap_register(struct mlxsw_core * mlxsw_core,const struct mlxsw_listener * listener,void * priv)2524 int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
2525 const struct mlxsw_listener *listener, void *priv)
2526 {
2527 enum mlxsw_reg_htgt_trap_group trap_group;
2528 enum mlxsw_reg_hpkt_action action;
2529 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2530 int err;
2531
2532 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2533 return 0;
2534
2535 err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
2536 listener->enabled_on_register);
2537 if (err)
2538 return err;
2539
2540 action = listener->enabled_on_register ? listener->en_action :
2541 listener->dis_action;
2542 trap_group = listener->enabled_on_register ? listener->en_trap_group :
2543 listener->dis_trap_group;
2544 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
2545 trap_group, listener->is_ctrl);
2546 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2547 if (err)
2548 goto err_trap_set;
2549
2550 return 0;
2551
2552 err_trap_set:
2553 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
2554 return err;
2555 }
2556 EXPORT_SYMBOL(mlxsw_core_trap_register);
2557
mlxsw_core_trap_unregister(struct mlxsw_core * mlxsw_core,const struct mlxsw_listener * listener,void * priv)2558 void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
2559 const struct mlxsw_listener *listener,
2560 void *priv)
2561 {
2562 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2563
2564 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2565 return;
2566
2567 if (!listener->is_event) {
2568 mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
2569 listener->trap_id, listener->dis_trap_group,
2570 listener->is_ctrl);
2571 mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2572 }
2573
2574 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
2575 }
2576 EXPORT_SYMBOL(mlxsw_core_trap_unregister);
2577
mlxsw_core_traps_register(struct mlxsw_core * mlxsw_core,const struct mlxsw_listener * listeners,size_t listeners_count,void * priv)2578 int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
2579 const struct mlxsw_listener *listeners,
2580 size_t listeners_count, void *priv)
2581 {
2582 int i, err;
2583
2584 for (i = 0; i < listeners_count; i++) {
2585 err = mlxsw_core_trap_register(mlxsw_core,
2586 &listeners[i],
2587 priv);
2588 if (err)
2589 goto err_listener_register;
2590 }
2591 return 0;
2592
2593 err_listener_register:
2594 for (i--; i >= 0; i--) {
2595 mlxsw_core_trap_unregister(mlxsw_core,
2596 &listeners[i],
2597 priv);
2598 }
2599 return err;
2600 }
2601 EXPORT_SYMBOL(mlxsw_core_traps_register);
2602
mlxsw_core_traps_unregister(struct mlxsw_core * mlxsw_core,const struct mlxsw_listener * listeners,size_t listeners_count,void * priv)2603 void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
2604 const struct mlxsw_listener *listeners,
2605 size_t listeners_count, void *priv)
2606 {
2607 int i;
2608
2609 for (i = 0; i < listeners_count; i++) {
2610 mlxsw_core_trap_unregister(mlxsw_core,
2611 &listeners[i],
2612 priv);
2613 }
2614 }
2615 EXPORT_SYMBOL(mlxsw_core_traps_unregister);
2616
mlxsw_core_trap_state_set(struct mlxsw_core * mlxsw_core,const struct mlxsw_listener * listener,bool enabled)2617 int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
2618 const struct mlxsw_listener *listener,
2619 bool enabled)
2620 {
2621 enum mlxsw_reg_htgt_trap_group trap_group;
2622 enum mlxsw_reg_hpkt_action action;
2623 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2624 int err;
2625
2626 /* Not supported for event listener */
2627 if (WARN_ON(listener->is_event))
2628 return -EINVAL;
2629
2630 action = enabled ? listener->en_action : listener->dis_action;
2631 trap_group = enabled ? listener->en_trap_group :
2632 listener->dis_trap_group;
2633 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
2634 trap_group, listener->is_ctrl);
2635 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2636 if (err)
2637 return err;
2638
2639 mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
2640 enabled);
2641 return 0;
2642 }
2643 EXPORT_SYMBOL(mlxsw_core_trap_state_set);
2644
mlxsw_core_tid_get(struct mlxsw_core * mlxsw_core)2645 static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
2646 {
2647 return atomic64_inc_return(&mlxsw_core->emad.tid);
2648 }
2649
mlxsw_core_reg_access_emad(struct mlxsw_core * mlxsw_core,const struct mlxsw_reg_info * reg,char * payload,enum mlxsw_core_reg_access_type type,struct list_head * bulk_list,mlxsw_reg_trans_cb_t * cb,unsigned long cb_priv)2650 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
2651 const struct mlxsw_reg_info *reg,
2652 char *payload,
2653 enum mlxsw_core_reg_access_type type,
2654 struct list_head *bulk_list,
2655 mlxsw_reg_trans_cb_t *cb,
2656 unsigned long cb_priv)
2657 {
2658 u64 tid = mlxsw_core_tid_get(mlxsw_core);
2659 struct mlxsw_reg_trans *trans;
2660 int err;
2661
2662 trans = kzalloc(sizeof(*trans), GFP_KERNEL);
2663 if (!trans)
2664 return -ENOMEM;
2665
2666 err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
2667 bulk_list, cb, cb_priv, tid);
2668 if (err) {
2669 kfree_rcu(trans, rcu);
2670 return err;
2671 }
2672 return 0;
2673 }
2674
mlxsw_reg_trans_query(struct mlxsw_core * mlxsw_core,const struct mlxsw_reg_info * reg,char * payload,struct list_head * bulk_list,mlxsw_reg_trans_cb_t * cb,unsigned long cb_priv)2675 int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
2676 const struct mlxsw_reg_info *reg, char *payload,
2677 struct list_head *bulk_list,
2678 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
2679 {
2680 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
2681 MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
2682 bulk_list, cb, cb_priv);
2683 }
2684 EXPORT_SYMBOL(mlxsw_reg_trans_query);
2685
/* Submit an asynchronous EMAD write of register @reg. The resulting
 * transaction is linked on @bulk_list and must be reaped with
 * mlxsw_reg_trans_bulk_wait(). Returns 0 on successful submission.
 */
int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
2696
#define MLXSW_REG_TRANS_ERR_STRING_SIZE	256

/* Wait for one EMAD transaction to complete and reap it.
 *
 * Blocks on the transaction's completion, then cancels its timeout work so
 * the timeout handler cannot run concurrently with teardown. Retries are
 * reported with a warning; failures are logged and also emitted as a
 * devlink hwerr trace event carrying the EMAD status string plus any
 * firmware-supplied error string.
 *
 * Always unlinks the transaction from its bulk list and frees it via RCU.
 * Returns the transaction's error code (0 on success).
 */
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

		snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
			 "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
			 trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
			 mlxsw_emad_op_tlv_status_str(trans->emad_status),
			 trans->emad_err_string ? trans->emad_err_string : "");

		trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
				    trans->emad_status, err_string);

		/* The firmware error string was allocated by the EMAD layer;
		 * ownership ends here.
		 */
		kfree(trans->emad_err_string);
	}

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
2736
mlxsw_reg_trans_bulk_wait(struct list_head * bulk_list)2737 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
2738 {
2739 struct mlxsw_reg_trans *trans;
2740 struct mlxsw_reg_trans *tmp;
2741 int sum_err = 0;
2742 int err;
2743
2744 list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
2745 err = mlxsw_reg_trans_wait(trans);
2746 if (err && sum_err == 0)
2747 sum_err = err; /* first error to be returned */
2748 }
2749 return sum_err;
2750 }
2751 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
2752
/* Synchronously access register @reg through the command interface
 * (ACCESS_REG command) rather than EMAD.
 *
 * An EMAD-formatted message (operation TLV + register TLV) is built in the
 * input mailbox and executed; on success the register payload is copied
 * back from the output mailbox into @payload. Transient -EAGAIN statuses
 * are retried up to MLXSW_EMAD_MAX_RETRY times.
 *
 * Returns 0 on success or a negative errno; cleanup of both mailboxes uses
 * the goto-unwind pattern below.
 */
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	/* Build op TLV first, then the register TLV right behind it. */
	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}
2815
/* Completion callback for the synchronous register access path: copy the
 * response payload back into the caller's buffer, which was passed through
 * @cb_priv as an unsigned long.
 */
static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	memcpy((char *) cb_priv, payload, payload_len);
}
2824
/* Synchronous register access entry point used by mlxsw_reg_query() and
 * mlxsw_reg_write(). Chooses between the command interface and EMAD, and
 * for EMAD blocks until the single transaction completes, copying the
 * response back into @payload through mlxsw_core_reg_access_cb().
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	/* Single-entry bulk list: wait turns the async access into a
	 * synchronous one.
	 */
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
2849
/* Synchronously read register @reg into @payload. Returns 0 on success. */
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);
2857
/* Synchronously write @payload to register @reg. Returns 0 on success. */
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
2865
mlxsw_core_skb_receive(struct mlxsw_core * mlxsw_core,struct sk_buff * skb,struct mlxsw_rx_info * rx_info)2866 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
2867 struct mlxsw_rx_info *rx_info)
2868 {
2869 struct mlxsw_rx_listener_item *rxl_item;
2870 const struct mlxsw_rx_listener *rxl;
2871 u16 local_port;
2872 bool found = false;
2873
2874 if (rx_info->is_lag) {
2875 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
2876 __func__, rx_info->u.lag_id,
2877 rx_info->trap_id);
2878 /* Upper layer does not care if the skb came from LAG or not,
2879 * so just get the local_port for the lag port and push it up.
2880 */
2881 local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
2882 rx_info->u.lag_id,
2883 rx_info->lag_port_index);
2884 } else {
2885 local_port = rx_info->u.sys_port;
2886 }
2887
2888 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
2889 __func__, local_port, rx_info->trap_id);
2890
2891 if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
2892 (local_port >= mlxsw_core->max_ports))
2893 goto drop;
2894
2895 rcu_read_lock();
2896 list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
2897 rxl = &rxl_item->rxl;
2898 if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
2899 rxl->local_port == local_port) &&
2900 rxl->trap_id == rx_info->trap_id &&
2901 rxl->mirror_reason == rx_info->mirror_reason) {
2902 if (rxl_item->enabled)
2903 found = true;
2904 break;
2905 }
2906 }
2907 if (!found) {
2908 rcu_read_unlock();
2909 goto drop;
2910 }
2911
2912 rxl->func(skb, local_port, rxl_item->priv);
2913 rcu_read_unlock();
2914 return;
2915
2916 drop:
2917 dev_kfree_skb(skb);
2918 }
2919 EXPORT_SYMBOL(mlxsw_core_skb_receive);
2920
/* Translate (lag_id, port_index) into a flat index into lag.mapping, which
 * is laid out as MAX_LAG_MEMBERS consecutive entries per LAG.
 */
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}
2927
/* Record that member slot @port_index of LAG @lag_id maps to @local_port. */
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u16 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
2937
/* Return the local port mapped at member slot @port_index of LAG @lag_id. */
u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			       u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
2947
/* Remove @local_port from LAG @lag_id: every member slot of the LAG that
 * currently maps to @local_port is reset to zero.
 */
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u16 local_port)
{
	int member;

	for (member = 0;
	     member < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
	     member++) {
		int idx = mlxsw_core_lag_mapping_index(mlxsw_core, lag_id,
						       member);

		if (mlxsw_core->lag.mapping[idx] == local_port)
			mlxsw_core->lag.mapping[idx] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
2962
/* Check whether firmware reported a value for resource @res_id. */
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);
2969
/* Return the firmware-reported value of resource @res_id. Callers should
 * check mlxsw_core_res_valid() first.
 */
u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);
2976
/* Register a devlink port for @local_port.
 *
 * Populates the devlink port attributes (flavour, split info, lanes,
 * switch ID), links the port to its line card when @slot_index is non-zero,
 * and registers the port with devlink. On registration failure the per-port
 * state is zeroed again so the slot reads as unused.
 *
 * Returns 0 on success or the devl_port_register() error.
 */
static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
				  enum devlink_port_flavour flavour,
				  u8 slot_index, u32 port_number, bool split,
				  u32 split_port_subnumber,
				  bool splittable, u32 lanes,
				  const unsigned char *switch_id,
				  unsigned char switch_id_len)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	struct devlink_port_attrs attrs = {};
	int err;

	attrs.split = split;
	attrs.lanes = lanes;
	attrs.splittable = splittable;
	attrs.flavour = flavour;
	attrs.phys.port_number = port_number;
	attrs.phys.split_subport_number = split_port_subnumber;
	memcpy(attrs.switch_id.id, switch_id, switch_id_len);
	attrs.switch_id.id_len = switch_id_len;
	mlxsw_core_port->local_port = local_port;
	devlink_port_attrs_set(devlink_port, &attrs);
	if (slot_index) {
		/* Non-zero slot index means the port sits on a line card. */
		struct mlxsw_linecard *linecard;

		linecard = mlxsw_linecard_get(mlxsw_core->linecards,
					      slot_index);
		mlxsw_core_port->linecard = linecard;
		devlink_port_linecard_set(devlink_port,
					  linecard->devlink_linecard);
	}
	err = devl_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
3016
/* Unregister the devlink port of @local_port and zero its per-port state
 * so the slot reads as unused again.
 */
static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devl_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
3026
/* Register a physical port with devlink and account it as active.
 * Returns 0 on success or the __mlxsw_core_port_init() error.
 */
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
			 u8 slot_index, u32 port_number, bool split,
			 u32 split_port_subnumber,
			 bool splittable, u32 lanes,
			 const unsigned char *switch_id,
			 unsigned char switch_id_len)
{
	int err;

	err = __mlxsw_core_port_init(mlxsw_core, local_port,
				     DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index,
				     port_number, split, split_port_subnumber,
				     splittable, lanes,
				     switch_id, switch_id_len);
	if (err)
		return err;

	atomic_inc(&mlxsw_core->active_ports_count);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_port_init);
3048
/* Tear down a physical port: drop the active-port count and unregister the
 * devlink port. Mirrors mlxsw_core_port_init().
 */
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
	atomic_dec(&mlxsw_core->active_ports_count);

	__mlxsw_core_port_fini(mlxsw_core, local_port);
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
3056
/* Register the CPU port (flavour DEVLINK_PORT_FLAVOUR_CPU) and attach the
 * driver's private data to it. The CPU port is not counted in
 * active_ports_count. Returns 0 on success.
 */
int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
			     void *port_driver_priv,
			     const unsigned char *switch_id,
			     unsigned char switch_id_len)
{
	struct mlxsw_core_port *mlxsw_core_port =
				&mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
	int err;

	err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
				     DEVLINK_PORT_FLAVOUR_CPU,
				     0, 0, false, 0, false, 0,
				     switch_id, switch_id_len);
	if (err)
		return err;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_init);
3077
/* Unregister the CPU port. Mirrors mlxsw_core_cpu_port_init(). */
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
{
	__mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
3083
/* Bind the driver's per-port private data and mark the devlink port as an
 * Ethernet port backed by @dev.
 */
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
			     void *port_driver_priv, struct net_device *dev)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
3095
/* Bind the driver's per-port private data and mark the devlink port as an
 * InfiniBand port (no associated ib_device is provided).
 */
void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);
3107
/* Rebind the driver's per-port private data and clear the devlink port
 * type (used when the port's upper is torn down).
 */
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);
3119
/* Return the current devlink port type (eth/ib/notset) of @local_port. */
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u16 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
3130
3131
3132 struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core * mlxsw_core,u16 local_port)3133 mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
3134 u16 local_port)
3135 {
3136 struct mlxsw_core_port *mlxsw_core_port =
3137 &mlxsw_core->ports[local_port];
3138 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3139
3140 return devlink_port;
3141 }
3142 EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
3143
3144 struct mlxsw_linecard *
mlxsw_core_port_linecard_get(struct mlxsw_core * mlxsw_core,u16 local_port)3145 mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
3146 u16 local_port)
3147 {
3148 struct mlxsw_core_port *mlxsw_core_port =
3149 &mlxsw_core->ports[local_port];
3150
3151 return mlxsw_core_port->linecard;
3152 }
3153
mlxsw_core_port_is_xm(const struct mlxsw_core * mlxsw_core,u16 local_port)3154 bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
3155 {
3156 const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
3157 int i;
3158
3159 for (i = 0; i < bus_info->xm_local_ports_count; i++)
3160 if (bus_info->xm_local_ports[i] == local_port)
3161 return true;
3162 return false;
3163 }
3164 EXPORT_SYMBOL(mlxsw_core_port_is_xm);
3165
/* Ask the driver to remove every port for which @selector returns true.
 * Warns once and bails out if the driver does not implement the op.
 */
void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
				      bool (*selector)(void *priv, u16 local_port),
				      void *priv)
{
	if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected))
		return;
	mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv);
}
3174
/* Accessor for the core's environment (module/transceiver) state. */
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->env;
}
3179
/* Dump a buffer to the debug log as big-endian 32-bit words, four per line.
 * Trailing all-zero words are trimmed; at least one word is always printed.
 *
 * NOTE(review): when the trimmed count is not a multiple of four, the last
 * line reads up to three words past it — harmless for full-size command
 * mailboxes, but worth confirming for other buffer sizes.
 */
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	/* Find the last non-zero word; count becomes its index + 1. */
	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
3197
/* Execute a command-interface command via the bus cmd_exec operation.
 *
 * Mailbox sizes must be 32-bit aligned. Both mailboxes are dumped to the
 * debug log around execution. @reset_ok converts the -EIO/RUNNING_RESET
 * combination into success, since that status is expected when issuing a
 * reset. Other -EIO results and timeouts are logged with the firmware
 * status string.
 *
 * Returns 0 on success, -EOPNOTSUPP when the bus has no cmd_exec op, or
 * the bus error.
 */
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		/* Expected outcome of the reset command; treat as success. */
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
3243
/* Queue delayed work on the driver-wide workqueue (mlxsw_wq). */
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);
3249
/* Queue work on the driver-wide ordered workqueue (mlxsw_owq), which runs
 * items one at a time in submission order.
 */
bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);
3255
/* Wait until all work queued via mlxsw_core_schedule_work() has run. */
void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
3261
/* Query the driver for the KVD (key-value database) partition sizes derived
 * from @profile. Returns -EINVAL when the driver does not implement the op,
 * otherwise the driver's result with sizes stored through the out pointers.
 */
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_config_profile *profile,
			     u64 *p_single_size, u64 *p_double_size,
			     u64 *p_linear_size)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->kvd_sizes_get)
		return -EINVAL;

	return driver->kvd_sizes_get(mlxsw_core, profile,
				     p_single_size, p_double_size,
				     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
3277
/* Query device resources from firmware via the QUERY_RESOURCES command and
 * parse each (id, data) pair into @res.
 *
 * Firmware returns resources a page at a time; iteration stops when the
 * TABLE_END sentinel id is seen. Returns 0 on success, the command error,
 * or -EIO when no sentinel was seen within the allowed number of queries.
 * @mbox is caller-provided scratch and is zeroed before use.
 */
int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
			       struct mlxsw_res *res)
{
	int index, i;
	u64 data;
	u16 id;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
	     index++) {
		err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
		if (err)
			return err;

		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
			id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
			data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);

			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
				return 0;

			mlxsw_res_parse(res, id, data);
		}
	}

	/* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
	 * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
	 */
	return -EIO;
}
EXPORT_SYMBOL(mlxsw_core_resources_query);
3311
/* Read the high 32 bits of the free-running clock from the bus. */
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_frc_h);
3317
/* Read the low 32 bits of the free-running clock from the bus. */
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
3323
/* Enable inclusion of the string TLV in EMADs, so firmware can attach a
 * human-readable error string to failed transactions.
 */
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->emad.enable_string_tlv = true;
}
EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
3329
/* Module init: create the two driver-wide workqueues — a regular one
 * (mlxsw_wq) and an ordered one (mlxsw_owq) for work that must run
 * serially. Unwinds the first on failure of the second.
 */
static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}
3349
/* Module exit: destroy the workqueues in reverse order of creation. */
static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}
3355
/* Module registration and metadata. */
module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");