/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/linux/if_team.h - Network team device driver header
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 */
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_

#include <linux/netpoll.h>
#include <net/sch_generic.h>
#include <linux/types.h>
#include <uapi/linux/if_team.h>
13
/* Per-CPU packet/byte counters for a team device.
 * The u64 counters are presumably read/updated inside a
 * u64_stats_sync section (syncp) so 64-bit values stay consistent on
 * 32-bit SMP; the trailing u32 drop counters sit outside that group.
 * NOTE(review): confirm update discipline against the team driver core.
 */
struct team_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_dropped;
	u32			tx_dropped;
	u32			rx_nohandler;
};
25
struct team;
27
/* Per-slave state: one team_port exists for each netdevice enslaved
 * to a team.  It is stored as the slave's rx_handler_data (see
 * team_port_get_rcu()) and reclaimed via RCU (see the rcu member).
 */
struct team_port {
	struct net_device *dev;		/* the enslaved netdevice */
	struct hlist_node hlist;	/* node in enabled ports hash list */
	struct list_head list;		/* node in ordinary list */
	struct team *team;		/* back-pointer to the owning team */
	int index; /* index of enabled port. If disabled, it's set to -1 */

	bool linkup; /* either state.linkup or user.linkup */

	/* Link state as reported by the device itself */
	struct {
		bool linkup;
		u32 speed;
		u8 duplex;
	} state;

	/* Values set by userspace */
	struct {
		bool linkup;
		bool linkup_enabled; /* presumably: user.linkup overrides
				      * state.linkup when set -- confirm */
	} user;

	/* Custom gennetlink interface related flags */
	bool changed;
	bool removed;

	/*
	 * A place for storing original values of the device before it
	 * become a port.
	 */
	struct {
		unsigned char dev_addr[MAX_ADDR_LEN];
		unsigned int mtu;
	} orig;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *np;	/* netpoll channel for this slave */
#endif

	s32 priority; /* lower number ~ higher priority */
	u16 queue_id;			/* tx queue mapping, see qom_list */
	struct list_head qom_list; /* node in queue override mapping list */
	struct rcu_head rcu;		/* deferred free of this struct */
	long mode_priv[];	/* per-port private area for the active mode
				 * (sized by team_mode.port_priv_size) */
};
72
team_port_get_rcu(const struct net_device * dev)73 static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
74 {
75 return rcu_dereference(dev->rx_handler_data);
76 }
77
team_port_enabled(struct team_port * port)78 static inline bool team_port_enabled(struct team_port *port)
79 {
80 return port->index != -1;
81 }
82
team_port_txable(struct team_port * port)83 static inline bool team_port_txable(struct team_port *port)
84 {
85 return port->linkup && team_port_enabled(port);
86 }
87
team_port_dev_txable(const struct net_device * port_dev)88 static inline bool team_port_dev_txable(const struct net_device *port_dev)
89 {
90 struct team_port *port;
91 bool txable;
92
93 rcu_read_lock();
94 port = team_port_get_rcu(port_dev);
95 txable = port ? team_port_txable(port) : false;
96 rcu_read_unlock();
97
98 return txable;
99 }
100
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Hand the skb to the port's netpoll channel; used by
 * team_dev_queue_xmit() when the team master is transmitting under
 * netpoll (e.g. netconsole).
 */
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
	netpoll_send_skb(port->np, skb);
}
#else
/* No-op stub when netpoll support is compiled out. */
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
}
#endif
113
/* Callbacks a team mode (e.g. roundrobin, broadcast) implements to
 * customize rx/tx handling and react to port life-cycle events.
 * NOTE(review): which hooks are optional is enforced by the team core,
 * not visible here -- confirm before relying on NULL entries.
 */
struct team_mode_ops {
	int (*init)(struct team *team);		/* mode is being attached */
	void (*exit)(struct team *team);	/* mode is being detached */
	rx_handler_result_t (*receive)(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb);
	bool (*transmit)(struct team *team, struct sk_buff *skb);
	int (*port_enter)(struct team *team, struct team_port *port);
	void (*port_leave)(struct team *team, struct team_port *port);
	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
	void (*port_enabled)(struct team *team, struct team_port *port);
	void (*port_disabled)(struct team *team, struct team_port *port);
};
127
extern int team_modeop_port_enter(struct team *team, struct team_port *port);
extern void team_modeop_port_change_dev_addr(struct team *team,
					     struct team_port *port);
131
/* Value type of a team option; selects which member of
 * team_gsetter_ctx.data carries the value.
 */
enum team_option_type {
	TEAM_OPTION_TYPE_U32,		/* data.u32_val */
	TEAM_OPTION_TYPE_STRING,	/* data.str_val */
	TEAM_OPTION_TYPE_BINARY,	/* data.bin_val (ptr + len) */
	TEAM_OPTION_TYPE_BOOL,		/* data.bool_val */
	TEAM_OPTION_TYPE_S32,		/* data.s32_val */
};
139
/* Identifies one concrete instance of an option: a particular array
 * slot and, for per-port options, the port it applies to.
 */
struct team_option_inst_info {
	u32 array_index;	/* slot within an array option */
	struct team_port *port; /* != NULL if per-port */
};
144
/* Context handed to option getter/setter callbacks.  Exactly one
 * member of data is meaningful, chosen by the option's
 * team_option_type.
 */
struct team_gsetter_ctx {
	union {
		u32 u32_val;
		const char *str_val;
		struct {
			const void *ptr;
			u32 len;
		} bin_val;	/* opaque binary blob */
		bool bool_val;
		s32 s32_val;
	} data;
	struct team_option_inst_info *info; /* which instance is accessed */
};
158
/* Description of one configurable team option, registered with
 * team_options_register().  Getter/setter access values through a
 * team_gsetter_ctx typed per the option's type field.
 */
struct team_option {
	struct list_head list;		/* presumably node in team->option_list */
	const char *name;
	bool per_port;			/* one instance per port if true */
	unsigned int array_size; /* != 0 means the option is array */
	enum team_option_type type;
	int (*init)(struct team *team, struct team_option_inst_info *info);
	int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
169
extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
extern void team_options_change_check(struct team *team);
172
/* Static description of a team mode, registered with
 * team_mode_register() and looked up by kind (matched by the
 * MODULE_ALIAS_TEAM_MODE() alias).
 */
struct team_mode {
	const char *kind;		/* mode name, e.g. "roundrobin" */
	struct module *owner;		/* module providing this mode */
	size_t priv_size;	/* team-level private size -- presumably must
				 * fit TEAM_MODE_PRIV_SIZE; confirm in core */
	size_t port_priv_size;	/* per-port mode_priv[] size */
	const struct team_mode_ops *ops;
	enum netdev_lag_tx_type lag_tx_type;
};
181
#define TEAM_PORT_HASHBITS 4
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)

#define TEAM_MODE_PRIV_LONGS 4
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
187
/* Master state for one team netdevice. */
struct team {
	struct net_device *dev; /* associated netdevice */
	struct team_pcpu_stats __percpu *pcpu_stats;

	struct mutex lock; /* used for overall locking, e.g. port lists write */

	/*
	 * List of enabled ports and their count
	 */
	int en_port_count;
	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES]; /* keyed by
								 * port index */

	struct list_head port_list; /* list of all ports */

	struct list_head option_list;		/* registered team_option's */
	struct list_head option_inst_list; /* list of option instances */

	const struct team_mode *mode;	/* currently selected mode */
	struct team_mode_ops ops;	/* mode callbacks in use */
	bool user_carrier_enabled;	/* carrier controlled from userspace */
	bool queue_override_enabled;
	struct list_head *qom_lists; /* array of queue override mapping lists */
	bool port_mtu_change_allowed;
	/* State for the delayed "notify peers" work (e.g. gratuitous ARP) */
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} notify_peers;
	/* State for the delayed multicast-group rejoin work */
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} mcast_rejoin;
	struct lock_class_key team_lock_key; /* per-team lockdep class for
					      * "lock" above */
	long mode_priv[TEAM_MODE_PRIV_LONGS]; /* private area for the mode */
};
226
/* Transmit skb through the given port's underlying device.
 * Restores the queue mapping the qdisc layer stashed in
 * slave_dev_queue_mapping (the BUILD_BUG_ON pins the two fields to the
 * same width), retargets the skb at the slave, and diverts to netpoll
 * when the master is currently transmitting via netpoll.
 * Returns dev_queue_xmit()'s result, or 0 on the netpoll path.
 */
static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	skb->dev = port->dev;
	if (unlikely(netpoll_tx_running(team->dev))) {
		team_netpoll_send_skb(port, skb);
		return 0;
	}
	return dev_queue_xmit(skb);
}
241
team_port_index_hash(struct team * team,int port_index)242 static inline struct hlist_head *team_port_index_hash(struct team *team,
243 int port_index)
244 {
245 return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
246 }
247
team_get_port_by_index(struct team * team,int port_index)248 static inline struct team_port *team_get_port_by_index(struct team *team,
249 int port_index)
250 {
251 struct team_port *port;
252 struct hlist_head *head = team_port_index_hash(team, port_index);
253
254 hlist_for_each_entry(port, head, hlist)
255 if (port->index == port_index)
256 return port;
257 return NULL;
258 }
259
team_num_to_port_index(struct team * team,unsigned int num)260 static inline int team_num_to_port_index(struct team *team, unsigned int num)
261 {
262 int en_port_count = READ_ONCE(team->en_port_count);
263
264 if (unlikely(!en_port_count))
265 return 0;
266 return num % en_port_count;
267 }
268
team_get_port_by_index_rcu(struct team * team,int port_index)269 static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
270 int port_index)
271 {
272 struct team_port *port;
273 struct hlist_head *head = team_port_index_hash(team, port_index);
274
275 hlist_for_each_entry_rcu(port, head, hlist)
276 if (port->index == port_index)
277 return port;
278 return NULL;
279 }
280
/* Starting at @port, find the first port that can transmit: @port
 * itself if txable, otherwise the next txable port on the port list,
 * wrapping around past the list head back to (but excluding) @port.
 * Returns NULL when no port is txable.  Caller must be inside an RCU
 * read-side critical section.
 */
static inline struct team_port *
team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
{
	struct team_port *cur;

	if (likely(team_port_txable(port)))
		return port;
	cur = port;
	/* first pass: the tail of the list after @port */
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	/* wrap-around pass: from the head up to @port */
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}
300
extern int team_options_register(struct team *team,
				 const struct team_option *option,
				 size_t option_count);
extern void team_options_unregister(struct team *team,
				    const struct team_option *option,
				    size_t option_count);
extern int team_mode_register(const struct team_mode *mode);
extern void team_mode_unregister(const struct team_mode *mode);

#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16

#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)

#endif /* _LINUX_IF_TEAM_H_ */
316