1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
4 *
5 * Link Layer Control (LLC)
6 *
7 * Copyright IBM Corp. 2016
8 *
9 * Author(s): Klaus Wacker <Klaus.Wacker@de.ibm.com>
10 * Ursula Braun <ubraun@linux.vnet.ibm.com>
11 */
12
13 #include <net/tcp.h>
14 #include <rdma/ib_verbs.h>
15
16 #include "smc.h"
17 #include "smc_core.h"
18 #include "smc_clc.h"
19 #include "smc_llc.h"
20 #include "smc_pnet.h"
21
22 #define SMC_LLC_DATA_LEN 40
23
/* common header of every LLC message; the union overlays the V1 one-byte
 * length (plus the add-link reject reason nibble) with the 16-bit V2 length
 */
struct smc_llc_hdr {
	struct smc_wr_rx_hdr common;
	union {
		struct {
			u8 length;	/* 44 */
#if defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:4,
			   add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
			u8 add_link_rej_rsn:4,
			   reserved:4;
#endif
		};
		u16 length_v2;	/* 44 - 8192*/
	};
	u8 flags;
} __packed;		/* format defined in
			 * IBM Shared Memory Communications Version 2
			 * (https://www.ibm.com/support/pages/node/6326337)
			 */
44
45 #define SMC_LLC_FLAG_NO_RMBE_EYEC 0x03
46
struct smc_llc_msg_confirm_link {	/* type 0x01 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];		/* 24-bit QP number, network order */
	u8 link_num;
	u8 link_uid[SMC_LGR_ID_SIZE];
	u8 max_links;
	u8 reserved[9];
};
57
58 #define SMC_LLC_FLAG_ADD_LNK_REJ 0x40
59 #define SMC_LLC_REJ_RSN_NO_ALT_PATH 1
60
61 #define SMC_LLC_ADD_LNK_MAX_LINKS 2
62
struct smc_llc_msg_add_link {		/* type 0x02 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 reserved2[2];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];		/* 24-bit QP number, network order */
	u8 link_num;
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved3 : 4,
	   qp_mtu   : 4;		/* QP path MTU (4-bit code) */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 qp_mtu   : 4,
	   reserved3 : 4;
#endif
	u8 initial_psn[3];		/* 24-bit initial packet seq number */
	u8 reserved[8];
};
80
/* one rkey translation entry: existing rkey plus its replacement rkey and
 * RMB virtual address on the new link
 */
struct smc_llc_msg_add_link_cont_rt {
	__be32 rmb_key;
	__be32 rmb_key_new;
	__be64 rmb_vaddr_new;
};
86
/* SMC-Rv2 extension appended to an ADD LINK message; carries the target gid
 * and a flexible array of rkey translation entries
 */
struct smc_llc_msg_add_link_v2_ext {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 v2_direct : 1,		/* peers are directly attached */
	   reserved  : 7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 reserved  : 7,
	   v2_direct : 1;
#endif
	u8 reserved2;
	u8 client_target_gid[SMC_GID_SIZE];
	u8 reserved3[8];
	u16 num_rkeys;			/* number of entries in rt[] */
	struct smc_llc_msg_add_link_cont_rt rt[];
} __packed;		/* format defined in
			 * IBM Shared Memory Communications Version 2
			 * (https://www.ibm.com/support/pages/node/6326337)
			 */
104
/* SMC-Rv2 request to add a link, listing the sender's candidate gids */
struct smc_llc_msg_req_add_link_v2 {
	struct smc_llc_hdr hd;
	u8 reserved[20];
	u8 gid_cnt;			/* number of entries in gid[] */
	u8 reserved2[3];
	u8 gid[][SMC_GID_SIZE];
};
112
113 #define SMC_LLC_RKEYS_PER_CONT_MSG 2
114
struct smc_llc_msg_add_link_cont {	/* type 0x03 */
	struct smc_llc_hdr hd;
	u8 link_num;
	u8 num_rkeys;			/* rkeys still to be exchanged */
	u8 reserved2[2];
	struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
	u8 reserved[4];
} __packed;			/* format defined in RFC7609 */
123
124 #define SMC_LLC_FLAG_DEL_LINK_ALL 0x40
125 #define SMC_LLC_FLAG_DEL_LINK_ORDERLY 0x20
126
struct smc_llc_msg_del_link {		/* type 0x04 */
	struct smc_llc_hdr hd;
	u8 link_num;			/* ignored if FLAG_DEL_LINK_ALL set */
	__be32 reason;
	u8 reserved[35];
} __packed;			/* format defined in RFC7609 */
133
struct smc_llc_msg_test_link {		/* type 0x07 */
	struct smc_llc_hdr hd;
	u8 user_data[16];		/* echoed back by the peer */
	u8 reserved[24];
};
139
struct smc_rmb_rtoken {
	union {
		u8 num_rkeys;	/* first rtoken byte of CONFIRM LINK msg */
				/* is actually the num of rtokens, first */
				/* rtoken is always for the current link */
		u8 link_id;	/* link id of the rtoken */
	};
	__be32 rmb_key;
	__be64 rmb_vaddr;
} __packed;			/* format defined in RFC7609 */
150
151 #define SMC_LLC_RKEYS_PER_MSG 3
152 #define SMC_LLC_RKEYS_PER_MSG_V2 255
153
struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
	struct smc_llc_hdr hd;
	/* rtoken[0] is for the sending link, see smc_rmb_rtoken */
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
	u8 reserved;
};
159
160 #define SMC_LLC_DEL_RKEY_MAX 8
161 #define SMC_LLC_FLAG_RKEY_RETRY 0x10
162 #define SMC_LLC_FLAG_RKEY_NEG 0x20
163
struct smc_llc_msg_delete_rkey {	/* type 0x09 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;			/* up to SMC_LLC_DEL_RKEY_MAX */
	u8 err_mask;			/* bit per rkey; set on failure */
	u8 reserved[2];
	__be32 rkey[8];
	u8 reserved2[4];
};
172
struct smc_llc_msg_delete_rkey_v2 {	/* type 0x29 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 num_inval_rkeys;
	u8 reserved[2];
	__be32 rkey[];			/* flexible array, length_v2 governs */
};
180
/* one member per LLC message type; raw exposes the common header plus an
 * untyped payload for generic handling
 */
union smc_llc_msg {
	struct smc_llc_msg_confirm_link confirm_link;
	struct smc_llc_msg_add_link add_link;
	struct smc_llc_msg_req_add_link_v2 req_add_link;
	struct smc_llc_msg_add_link_cont add_link_cont;
	struct smc_llc_msg_del_link delete_link;

	struct smc_llc_msg_confirm_rkey confirm_rkey;
	struct smc_llc_msg_delete_rkey delete_rkey;

	struct smc_llc_msg_test_link test_link;
	struct {
		struct smc_llc_hdr hdr;
		u8 data[SMC_LLC_DATA_LEN];
	} raw;
};
197
198 #define SMC_LLC_FLAG_RESP 0x80
199
/* a received LLC message together with the link it arrived on;
 * kfree()d once processed
 */
struct smc_llc_qentry {
	struct list_head list;
	struct smc_link *link;	/* link the message was received on */
	union smc_llc_msg msg;
};
205
206 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc);
207
smc_llc_flow_qentry_clr(struct smc_llc_flow * flow)208 struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
209 {
210 struct smc_llc_qentry *qentry = flow->qentry;
211
212 flow->qentry = NULL;
213 return qentry;
214 }
215
smc_llc_flow_qentry_del(struct smc_llc_flow * flow)216 void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
217 {
218 struct smc_llc_qentry *qentry;
219
220 if (flow->qentry) {
221 qentry = flow->qentry;
222 flow->qentry = NULL;
223 kfree(qentry);
224 }
225 }
226
/* attach @qentry as the pending message of @flow; flow takes ownership */
static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
					   struct smc_llc_qentry *qentry)
{
	flow->qentry = qentry;
}
232
/* handle an LLC message arriving while a flow of type @flow_type is active:
 * an ADD_LINK or DELETE_LINK of a different type is parked in
 * lgr->delayed_event (single slot) for later processing, anything else is
 * dropped; consumes @qentry either way (stored or freed)
 */
static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
				  struct smc_llc_qentry *qentry)
{
	u8 msg_type = qentry->msg.raw.hdr.common.llc_type;

	if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
	    flow_type != msg_type && !lgr->delayed_event) {
		lgr->delayed_event = qentry;
		return;
	}
	/* drop parallel or already-in-progress llc requests */
	if (flow_type != msg_type)
		pr_warn_once("smc: SMC-R lg %*phN net %llu dropped parallel "
			     "LLC msg: msg %d flow %d role %d\n",
			     SMC_LGR_ID_SIZE, &lgr->id,
			     lgr->net->net_cookie,
			     qentry->msg.raw.hdr.common.type,
			     flow_type, lgr->role);
	kfree(qentry);
}
253
/* try to start a new llc flow, initiated by an incoming llc msg;
 * returns true and stores @qentry in the flow on success, false if another
 * flow is already running (then @qentry is parked or dropped)
 */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
			       struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = qentry->link->lgr;

	spin_lock_bh(&lgr->llc_flow_lock);
	if (flow->type) {
		/* a flow is already active */
		smc_llc_flow_parallel(lgr, flow->type, qentry);
		spin_unlock_bh(&lgr->llc_flow_lock);
		return false;
	}
	/* map the triggering message type to the flow type */
	switch (qentry->msg.raw.hdr.common.llc_type) {
	case SMC_LLC_ADD_LINK:
		flow->type = SMC_LLC_FLOW_ADD_LINK;
		break;
	case SMC_LLC_DELETE_LINK:
		flow->type = SMC_LLC_FLOW_DEL_LINK;
		break;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		flow->type = SMC_LLC_FLOW_RKEY;
		break;
	default:
		flow->type = SMC_LLC_FLOW_NONE;
	}
	smc_llc_flow_qentry_set(flow, qentry);
	spin_unlock_bh(&lgr->llc_flow_lock);
	return true;
}
285
/* start a new local llc flow, wait till current flow finished;
 * returns 0 on success, -ENODEV if the link group is going away,
 * -ETIMEDOUT if the conflicting flow does not finish in time
 */
int smc_llc_flow_initiate(struct smc_link_group *lgr,
			  enum smc_llc_flowtype type)
{
	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
	int rc;

	/* all flows except confirm_rkey and delete_rkey are exclusive,
	 * confirm/delete rkey flows can run concurrently (local and remote)
	 */
	if (type == SMC_LLC_FLOW_RKEY)
		allowed_remote = SMC_LLC_FLOW_RKEY;
again:
	if (list_empty(&lgr->list))
		return -ENODEV;
	spin_lock_bh(&lgr->llc_flow_lock);
	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
	     lgr->llc_flow_rmt.type == allowed_remote)) {
		lgr->llc_flow_lcl.type = type;
		spin_unlock_bh(&lgr->llc_flow_lock);
		return 0;
	}
	spin_unlock_bh(&lgr->llc_flow_lock);
	/* wait for the conflicting flow to finish; the unlocked condition is
	 * only a wakeup hint, the locked re-check above decides
	 */
	rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
				 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
				  lgr->llc_flow_rmt.type == allowed_remote))),
				SMC_LLC_WAIT_TIME * 10);
	if (!rc)
		return -ETIMEDOUT;
	goto again;
}
319
/* finish the current llc flow */
void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
{
	spin_lock_bh(&lgr->llc_flow_lock);
	memset(flow, 0, sizeof(*flow));
	flow->type = SMC_LLC_FLOW_NONE;
	spin_unlock_bh(&lgr->llc_flow_lock);
	/* a message parked by smc_llc_flow_parallel() takes precedence over
	 * waiters wanting to start a new flow
	 */
	if (!list_empty(&lgr->list) && lgr->delayed_event &&
	    flow == &lgr->llc_flow_lcl)
		schedule_work(&lgr->llc_event_work);
	else
		wake_up(&lgr->llc_flow_waiter);
}
333
/* lnk is optional and used for early wakeup when link goes down, useful in
 * cases where we wait for a response on the link after we sent a request.
 * Waits up to @time_out for a message of type @exp_msg (0 = any type) in the
 * local flow; returns the qentry (still owned by the flow) or NULL on
 * timeout, dead link, dying link group, or dropped unexpected message.
 */
struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
				    struct smc_link *lnk,
				    int time_out, u8 exp_msg)
{
	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
	u8 rcv_msg;

	wait_event_timeout(lgr->llc_msg_waiter,
			   (flow->qentry ||
			    (lnk && !smc_link_usable(lnk)) ||
			    list_empty(&lgr->list)),
			   time_out);
	if (!flow->qentry ||
	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
		/* nothing arrived, or link/group went away meanwhile */
		smc_llc_flow_qentry_del(flow);
		goto out;
	}
	rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type;
	if (exp_msg && rcv_msg != exp_msg) {
		if (exp_msg == SMC_LLC_ADD_LINK &&
		    rcv_msg == SMC_LLC_DELETE_LINK) {
			/* flow_start will delay the unexpected msg */
			smc_llc_flow_start(&lgr->llc_flow_lcl,
					   smc_llc_flow_qentry_clr(flow));
			return NULL;
		}
		pr_warn_once("smc: SMC-R lg %*phN net %llu dropped unexpected LLC msg: "
			     "msg %d exp %d flow %d role %d flags %x\n",
			     SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie,
			     rcv_msg, exp_msg,
			     flow->type, lgr->role,
			     flow->qentry->msg.raw.hdr.flags);
		smc_llc_flow_qentry_del(flow);
	}
out:
	return flow->qentry;
}
374
375 /********************************** send *************************************/
376
/* LLC needs no private per-send context; exists to size the pend priv area */
struct smc_llc_tx_pend {
};
379
/* handler for send/transmission completion of an LLC msg */
static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	/* future work: handle wc_status error for recovery and failover */
}
387
388 /**
389 * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
390 * @link: Pointer to SMC link used for sending LLC control message.
391 * @wr_buf: Out variable returning pointer to work request payload buffer.
392 * @pend: Out variable returning pointer to private pending WR tracking.
393 * It's the context the transmit complete handler will get.
394 *
395 * Reserves and pre-fills an entry for a pending work request send/tx.
396 * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx.
397 * Can sleep due to smc_get_ctrl_buf (if not in softirq context).
398 *
399 * Return: 0 on success, otherwise an error value.
400 */
smc_llc_add_pending_send(struct smc_link * link,struct smc_wr_buf ** wr_buf,struct smc_wr_tx_pend_priv ** pend)401 static int smc_llc_add_pending_send(struct smc_link *link,
402 struct smc_wr_buf **wr_buf,
403 struct smc_wr_tx_pend_priv **pend)
404 {
405 int rc;
406
407 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
408 pend);
409 if (rc < 0)
410 return rc;
411 BUILD_BUG_ON_MSG(
412 sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
413 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)");
414 BUILD_BUG_ON_MSG(
415 sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
416 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
417 BUILD_BUG_ON_MSG(
418 sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
419 "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
420 return 0;
421 }
422
smc_llc_add_pending_send_v2(struct smc_link * link,struct smc_wr_v2_buf ** wr_buf,struct smc_wr_tx_pend_priv ** pend)423 static int smc_llc_add_pending_send_v2(struct smc_link *link,
424 struct smc_wr_v2_buf **wr_buf,
425 struct smc_wr_tx_pend_priv **pend)
426 {
427 int rc;
428
429 rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend);
430 if (rc < 0)
431 return rc;
432 return 0;
433 }
434
/* fill version and length into an LLC header; V2 writes the 16-bit
 * length_v2 union member, V1 the single length byte
 */
static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr,
				 struct smc_link_group *lgr, size_t len)
{
	if (lgr->smc_version == SMC_V2) {
		hdr->common.llc_version = SMC_V2;
		hdr->length_v2 = len;
	} else {
		hdr->common.llc_version = 0;
		hdr->length = len;
	}
}
446
/* high-level API to send LLC confirm link */
int smc_llc_send_confirm_link(struct smc_link *link,
			      enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_confirm_link *confllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
	memset(confllc, 0, sizeof(*confllc));
	confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK;
	smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc));
	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
	if (reqresp == SMC_LLC_RESP)
		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
	/* advertise our side of the link: MAC, GID, QP number and uid */
	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
	       ETH_ALEN);
	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
	confllc->link_num = link->link_id;
	memcpy(confllc->link_uid, link->link_uid, SMC_LGR_ID_SIZE);
	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
481
/* send LLC confirm rkey request for @rmb_desc; rtoken[0] carries the rkey
 * of the sending link, rtoken[1..] the rkeys of all other active links
 */
static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
				     struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_confirm_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_link *link;
	int i, rc, rtok_ix;

	if (!smc_wr_tx_link_hold(send_link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY;
	smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc));

	/* collect rkey/vaddr of the buffer on every other active link */
	rtok_ix = 1;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		link = &send_link->lgr->lnk[i];
		if (smc_link_active(link) && link != send_link) {
			rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
			rkeyllc->rtoken[rtok_ix].rmb_key =
				htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
			rkeyllc->rtoken[rtok_ix].rmb_vaddr = cpu_to_be64(
				(u64)sg_dma_address(
					rmb_desc->sgt[link->link_idx].sgl));
			rtok_ix++;
		}
	}
	/* rkey of send_link is in rtoken[0] */
	rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
	rkeyllc->rtoken[0].rmb_key =
		htonl(rmb_desc->mr_rx[send_link->link_idx]->rkey);
	rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
		(u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
	/* send llc message */
	rc = smc_wr_tx_send(send_link, pend);
put_out:
	smc_wr_tx_link_put(send_link);
	return rc;
}
527
/* send LLC delete rkey request for a single rkey of @rmb_desc */
static int smc_llc_send_delete_rkey(struct smc_link *link,
				    struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_delete_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY;
	smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc));
	rkeyllc->num_rkeys = 1;
	rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
554
555 /* return first buffer from any of the next buf lists */
_smc_llc_get_next_rmb(struct smc_link_group * lgr,int * buf_lst)556 static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
557 int *buf_lst)
558 {
559 struct smc_buf_desc *buf_pos;
560
561 while (*buf_lst < SMC_RMBE_SIZES) {
562 buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
563 struct smc_buf_desc, list);
564 if (buf_pos)
565 return buf_pos;
566 (*buf_lst)++;
567 }
568 return NULL;
569 }
570
571 /* return next rmb from buffer lists */
smc_llc_get_next_rmb(struct smc_link_group * lgr,int * buf_lst,struct smc_buf_desc * buf_pos)572 static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
573 int *buf_lst,
574 struct smc_buf_desc *buf_pos)
575 {
576 struct smc_buf_desc *buf_next;
577
578 if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
579 (*buf_lst)++;
580 return _smc_llc_get_next_rmb(lgr, buf_lst);
581 }
582 buf_next = list_next_entry(buf_pos, list);
583 return buf_next;
584 }
585
/* reset the cursor and return the first rmb of the link group's buf lists */
static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
						  int *buf_lst)
{
	*buf_lst = 0;
	return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
}
592
/* fill the SMC-Rv2 ADD_LINK extension: direct-path flag, target gid, and
 * one rkey translation entry per connection (old rkey on @link, new rkey and
 * vaddr on @link_new); takes lgr->rmbs_lock; returns the total extension
 * length in bytes
 */
static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
			       struct smc_link *link, struct smc_link *link_new)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_buf_desc *buf_pos;
	int prim_lnk_idx, lnk_idx, i;
	struct smc_buf_desc *rmb;
	int len = sizeof(*ext);
	int buf_lst;

	ext->v2_direct = !lgr->uses_gateway;
	memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE);

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	mutex_lock(&lgr->rmbs_lock);
	ext->num_rkeys = lgr->conns_num;
	if (!ext->num_rkeys)
		goto out;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	for (i = 0; i < ext->num_rkeys; i++) {
		if (!buf_pos)
			break;	/* fewer buffers than connections */
		rmb = buf_pos;
		ext->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
		ext->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
		ext->rt[i].rmb_vaddr_new =
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
		/* advance, skipping buffers not currently in use */
		buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
		while (buf_pos && !(buf_pos)->used)
			buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
	}
	len += i * sizeof(ext->rt[0]);
out:
	mutex_unlock(&lgr->rmbs_lock);
	return len;
}
630
/* send ADD LINK request or response; for SMC-Rv2 a large (v2) send buffer
 * is used so the rkey extension can be appended behind the base message
 */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
			  struct smc_link *link_new,
			  enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_add_link_v2_ext *ext = NULL;
	struct smc_llc_msg_add_link *addllc;
	struct smc_wr_tx_pend_priv *pend;
	int len = sizeof(*addllc);
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	if (link->lgr->smc_version == SMC_V2) {
		struct smc_wr_v2_buf *wr_buf;

		rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
		/* the v2 extension starts right behind the base message */
		ext = (struct smc_llc_msg_add_link_v2_ext *)
						&wr_buf->raw[sizeof(*addllc)];
		memset(ext, 0, SMC_WR_TX_SIZE);
	} else {
		struct smc_wr_buf *wr_buf;

		rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
	}

	memset(addllc, 0, sizeof(*addllc));
	addllc->hd.common.llc_type = SMC_LLC_ADD_LINK;
	if (reqresp == SMC_LLC_RESP)
		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(addllc->sender_mac, mac, ETH_ALEN);
	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
	if (link_new) {
		addllc->link_num = link_new->link_id;
		hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
		hton24(addllc->initial_psn, link_new->psn_initial);
		if (reqresp == SMC_LLC_REQ)
			addllc->qp_mtu = link_new->path_mtu;
		else
			/* response: agree on the smaller of both MTUs */
			addllc->qp_mtu = min(link_new->path_mtu,
					     link_new->peer_mtu);
	}
	if (ext && link_new)
		len += smc_llc_fill_ext_v2(ext, link, link_new);
	smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len);
	/* send llc message */
	if (link->lgr->smc_version == SMC_V2)
		rc = smc_wr_tx_v2_send(link, pend, len);
	else
		rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
691
/* send DELETE LINK request or response; a zero @link_del_id requests
 * deletion of all links of the link group
 */
int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
			     enum smc_llc_reqresp reqresp, bool orderly,
			     u32 reason)
{
	struct smc_llc_msg_del_link *delllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	delllc = (struct smc_llc_msg_del_link *)wr_buf;

	memset(delllc, 0, sizeof(*delllc));
	delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK;
	smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc));
	if (reqresp == SMC_LLC_RESP)
		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (orderly)
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	if (link_del_id)
		delllc->link_num = link_del_id;
	else
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	delllc->reason = htonl(reason);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
727
/* send LLC test link request; the peer echoes @user_data in its response */
static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
{
	struct smc_llc_msg_test_link *testllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	testllc = (struct smc_llc_msg_test_link *)wr_buf;
	memset(testllc, 0, sizeof(*testllc));
	testllc->hd.common.llc_type = SMC_LLC_TEST_LINK;
	smc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc));
	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
752
753 /* schedule an llc send on link, may wait for buffers */
smc_llc_send_message(struct smc_link * link,void * llcbuf)754 static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
755 {
756 struct smc_wr_tx_pend_priv *pend;
757 struct smc_wr_buf *wr_buf;
758 int rc;
759
760 if (!smc_wr_tx_link_hold(link))
761 return -ENOLINK;
762 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
763 if (rc)
764 goto put_out;
765 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
766 rc = smc_wr_tx_send(link, pend);
767 put_out:
768 smc_wr_tx_link_put(link);
769 return rc;
770 }
771
772 /* schedule an llc send on link, may wait for buffers,
773 * and wait for send completion notification.
774 * @return 0 on success
775 */
smc_llc_send_message_wait(struct smc_link * link,void * llcbuf)776 static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
777 {
778 struct smc_wr_tx_pend_priv *pend;
779 struct smc_wr_buf *wr_buf;
780 int rc;
781
782 if (!smc_wr_tx_link_hold(link))
783 return -ENOLINK;
784 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
785 if (rc)
786 goto put_out;
787 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
788 rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
789 put_out:
790 smc_wr_tx_link_put(link);
791 return rc;
792 }
793
794 /********************************* receive ***********************************/
795
smc_llc_alloc_alt_link(struct smc_link_group * lgr,enum smc_lgr_type lgr_new_t)796 static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
797 enum smc_lgr_type lgr_new_t)
798 {
799 int i;
800
801 if (lgr->type == SMC_LGR_SYMMETRIC ||
802 (lgr->type != SMC_LGR_SINGLE &&
803 (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
804 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
805 return -EMLINK;
806
807 if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
808 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
809 for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
810 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
811 return i;
812 } else {
813 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
814 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
815 return i;
816 }
817 return -EMLINK;
818 }
819
/* send one add_link_continue msg carrying up to SMC_LLC_RKEYS_PER_CONT_MSG
 * rkey translation entries; decrements *num_rkeys_todo and advances the
 * buffer cursor (*buf_lst / *buf_pos) accordingly
 */
static int smc_llc_add_link_cont(struct smc_link *link,
				 struct smc_link *link_new, u8 *num_rkeys_todo,
				 int *buf_lst, struct smc_buf_desc **buf_pos)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	int prim_lnk_idx, lnk_idx, i, rc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_buf_desc *rmb;
	u8 n;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
	memset(addc_llc, 0, sizeof(*addc_llc));

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	addc_llc->link_num = link_new->link_id;
	addc_llc->num_rkeys = *num_rkeys_todo;
	n = *num_rkeys_todo;
	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
		if (!*buf_pos) {
			/* ran out of buffers: report only what was filled */
			addc_llc->num_rkeys = addc_llc->num_rkeys -
					      *num_rkeys_todo;
			*num_rkeys_todo = 0;
			break;
		}
		rmb = *buf_pos;

		addc_llc->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
		addc_llc->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
		addc_llc->rt[i].rmb_vaddr_new =
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));

		(*num_rkeys_todo)--;
		/* advance cursor, skipping buffers not currently in use */
		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
		while (*buf_pos && !(*buf_pos)->used)
			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
	}
	addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
	if (lgr->role == SMC_CLNT)
		addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
874
/* as SMC client, exchange rkeys for a new link: alternately receive
 * ADD_LINK_CONT messages from the server and send our own until both
 * sides have no rkeys left to transfer; holds lgr->rmbs_lock throughout
 */
static int smc_llc_cli_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	mutex_lock(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			break;
		}
		/* store the peer's rkey translations for the new link */
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		/* reply with the next batch of our own rkeys */
		rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
					   &buf_lst, &buf_pos);
		if (rc)
			break;
	} while (num_rkeys_send || num_rkeys_recv);

	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}
917
918 /* prepare and send an add link reject response */
smc_llc_cli_add_link_reject(struct smc_llc_qentry * qentry)919 static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
920 {
921 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
922 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
923 qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
924 smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
925 sizeof(qentry->msg));
926 return smc_llc_send_message(qentry->link, &qentry->msg);
927 }
928
smc_llc_cli_conf_link(struct smc_link * link,struct smc_init_info * ini,struct smc_link * link_new,enum smc_lgr_type lgr_new_t)929 static int smc_llc_cli_conf_link(struct smc_link *link,
930 struct smc_init_info *ini,
931 struct smc_link *link_new,
932 enum smc_lgr_type lgr_new_t)
933 {
934 struct smc_link_group *lgr = link->lgr;
935 struct smc_llc_qentry *qentry = NULL;
936 int rc = 0;
937
938 /* receive CONFIRM LINK request over RoCE fabric */
939 qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
940 if (!qentry) {
941 rc = smc_llc_send_delete_link(link, link_new->link_id,
942 SMC_LLC_REQ, false,
943 SMC_LLC_DEL_LOST_PATH);
944 return -ENOLINK;
945 }
946 if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
947 /* received DELETE_LINK instead */
948 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
949 smc_llc_send_message(link, &qentry->msg);
950 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
951 return -ENOLINK;
952 }
953 smc_llc_save_peer_uid(qentry);
954 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
955
956 rc = smc_ib_modify_qp_rts(link_new);
957 if (rc) {
958 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
959 false, SMC_LLC_DEL_LOST_PATH);
960 return -ENOLINK;
961 }
962 smc_wr_remember_qp_attr(link_new);
963
964 rc = smcr_buf_reg_lgr(link_new);
965 if (rc) {
966 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
967 false, SMC_LLC_DEL_LOST_PATH);
968 return -ENOLINK;
969 }
970
971 /* send CONFIRM LINK response over RoCE fabric */
972 rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
973 if (rc) {
974 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
975 false, SMC_LLC_DEL_LOST_PATH);
976 return -ENOLINK;
977 }
978 smc_llc_link_active(link_new);
979 if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
980 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
981 smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
982 else
983 smcr_lgr_set_type(lgr, lgr_new_t);
984 return 0;
985 }
986
smc_llc_save_add_link_rkeys(struct smc_link * link,struct smc_link * link_new)987 static void smc_llc_save_add_link_rkeys(struct smc_link *link,
988 struct smc_link *link_new)
989 {
990 struct smc_llc_msg_add_link_v2_ext *ext;
991 struct smc_link_group *lgr = link->lgr;
992 int max, i;
993
994 ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
995 SMC_WR_TX_SIZE);
996 max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
997 mutex_lock(&lgr->rmbs_lock);
998 for (i = 0; i < max; i++) {
999 smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
1000 ext->rt[i].rmb_key,
1001 ext->rt[i].rmb_vaddr_new,
1002 ext->rt[i].rmb_key_new);
1003 }
1004 mutex_unlock(&lgr->rmbs_lock);
1005 }
1006
smc_llc_save_add_link_info(struct smc_link * link,struct smc_llc_msg_add_link * add_llc)1007 static void smc_llc_save_add_link_info(struct smc_link *link,
1008 struct smc_llc_msg_add_link *add_llc)
1009 {
1010 link->peer_qpn = ntoh24(add_llc->sender_qp_num);
1011 memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
1012 memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
1013 link->peer_psn = ntoh24(add_llc->initial_psn);
1014 link->peer_mtu = add_llc->qp_mtu;
1015 }
1016
/* as an SMC client, process an add link request */
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
{
	struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info *ini = NULL;
	struct smc_link *lnk_new = NULL;
	int lnk_idx, rc = 0;

	/* reject a request that carries no qp_mtu */
	if (!llc->qp_mtu)
		goto out_reject;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out_reject;
	}

	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid);
	}
	/* look for an alternate RoCE device for the new link */
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	/* peer reuses the GID (and, for SMCv1, MAC) of the existing link:
	 * the peer has no alternate path
	 */
	if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	    (lgr->smc_version == SMC_V2 ||
	     !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) {
		if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2)
			goto out_reject; /* no alternate path on our side either */
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	/* no local alternate device found: reuse the current one (asym local) */
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0)
		goto out_reject;
	lnk_new = &lgr->lnk[lnk_idx];
	rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini);
	if (rc)
		goto out_reject;
	smc_llc_save_add_link_info(lnk_new, llc);
	lnk_new->link_id = llc->link_num;	/* SMC server assigns link id */
	smc_llc_link_set_uid(lnk_new);

	rc = smc_ib_ready_link(lnk_new);
	if (rc)
		goto out_clear_lnk;

	rc = smcr_buf_map_lgr(lnk_new);
	if (rc)
		goto out_clear_lnk;

	/* answer with our own ADD_LINK carrying the new link's address */
	rc = smc_llc_send_add_link(link,
				   lnk_new->smcibdev->mac[lnk_new->ibport - 1],
				   lnk_new->gid, lnk_new, SMC_LLC_RESP);
	if (rc)
		goto out_clear_lnk;
	if (lgr->smc_version == SMC_V2) {
		/* SMCv2 delivers the rkeys in the ADD_LINK v2 extension */
		smc_llc_save_add_link_rkeys(link, lnk_new);
	} else {
		/* SMCv1 exchanges rkeys via ADD_LINK_CONT messages */
		rc = smc_llc_cli_rkey_exchange(link, lnk_new);
		if (rc) {
			rc = 0;	/* rkey exchange failure not surfaced to caller */
			goto out_clear_lnk;
		}
	}
	rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t);
	if (!rc)
		goto out;
out_clear_lnk:
	lnk_new->state = SMC_LNK_INACTIVE;
	smcr_link_clear(lnk_new, false);
out_reject:
	smc_llc_cli_add_link_reject(qentry);
out:
	kfree(ini);
	kfree(qentry);
	return rc;
}
1105
/* send an SMCv2 REQUEST_ADD_LINK message carrying all local GIDs, to
 * ask the server to start the add_link processing; does nothing when
 * the group is already symmetric or no alternate GID exists
 */
static void smc_llc_send_request_add_link(struct smc_link *link)
{
	struct smc_llc_msg_req_add_link_v2 *llc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_v2_buf *wr_buf;
	struct smc_gidlist gidlist;
	int rc, len, i;

	if (!smc_wr_tx_link_hold(link))
		return;
	if (link->lgr->type == SMC_LGR_SYMMETRIC ||
	    link->lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto put_out;	/* no additional link needed */

	smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid);
	if (gidlist.len <= 1)
		goto put_out;	/* no alternate GID to offer */

	rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf;
	memset(llc, 0, SMC_WR_TX_SIZE);

	llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK;
	for (i = 0; i < gidlist.len; i++)
		memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0]));
	llc->gid_cnt = gidlist.len;
	/* message length = fixed part plus all GID entries */
	len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0]));
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, len);
	rc = smc_wr_tx_v2_send(link, pend, len);
	if (!rc)
		/* set REQ_ADD_LINK flow and wait for response from peer */
		link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK;
put_out:
	smc_wr_tx_link_put(link);
}
1143
/* as an SMC client, invite server to start the add_link processing */
static void smc_llc_cli_add_link_invite(struct smc_link *link,
					struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info *ini = NULL;

	/* SMCv2 uses the REQUEST_ADD_LINK message for the invitation */
	if (lgr->smc_version == SMC_V2) {
		smc_llc_send_request_add_link(link);
		goto out;
	}

	if (lgr->type == SMC_LGR_SYMMETRIC ||
	    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto out;	/* no additional link needed */

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		goto out;

	ini->vlan_id = lgr->vlan_id;
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	if (!ini->ib_dev)
		goto out;	/* no alternate device available */

	/* best effort - a send failure is ignored here */
	smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1],
			      ini->ib_gid, NULL, SMC_LLC_REQ);
out:
	kfree(ini);
	kfree(qentry);
}
1175
smc_llc_is_empty_llc_message(union smc_llc_msg * llc)1176 static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
1177 {
1178 int i;
1179
1180 for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
1181 if (llc->raw.data[i])
1182 return false;
1183 return true;
1184 }
1185
smc_llc_is_local_add_link(union smc_llc_msg * llc)1186 static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
1187 {
1188 if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK &&
1189 smc_llc_is_empty_llc_message(llc))
1190 return true;
1191 return false;
1192 }
1193
smc_llc_process_cli_add_link(struct smc_link_group * lgr)1194 static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
1195 {
1196 struct smc_llc_qentry *qentry;
1197
1198 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1199
1200 mutex_lock(&lgr->llc_conf_mutex);
1201 if (smc_llc_is_local_add_link(&qentry->msg))
1202 smc_llc_cli_add_link_invite(qentry->link, qentry);
1203 else
1204 smc_llc_cli_add_link(qentry->link, qentry);
1205 mutex_unlock(&lgr->llc_conf_mutex);
1206 }
1207
smc_llc_active_link_count(struct smc_link_group * lgr)1208 static int smc_llc_active_link_count(struct smc_link_group *lgr)
1209 {
1210 int i, link_count = 0;
1211
1212 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1213 if (!smc_link_active(&lgr->lnk[i]))
1214 continue;
1215 link_count++;
1216 }
1217 return link_count;
1218 }
1219
/* find the asymmetric link when 3 links are established */
static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr)
{
	int asym_idx = -ENOENT;
	int i, j, k;
	bool found;

	/* determine asymmetric link: two usable links sharing the same
	 * local GID mean that one of them (i or j) is the asymmetric one
	 */
	found = false;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
			if (!smc_link_usable(&lgr->lnk[i]) ||
			    !smc_link_usable(&lgr->lnk[j]))
				continue;
			if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid,
				    SMC_GID_SIZE)) {
				found = true;	/* asym_lnk is i or j */
				break;
			}
		}
		if (found)
			break;
	}
	if (!found)
		goto out; /* no asymmetric link */
	/* i and j retain their values from the loops above; the asymmetric
	 * link is the one whose peer GID also appears on a third link k
	 */
	for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) {
		if (!smc_link_usable(&lgr->lnk[k]))
			continue;
		if (k != i &&
		    !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = i;
			break;
		}
		if (k != j &&
		    !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = j;
			break;
		}
	}
out:
	return (asym_idx < 0) ? NULL : &lgr->lnk[asym_idx];
}
1264
/* delete the asymmetric link of the group, if any: move its connections
 * to another link, run the DELETE_LINK handshake with the peer, and
 * finally clear the link
 */
static void smc_llc_delete_asym_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_new = NULL, *lnk_asym;
	struct smc_llc_qentry *qentry;
	int rc;

	lnk_asym = smc_llc_find_asym_link(lgr);
	if (!lnk_asym)
		return; /* no asymmetric link */
	if (!smc_link_downing(&lnk_asym->state))
		return;
	lnk_new = smc_switch_conns(lgr, lnk_asym, false);
	smc_wr_tx_wait_no_pending_sends(lnk_asym);
	if (!lnk_new)
		goto out_free;	/* no remaining link to talk to the peer */
	/* change flow type from ADD_LINK into DEL_LINK */
	lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK;
	rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ,
				      true, SMC_LLC_DEL_NO_ASYM_NEEDED);
	if (rc) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	/* wait for the peer's DELETE_LINK response */
	qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_LINK);
	if (!qentry) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
out_free:
	smcr_link_clear(lnk_asym, true);
}
1298
/* as SMC server, exchange rkeys for a new link via ADD_LINK_CONT
 * messages: alternately send our rkeys and store the peer's until
 * both directions have transferred everything
 */
static int smc_llc_srv_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry = NULL;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	mutex_lock(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		/* send the next batch of our rkeys */
		smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
				      &buf_lst, &buf_pos);
		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			goto out;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		/* cap the peer's count by the per-message maximum */
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	} while (num_rkeys_send || num_rkeys_recv);
out:
	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}
1339
/* as SMC server, run the CONFIRM_LINK handshake for the new link and
 * activate it on success; on failure a DELETE_LINK is sent to the peer
 */
static int smc_llc_srv_conf_link(struct smc_link *link,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc;

	/* send CONFIRM LINK request over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ);
	if (rc)
		return -ENOLINK;
	/* receive CONFIRM LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry ||
	    qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
		/* send DELETE LINK */
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		if (qentry)
			smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_link_active(link_new);
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return 0;
}
1373
smc_llc_send_req_add_link_response(struct smc_llc_qentry * qentry)1374 static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry)
1375 {
1376 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
1377 smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
1378 sizeof(qentry->msg));
1379 memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data));
1380 smc_llc_send_message(qentry->link, &qentry->msg);
1381 }
1382
/* as SMC server, establish an additional link: pick a device, create the
 * link, run the ADD_LINK/rkey/CONFIRM_LINK sequence with the client;
 * req_qentry may carry a client REQUEST_ADD_LINK that must be answered
 * if we bail out before sending our own ADD_LINK
 */
int smc_llc_srv_add_link(struct smc_link *link,
			 struct smc_llc_qentry *req_qentry)
{
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_msg_add_link *add_llc;
	struct smc_llc_qentry *qentry = NULL;
	bool send_req_add_link_resp = false;
	struct smc_link *link_new = NULL;
	struct smc_init_info *ini = NULL;
	int lnk_idx, rc = 0;

	if (req_qentry &&
	    req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK)
		send_req_add_link_resp = true;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out;
	}

	/* ignore client add link recommendation, start new flow */
	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		if (send_req_add_link_resp) {
			struct smc_llc_msg_req_add_link_v2 *req_add =
				&req_qentry->msg.req_add_link;

			/* derive peer address from the first offered GID */
			ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]);
		}
	}
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	/* no local alternate device found: reuse the current one (asym local) */
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0) {
		rc = 0;	/* no free link slot is not treated as an error */
		goto out;
	}

	rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini);
	if (rc)
		goto out;
	link_new = &lgr->lnk[lnk_idx];

	rc = smcr_buf_map_lgr(link_new);
	if (rc)
		goto out_err;

	rc = smc_llc_send_add_link(link,
				   link_new->smcibdev->mac[link_new->ibport-1],
				   link_new->gid, link_new, SMC_LLC_REQ);
	if (rc)
		goto out_err;
	/* the ADD_LINK just sent implicitly answers the REQUEST_ADD_LINK */
	send_req_add_link_resp = false;
	/* receive ADD LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
	if (!qentry) {
		rc = -ETIMEDOUT;
		goto out_err;
	}
	add_llc = &qentry->msg.add_link;
	if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
		/* client rejected the new link */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = -ENOLINK;
		goto out_err;
	}
	/* client answered with the same GID (and MAC for SMCv1): it has
	 * no alternate path on its side
	 */
	if (lgr->type == SMC_LGR_SINGLE &&
	    (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	     (lgr->smc_version == SMC_V2 ||
	      !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	smc_llc_save_add_link_info(link_new, add_llc);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_ready_link(link_new);
	if (rc)
		goto out_err;
	rc = smcr_buf_reg_lgr(link_new);
	if (rc)
		goto out_err;
	if (lgr->smc_version == SMC_V2) {
		/* SMCv2 delivers the rkeys in the ADD_LINK v2 extension */
		smc_llc_save_add_link_rkeys(link, link_new);
	} else {
		/* SMCv1 exchanges rkeys via ADD_LINK_CONT messages */
		rc = smc_llc_srv_rkey_exchange(link, link_new);
		if (rc)
			goto out_err;
	}
	rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
	if (rc)
		goto out_err;
	kfree(ini);
	return 0;
out_err:
	if (link_new) {
		link_new->state = SMC_LNK_INACTIVE;
		smcr_link_clear(link_new, false);
	}
out:
	kfree(ini);
	if (send_req_add_link_resp)
		smc_llc_send_req_add_link_response(req_qentry);
	return rc;
}
1498
smc_llc_process_srv_add_link(struct smc_link_group * lgr)1499 static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
1500 {
1501 struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
1502 struct smc_llc_qentry *qentry;
1503 int rc;
1504
1505 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1506
1507 mutex_lock(&lgr->llc_conf_mutex);
1508 rc = smc_llc_srv_add_link(link, qentry);
1509 if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
1510 /* delete any asymmetric link */
1511 smc_llc_delete_asym_link(lgr);
1512 }
1513 mutex_unlock(&lgr->llc_conf_mutex);
1514 kfree(qentry);
1515 }
1516
1517 /* enqueue a local add_link req to trigger a new add_link flow */
smc_llc_add_link_local(struct smc_link * link)1518 void smc_llc_add_link_local(struct smc_link *link)
1519 {
1520 struct smc_llc_msg_add_link add_llc = {};
1521
1522 add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK;
1523 smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc));
1524 /* no dev and port needed */
1525 smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
1526 }
1527
1528 /* worker to process an add link message */
smc_llc_add_link_work(struct work_struct * work)1529 static void smc_llc_add_link_work(struct work_struct *work)
1530 {
1531 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1532 llc_add_link_work);
1533
1534 if (list_empty(&lgr->list)) {
1535 /* link group is terminating */
1536 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1537 goto out;
1538 }
1539
1540 if (lgr->role == SMC_CLNT)
1541 smc_llc_process_cli_add_link(lgr);
1542 else
1543 smc_llc_process_srv_add_link(lgr);
1544 out:
1545 if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK)
1546 smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1547 }
1548
1549 /* enqueue a local del_link msg to trigger a new del_link flow,
1550 * called only for role SMC_SERV
1551 */
smc_llc_srv_delete_link_local(struct smc_link * link,u8 del_link_id)1552 void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
1553 {
1554 struct smc_llc_msg_del_link del_llc = {};
1555
1556 del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
1557 smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc));
1558 del_llc.link_num = del_link_id;
1559 del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
1560 del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1561 smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc);
1562 }
1563
/* as SMC client, process a DELETE_LINK request from the peer: answer it,
 * move connections off the link and clear it, adjusting the group type
 */
static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_del = NULL, *lnk_asym, *lnk;
	struct smc_llc_msg_del_link *del_llc;
	struct smc_llc_qentry *qentry;
	int active_links;
	int lnk_idx;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* peer wants all links gone: terminate the whole group */
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	mutex_lock(&lgr->llc_conf_mutex);
	/* delete single link */
	for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
		if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
			continue;
		lnk_del = &lgr->lnk[lnk_idx];
		break;
	}
	del_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (!lnk_del) {
		/* link was not found */
		del_llc->reason = htonl(SMC_LLC_DEL_NOLNK);
		smc_llc_send_message(lnk, &qentry->msg);
		goto out_unlock;
	}
	/* remember the asym link before deletion for the state check below */
	lnk_asym = smc_llc_find_asym_link(lgr);

	del_llc->reason = 0;
	smc_llc_send_message(lnk, &qentry->msg); /* response */

	if (smc_link_downing(&lnk_del->state))
		smc_switch_conns(lgr, lnk_del, false);
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (lnk_del == lnk_asym) {
		/* expected deletion of asym link, don't change lgr state */
	} else if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}
out_unlock:
	mutex_unlock(&lgr->llc_conf_mutex);
out:
	kfree(qentry);
}
1618
1619 /* try to send a DELETE LINK ALL request on any active link,
1620 * waiting for send completion
1621 */
smc_llc_send_link_delete_all(struct smc_link_group * lgr,bool ord,u32 rsn)1622 void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
1623 {
1624 struct smc_llc_msg_del_link delllc = {};
1625 int i;
1626
1627 delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
1628 smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc));
1629 if (ord)
1630 delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1631 delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
1632 delllc.reason = htonl(rsn);
1633
1634 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1635 if (!smc_link_sendable(&lgr->lnk[i]))
1636 continue;
1637 if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
1638 break;
1639 }
1640 }
1641
/* as SMC server, process a DELETE_LINK message (from the peer or locally
 * enqueued): delete the named link or the whole group, and trigger
 * setup of a new alternate link when only one link remains
 */
static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
{
	struct smc_llc_msg_del_link *del_llc;
	struct smc_link *lnk, *lnk_del;
	struct smc_llc_qentry *qentry;
	int active_links;
	int i;

	mutex_lock(&lgr->llc_conf_mutex);
	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* delete entire lgr */
		smc_llc_send_link_delete_all(lgr, true, ntohl(
					      qentry->msg.delete_link.reason));
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	/* delete single link */
	lnk_del = NULL;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].link_id == del_llc->link_num) {
			lnk_del = &lgr->lnk[i];
			break;
		}
	}
	if (!lnk_del)
		goto out; /* asymmetric link already deleted */

	if (smc_link_downing(&lnk_del->state)) {
		if (smc_switch_conns(lgr, lnk_del, false))
			smc_wr_tx_wait_no_pending_sends(lnk_del);
	}
	if (!list_empty(&lgr->list)) {
		/* qentry is either a request from peer (send it back to
		 * initiate the DELETE_LINK processing), or a locally
		 * enqueued DELETE_LINK request (forward it)
		 */
		if (!smc_llc_send_message(lnk, &qentry->msg)) {
			struct smc_llc_qentry *qentry2;

			/* wait for the peer's DELETE_LINK response */
			qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
					       SMC_LLC_DELETE_LINK);
			if (qentry2)
				smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		}
	}
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}

	if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
		/* trigger setup of asymm alt link */
		smc_llc_add_link_local(lnk);
	}
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	kfree(qentry);
}
1709
smc_llc_delete_link_work(struct work_struct * work)1710 static void smc_llc_delete_link_work(struct work_struct *work)
1711 {
1712 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1713 llc_del_link_work);
1714
1715 if (list_empty(&lgr->list)) {
1716 /* link group is terminating */
1717 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1718 goto out;
1719 }
1720
1721 if (lgr->role == SMC_CLNT)
1722 smc_llc_process_cli_delete_link(lgr);
1723 else
1724 smc_llc_process_srv_delete_link(lgr);
1725 out:
1726 smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1727 }
1728
/* process a confirm_rkey request from peer, remote flow */
static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_confirm_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	int num_entries;
	int rk_idx;
	int i;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.confirm_rkey;
	link = qentry->link;

	num_entries = llc->rtoken[0].num_rkeys;
	/* reject a count beyond the message capacity */
	if (num_entries > SMC_LLC_RKEYS_PER_MSG)
		goto out_err;
	/* first rkey entry is for receiving link */
	rk_idx = smc_rtoken_add(link,
				llc->rtoken[0].rmb_vaddr,
				llc->rtoken[0].rmb_key);
	if (rk_idx < 0)
		goto out_err;

	/* remaining entries describe the same rmb on the other links */
	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
				llc->rtoken[i].rmb_vaddr,
				llc->rtoken[i].rmb_key);
	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
	goto out;
out_err:
	/* negative response with retry indication */
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
1768
/* process a delete_rkey request from peer, remote flow */
static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_delete_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	u8 err_mask = 0;
	int i, max;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.delete_rkey;
	link = qentry->link;

	if (lgr->smc_version == SMC_V2) {
		struct smc_llc_msg_delete_rkey_v2 *llcv2;

		/* v2 message is larger; work on a copy in the v2 rx buffer */
		memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
		llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
		llcv2->num_inval_rkeys = 0;

		/* cap the peer's count by the per-message maximum */
		max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
		for (i = 0; i < max; i++) {
			if (smc_rtoken_delete(link, llcv2->rkey[i]))
				llcv2->num_inval_rkeys++;
		}
		/* clear the v1 payload before sending the response */
		memset(&llc->rkey[0], 0, sizeof(llc->rkey));
		memset(&llc->reserved2, 0, sizeof(llc->reserved2));
		smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
		if (llcv2->num_inval_rkeys) {
			llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
			llc->err_mask = llcv2->num_inval_rkeys;
		}
		goto finish;
	}

	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
	for (i = 0; i < max; i++) {
		/* one bit per failed deletion, highest bit = first rkey */
		if (smc_rtoken_delete(link, llc->rkey[i]))
			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
	}
	if (err_mask) {
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
		llc->err_mask = err_mask;
	}
finish:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
1818
/* unknown or unexpected LLC message: log it (rate limited) and schedule
 * termination of the link group
 */
static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu LLC protocol violation: "
			    "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id,
			    lgr->net->net_cookie, type);
	smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
	smc_lgr_terminate_sched(lgr);
}
1827
1828 /* flush the llc event queue */
smc_llc_event_flush(struct smc_link_group * lgr)1829 static void smc_llc_event_flush(struct smc_link_group *lgr)
1830 {
1831 struct smc_llc_qentry *qentry, *q;
1832
1833 spin_lock_bh(&lgr->llc_event_q_lock);
1834 list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
1835 list_del_init(&qentry->list);
1836 kfree(qentry);
1837 }
1838 spin_unlock_bh(&lgr->llc_event_q_lock);
1839 }
1840
/* demultiplex one llc message from the event queue; the qentry is
 * either consumed (kfree'd) here or handed over to a flow via
 * smc_llc_flow_start()/smc_llc_flow_qentry_set()
 */
static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
{
	union smc_llc_msg *llc = &qentry->msg;
	struct smc_link *link = qentry->link;
	struct smc_link_group *lgr = link->lgr;

	if (!smc_link_usable(link))
		goto out;

	switch (llc->raw.hdr.common.llc_type) {
	case SMC_LLC_TEST_LINK:
		/* keep-alive probe: answer immediately */
		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, llc);
		break;
	case SMC_LLC_ADD_LINK:
		if (list_empty(&lgr->list))
			goto out;	/* lgr is terminating */
		if (lgr->role == SMC_CLNT) {
			if (smc_llc_is_local_add_link(llc)) {
				/* locally enqueued (empty) add_link trigger */
				if (lgr->llc_flow_lcl.type ==
				    SMC_LLC_FLOW_ADD_LINK)
					break;	/* add_link in progress */
				if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						       qentry)) {
					schedule_work(&lgr->llc_add_link_work);
				}
				return;
			}
			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
			    !lgr->llc_flow_lcl.qentry) {
				/* a flow is waiting for this message */
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				wake_up(&lgr->llc_msg_waiter);
				return;
			}
			if (lgr->llc_flow_lcl.type ==
					SMC_LLC_FLOW_REQ_ADD_LINK) {
				/* server started add_link processing */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				schedule_work(&lgr->llc_add_link_work);
				return;
			}
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				schedule_work(&lgr->llc_add_link_work);
			}
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			/* as smc server, handle client suggestion */
			schedule_work(&lgr->llc_add_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_LINK:
	case SMC_LLC_ADD_LINK_CONT:
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* a flow is waiting for this message */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
			return;
		}
		break;
	case SMC_LLC_DELETE_LINK:
		if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
		    !lgr->llc_flow_lcl.qentry) {
			/* DEL LINK REQ during ADD LINK SEQ */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			schedule_work(&lgr->llc_del_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_conf_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3, and 3 rkeys fit into
		 * one CONFIRM_RKEY message
		 */
		break;
	case SMC_LLC_DELETE_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_delete_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_REQ_ADD_LINK:
		/* handle response here, smc_llc_flow_stop() cannot be called
		 * in tasklet context
		 */
		if (lgr->role == SMC_CLNT &&
		    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK &&
		    (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) {
			smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl);
		} else if (lgr->role == SMC_SERV) {
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				/* as smc server, handle client suggestion */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				schedule_work(&lgr->llc_add_link_work);
			}
			return;
		}
		break;
	default:
		smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
		break;
	}
out:
	kfree(qentry);
}
1958
1959 /* worker to process llc messages on the event queue */
smc_llc_event_work(struct work_struct * work)1960 static void smc_llc_event_work(struct work_struct *work)
1961 {
1962 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1963 llc_event_work);
1964 struct smc_llc_qentry *qentry;
1965
1966 if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
1967 qentry = lgr->delayed_event;
1968 lgr->delayed_event = NULL;
1969 if (smc_link_usable(qentry->link))
1970 smc_llc_event_handler(qentry);
1971 else
1972 kfree(qentry);
1973 }
1974
1975 again:
1976 spin_lock_bh(&lgr->llc_event_q_lock);
1977 if (!list_empty(&lgr->llc_event_q)) {
1978 qentry = list_first_entry(&lgr->llc_event_q,
1979 struct smc_llc_qentry, list);
1980 list_del_init(&qentry->list);
1981 spin_unlock_bh(&lgr->llc_event_q_lock);
1982 smc_llc_event_handler(qentry);
1983 goto again;
1984 }
1985 spin_unlock_bh(&lgr->llc_event_q_lock);
1986 }
1987
1988 /* process llc responses in tasklet context */
/* Process an LLC response message in tasklet context.
 * A response that matches the pending local flow is attached to that flow
 * and the flow owner is woken up; out-of-flow responses are dropped.
 * Takes ownership of @qentry: it is either handed to the flow or freed.
 */
static void smc_llc_rx_response(struct smc_link *link,
				struct smc_llc_qentry *qentry)
{
	enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
	struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
	u8 llc_type = qentry->msg.raw.hdr.common.llc_type;

	switch (llc_type) {
	case SMC_LLC_TEST_LINK:
		/* keepalive reply - wake up smc_llc_testlink_work() */
		if (smc_link_active(link))
			complete(&link->llc_testlink_resp);
		break;
	case SMC_LLC_ADD_LINK:
	case SMC_LLC_ADD_LINK_CONT:
	case SMC_LLC_CONFIRM_LINK:
		/* flow->qentry set means a message is already pending */
		if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
			break;	/* drop out-of-flow response */
		goto assign;
	case SMC_LLC_DELETE_LINK:
		if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
			break;	/* drop out-of-flow response */
		goto assign;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
			break;	/* drop out-of-flow response */
		goto assign;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3 */
		break;
	default:
		smc_llc_protocol_violation(link->lgr,
					   qentry->msg.raw.hdr.common.type);
		break;
	}
	kfree(qentry);
	return;
assign:
	/* assign responses to the local flow, we requested them */
	smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
	wake_up(&link->lgr->llc_msg_waiter);
}
2031
smc_llc_enqueue(struct smc_link * link,union smc_llc_msg * llc)2032 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
2033 {
2034 struct smc_link_group *lgr = link->lgr;
2035 struct smc_llc_qentry *qentry;
2036 unsigned long flags;
2037
2038 qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
2039 if (!qentry)
2040 return;
2041 qentry->link = link;
2042 INIT_LIST_HEAD(&qentry->list);
2043 memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
2044
2045 /* process responses immediately */
2046 if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) &&
2047 llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) {
2048 smc_llc_rx_response(link, qentry);
2049 return;
2050 }
2051
2052 /* add requests to event queue */
2053 spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
2054 list_add_tail(&qentry->list, &lgr->llc_event_q);
2055 spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
2056 queue_work(system_highpri_wq, &lgr->llc_event_work);
2057 }
2058
2059 /* copy received msg and add it to the event queue */
smc_llc_rx_handler(struct ib_wc * wc,void * buf)2060 static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
2061 {
2062 struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
2063 union smc_llc_msg *llc = buf;
2064
2065 if (wc->byte_len < sizeof(*llc))
2066 return; /* short message */
2067 if (!llc->raw.hdr.common.llc_version) {
2068 if (llc->raw.hdr.length != sizeof(*llc))
2069 return; /* invalid message */
2070 } else {
2071 if (llc->raw.hdr.length_v2 < sizeof(*llc))
2072 return; /* invalid message */
2073 }
2074
2075 smc_llc_enqueue(link, llc);
2076 }
2077
2078 /***************************** worker, utils *********************************/
2079
/* Periodic keepalive worker sending TEST LINK messages.
 * The probe is skipped while regular traffic was received recently
 * (wr_rx_tstamp); on a missing or failed response the link is taken down.
 * Reschedules itself as long as the link stays active.
 */
static void smc_llc_testlink_work(struct work_struct *work)
{
	struct smc_link *link = container_of(to_delayed_work(work),
					     struct smc_link, llc_testlink_wrk);
	unsigned long next_interval;
	unsigned long expire_time;
	u8 user_data[16] = { 0 };
	int rc;

	if (!smc_link_active(link))
		return; /* don't reschedule worker */
	/* any received message counts as proof of life - only probe when
	 * the link was idle for a full keepalive interval
	 */
	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
	if (time_is_after_jiffies(expire_time)) {
		next_interval = expire_time - jiffies;
		goto out;
	}
	reinit_completion(&link->llc_testlink_resp);
	smc_llc_send_test_link(link, user_data);
	/* receive TEST LINK response over RoCE fabric */
	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
						       SMC_LLC_WAIT_TIME);
	if (!smc_link_active(link))
		return; /* link state changed */
	if (rc <= 0) {
		/* timeout or interruption - schedule link down processing */
		smcr_link_down_cond_sched(link);
		return;
	}
	next_interval = link->llc_testlink_time;
out:
	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
}
2111
smc_llc_lgr_init(struct smc_link_group * lgr,struct smc_sock * smc)2112 void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
2113 {
2114 struct net *net = sock_net(smc->clcsock->sk);
2115
2116 INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
2117 INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
2118 INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work);
2119 INIT_LIST_HEAD(&lgr->llc_event_q);
2120 spin_lock_init(&lgr->llc_event_q_lock);
2121 spin_lock_init(&lgr->llc_flow_lock);
2122 init_waitqueue_head(&lgr->llc_flow_waiter);
2123 init_waitqueue_head(&lgr->llc_msg_waiter);
2124 mutex_init(&lgr->llc_conf_mutex);
2125 lgr->llc_testlink_time = READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
2126 }
2127
2128 /* called after lgr was removed from lgr_list */
smc_llc_lgr_clear(struct smc_link_group * lgr)2129 void smc_llc_lgr_clear(struct smc_link_group *lgr)
2130 {
2131 smc_llc_event_flush(lgr);
2132 wake_up_all(&lgr->llc_flow_waiter);
2133 wake_up_all(&lgr->llc_msg_waiter);
2134 cancel_work_sync(&lgr->llc_event_work);
2135 cancel_work_sync(&lgr->llc_add_link_work);
2136 cancel_work_sync(&lgr->llc_del_link_work);
2137 if (lgr->delayed_event) {
2138 kfree(lgr->delayed_event);
2139 lgr->delayed_event = NULL;
2140 }
2141 }
2142
/* Per-link LLC initialization: set up the TEST LINK response completion
 * and the keepalive worker. The worker is only scheduled later, by
 * smc_llc_link_active(). Always returns 0.
 */
int smc_llc_link_init(struct smc_link *link)
{
	init_completion(&link->llc_testlink_resp);
	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
	return 0;
}
2149
/* Mark a link active and start its keepalive worker.
 * Logs a rate-limited "link added" audit message with lgr/link ids.
 */
void smc_llc_link_active(struct smc_link *link)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link added: id %*phN, "
			    "peerid %*phN, ibdev %s, ibport %d\n",
			    SMC_LGR_ID_SIZE, &link->lgr->id,
			    link->lgr->net->net_cookie,
			    SMC_LGR_ID_SIZE, &link->link_uid,
			    SMC_LGR_ID_SIZE, &link->peer_link_uid,
			    link->smcibdev->ibdev->name, link->ibport);
	link->state = SMC_LNK_ACTIVE;
	/* a zero llc_testlink_time disables keepalive probing */
	if (link->lgr->llc_testlink_time) {
		link->llc_testlink_time = link->lgr->llc_testlink_time;
		schedule_delayed_work(&link->llc_testlink_wrk,
				      link->llc_testlink_time);
	}
}
2166
/* Stop LLC processing on a link; called in worker context.
 * @log: emit the rate-limited "link removed" audit message.
 */
void smc_llc_link_clear(struct smc_link *link, bool log)
{
	if (log)
		pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link removed: id %*phN"
				    ", peerid %*phN, ibdev %s, ibport %d\n",
				    SMC_LGR_ID_SIZE, &link->lgr->id,
				    link->lgr->net->net_cookie,
				    SMC_LGR_ID_SIZE, &link->link_uid,
				    SMC_LGR_ID_SIZE, &link->peer_link_uid,
				    link->smcibdev->ibdev->name, link->ibport);
	/* release a testlink worker possibly waiting for a response,
	 * then cancel the worker synchronously
	 */
	complete(&link->llc_testlink_resp);
	cancel_delayed_work_sync(&link->llc_testlink_wrk);
}
2181
2182 /* register a new rtoken at the remote peer (for all links) */
smc_llc_do_confirm_rkey(struct smc_link * send_link,struct smc_buf_desc * rmb_desc)2183 int smc_llc_do_confirm_rkey(struct smc_link *send_link,
2184 struct smc_buf_desc *rmb_desc)
2185 {
2186 struct smc_link_group *lgr = send_link->lgr;
2187 struct smc_llc_qentry *qentry = NULL;
2188 int rc = 0;
2189
2190 rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
2191 if (rc)
2192 goto out;
2193 /* receive CONFIRM RKEY response from server over RoCE fabric */
2194 qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
2195 SMC_LLC_CONFIRM_RKEY);
2196 if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
2197 rc = -EFAULT;
2198 out:
2199 if (qentry)
2200 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
2201 return rc;
2202 }
2203
2204 /* unregister an rtoken at the remote peer */
smc_llc_do_delete_rkey(struct smc_link_group * lgr,struct smc_buf_desc * rmb_desc)2205 int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
2206 struct smc_buf_desc *rmb_desc)
2207 {
2208 struct smc_llc_qentry *qentry = NULL;
2209 struct smc_link *send_link;
2210 int rc = 0;
2211
2212 send_link = smc_llc_usable_link(lgr);
2213 if (!send_link)
2214 return -ENOLINK;
2215
2216 /* protected by llc_flow control */
2217 rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
2218 if (rc)
2219 goto out;
2220 /* receive DELETE RKEY response from server over RoCE fabric */
2221 qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
2222 SMC_LLC_DELETE_RKEY);
2223 if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
2224 rc = -EFAULT;
2225 out:
2226 if (qentry)
2227 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
2228 return rc;
2229 }
2230
smc_llc_link_set_uid(struct smc_link * link)2231 void smc_llc_link_set_uid(struct smc_link *link)
2232 {
2233 __be32 link_uid;
2234
2235 link_uid = htonl(*((u32 *)link->lgr->id) + link->link_id);
2236 memcpy(link->link_uid, &link_uid, SMC_LGR_ID_SIZE);
2237 }
2238
/* save the peer's link user id from a CONFIRM LINK message,
 * used for debug purposes only
 */
void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry)
{
	memcpy(qentry->link->peer_link_uid, qentry->msg.confirm_link.link_uid,
	       SMC_LGR_ID_SIZE);
}
2245
2246 /* evaluate confirm link request or response */
smc_llc_eval_conf_link(struct smc_llc_qentry * qentry,enum smc_llc_reqresp type)2247 int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
2248 enum smc_llc_reqresp type)
2249 {
2250 if (type == SMC_LLC_REQ) { /* SMC server assigns link_id */
2251 qentry->link->link_id = qentry->msg.confirm_link.link_num;
2252 smc_llc_link_set_uid(qentry->link);
2253 }
2254 if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
2255 return -ENOTSUPP;
2256 return 0;
2257 }
2258
2259 /***************************** init, exit, misc ******************************/
2260
2261 static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
2262 {
2263 .handler = smc_llc_rx_handler,
2264 .type = SMC_LLC_CONFIRM_LINK
2265 },
2266 {
2267 .handler = smc_llc_rx_handler,
2268 .type = SMC_LLC_TEST_LINK
2269 },
2270 {
2271 .handler = smc_llc_rx_handler,
2272 .type = SMC_LLC_ADD_LINK
2273 },
2274 {
2275 .handler = smc_llc_rx_handler,
2276 .type = SMC_LLC_ADD_LINK_CONT
2277 },
2278 {
2279 .handler = smc_llc_rx_handler,
2280 .type = SMC_LLC_DELETE_LINK
2281 },
2282 {
2283 .handler = smc_llc_rx_handler,
2284 .type = SMC_LLC_CONFIRM_RKEY
2285 },
2286 {
2287 .handler = smc_llc_rx_handler,
2288 .type = SMC_LLC_CONFIRM_RKEY_CONT
2289 },
2290 {
2291 .handler = smc_llc_rx_handler,
2292 .type = SMC_LLC_DELETE_RKEY
2293 },
2294 /* V2 types */
2295 {
2296 .handler = smc_llc_rx_handler,
2297 .type = SMC_LLC_CONFIRM_LINK_V2
2298 },
2299 {
2300 .handler = smc_llc_rx_handler,
2301 .type = SMC_LLC_TEST_LINK_V2
2302 },
2303 {
2304 .handler = smc_llc_rx_handler,
2305 .type = SMC_LLC_ADD_LINK_V2
2306 },
2307 {
2308 .handler = smc_llc_rx_handler,
2309 .type = SMC_LLC_DELETE_LINK_V2
2310 },
2311 {
2312 .handler = smc_llc_rx_handler,
2313 .type = SMC_LLC_REQ_ADD_LINK_V2
2314 },
2315 {
2316 .handler = smc_llc_rx_handler,
2317 .type = SMC_LLC_CONFIRM_RKEY_V2
2318 },
2319 {
2320 .handler = smc_llc_rx_handler,
2321 .type = SMC_LLC_DELETE_RKEY_V2
2322 },
2323 {
2324 .handler = NULL,
2325 }
2326 };
2327
smc_llc_init(void)2328 int __init smc_llc_init(void)
2329 {
2330 struct smc_wr_rx_handler *handler;
2331 int rc = 0;
2332
2333 for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
2334 INIT_HLIST_NODE(&handler->list);
2335 rc = smc_wr_rx_register_handler(handler);
2336 if (rc)
2337 break;
2338 }
2339 return rc;
2340 }
2341