1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
4 *
5 * Link Layer Control (LLC)
6 *
7 * Copyright IBM Corp. 2016
8 *
9 * Author(s): Klaus Wacker <Klaus.Wacker@de.ibm.com>
10 * Ursula Braun <ubraun@linux.vnet.ibm.com>
11 */
12
13 #include <net/tcp.h>
14 #include <rdma/ib_verbs.h>
15
16 #include "smc.h"
17 #include "smc_core.h"
18 #include "smc_clc.h"
19 #include "smc_llc.h"
20 #include "smc_pnet.h"
21
#define SMC_LLC_DATA_LEN		40	/* payload length of a V1 LLC msg */

/* common header at the start of every LLC message */
struct smc_llc_hdr {
	struct smc_wr_rx_hdr common;	/* shared prefix used to demultiplex */
	union {
		struct {
			u8 length;	/* 44 */
#if defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:4,
			   add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
			u8 add_link_rej_rsn:4,	/* reject reason for ADD_LINK */
			   reserved:4;
#endif
		};
		u16 length_v2;	/* 44 - 8192*/
	};
	u8 flags;
} __packed;			/* format defined in
				 * IBM Shared Memory Communications Version 2
				 * (https://www.ibm.com/support/pages/node/6326337)
				 */
44
#define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03

/* CONFIRM_LINK message, exchanged to confirm a newly established link */
struct smc_llc_msg_confirm_link {	/* type 0x01 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];	/* MAC of the sending RoCE port */
	u8 sender_gid[SMC_GID_SIZE];	/* GID of the sending RoCE port */
	u8 sender_qp_num[3];		/* QP number, 24-bit big endian */
	u8 link_num;			/* link id within the link group */
	u8 link_uid[SMC_LGR_ID_SIZE];
	u8 max_links;			/* max links the sender supports */
	u8 reserved[9];
};
57
#define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
#define SMC_LLC_REJ_RSN_NO_ALT_PATH	1

#define SMC_LLC_ADD_LNK_MAX_LINKS	2

/* ADD_LINK message, requests/confirms setup of an additional link */
struct smc_llc_msg_add_link {		/* type 0x02 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];	/* MAC of the new link's RoCE port */
	u8 reserved2[2];
	u8 sender_gid[SMC_GID_SIZE];	/* GID of the new link's RoCE port */
	u8 sender_qp_num[3];		/* QP number, 24-bit big endian */
	u8 link_num;			/* id assigned to the new link */
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved3 : 4,
	   qp_mtu   : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 qp_mtu   : 4,		/* path MTU (resp: min of both sides) */
	   reserved3 : 4;
#endif
	u8 initial_psn[3];		/* initial packet sequence number */
	u8 reserved[8];
};
80
/* one rkey/vaddr triple exchanged during add-link rkey transfer */
struct smc_llc_msg_add_link_cont_rt {
	__be32 rmb_key;		/* rkey of the RMB on the existing link */
	__be32 rmb_key_new;	/* rkey of the same RMB on the new link */
	__be64 rmb_vaddr_new;	/* address of the RMB as seen by the new link */
};
86
/* SMCv2 extension appended to an ADD_LINK message */
struct smc_llc_msg_add_link_v2_ext {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 v2_direct : 1,
	   reserved  : 7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 reserved  : 7,
	   v2_direct : 1;	/* direct connectivity, no gateway in path */
#endif
	u8 reserved2;
	u8 client_target_gid[SMC_GID_SIZE];
	u8 reserved3[8];
	u16 num_rkeys;		/* number of entries in rt[] */
	struct smc_llc_msg_add_link_cont_rt rt[];	/* one per RMB */
} __packed;		/* format defined in
			 * IBM Shared Memory Communications Version 2
			 * (https://www.ibm.com/support/pages/node/6326337)
			 */
104
/* SMCv2 REQUEST_ADD_LINK message, carries the sender's candidate GIDs */
struct smc_llc_msg_req_add_link_v2 {
	struct smc_llc_hdr hd;
	u8 reserved[20];
	u8 gid_cnt;		/* number of entries in gid[] */
	u8 reserved2[3];
	u8 gid[][SMC_GID_SIZE];	/* flexible array of candidate GIDs */
};
112
#define SMC_LLC_RKEYS_PER_CONT_MSG	2

/* ADD_LINK_CONTINUE message, transfers rkeys in chunks of two */
struct smc_llc_msg_add_link_cont {	/* type 0x03 */
	struct smc_llc_hdr hd;
	u8 link_num;		/* id of the link being added */
	u8 num_rkeys;		/* rkeys remaining incl. this msg */
	u8 reserved2[2];
	struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
	u8 reserved[4];
} __packed;			/* format defined in RFC7609 */
123
#define SMC_LLC_FLAG_DEL_LINK_ALL	0x40	/* delete the whole link group */
#define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20

/* DELETE_LINK message, removes one link or the entire link group */
struct smc_llc_msg_del_link {		/* type 0x04 */
	struct smc_llc_hdr hd;
	u8 link_num;		/* id of link to delete; 0 with ALL flag */
	__be32 reason;		/* SMC_LLC_DEL_* reason code */
	u8 reserved[35];
} __packed;			/* format defined in RFC7609 */
133
/* TEST_LINK message, keepalive probe echoed back by the peer */
struct smc_llc_msg_test_link {		/* type 0x07 */
	struct smc_llc_hdr hd;
	u8 user_data[16];	/* opaque data returned in the response */
	u8 reserved[24];
};
139
/* one remote token (rkey + vaddr) as carried in CONFIRM_RKEY messages */
struct smc_rmb_rtoken {
	union {
		u8 num_rkeys;	/* first rtoken byte of CONFIRM LINK msg */
				/* is actually the num of rtokens, first */
				/* rtoken is always for the current link */
		u8 link_id;	/* link id of the rtoken */
	};
	__be32 rmb_key;		/* rkey of the RMB on that link */
	__be64 rmb_vaddr;	/* address of the RMB on that link */
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_RKEYS_PER_MSG		3	/* rtokens per V1 msg */
#define SMC_LLC_RKEYS_PER_MSG_V2	255	/* rtokens per V2 msg */
153
/* CONFIRM_RKEY message, announces the rkeys of one RMB on all links */
struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
	struct smc_llc_hdr hd;
	/* rtoken[0] holds the rkey count and the sending link's token */
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
	u8 reserved;
};
159
160 #define SMC_LLC_DEL_RKEY_MAX 8
161 #define SMC_LLC_FLAG_RKEY_RETRY 0x10
162 #define SMC_LLC_FLAG_RKEY_NEG 0x20
163
164 struct smc_llc_msg_delete_rkey { /* type 0x09 */
165 struct smc_llc_hdr hd;
166 u8 num_rkeys;
167 u8 err_mask;
168 u8 reserved[2];
169 __be32 rkey[8];
170 u8 reserved2[4];
171 };
172
/* SMCv2 DELETE_RKEY message with a flexible rkey array */
struct smc_llc_msg_delete_rkey_v2 {	/* type 0x29 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;		/* number of entries in rkey[] */
	u8 num_inval_rkeys;	/* response: number of invalid rkeys */
	u8 reserved[2];
	__be32 rkey[];
};
180
/* view of any LLC message; raw gives untyped access to header + payload */
union smc_llc_msg {
	struct smc_llc_msg_confirm_link confirm_link;
	struct smc_llc_msg_add_link add_link;
	struct smc_llc_msg_req_add_link_v2 req_add_link;
	struct smc_llc_msg_add_link_cont add_link_cont;
	struct smc_llc_msg_del_link delete_link;

	struct smc_llc_msg_confirm_rkey confirm_rkey;
	struct smc_llc_msg_delete_rkey delete_rkey;

	struct smc_llc_msg_test_link test_link;
	struct {
		struct smc_llc_hdr hdr;
		u8 data[SMC_LLC_DATA_LEN];
	} raw;
};

#define SMC_LLC_FLAG_RESP		0x80	/* msg is a response */
199
/* queued copy of a received LLC message plus its receiving link */
struct smc_llc_qentry {
	struct list_head list;	/* entry in lgr->llc_event_q */
	struct smc_link *link;	/* link the msg was received on */
	union smc_llc_msg msg;	/* copy of the received msg */
};

/* forward declaration; defined later in this file */
static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc);
207
smc_llc_flow_qentry_clr(struct smc_llc_flow * flow)208 struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
209 {
210 struct smc_llc_qentry *qentry = flow->qentry;
211
212 flow->qentry = NULL;
213 return qentry;
214 }
215
smc_llc_flow_qentry_del(struct smc_llc_flow * flow)216 void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
217 {
218 struct smc_llc_qentry *qentry;
219
220 if (flow->qentry) {
221 qentry = flow->qentry;
222 flow->qentry = NULL;
223 kfree(qentry);
224 }
225 }
226
/* pin a queue entry to an llc flow */
static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
					   struct smc_llc_qentry *qentry)
{
	flow->qentry = qentry;
}
232
/* handle an llc msg that arrived while a different flow is active:
 * either remember it as the single delayed event or drop it
 */
static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
				  struct smc_llc_qentry *qentry)
{
	u8 msg_type = qentry->msg.raw.hdr.common.llc_type;

	/* an ADD_LINK or DELETE_LINK request for another flow type is kept
	 * as the delayed event; smc_llc_flow_stop() reschedules the event
	 * work so it gets processed once the current flow finishes
	 */
	if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
	    flow_type != msg_type && !lgr->delayed_event) {
		lgr->delayed_event = qentry;
		return;
	}
	/* drop parallel or already-in-progress llc requests */
	if (flow_type != msg_type)
		pr_warn_once("smc: SMC-R lg %*phN net %llu dropped parallel "
			     "LLC msg: msg %d flow %d role %d\n",
			     SMC_LGR_ID_SIZE, &lgr->id,
			     lgr->net->net_cookie,
			     qentry->msg.raw.hdr.common.type,
			     flow_type, lgr->role);
	kfree(qentry);
}
253
254 /* try to start a new llc flow, initiated by an incoming llc msg */
/* try to start a new llc flow, initiated by an incoming llc msg;
 * returns true if the flow was started and the msg pinned to it
 */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
			       struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = qentry->link->lgr;

	spin_lock_bh(&lgr->llc_flow_lock);
	if (flow->type) {
		/* a flow is already active; delay or drop the new msg */
		smc_llc_flow_parallel(lgr, flow->type, qentry);
		spin_unlock_bh(&lgr->llc_flow_lock);
		return false;
	}
	/* derive the flow type from the triggering message type */
	switch (qentry->msg.raw.hdr.common.llc_type) {
	case SMC_LLC_ADD_LINK:
		flow->type = SMC_LLC_FLOW_ADD_LINK;
		break;
	case SMC_LLC_DELETE_LINK:
		flow->type = SMC_LLC_FLOW_DEL_LINK;
		break;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		flow->type = SMC_LLC_FLOW_RKEY;
		break;
	default:
		flow->type = SMC_LLC_FLOW_NONE;
	}
	/* pin the triggering msg to the new flow */
	smc_llc_flow_qentry_set(flow, qentry);
	spin_unlock_bh(&lgr->llc_flow_lock);
	return true;
}
285
286 /* start a new local llc flow, wait till current flow finished */
/* start a new local llc flow, wait till current flow finished.
 * Returns 0 on success, -ENODEV if the link group is terminating,
 * -ETIMEDOUT if no flow slot became free in time.
 */
int smc_llc_flow_initiate(struct smc_link_group *lgr,
			  enum smc_llc_flowtype type)
{
	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
	int rc;

	/* all flows except confirm_rkey and delete_rkey are exclusive,
	 * confirm/delete rkey flows can run concurrently (local and remote)
	 */
	if (type == SMC_LLC_FLOW_RKEY)
		allowed_remote = SMC_LLC_FLOW_RKEY;
again:
	/* lgr no longer being on the global list means it is terminating */
	if (list_empty(&lgr->list))
		return -ENODEV;
	spin_lock_bh(&lgr->llc_flow_lock);
	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
	     lgr->llc_flow_rmt.type == allowed_remote)) {
		lgr->llc_flow_lcl.type = type;
		spin_unlock_bh(&lgr->llc_flow_lock);
		return 0;
	}
	spin_unlock_bh(&lgr->llc_flow_lock);
	/* wait for active flows to finish; the condition is re-checked
	 * under the lock in the next loop iteration
	 */
	rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
				 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
				  lgr->llc_flow_rmt.type == allowed_remote))),
				SMC_LLC_WAIT_TIME * 10);
	if (!rc)
		return -ETIMEDOUT;
	goto again;
}
319
320 /* finish the current llc flow */
/* finish the current llc flow */
void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
{
	spin_lock_bh(&lgr->llc_flow_lock);
	memset(flow, 0, sizeof(*flow));
	flow->type = SMC_LLC_FLOW_NONE;
	spin_unlock_bh(&lgr->llc_flow_lock);
	/* if an event was delayed while the local flow ran, process it now;
	 * otherwise wake waiters blocked in smc_llc_flow_initiate()
	 */
	if (!list_empty(&lgr->list) && lgr->delayed_event &&
	    flow == &lgr->llc_flow_lcl)
		schedule_work(&lgr->llc_event_work);
	else
		wake_up(&lgr->llc_flow_waiter);
}
333
334 /* lnk is optional and used for early wakeup when link goes down, useful in
335 * cases where we wait for a response on the link after we sent a request
336 */
/* lnk is optional and used for early wakeup when link goes down, useful in
 * cases where we wait for a response on the link after we sent a request.
 * Returns the qentry pinned to the local flow, or NULL if nothing usable
 * arrived within time_out.
 */
struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
				    struct smc_link *lnk,
				    int time_out, u8 exp_msg)
{
	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
	u8 rcv_msg;

	/* woken by an arriving msg, a dying link, lgr termination, or
	 * timeout
	 */
	wait_event_timeout(lgr->llc_msg_waiter,
			   (flow->qentry ||
			    (lnk && !smc_link_usable(lnk)) ||
			    list_empty(&lgr->list)),
			   time_out);
	if (!flow->qentry ||
	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
		/* nothing arrived, or link/lgr went away meanwhile */
		smc_llc_flow_qentry_del(flow);
		goto out;
	}
	rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type;
	if (exp_msg && rcv_msg != exp_msg) {
		if (exp_msg == SMC_LLC_ADD_LINK &&
		    rcv_msg == SMC_LLC_DELETE_LINK) {
			/* flow_start will delay the unexpected msg */
			smc_llc_flow_start(&lgr->llc_flow_lcl,
					   smc_llc_flow_qentry_clr(flow));
			return NULL;
		}
		pr_warn_once("smc: SMC-R lg %*phN net %llu dropped unexpected LLC msg: "
			     "msg %d exp %d flow %d role %d flags %x\n",
			     SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie,
			     rcv_msg, exp_msg,
			     flow->type, lgr->role,
			     flow->qentry->msg.raw.hdr.flags);
		smc_llc_flow_qentry_del(flow);
	}
out:
	return flow->qentry;
}
374
375 /********************************** send *************************************/
376
/* LLC sends currently need no private per-WR context; the empty struct
 * merely satisfies the smc_wr pending-tx interface
 */
struct smc_llc_tx_pend {
};
379
380 /* handler for send/transmission completion of an LLC msg */
/* handler for send/transmission completion of an LLC msg */
static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	/* future work: handle wc_status error for recovery and failover */
}
387
388 /**
389 * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
390 * @link: Pointer to SMC link used for sending LLC control message.
391 * @wr_buf: Out variable returning pointer to work request payload buffer.
392 * @pend: Out variable returning pointer to private pending WR tracking.
393 * It's the context the transmit complete handler will get.
394 *
395 * Reserves and pre-fills an entry for a pending work request send/tx.
396 * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx.
 * Can sleep due to smc_wr_tx_get_free_slot (if not in softirq context).
398 *
399 * Return: 0 on success, otherwise an error value.
400 */
static int smc_llc_add_pending_send(struct smc_link *link,
				    struct smc_wr_buf **wr_buf,
				    struct smc_wr_tx_pend_priv **pend)
{
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
				     pend);
	if (rc < 0)
		return rc;
	/* compile-time sanity checks: LLC msgs must fit the smc_wr buffers */
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)");
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
	return 0;
}
422
smc_llc_add_pending_send_v2(struct smc_link * link,struct smc_wr_v2_buf ** wr_buf,struct smc_wr_tx_pend_priv ** pend)423 static int smc_llc_add_pending_send_v2(struct smc_link *link,
424 struct smc_wr_v2_buf **wr_buf,
425 struct smc_wr_tx_pend_priv **pend)
426 {
427 int rc;
428
429 rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend);
430 if (rc < 0)
431 return rc;
432 return 0;
433 }
434
/* fill version and length fields of an LLC header; SMCv2 link groups use
 * the 16-bit length_v2 layout, everything else the 8-bit V1 layout
 */
static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr,
				 struct smc_link_group *lgr, size_t len)
{
	if (lgr->smc_version != SMC_V2) {
		hdr->common.llc_version = 0;
		hdr->length = len;
		return;
	}
	hdr->common.llc_version = SMC_V2;
	hdr->length_v2 = len;
}
446
447 /* high-level API to send LLC confirm link */
/* high-level API to send LLC confirm link.
 * @link:    link to confirm and to send on
 * @reqresp: whether to send a request or a response
 * Returns 0 on success or a negative error code.
 */
int smc_llc_send_confirm_link(struct smc_link *link,
			      enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_confirm_link *confllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
	memset(confllc, 0, sizeof(*confllc));
	confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK;
	smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc));
	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
	if (reqresp == SMC_LLC_RESP)
		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
	/* announce this side's addressing info for the link */
	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
	       ETH_ALEN);
	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
	confllc->link_num = link->link_id;
	memcpy(confllc->link_uid, link->link_uid, SMC_LGR_ID_SIZE);
	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
481
482 /* send LLC confirm rkey request */
/* send LLC confirm rkey request: announce the rtokens of one RMB for all
 * active links of the link group; send_link's token goes into rtoken[0]
 */
static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
				     struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_confirm_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_link *link;
	int i, rc, rtok_ix;

	if (!smc_wr_tx_link_hold(send_link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY;
	smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc));

	/* collect the rtokens of all other active links starting at [1] */
	rtok_ix = 1;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		link = &send_link->lgr->lnk[i];
		if (smc_link_active(link) && link != send_link) {
			rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
			rkeyllc->rtoken[rtok_ix].rmb_key =
				htonl(rmb_desc->mr[link->link_idx]->rkey);
			/* vm-mapped RMBs use the CPU address, otherwise the
			 * DMA address of the first sg entry
			 */
			rkeyllc->rtoken[rtok_ix].rmb_vaddr = rmb_desc->is_vm ?
				cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) :
				cpu_to_be64((u64)sg_dma_address
					    (rmb_desc->sgt[link->link_idx].sgl));
			rtok_ix++;
		}
	}
	/* rkey of send_link is in rtoken[0] */
	rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
	rkeyllc->rtoken[0].rmb_key =
		htonl(rmb_desc->mr[send_link->link_idx]->rkey);
	rkeyllc->rtoken[0].rmb_vaddr = rmb_desc->is_vm ?
		cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) :
		cpu_to_be64((u64)sg_dma_address
			    (rmb_desc->sgt[send_link->link_idx].sgl));
	/* send llc message */
	rc = smc_wr_tx_send(send_link, pend);
put_out:
	smc_wr_tx_link_put(send_link);
	return rc;
}
530
531 /* send LLC delete rkey request */
smc_llc_send_delete_rkey(struct smc_link * link,struct smc_buf_desc * rmb_desc)532 static int smc_llc_send_delete_rkey(struct smc_link *link,
533 struct smc_buf_desc *rmb_desc)
534 {
535 struct smc_llc_msg_delete_rkey *rkeyllc;
536 struct smc_wr_tx_pend_priv *pend;
537 struct smc_wr_buf *wr_buf;
538 int rc;
539
540 if (!smc_wr_tx_link_hold(link))
541 return -ENOLINK;
542 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
543 if (rc)
544 goto put_out;
545 rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
546 memset(rkeyllc, 0, sizeof(*rkeyllc));
547 rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY;
548 smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc));
549 rkeyllc->num_rkeys = 1;
550 rkeyllc->rkey[0] = htonl(rmb_desc->mr[link->link_idx]->rkey);
551 /* send llc message */
552 rc = smc_wr_tx_send(link, pend);
553 put_out:
554 smc_wr_tx_link_put(link);
555 return rc;
556 }
557
558 /* return first buffer from any of the next buf lists */
_smc_llc_get_next_rmb(struct smc_link_group * lgr,int * buf_lst)559 static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
560 int *buf_lst)
561 {
562 struct smc_buf_desc *buf_pos;
563
564 while (*buf_lst < SMC_RMBE_SIZES) {
565 buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
566 struct smc_buf_desc, list);
567 if (buf_pos)
568 return buf_pos;
569 (*buf_lst)++;
570 }
571 return NULL;
572 }
573
574 /* return next rmb from buffer lists */
smc_llc_get_next_rmb(struct smc_link_group * lgr,int * buf_lst,struct smc_buf_desc * buf_pos)575 static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
576 int *buf_lst,
577 struct smc_buf_desc *buf_pos)
578 {
579 struct smc_buf_desc *buf_next;
580
581 if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
582 (*buf_lst)++;
583 return _smc_llc_get_next_rmb(lgr, buf_lst);
584 }
585 buf_next = list_next_entry(buf_pos, list);
586 return buf_next;
587 }
588
/* reset the list cursor and return the very first rmb of the link group */
static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
						  int *buf_lst)
{
	*buf_lst = 0;
	return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
}
595
smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext * ext,struct smc_link * link,struct smc_link * link_new)596 static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
597 struct smc_link *link, struct smc_link *link_new)
598 {
599 struct smc_link_group *lgr = link->lgr;
600 struct smc_buf_desc *buf_pos;
601 int prim_lnk_idx, lnk_idx, i;
602 struct smc_buf_desc *rmb;
603 int len = sizeof(*ext);
604 int buf_lst;
605
606 ext->v2_direct = !lgr->uses_gateway;
607 memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE);
608
609 prim_lnk_idx = link->link_idx;
610 lnk_idx = link_new->link_idx;
611 mutex_lock(&lgr->rmbs_lock);
612 ext->num_rkeys = lgr->conns_num;
613 if (!ext->num_rkeys)
614 goto out;
615 buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
616 for (i = 0; i < ext->num_rkeys; i++) {
617 if (!buf_pos)
618 break;
619 rmb = buf_pos;
620 ext->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);
621 ext->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);
622 ext->rt[i].rmb_vaddr_new = rmb->is_vm ?
623 cpu_to_be64((uintptr_t)rmb->cpu_addr) :
624 cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
625 buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
626 while (buf_pos && !(buf_pos)->used)
627 buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
628 }
629 len += i * sizeof(ext->rt[0]);
630 out:
631 mutex_unlock(&lgr->rmbs_lock);
632 return len;
633 }
634
635 /* send ADD LINK request or response */
/* send ADD LINK request or response.
 * @link:     established link to send on
 * @mac:      MAC to announce for the new link
 * @gid:      GID to announce for the new link
 * @link_new: the link being added (may be NULL before it exists)
 * @reqresp:  request or response
 * SMCv2 link groups append the v2 extension and use the v2 send path.
 */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
			  struct smc_link *link_new,
			  enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_add_link_v2_ext *ext = NULL;
	struct smc_llc_msg_add_link *addllc;
	struct smc_wr_tx_pend_priv *pend;
	int len = sizeof(*addllc);
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	if (link->lgr->smc_version == SMC_V2) {
		struct smc_wr_v2_buf *wr_buf;

		rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
		/* the v2 extension starts right after the base msg */
		ext = (struct smc_llc_msg_add_link_v2_ext *)
						&wr_buf->raw[sizeof(*addllc)];
		memset(ext, 0, SMC_WR_TX_SIZE);
	} else {
		struct smc_wr_buf *wr_buf;

		rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
	}

	memset(addllc, 0, sizeof(*addllc));
	addllc->hd.common.llc_type = SMC_LLC_ADD_LINK;
	if (reqresp == SMC_LLC_RESP)
		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(addllc->sender_mac, mac, ETH_ALEN);
	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
	if (link_new) {
		addllc->link_num = link_new->link_id;
		hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
		hton24(addllc->initial_psn, link_new->psn_initial);
		/* request offers our mtu, response the negotiated minimum */
		if (reqresp == SMC_LLC_REQ)
			addllc->qp_mtu = link_new->path_mtu;
		else
			addllc->qp_mtu = min(link_new->path_mtu,
					     link_new->peer_mtu);
	}
	if (ext && link_new)
		len += smc_llc_fill_ext_v2(ext, link, link_new);
	smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len);
	/* send llc message */
	if (link->lgr->smc_version == SMC_V2)
		rc = smc_wr_tx_v2_send(link, pend, len);
	else
		rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
695
696 /* send DELETE LINK request or response */
/* send DELETE LINK request or response.
 * @link:        link to send the msg on
 * @link_del_id: id of the link to delete; 0 deletes the whole link group
 * @reqresp:     request or response
 * @orderly:     orderly (graceful) link deletion
 * @reason:      SMC_LLC_DEL_* reason code reported to the peer
 */
int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
			     enum smc_llc_reqresp reqresp, bool orderly,
			     u32 reason)
{
	struct smc_llc_msg_del_link *delllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	delllc = (struct smc_llc_msg_del_link *)wr_buf;

	memset(delllc, 0, sizeof(*delllc));
	delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK;
	smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc));
	if (reqresp == SMC_LLC_RESP)
		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (orderly)
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	/* link id 0 means: delete all links of the group */
	if (link_del_id)
		delllc->link_num = link_del_id;
	else
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	delllc->reason = htonl(reason);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
731
732 /* send LLC test link request */
/* send LLC test link request */
static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
{
	struct smc_llc_msg_test_link *msg;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	msg = (struct smc_llc_msg_test_link *)wr_buf;
	memset(msg, 0, sizeof(*msg));
	msg->hd.common.llc_type = SMC_LLC_TEST_LINK;
	smc_llc_init_msg_hdr(&msg->hd, link->lgr, sizeof(*msg));
	/* data the peer is expected to echo back in its response */
	memcpy(msg->user_data, user_data, sizeof(msg->user_data));
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
756
757 /* schedule an llc send on link, may wait for buffers */
smc_llc_send_message(struct smc_link * link,void * llcbuf)758 static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
759 {
760 struct smc_wr_tx_pend_priv *pend;
761 struct smc_wr_buf *wr_buf;
762 int rc;
763
764 if (!smc_wr_tx_link_hold(link))
765 return -ENOLINK;
766 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
767 if (rc)
768 goto put_out;
769 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
770 rc = smc_wr_tx_send(link, pend);
771 put_out:
772 smc_wr_tx_link_put(link);
773 return rc;
774 }
775
776 /* schedule an llc send on link, may wait for buffers,
777 * and wait for send completion notification.
778 * @return 0 on success
779 */
smc_llc_send_message_wait(struct smc_link * link,void * llcbuf)780 static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
781 {
782 struct smc_wr_tx_pend_priv *pend;
783 struct smc_wr_buf *wr_buf;
784 int rc;
785
786 if (!smc_wr_tx_link_hold(link))
787 return -ENOLINK;
788 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
789 if (rc)
790 goto put_out;
791 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
792 rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
793 put_out:
794 smc_wr_tx_link_put(link);
795 return rc;
796 }
797
798 /********************************* receive ***********************************/
799
smc_llc_alloc_alt_link(struct smc_link_group * lgr,enum smc_lgr_type lgr_new_t)800 static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
801 enum smc_lgr_type lgr_new_t)
802 {
803 int i;
804
805 if (lgr->type == SMC_LGR_SYMMETRIC ||
806 (lgr->type != SMC_LGR_SINGLE &&
807 (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
808 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
809 return -EMLINK;
810
811 if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
812 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
813 for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
814 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
815 return i;
816 } else {
817 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
818 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
819 return i;
820 }
821 return -EMLINK;
822 }
823
824 /* send one add_link_continue msg */
/* send one add_link_continue msg carrying up to two rkey/vaddr triples;
 * advances *buf_pos and decrements *num_rkeys_todo accordingly
 */
static int smc_llc_add_link_cont(struct smc_link *link,
				 struct smc_link *link_new, u8 *num_rkeys_todo,
				 int *buf_lst, struct smc_buf_desc **buf_pos)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	int prim_lnk_idx, lnk_idx, i, rc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_buf_desc *rmb;
	u8 n;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
	memset(addc_llc, 0, sizeof(*addc_llc));

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	addc_llc->link_num = link_new->link_id;
	addc_llc->num_rkeys = *num_rkeys_todo;
	n = *num_rkeys_todo;
	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
		if (!*buf_pos) {
			/* ran out of buffers: report only what was filled */
			addc_llc->num_rkeys = addc_llc->num_rkeys -
					      *num_rkeys_todo;
			*num_rkeys_todo = 0;
			break;
		}
		rmb = *buf_pos;

		addc_llc->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);
		addc_llc->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);
		addc_llc->rt[i].rmb_vaddr_new = rmb->is_vm ?
			cpu_to_be64((uintptr_t)rmb->cpu_addr) :
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));

		(*num_rkeys_todo)--;
		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
		/* skip buffers that are not in use */
		while (*buf_pos && !(*buf_pos)->used)
			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
	}
	addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
	if (lgr->role == SMC_CLNT)
		addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
879
smc_llc_cli_rkey_exchange(struct smc_link * link,struct smc_link * link_new)880 static int smc_llc_cli_rkey_exchange(struct smc_link *link,
881 struct smc_link *link_new)
882 {
883 struct smc_llc_msg_add_link_cont *addc_llc;
884 struct smc_link_group *lgr = link->lgr;
885 u8 max, num_rkeys_send, num_rkeys_recv;
886 struct smc_llc_qentry *qentry;
887 struct smc_buf_desc *buf_pos;
888 int buf_lst;
889 int rc = 0;
890 int i;
891
892 mutex_lock(&lgr->rmbs_lock);
893 num_rkeys_send = lgr->conns_num;
894 buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
895 do {
896 qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
897 SMC_LLC_ADD_LINK_CONT);
898 if (!qentry) {
899 rc = -ETIMEDOUT;
900 break;
901 }
902 addc_llc = &qentry->msg.add_link_cont;
903 num_rkeys_recv = addc_llc->num_rkeys;
904 max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
905 for (i = 0; i < max; i++) {
906 smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
907 addc_llc->rt[i].rmb_key,
908 addc_llc->rt[i].rmb_vaddr_new,
909 addc_llc->rt[i].rmb_key_new);
910 num_rkeys_recv--;
911 }
912 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
913 rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
914 &buf_lst, &buf_pos);
915 if (rc)
916 break;
917 } while (num_rkeys_send || num_rkeys_recv);
918
919 mutex_unlock(&lgr->rmbs_lock);
920 return rc;
921 }
922
923 /* prepare and send an add link reject response */
smc_llc_cli_add_link_reject(struct smc_llc_qentry * qentry)924 static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
925 {
926 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
927 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
928 qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
929 smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
930 sizeof(qentry->msg));
931 return smc_llc_send_message(qentry->link, &qentry->msg);
932 }
933
/* as an SMC client, finish the setup of a new link: wait for the server's
 * CONFIRM_LINK request, move the new QP to RTS, register the link group's
 * buffers on the new link, answer with a CONFIRM_LINK response and
 * activate the link.
 * Returns 0 on success, -ENOLINK on any failure; every failure path asks
 * the peer (via the existing link) to delete the half-set-up link.
 * Note: the ini parameter is not used in this function.
 */
static int smc_llc_cli_conf_link(struct smc_link *link,
				 struct smc_init_info *ini,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc = 0;

	/* receive CONFIRM LINK request over RoCE fabric */
	qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry) {
		/* timed out waiting - tear the new link down again */
		rc = smc_llc_send_delete_link(link, link_new->link_id,
					      SMC_LLC_REQ, false,
					      SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
		/* received DELETE_LINK instead */
		qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, &qentry->msg);
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_modify_qp_rts(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_wr_remember_qp_attr(link_new);

	/* register all link group buffers with the new link */
	rc = smcr_buf_reg_lgr(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_llc_link_active(link_new);
	/* reflect the new link constellation in the link group type */
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	return 0;
}
991
smc_llc_save_add_link_rkeys(struct smc_link * link,struct smc_link * link_new)992 static void smc_llc_save_add_link_rkeys(struct smc_link *link,
993 struct smc_link *link_new)
994 {
995 struct smc_llc_msg_add_link_v2_ext *ext;
996 struct smc_link_group *lgr = link->lgr;
997 int max, i;
998
999 ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
1000 SMC_WR_TX_SIZE);
1001 max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
1002 mutex_lock(&lgr->rmbs_lock);
1003 for (i = 0; i < max; i++) {
1004 smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
1005 ext->rt[i].rmb_key,
1006 ext->rt[i].rmb_vaddr_new,
1007 ext->rt[i].rmb_key_new);
1008 }
1009 mutex_unlock(&lgr->rmbs_lock);
1010 }
1011
smc_llc_save_add_link_info(struct smc_link * link,struct smc_llc_msg_add_link * add_llc)1012 static void smc_llc_save_add_link_info(struct smc_link *link,
1013 struct smc_llc_msg_add_link *add_llc)
1014 {
1015 link->peer_qpn = ntoh24(add_llc->sender_qp_num);
1016 memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
1017 memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
1018 link->peer_psn = ntoh24(add_llc->initial_psn);
1019 link->peer_mtu = add_llc->qp_mtu;
1020 }
1021
/* as an SMC client, process an add link request: find an (alternate) RoCE
 * device, initialize the new link, answer the server's ADD_LINK, exchange
 * rkeys and confirm the link. On failure the request is rejected with
 * reason "no alternate path".
 * Consumes qentry and frees it on all paths.
 */
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
{
	struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info *ini = NULL;
	struct smc_link *lnk_new = NULL;
	int lnk_idx, rc = 0;

	if (!llc->qp_mtu)
		goto out_reject;	/* malformed request */

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out_reject;
	}

	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid);
	}
	/* look for an alternate RoCE device for the second path */
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	    (lgr->smc_version == SMC_V2 ||
	     !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) {
		/* peer offers its existing GID (and MAC) again, i.e. the
		 * constellation is asymmetric on the peer side; we must
		 * provide an alternate device locally
		 */
		if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2)
			goto out_reject;
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		/* no alternate device found - reuse the current one */
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0)
		goto out_reject;	/* no free link slot */
	lnk_new = &lgr->lnk[lnk_idx];
	rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini);
	if (rc)
		goto out_reject;
	smc_llc_save_add_link_info(lnk_new, llc);
	lnk_new->link_id = llc->link_num;	/* SMC server assigns link id */
	smc_llc_link_set_uid(lnk_new);

	rc = smc_ib_ready_link(lnk_new);
	if (rc)
		goto out_clear_lnk;

	rc = smcr_buf_map_lgr(lnk_new);
	if (rc)
		goto out_clear_lnk;

	/* send the ADD_LINK response on the existing link */
	rc = smc_llc_send_add_link(link,
				   lnk_new->smcibdev->mac[lnk_new->ibport - 1],
				   lnk_new->gid, lnk_new, SMC_LLC_RESP);
	if (rc)
		goto out_clear_lnk;
	if (lgr->smc_version == SMC_V2) {
		/* V2: the rkeys came along in the ADD_LINK extension */
		smc_llc_save_add_link_rkeys(link, lnk_new);
	} else {
		/* V1: exchange rkeys via ADD_LINK_CONT messages */
		rc = smc_llc_cli_rkey_exchange(link, lnk_new);
		if (rc) {
			rc = 0;
			goto out_clear_lnk;
		}
	}
	rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t);
	if (!rc)
		goto out;
out_clear_lnk:
	lnk_new->state = SMC_LNK_INACTIVE;
	smcr_link_clear(lnk_new, false);
out_reject:
	smc_llc_cli_add_link_reject(qentry);
out:
	kfree(ini);
	kfree(qentry);
	return rc;
}
1110
/* as an SMC-R V2 client, send a REQ_ADD_LINK message carrying our list of
 * alternate GIDs, inviting the server to start add_link processing.
 * Sent only while the link group is not yet symmetric and more than one
 * GID is available; on success the local flow type is set to
 * SMC_LLC_FLOW_REQ_ADD_LINK to await the peer's response.
 */
static void smc_llc_send_request_add_link(struct smc_link *link)
{
	struct smc_llc_msg_req_add_link_v2 *llc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_v2_buf *wr_buf;
	struct smc_gidlist gidlist;
	int rc, len, i;

	if (!smc_wr_tx_link_hold(link))
		return;	/* link no longer available for sending */
	if (link->lgr->type == SMC_LGR_SYMMETRIC ||
	    link->lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto put_out;	/* no additional link needed */

	smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid);
	if (gidlist.len <= 1)
		goto put_out;	/* no alternate path to offer */

	rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf;
	memset(llc, 0, SMC_WR_TX_SIZE);

	llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK;
	for (i = 0; i < gidlist.len; i++)
		memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0]));
	llc->gid_cnt = gidlist.len;
	/* message length includes the variable-size GID array */
	len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0]));
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, len);
	rc = smc_wr_tx_v2_send(link, pend, len);
	if (!rc)
		/* set REQ_ADD_LINK flow and wait for response from peer */
		link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK;
put_out:
	smc_wr_tx_link_put(link);
}
1148
/* as an SMC client, invite server to start the add_link processing.
 * For SMC V2 this is done with a REQ_ADD_LINK message; for V1 an
 * ADD_LINK request naming an alternate local RoCE device is sent.
 * Consumes and frees qentry.
 */
static void smc_llc_cli_add_link_invite(struct smc_link *link,
					struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info *ini = NULL;

	if (lgr->smc_version == SMC_V2) {
		/* V2 uses the dedicated REQ_ADD_LINK message instead */
		smc_llc_send_request_add_link(link);
		goto out;
	}

	if (lgr->type == SMC_LGR_SYMMETRIC ||
	    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto out;	/* no additional link needed */

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		goto out;

	ini->vlan_id = lgr->vlan_id;
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	if (!ini->ib_dev)
		goto out;	/* no alternate RoCE device available */

	smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1],
			      ini->ib_gid, NULL, SMC_LLC_REQ);
out:
	kfree(ini);
	kfree(qentry);
}
1180
smc_llc_is_empty_llc_message(union smc_llc_msg * llc)1181 static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
1182 {
1183 int i;
1184
1185 for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
1186 if (llc->raw.data[i])
1187 return false;
1188 return true;
1189 }
1190
smc_llc_is_local_add_link(union smc_llc_msg * llc)1191 static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
1192 {
1193 if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK &&
1194 smc_llc_is_empty_llc_message(llc))
1195 return true;
1196 return false;
1197 }
1198
smc_llc_process_cli_add_link(struct smc_link_group * lgr)1199 static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
1200 {
1201 struct smc_llc_qentry *qentry;
1202
1203 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1204
1205 mutex_lock(&lgr->llc_conf_mutex);
1206 if (smc_llc_is_local_add_link(&qentry->msg))
1207 smc_llc_cli_add_link_invite(qentry->link, qentry);
1208 else
1209 smc_llc_cli_add_link(qentry->link, qentry);
1210 mutex_unlock(&lgr->llc_conf_mutex);
1211 }
1212
smc_llc_active_link_count(struct smc_link_group * lgr)1213 static int smc_llc_active_link_count(struct smc_link_group *lgr)
1214 {
1215 int i, link_count = 0;
1216
1217 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1218 if (!smc_link_active(&lgr->lnk[i]))
1219 continue;
1220 link_count++;
1221 }
1222 return link_count;
1223 }
1224
/* find the asymmetric link when 3 links are established: first locate a
 * pair of local links sharing one GID, then pick from that pair the link
 * whose peer GID is also used by a third link.
 * Returns the asymmetric link, or NULL if there is none.
 */
static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr)
{
	int asym_idx = -ENOENT;
	int i, j, k;
	bool found;

	/* determine asymmetric link: two usable links with the same GID */
	found = false;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
			if (!smc_link_usable(&lgr->lnk[i]) ||
			    !smc_link_usable(&lgr->lnk[j]))
				continue;
			if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid,
				    SMC_GID_SIZE)) {
				found = true;	/* asym_lnk is i or j */
				break;
			}
		}
		if (found)
			break;
	}
	if (!found)
		goto out;	/* no asymmetric link */
	/* i and j still index the GID-sharing pair found above; the
	 * asymmetric one is the one whose peer GID a third link shares
	 */
	for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) {
		if (!smc_link_usable(&lgr->lnk[k]))
			continue;
		if (k != i &&
		    !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = i;
			break;
		}
		if (k != j &&
		    !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = j;
			break;
		}
	}
out:
	return (asym_idx < 0) ? NULL : &lgr->lnk[asym_idx];
}
1269
/* remove the asymmetric link of the link group: switch its connections
 * to another link, announce the deletion to the peer, wait for the
 * peer's DELETE_LINK response and clear the link
 */
static void smc_llc_delete_asym_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_new = NULL, *lnk_asym;
	struct smc_llc_qentry *qentry;
	int rc;

	lnk_asym = smc_llc_find_asym_link(lgr);
	if (!lnk_asym)
		return; /* no asymmetric link */
	if (!smc_link_downing(&lnk_asym->state))
		return;	/* link is already going down elsewhere */
	lnk_new = smc_switch_conns(lgr, lnk_asym, false);
	smc_wr_tx_wait_no_pending_sends(lnk_asym);
	if (!lnk_new)
		goto out_free;	/* no link left to move connections to */
	/* change flow type from ADD_LINK into DEL_LINK */
	lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK;
	rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ,
				      true, SMC_LLC_DEL_NO_ASYM_NEEDED);
	if (rc) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	/* wait for the peer's DELETE_LINK response */
	qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_LINK);
	if (!qentry) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
out_free:
	smcr_link_clear(lnk_asym, true);
}
1303
/* as the SMC server, send our rkeys for the new link in ADD_LINK_CONT
 * messages and store the rkeys received in the client's ADD_LINK_CONT
 * responses; loop until both directions are done.
 * Returns 0 on success, -ETIMEDOUT if a response does not arrive.
 */
static int smc_llc_srv_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry = NULL;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	/* rmbs_lock keeps the buffer list stable during the exchange */
	mutex_lock(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;	/* one rkey per connection */
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
				      &buf_lst, &buf_pos);
		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			goto out;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	} while (num_rkeys_send || num_rkeys_recv);
out:
	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}
1344
/* as the SMC server, confirm a new link: send CONFIRM_LINK on the new
 * link, wait for the client's response, then activate the link and
 * update the link group type.
 * Returns 0 on success, -ENOLINK on failure (with a DELETE_LINK sent
 * for the new link where appropriate).
 */
static int smc_llc_srv_conf_link(struct smc_link *link,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc;

	/* send CONFIRM LINK request over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ);
	if (rc)
		return -ENOLINK;
	/* receive CONFIRM LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry ||
	    qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
		/* send DELETE LINK */
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		if (qentry)
			smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_link_active(link_new);
	/* reflect the new link constellation in the link group type */
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return 0;
}
1378
/* answer a REQ_ADD_LINK message: only the header with the response flag
 * is significant, the payload is zeroed out before sending
 */
static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry)
{
	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
			     sizeof(qentry->msg));
	memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data));
	smc_llc_send_message(qentry->link, &qentry->msg);
}
1387
/* as an SMC server, process an add link request or a client REQ_ADD_LINK
 * invitation: find an (alternate) RoCE device, initialize the new link,
 * send ADD_LINK to the client and run the rkey exchange and the link
 * confirmation.
 * Returns 0 on success (also when no alternate link slot is available),
 * a negative error code otherwise. req_qentry is not freed here.
 */
int smc_llc_srv_add_link(struct smc_link *link,
			 struct smc_llc_qentry *req_qentry)
{
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_msg_add_link *add_llc;
	struct smc_llc_qentry *qentry = NULL;
	bool send_req_add_link_resp = false;
	struct smc_link *link_new = NULL;
	struct smc_init_info *ini = NULL;
	int lnk_idx, rc = 0;

	/* a pending REQ_ADD_LINK must be answered explicitly if we fail
	 * before our own ADD_LINK request goes out
	 */
	if (req_qentry &&
	    req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK)
		send_req_add_link_resp = true;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out;
	}

	/* ignore client add link recommendation, start new flow */
	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		if (send_req_add_link_resp) {
			struct smc_llc_msg_req_add_link_v2 *req_add =
				&req_qentry->msg.req_add_link;

			/* use the first GID offered by the client */
			ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]);
		}
	}
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		/* no alternate device found - reuse the current one */
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0) {
		rc = 0;	/* no free link slot is not treated as an error */
		goto out;
	}

	rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini);
	if (rc)
		goto out;
	link_new = &lgr->lnk[lnk_idx];

	rc = smcr_buf_map_lgr(link_new);
	if (rc)
		goto out_err;

	rc = smc_llc_send_add_link(link,
				   link_new->smcibdev->mac[link_new->ibport-1],
				   link_new->gid, link_new, SMC_LLC_REQ);
	if (rc)
		goto out_err;
	/* the ADD_LINK just sent implicitly answers the REQ_ADD_LINK */
	send_req_add_link_resp = false;
	/* receive ADD LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
	if (!qentry) {
		rc = -ETIMEDOUT;
		goto out_err;
	}
	add_llc = &qentry->msg.add_link;
	if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
		/* client rejected the new link */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = -ENOLINK;
		goto out_err;
	}
	if (lgr->type == SMC_LGR_SINGLE &&
	    (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	     (lgr->smc_version == SMC_V2 ||
	      !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) {
		/* peer reuses its GID (and MAC): asymmetric on peer side */
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	smc_llc_save_add_link_info(link_new, add_llc);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_ready_link(link_new);
	if (rc)
		goto out_err;
	rc = smcr_buf_reg_lgr(link_new);
	if (rc)
		goto out_err;
	if (lgr->smc_version == SMC_V2) {
		/* V2: the rkeys came along in the ADD_LINK extension */
		smc_llc_save_add_link_rkeys(link, link_new);
	} else {
		/* V1: exchange rkeys via ADD_LINK_CONT messages */
		rc = smc_llc_srv_rkey_exchange(link, link_new);
		if (rc)
			goto out_err;
	}
	rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
	if (rc)
		goto out_err;
	kfree(ini);
	return 0;
out_err:
	if (link_new) {
		link_new->state = SMC_LNK_INACTIVE;
		smcr_link_clear(link_new, false);
	}
out:
	kfree(ini);
	if (send_req_add_link_resp)
		smc_llc_send_req_add_link_response(req_qentry);
	return rc;
}
1503
smc_llc_process_srv_add_link(struct smc_link_group * lgr)1504 static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
1505 {
1506 struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
1507 struct smc_llc_qentry *qentry;
1508 int rc;
1509
1510 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1511
1512 mutex_lock(&lgr->llc_conf_mutex);
1513 rc = smc_llc_srv_add_link(link, qentry);
1514 if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
1515 /* delete any asymmetric link */
1516 smc_llc_delete_asym_link(lgr);
1517 }
1518 mutex_unlock(&lgr->llc_conf_mutex);
1519 kfree(qentry);
1520 }
1521
1522 /* enqueue a local add_link req to trigger a new add_link flow */
smc_llc_add_link_local(struct smc_link * link)1523 void smc_llc_add_link_local(struct smc_link *link)
1524 {
1525 struct smc_llc_msg_add_link add_llc = {};
1526
1527 add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK;
1528 smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc));
1529 /* no dev and port needed */
1530 smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
1531 }
1532
/* worker to process an add link message */
static void smc_llc_add_link_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_add_link_work);

	if (list_empty(&lgr->list)) {
		/* link group is terminating */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		goto out;
	}

	if (lgr->role == SMC_CLNT)
		smc_llc_process_cli_add_link(lgr);
	else
		smc_llc_process_srv_add_link(lgr);
out:
	/* a REQ_ADD_LINK flow stays active until the peer's response
	 * arrives; all other flows are finished here
	 */
	if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK)
		smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}
1553
1554 /* enqueue a local del_link msg to trigger a new del_link flow,
1555 * called only for role SMC_SERV
1556 */
smc_llc_srv_delete_link_local(struct smc_link * link,u8 del_link_id)1557 void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
1558 {
1559 struct smc_llc_msg_del_link del_llc = {};
1560
1561 del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
1562 smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc));
1563 del_llc.link_num = del_link_id;
1564 del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
1565 del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1566 smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc);
1567 }
1568
/* as an SMC client, process a DELETE_LINK request: answer it, switch the
 * connections away from the named link and clear it, then adjust the
 * link group type or terminate the group if no active link remains
 */
static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_del = NULL, *lnk_asym, *lnk;
	struct smc_llc_msg_del_link *del_llc;
	struct smc_llc_qentry *qentry;
	int active_links;
	int lnk_idx;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* peer wants the whole link group gone */
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	mutex_lock(&lgr->llc_conf_mutex);
	/* delete single link */
	for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
		if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
			continue;
		lnk_del = &lgr->lnk[lnk_idx];
		break;
	}
	del_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (!lnk_del) {
		/* link was not found */
		del_llc->reason = htonl(SMC_LLC_DEL_NOLNK);
		smc_llc_send_message(lnk, &qentry->msg);
		goto out_unlock;
	}
	/* remember the asymmetric link before the deletion changes things */
	lnk_asym = smc_llc_find_asym_link(lgr);

	del_llc->reason = 0;
	smc_llc_send_message(lnk, &qentry->msg); /* response */

	if (smc_link_downing(&lnk_del->state))
		smc_switch_conns(lgr, lnk_del, false);
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (lnk_del == lnk_asym) {
		/* expected deletion of asym link, don't change lgr state */
	} else if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}
out_unlock:
	mutex_unlock(&lgr->llc_conf_mutex);
out:
	kfree(qentry);
}
1623
1624 /* try to send a DELETE LINK ALL request on any active link,
1625 * waiting for send completion
1626 */
smc_llc_send_link_delete_all(struct smc_link_group * lgr,bool ord,u32 rsn)1627 void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
1628 {
1629 struct smc_llc_msg_del_link delllc = {};
1630 int i;
1631
1632 delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
1633 smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc));
1634 if (ord)
1635 delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1636 delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
1637 delllc.reason = htonl(rsn);
1638
1639 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1640 if (!smc_link_sendable(&lgr->lnk[i]))
1641 continue;
1642 if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
1643 break;
1644 }
1645 }
1646
/* as the SMC server, process a DELETE_LINK request from the peer or a
 * locally enqueued delete trigger: switch connections off the link,
 * forward the DELETE_LINK to the client and wait for its response, then
 * clear the link and adjust the link group state
 */
static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
{
	struct smc_llc_msg_del_link *del_llc;
	struct smc_link *lnk, *lnk_del;
	struct smc_llc_qentry *qentry;
	int active_links;
	int i;

	mutex_lock(&lgr->llc_conf_mutex);
	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* delete entire lgr */
		smc_llc_send_link_delete_all(lgr, true, ntohl(
					      qentry->msg.delete_link.reason));
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	/* delete single link */
	lnk_del = NULL;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].link_id == del_llc->link_num) {
			lnk_del = &lgr->lnk[i];
			break;
		}
	}
	if (!lnk_del)
		goto out; /* asymmetric link already deleted */

	if (smc_link_downing(&lnk_del->state)) {
		if (smc_switch_conns(lgr, lnk_del, false))
			smc_wr_tx_wait_no_pending_sends(lnk_del);
	}
	if (!list_empty(&lgr->list)) {
		/* qentry is either a request from peer (send it back to
		 * initiate the DELETE_LINK processing), or a locally
		 * enqueued DELETE_LINK request (forward it)
		 */
		if (!smc_llc_send_message(lnk, &qentry->msg)) {
			struct smc_llc_qentry *qentry2;

			qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
					       SMC_LLC_DELETE_LINK);
			if (qentry2)
				smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		}
	}
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}

	if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
		/* trigger setup of asymm alt link */
		smc_llc_add_link_local(lnk);
	}
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	kfree(qentry);
}
1714
/* worker to process a delete link message */
static void smc_llc_delete_link_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_del_link_work);

	if (list_empty(&lgr->list)) {
		/* link group is terminating */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		goto out;
	}

	if (lgr->role == SMC_CLNT)
		smc_llc_process_cli_delete_link(lgr);
	else
		smc_llc_process_srv_delete_link(lgr);
out:
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}
1733
/* process a confirm_rkey request from peer, remote flow: add the rkey of
 * the receiving link to the rtoken table, record the rkeys the peer
 * reports for the other links, and send the response
 */
static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_confirm_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	int num_entries;
	int rk_idx;
	int i;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.confirm_rkey;
	link = qentry->link;

	num_entries = llc->rtoken[0].num_rkeys;
	if (num_entries > SMC_LLC_RKEYS_PER_MSG)
		goto out_err;	/* peer-supplied count out of range */
	/* first rkey entry is for receiving link */
	rk_idx = smc_rtoken_add(link,
				llc->rtoken[0].rmb_vaddr,
				llc->rtoken[0].rmb_key);
	if (rk_idx < 0)
		goto out_err;

	/* remaining entries refer to the other links by link_id */
	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
				llc->rtoken[i].rmb_vaddr,
				llc->rtoken[i].rmb_key);
	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
	goto out;
out_err:
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
1773
/* process a delete_rkey request from peer, remote flow: remove the named
 * rkeys from the rtoken table and report invalid ones back to the peer
 */
static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_delete_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	u8 err_mask = 0;
	int i, max;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.delete_rkey;
	link = qentry->link;

	if (lgr->smc_version == SMC_V2) {
		struct smc_llc_msg_delete_rkey_v2 *llcv2;

		/* NOTE(review): only the V1-sized prefix is copied into the
		 * lgr-wide V2 buffer before it is interpreted as a V2
		 * message - presumably the remainder of the V2 message
		 * already resides there from reception; confirm
		 */
		memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
		llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
		llcv2->num_inval_rkeys = 0;

		max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
		for (i = 0; i < max; i++) {
			if (smc_rtoken_delete(link, llcv2->rkey[i]))
				llcv2->num_inval_rkeys++;
		}
		/* the response reuses the V1-sized message; clear payload */
		memset(&llc->rkey[0], 0, sizeof(llc->rkey));
		memset(&llc->reserved2, 0, sizeof(llc->reserved2));
		smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
		if (llcv2->num_inval_rkeys) {
			llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
			llc->err_mask = llcv2->num_inval_rkeys;
		}
		goto finish;
	}

	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
	for (i = 0; i < max; i++) {
		/* V1 reports failures as a bit mask, MSB-first per entry */
		if (smc_rtoken_delete(link, llc->rkey[i]))
			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
	}
	if (err_mask) {
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
		llc->err_mask = err_mask;
	}
finish:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
1823
/* an unexpected LLC message type is a protocol violation: log the event
 * and schedule termination of the link group
 */
static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu LLC protocol violation: llc_type %d\n",
			    SMC_LGR_ID_SIZE, &lgr->id,
			    lgr->net->net_cookie, type);
	smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
	smc_lgr_terminate_sched(lgr);
}
1832
/* flush the llc event queue */
static void smc_llc_event_flush(struct smc_link_group *lgr)
{
	struct smc_llc_qentry *qentry, *q;

	/* drop and free all queued events under the event queue lock */
	spin_lock_bh(&lgr->llc_event_q_lock);
	list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
		list_del_init(&qentry->list);
		kfree(qentry);
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}
1845
/* dispatch one llc event to the flow it belongs to, or handle it
 * directly; called from the llc event worker.
 * qentry ownership: cases that "return" have handed the entry over to a
 * flow or response path; cases that "break" (or goto out) fall through
 * to the kfree() at the end.
 */
static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
{
	union smc_llc_msg *llc = &qentry->msg;
	struct smc_link *link = qentry->link;
	struct smc_link_group *lgr = link->lgr;

	if (!smc_link_usable(link))
		goto out;

	switch (llc->raw.hdr.common.llc_type) {
	case SMC_LLC_TEST_LINK:
		/* simply echo the message back with the response flag */
		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, llc);
		break;
	case SMC_LLC_ADD_LINK:
		if (list_empty(&lgr->list))
			goto out;	/* lgr is terminating */
		if (lgr->role == SMC_CLNT) {
			if (smc_llc_is_local_add_link(llc)) {
				/* locally enqueued trigger */
				if (lgr->llc_flow_lcl.type ==
				    SMC_LLC_FLOW_ADD_LINK)
					break;	/* add_link in progress */
				if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						       qentry)) {
					schedule_work(&lgr->llc_add_link_work);
				}
				return;
			}
			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
			    !lgr->llc_flow_lcl.qentry) {
				/* a flow is waiting for this message */
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				wake_up(&lgr->llc_msg_waiter);
				return;
			}
			if (lgr->llc_flow_lcl.type ==
			    SMC_LLC_FLOW_REQ_ADD_LINK) {
				/* server started add_link processing */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				schedule_work(&lgr->llc_add_link_work);
				return;
			}
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				schedule_work(&lgr->llc_add_link_work);
			}
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			/* as smc server, handle client suggestion */
			schedule_work(&lgr->llc_add_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_LINK:
	case SMC_LLC_ADD_LINK_CONT:
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* a flow is waiting for this message */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
			return;
		}
		break;
	case SMC_LLC_DELETE_LINK:
		if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
		    !lgr->llc_flow_lcl.qentry) {
			/* DEL LINK REQ during ADD LINK SEQ */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			schedule_work(&lgr->llc_del_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_conf_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3, and 3 rkeys fit into
		 * one CONFIRM_RKEY message
		 */
		break;
	case SMC_LLC_DELETE_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_delete_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_REQ_ADD_LINK:
		/* handle response here, smc_llc_flow_stop() cannot be called
		 * in tasklet context
		 */
		if (lgr->role == SMC_CLNT &&
		    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK &&
		    (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) {
			smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl);
		} else if (lgr->role == SMC_SERV) {
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				/* as smc server, handle client suggestion */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				schedule_work(&lgr->llc_add_link_work);
			}
			return;
		}
		break;
	default:
		/* NOTE(review): reports common.type (the raw type byte),
		 * not common.llc_type - appears intentional so the log
		 * shows the full received value; confirm against smc_wr.h
		 */
		smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
		break;
	}
out:
	kfree(qentry);
}
1963
1964 /* worker to process llc messages on the event queue */
smc_llc_event_work(struct work_struct * work)1965 static void smc_llc_event_work(struct work_struct *work)
1966 {
1967 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1968 llc_event_work);
1969 struct smc_llc_qentry *qentry;
1970
1971 if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
1972 qentry = lgr->delayed_event;
1973 lgr->delayed_event = NULL;
1974 if (smc_link_usable(qentry->link))
1975 smc_llc_event_handler(qentry);
1976 else
1977 kfree(qentry);
1978 }
1979
1980 again:
1981 spin_lock_bh(&lgr->llc_event_q_lock);
1982 if (!list_empty(&lgr->llc_event_q)) {
1983 qentry = list_first_entry(&lgr->llc_event_q,
1984 struct smc_llc_qentry, list);
1985 list_del_init(&qentry->list);
1986 spin_unlock_bh(&lgr->llc_event_q_lock);
1987 smc_llc_event_handler(qentry);
1988 goto again;
1989 }
1990 spin_unlock_bh(&lgr->llc_event_q_lock);
1991 }
1992
1993 /* process llc responses in tasklet context */
smc_llc_rx_response(struct smc_link * link,struct smc_llc_qentry * qentry)1994 static void smc_llc_rx_response(struct smc_link *link,
1995 struct smc_llc_qentry *qentry)
1996 {
1997 enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
1998 struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
1999 u8 llc_type = qentry->msg.raw.hdr.common.llc_type;
2000
2001 switch (llc_type) {
2002 case SMC_LLC_TEST_LINK:
2003 if (smc_link_active(link))
2004 complete(&link->llc_testlink_resp);
2005 break;
2006 case SMC_LLC_ADD_LINK:
2007 case SMC_LLC_ADD_LINK_CONT:
2008 case SMC_LLC_CONFIRM_LINK:
2009 if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
2010 break; /* drop out-of-flow response */
2011 goto assign;
2012 case SMC_LLC_DELETE_LINK:
2013 if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
2014 break; /* drop out-of-flow response */
2015 goto assign;
2016 case SMC_LLC_CONFIRM_RKEY:
2017 case SMC_LLC_DELETE_RKEY:
2018 if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
2019 break; /* drop out-of-flow response */
2020 goto assign;
2021 case SMC_LLC_CONFIRM_RKEY_CONT:
2022 /* not used because max links is 3 */
2023 break;
2024 default:
2025 smc_llc_protocol_violation(link->lgr,
2026 qentry->msg.raw.hdr.common.type);
2027 break;
2028 }
2029 kfree(qentry);
2030 return;
2031 assign:
2032 /* assign responses to the local flow, we requested them */
2033 smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
2034 wake_up(&link->lgr->llc_msg_waiter);
2035 }
2036
smc_llc_enqueue(struct smc_link * link,union smc_llc_msg * llc)2037 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
2038 {
2039 struct smc_link_group *lgr = link->lgr;
2040 struct smc_llc_qentry *qentry;
2041 unsigned long flags;
2042
2043 qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
2044 if (!qentry)
2045 return;
2046 qentry->link = link;
2047 INIT_LIST_HEAD(&qentry->list);
2048 memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
2049
2050 /* process responses immediately */
2051 if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) &&
2052 llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) {
2053 smc_llc_rx_response(link, qentry);
2054 return;
2055 }
2056
2057 /* add requests to event queue */
2058 spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
2059 list_add_tail(&qentry->list, &lgr->llc_event_q);
2060 spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
2061 queue_work(system_highpri_wq, &lgr->llc_event_work);
2062 }
2063
2064 /* copy received msg and add it to the event queue */
smc_llc_rx_handler(struct ib_wc * wc,void * buf)2065 static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
2066 {
2067 struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
2068 union smc_llc_msg *llc = buf;
2069
2070 if (wc->byte_len < sizeof(*llc))
2071 return; /* short message */
2072 if (!llc->raw.hdr.common.llc_version) {
2073 if (llc->raw.hdr.length != sizeof(*llc))
2074 return; /* invalid message */
2075 } else {
2076 if (llc->raw.hdr.length_v2 < sizeof(*llc))
2077 return; /* invalid message */
2078 }
2079
2080 smc_llc_enqueue(link, llc);
2081 }
2082
2083 /***************************** worker, utils *********************************/
2084
smc_llc_testlink_work(struct work_struct * work)2085 static void smc_llc_testlink_work(struct work_struct *work)
2086 {
2087 struct smc_link *link = container_of(to_delayed_work(work),
2088 struct smc_link, llc_testlink_wrk);
2089 unsigned long next_interval;
2090 unsigned long expire_time;
2091 u8 user_data[16] = { 0 };
2092 int rc;
2093
2094 if (!smc_link_active(link))
2095 return; /* don't reschedule worker */
2096 expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
2097 if (time_is_after_jiffies(expire_time)) {
2098 next_interval = expire_time - jiffies;
2099 goto out;
2100 }
2101 reinit_completion(&link->llc_testlink_resp);
2102 smc_llc_send_test_link(link, user_data);
2103 /* receive TEST LINK response over RoCE fabric */
2104 rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
2105 SMC_LLC_WAIT_TIME);
2106 if (!smc_link_active(link))
2107 return; /* link state changed */
2108 if (rc <= 0) {
2109 smcr_link_down_cond_sched(link);
2110 return;
2111 }
2112 next_interval = link->llc_testlink_time;
2113 out:
2114 schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
2115 }
2116
smc_llc_lgr_init(struct smc_link_group * lgr,struct smc_sock * smc)2117 void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
2118 {
2119 struct net *net = sock_net(smc->clcsock->sk);
2120
2121 INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
2122 INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
2123 INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work);
2124 INIT_LIST_HEAD(&lgr->llc_event_q);
2125 spin_lock_init(&lgr->llc_event_q_lock);
2126 spin_lock_init(&lgr->llc_flow_lock);
2127 init_waitqueue_head(&lgr->llc_flow_waiter);
2128 init_waitqueue_head(&lgr->llc_msg_waiter);
2129 mutex_init(&lgr->llc_conf_mutex);
2130 lgr->llc_testlink_time = READ_ONCE(net->smc.sysctl_smcr_testlink_time);
2131 }
2132
2133 /* called after lgr was removed from lgr_list */
smc_llc_lgr_clear(struct smc_link_group * lgr)2134 void smc_llc_lgr_clear(struct smc_link_group *lgr)
2135 {
2136 smc_llc_event_flush(lgr);
2137 wake_up_all(&lgr->llc_flow_waiter);
2138 wake_up_all(&lgr->llc_msg_waiter);
2139 cancel_work_sync(&lgr->llc_event_work);
2140 cancel_work_sync(&lgr->llc_add_link_work);
2141 cancel_work_sync(&lgr->llc_del_link_work);
2142 if (lgr->delayed_event) {
2143 kfree(lgr->delayed_event);
2144 lgr->delayed_event = NULL;
2145 }
2146 }
2147
smc_llc_link_init(struct smc_link * link)2148 int smc_llc_link_init(struct smc_link *link)
2149 {
2150 init_completion(&link->llc_testlink_resp);
2151 INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
2152 return 0;
2153 }
2154
smc_llc_link_active(struct smc_link * link)2155 void smc_llc_link_active(struct smc_link *link)
2156 {
2157 pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link added: id %*phN, "
2158 "peerid %*phN, ibdev %s, ibport %d\n",
2159 SMC_LGR_ID_SIZE, &link->lgr->id,
2160 link->lgr->net->net_cookie,
2161 SMC_LGR_ID_SIZE, &link->link_uid,
2162 SMC_LGR_ID_SIZE, &link->peer_link_uid,
2163 link->smcibdev->ibdev->name, link->ibport);
2164 link->state = SMC_LNK_ACTIVE;
2165 if (link->lgr->llc_testlink_time) {
2166 link->llc_testlink_time = link->lgr->llc_testlink_time;
2167 schedule_delayed_work(&link->llc_testlink_wrk,
2168 link->llc_testlink_time);
2169 }
2170 }
2171
2172 /* called in worker context */
smc_llc_link_clear(struct smc_link * link,bool log)2173 void smc_llc_link_clear(struct smc_link *link, bool log)
2174 {
2175 if (log)
2176 pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link removed: id %*phN"
2177 ", peerid %*phN, ibdev %s, ibport %d\n",
2178 SMC_LGR_ID_SIZE, &link->lgr->id,
2179 link->lgr->net->net_cookie,
2180 SMC_LGR_ID_SIZE, &link->link_uid,
2181 SMC_LGR_ID_SIZE, &link->peer_link_uid,
2182 link->smcibdev->ibdev->name, link->ibport);
2183 complete(&link->llc_testlink_resp);
2184 cancel_delayed_work_sync(&link->llc_testlink_wrk);
2185 }
2186
2187 /* register a new rtoken at the remote peer (for all links) */
smc_llc_do_confirm_rkey(struct smc_link * send_link,struct smc_buf_desc * rmb_desc)2188 int smc_llc_do_confirm_rkey(struct smc_link *send_link,
2189 struct smc_buf_desc *rmb_desc)
2190 {
2191 struct smc_link_group *lgr = send_link->lgr;
2192 struct smc_llc_qentry *qentry = NULL;
2193 int rc = 0;
2194
2195 rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
2196 if (rc)
2197 goto out;
2198 /* receive CONFIRM RKEY response from server over RoCE fabric */
2199 qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
2200 SMC_LLC_CONFIRM_RKEY);
2201 if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
2202 rc = -EFAULT;
2203 out:
2204 if (qentry)
2205 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
2206 return rc;
2207 }
2208
2209 /* unregister an rtoken at the remote peer */
smc_llc_do_delete_rkey(struct smc_link_group * lgr,struct smc_buf_desc * rmb_desc)2210 int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
2211 struct smc_buf_desc *rmb_desc)
2212 {
2213 struct smc_llc_qentry *qentry = NULL;
2214 struct smc_link *send_link;
2215 int rc = 0;
2216
2217 send_link = smc_llc_usable_link(lgr);
2218 if (!send_link)
2219 return -ENOLINK;
2220
2221 /* protected by llc_flow control */
2222 rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
2223 if (rc)
2224 goto out;
2225 /* receive DELETE RKEY response from server over RoCE fabric */
2226 qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
2227 SMC_LLC_DELETE_RKEY);
2228 if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
2229 rc = -EFAULT;
2230 out:
2231 if (qentry)
2232 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
2233 return rc;
2234 }
2235
smc_llc_link_set_uid(struct smc_link * link)2236 void smc_llc_link_set_uid(struct smc_link *link)
2237 {
2238 __be32 link_uid;
2239
2240 link_uid = htonl(*((u32 *)link->lgr->id) + link->link_id);
2241 memcpy(link->link_uid, &link_uid, SMC_LGR_ID_SIZE);
2242 }
2243
2244 /* save peers link user id, used for debug purposes */
smc_llc_save_peer_uid(struct smc_llc_qentry * qentry)2245 void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry)
2246 {
2247 memcpy(qentry->link->peer_link_uid, qentry->msg.confirm_link.link_uid,
2248 SMC_LGR_ID_SIZE);
2249 }
2250
2251 /* evaluate confirm link request or response */
smc_llc_eval_conf_link(struct smc_llc_qentry * qentry,enum smc_llc_reqresp type)2252 int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
2253 enum smc_llc_reqresp type)
2254 {
2255 if (type == SMC_LLC_REQ) { /* SMC server assigns link_id */
2256 qentry->link->link_id = qentry->msg.confirm_link.link_num;
2257 smc_llc_link_set_uid(qentry->link);
2258 }
2259 if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
2260 return -ENOTSUPP;
2261 return 0;
2262 }
2263
2264 /***************************** init, exit, misc ******************************/
2265
2266 static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
2267 {
2268 .handler = smc_llc_rx_handler,
2269 .type = SMC_LLC_CONFIRM_LINK
2270 },
2271 {
2272 .handler = smc_llc_rx_handler,
2273 .type = SMC_LLC_TEST_LINK
2274 },
2275 {
2276 .handler = smc_llc_rx_handler,
2277 .type = SMC_LLC_ADD_LINK
2278 },
2279 {
2280 .handler = smc_llc_rx_handler,
2281 .type = SMC_LLC_ADD_LINK_CONT
2282 },
2283 {
2284 .handler = smc_llc_rx_handler,
2285 .type = SMC_LLC_DELETE_LINK
2286 },
2287 {
2288 .handler = smc_llc_rx_handler,
2289 .type = SMC_LLC_CONFIRM_RKEY
2290 },
2291 {
2292 .handler = smc_llc_rx_handler,
2293 .type = SMC_LLC_CONFIRM_RKEY_CONT
2294 },
2295 {
2296 .handler = smc_llc_rx_handler,
2297 .type = SMC_LLC_DELETE_RKEY
2298 },
2299 /* V2 types */
2300 {
2301 .handler = smc_llc_rx_handler,
2302 .type = SMC_LLC_CONFIRM_LINK_V2
2303 },
2304 {
2305 .handler = smc_llc_rx_handler,
2306 .type = SMC_LLC_TEST_LINK_V2
2307 },
2308 {
2309 .handler = smc_llc_rx_handler,
2310 .type = SMC_LLC_ADD_LINK_V2
2311 },
2312 {
2313 .handler = smc_llc_rx_handler,
2314 .type = SMC_LLC_DELETE_LINK_V2
2315 },
2316 {
2317 .handler = smc_llc_rx_handler,
2318 .type = SMC_LLC_REQ_ADD_LINK_V2
2319 },
2320 {
2321 .handler = smc_llc_rx_handler,
2322 .type = SMC_LLC_CONFIRM_RKEY_V2
2323 },
2324 {
2325 .handler = smc_llc_rx_handler,
2326 .type = SMC_LLC_DELETE_RKEY_V2
2327 },
2328 {
2329 .handler = NULL,
2330 }
2331 };
2332
smc_llc_init(void)2333 int __init smc_llc_init(void)
2334 {
2335 struct smc_wr_rx_handler *handler;
2336 int rc = 0;
2337
2338 for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
2339 INIT_HLIST_NODE(&handler->list);
2340 rc = smc_wr_rx_register_handler(handler);
2341 if (rc)
2342 break;
2343 }
2344 return rc;
2345 }
2346