1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Ceph msgr2 protocol implementation
4 *
5 * Copyright (C) 2020 Ilya Dryomov <idryomov@gmail.com>
6 */
7
8 #include <linux/ceph/ceph_debug.h>
9
10 #include <crypto/aead.h>
11 #include <crypto/algapi.h> /* for crypto_memneq() */
12 #include <crypto/hash.h>
13 #include <crypto/sha2.h>
14 #include <linux/bvec.h>
15 #include <linux/crc32c.h>
16 #include <linux/net.h>
17 #include <linux/scatterlist.h>
18 #include <linux/socket.h>
19 #include <linux/sched/mm.h>
20 #include <net/sock.h>
21 #include <net/tcp.h>
22
23 #include <linux/ceph/ceph_features.h>
24 #include <linux/ceph/decode.h>
25 #include <linux/ceph/libceph.h>
26 #include <linux/ceph/messenger.h>
27
28 #include "crypto.h" /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */
29
30 #define FRAME_TAG_HELLO 1
31 #define FRAME_TAG_AUTH_REQUEST 2
32 #define FRAME_TAG_AUTH_BAD_METHOD 3
33 #define FRAME_TAG_AUTH_REPLY_MORE 4
34 #define FRAME_TAG_AUTH_REQUEST_MORE 5
35 #define FRAME_TAG_AUTH_DONE 6
36 #define FRAME_TAG_AUTH_SIGNATURE 7
37 #define FRAME_TAG_CLIENT_IDENT 8
38 #define FRAME_TAG_SERVER_IDENT 9
39 #define FRAME_TAG_IDENT_MISSING_FEATURES 10
40 #define FRAME_TAG_SESSION_RECONNECT 11
41 #define FRAME_TAG_SESSION_RESET 12
42 #define FRAME_TAG_SESSION_RETRY 13
43 #define FRAME_TAG_SESSION_RETRY_GLOBAL 14
44 #define FRAME_TAG_SESSION_RECONNECT_OK 15
45 #define FRAME_TAG_WAIT 16
46 #define FRAME_TAG_MESSAGE 17
47 #define FRAME_TAG_KEEPALIVE2 18
48 #define FRAME_TAG_KEEPALIVE2_ACK 19
49 #define FRAME_TAG_ACK 20
50
51 #define FRAME_LATE_STATUS_ABORTED 0x1
52 #define FRAME_LATE_STATUS_COMPLETE 0xe
53 #define FRAME_LATE_STATUS_ABORTED_MASK 0xf
54
55 #define IN_S_HANDLE_PREAMBLE 1
56 #define IN_S_HANDLE_CONTROL 2
57 #define IN_S_HANDLE_CONTROL_REMAINDER 3
58 #define IN_S_PREPARE_READ_DATA 4
59 #define IN_S_PREPARE_READ_DATA_CONT 5
60 #define IN_S_PREPARE_READ_ENC_PAGE 6
61 #define IN_S_HANDLE_EPILOGUE 7
62 #define IN_S_FINISH_SKIP 8
63
64 #define OUT_S_QUEUE_DATA 1
65 #define OUT_S_QUEUE_DATA_CONT 2
66 #define OUT_S_QUEUE_ENC_PAGE 3
67 #define OUT_S_QUEUE_ZEROS 4
68 #define OUT_S_FINISH_MESSAGE 5
69 #define OUT_S_GET_NEXT 6
70
71 #define CTRL_BODY(p) ((void *)(p) + CEPH_PREAMBLE_LEN)
72 #define FRONT_PAD(p) ((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
73 #define MIDDLE_PAD(p) (FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
74 #define DATA_PAD(p) (MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)
75
76 #define CEPH_MSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
77
do_recvmsg(struct socket * sock,struct iov_iter * it)78 static int do_recvmsg(struct socket *sock, struct iov_iter *it)
79 {
80 struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
81 int ret;
82
83 msg.msg_iter = *it;
84 while (iov_iter_count(it)) {
85 ret = sock_recvmsg(sock, &msg, msg.msg_flags);
86 if (ret <= 0) {
87 if (ret == -EAGAIN)
88 ret = 0;
89 return ret;
90 }
91
92 iov_iter_advance(it, ret);
93 }
94
95 WARN_ON(msg_data_left(&msg));
96 return 1;
97 }
98
99 /*
100 * Read as much as possible.
101 *
102 * Return:
103 * 1 - done, nothing (else) to read
104 * 0 - socket is empty, need to wait
105 * <0 - error
106 */
ceph_tcp_recv(struct ceph_connection * con)107 static int ceph_tcp_recv(struct ceph_connection *con)
108 {
109 int ret;
110
111 dout("%s con %p %s %zu\n", __func__, con,
112 iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
113 iov_iter_count(&con->v2.in_iter));
114 ret = do_recvmsg(con->sock, &con->v2.in_iter);
115 dout("%s con %p ret %d left %zu\n", __func__, con, ret,
116 iov_iter_count(&con->v2.in_iter));
117 return ret;
118 }
119
do_sendmsg(struct socket * sock,struct iov_iter * it)120 static int do_sendmsg(struct socket *sock, struct iov_iter *it)
121 {
122 struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
123 int ret;
124
125 msg.msg_iter = *it;
126 while (iov_iter_count(it)) {
127 ret = sock_sendmsg(sock, &msg);
128 if (ret <= 0) {
129 if (ret == -EAGAIN)
130 ret = 0;
131 return ret;
132 }
133
134 iov_iter_advance(it, ret);
135 }
136
137 WARN_ON(msg_data_left(&msg));
138 return 1;
139 }
140
do_try_sendpage(struct socket * sock,struct iov_iter * it)141 static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
142 {
143 struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
144 struct bio_vec bv;
145 int ret;
146
147 if (WARN_ON(!iov_iter_is_bvec(it)))
148 return -EINVAL;
149
150 while (iov_iter_count(it)) {
151 /* iov_iter_iovec() for ITER_BVEC */
152 bv.bv_page = it->bvec->bv_page;
153 bv.bv_offset = it->bvec->bv_offset + it->iov_offset;
154 bv.bv_len = min(iov_iter_count(it),
155 it->bvec->bv_len - it->iov_offset);
156
157 /*
158 * sendpage cannot properly handle pages with
159 * page_count == 0, we need to fall back to sendmsg if
160 * that's the case.
161 *
162 * Same goes for slab pages: skb_can_coalesce() allows
163 * coalescing neighboring slab objects into a single frag
164 * which triggers one of hardened usercopy checks.
165 */
166 if (sendpage_ok(bv.bv_page)) {
167 ret = sock->ops->sendpage(sock, bv.bv_page,
168 bv.bv_offset, bv.bv_len,
169 CEPH_MSG_FLAGS);
170 } else {
171 iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len);
172 ret = sock_sendmsg(sock, &msg);
173 }
174 if (ret <= 0) {
175 if (ret == -EAGAIN)
176 ret = 0;
177 return ret;
178 }
179
180 iov_iter_advance(it, ret);
181 }
182
183 return 1;
184 }
185
186 /*
187 * Write as much as possible. The socket is expected to be corked,
188 * so we don't bother with MSG_MORE/MSG_SENDPAGE_NOTLAST here.
189 *
190 * Return:
191 * 1 - done, nothing (else) to write
192 * 0 - socket is full, need to wait
193 * <0 - error
194 */
ceph_tcp_send(struct ceph_connection * con)195 static int ceph_tcp_send(struct ceph_connection *con)
196 {
197 int ret;
198
199 dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
200 iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
201 if (con->v2.out_iter_sendpage)
202 ret = do_try_sendpage(con->sock, &con->v2.out_iter);
203 else
204 ret = do_sendmsg(con->sock, &con->v2.out_iter);
205 dout("%s con %p ret %d left %zu\n", __func__, con, ret,
206 iov_iter_count(&con->v2.out_iter));
207 return ret;
208 }
209
add_in_kvec(struct ceph_connection * con,void * buf,int len)210 static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
211 {
212 BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
213 WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
214
215 con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
216 con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
217 con->v2.in_kvec_cnt++;
218
219 con->v2.in_iter.nr_segs++;
220 con->v2.in_iter.count += len;
221 }
222
reset_in_kvecs(struct ceph_connection * con)223 static void reset_in_kvecs(struct ceph_connection *con)
224 {
225 WARN_ON(iov_iter_count(&con->v2.in_iter));
226
227 con->v2.in_kvec_cnt = 0;
228 iov_iter_kvec(&con->v2.in_iter, READ, con->v2.in_kvecs, 0, 0);
229 }
230
set_in_bvec(struct ceph_connection * con,const struct bio_vec * bv)231 static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
232 {
233 WARN_ON(iov_iter_count(&con->v2.in_iter));
234
235 con->v2.in_bvec = *bv;
236 iov_iter_bvec(&con->v2.in_iter, READ, &con->v2.in_bvec, 1, bv->bv_len);
237 }
238
set_in_skip(struct ceph_connection * con,int len)239 static void set_in_skip(struct ceph_connection *con, int len)
240 {
241 WARN_ON(iov_iter_count(&con->v2.in_iter));
242
243 dout("%s con %p len %d\n", __func__, con, len);
244 iov_iter_discard(&con->v2.in_iter, READ, len);
245 }
246
add_out_kvec(struct ceph_connection * con,void * buf,int len)247 static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
248 {
249 BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
250 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
251 WARN_ON(con->v2.out_zero);
252
253 con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
254 con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
255 con->v2.out_kvec_cnt++;
256
257 con->v2.out_iter.nr_segs++;
258 con->v2.out_iter.count += len;
259 }
260
reset_out_kvecs(struct ceph_connection * con)261 static void reset_out_kvecs(struct ceph_connection *con)
262 {
263 WARN_ON(iov_iter_count(&con->v2.out_iter));
264 WARN_ON(con->v2.out_zero);
265
266 con->v2.out_kvec_cnt = 0;
267
268 iov_iter_kvec(&con->v2.out_iter, WRITE, con->v2.out_kvecs, 0, 0);
269 con->v2.out_iter_sendpage = false;
270 }
271
set_out_bvec(struct ceph_connection * con,const struct bio_vec * bv,bool zerocopy)272 static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
273 bool zerocopy)
274 {
275 WARN_ON(iov_iter_count(&con->v2.out_iter));
276 WARN_ON(con->v2.out_zero);
277
278 con->v2.out_bvec = *bv;
279 con->v2.out_iter_sendpage = zerocopy;
280 iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
281 con->v2.out_bvec.bv_len);
282 }
283
set_out_bvec_zero(struct ceph_connection * con)284 static void set_out_bvec_zero(struct ceph_connection *con)
285 {
286 WARN_ON(iov_iter_count(&con->v2.out_iter));
287 WARN_ON(!con->v2.out_zero);
288
289 con->v2.out_bvec.bv_page = ceph_zero_page;
290 con->v2.out_bvec.bv_offset = 0;
291 con->v2.out_bvec.bv_len = min(con->v2.out_zero, (int)PAGE_SIZE);
292 con->v2.out_iter_sendpage = true;
293 iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
294 con->v2.out_bvec.bv_len);
295 }
296
out_zero_add(struct ceph_connection * con,int len)297 static void out_zero_add(struct ceph_connection *con, int len)
298 {
299 dout("%s con %p len %d\n", __func__, con, len);
300 con->v2.out_zero += len;
301 }
302
alloc_conn_buf(struct ceph_connection * con,int len)303 static void *alloc_conn_buf(struct ceph_connection *con, int len)
304 {
305 void *buf;
306
307 dout("%s con %p len %d\n", __func__, con, len);
308
309 if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))
310 return NULL;
311
312 buf = kvmalloc(len, GFP_NOIO);
313 if (!buf)
314 return NULL;
315
316 con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;
317 return buf;
318 }
319
free_conn_bufs(struct ceph_connection * con)320 static void free_conn_bufs(struct ceph_connection *con)
321 {
322 while (con->v2.conn_buf_cnt)
323 kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);
324 }
325
add_in_sign_kvec(struct ceph_connection * con,void * buf,int len)326 static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
327 {
328 BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));
329
330 con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
331 con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
332 con->v2.in_sign_kvec_cnt++;
333 }
334
clear_in_sign_kvecs(struct ceph_connection * con)335 static void clear_in_sign_kvecs(struct ceph_connection *con)
336 {
337 con->v2.in_sign_kvec_cnt = 0;
338 }
339
add_out_sign_kvec(struct ceph_connection * con,void * buf,int len)340 static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
341 {
342 BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));
343
344 con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
345 con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
346 con->v2.out_sign_kvec_cnt++;
347 }
348
clear_out_sign_kvecs(struct ceph_connection * con)349 static void clear_out_sign_kvecs(struct ceph_connection *con)
350 {
351 con->v2.out_sign_kvec_cnt = 0;
352 }
353
con_secure(struct ceph_connection * con)354 static bool con_secure(struct ceph_connection *con)
355 {
356 return con->v2.con_mode == CEPH_CON_MODE_SECURE;
357 }
358
front_len(const struct ceph_msg * msg)359 static int front_len(const struct ceph_msg *msg)
360 {
361 return le32_to_cpu(msg->hdr.front_len);
362 }
363
middle_len(const struct ceph_msg * msg)364 static int middle_len(const struct ceph_msg *msg)
365 {
366 return le32_to_cpu(msg->hdr.middle_len);
367 }
368
data_len(const struct ceph_msg * msg)369 static int data_len(const struct ceph_msg *msg)
370 {
371 return le32_to_cpu(msg->hdr.data_len);
372 }
373
need_padding(int len)374 static bool need_padding(int len)
375 {
376 return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);
377 }
378
padded_len(int len)379 static int padded_len(int len)
380 {
381 return ALIGN(len, CEPH_GCM_BLOCK_LEN);
382 }
383
padding_len(int len)384 static int padding_len(int len)
385 {
386 return padded_len(len) - len;
387 }
388
389 /* preamble + control segment */
head_onwire_len(int ctrl_len,bool secure)390 static int head_onwire_len(int ctrl_len, bool secure)
391 {
392 int head_len;
393 int rem_len;
394
395 if (secure) {
396 head_len = CEPH_PREAMBLE_SECURE_LEN;
397 if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
398 rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
399 head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
400 }
401 } else {
402 head_len = CEPH_PREAMBLE_PLAIN_LEN;
403 if (ctrl_len)
404 head_len += ctrl_len + CEPH_CRC_LEN;
405 }
406 return head_len;
407 }
408
409 /* front, middle and data segments + epilogue */
__tail_onwire_len(int front_len,int middle_len,int data_len,bool secure)410 static int __tail_onwire_len(int front_len, int middle_len, int data_len,
411 bool secure)
412 {
413 if (!front_len && !middle_len && !data_len)
414 return 0;
415
416 if (!secure)
417 return front_len + middle_len + data_len +
418 CEPH_EPILOGUE_PLAIN_LEN;
419
420 return padded_len(front_len) + padded_len(middle_len) +
421 padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;
422 }
423
tail_onwire_len(const struct ceph_msg * msg,bool secure)424 static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
425 {
426 return __tail_onwire_len(front_len(msg), middle_len(msg),
427 data_len(msg), secure);
428 }
429
430 /* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
431 #define MESSAGE_HEAD_PLAIN_LEN (CEPH_PREAMBLE_PLAIN_LEN + \
432 sizeof(struct ceph_msg_header2) + \
433 CEPH_CRC_LEN)
434
435 static const int frame_aligns[] = {
436 sizeof(void *),
437 sizeof(void *),
438 sizeof(void *),
439 PAGE_SIZE
440 };
441
442 /*
443 * Discards trailing empty segments, unless there is just one segment.
444 * A frame always has at least one (possibly empty) segment.
445 */
calc_segment_count(const int * lens,int len_cnt)446 static int calc_segment_count(const int *lens, int len_cnt)
447 {
448 int i;
449
450 for (i = len_cnt - 1; i >= 0; i--) {
451 if (lens[i])
452 return i + 1;
453 }
454
455 return 1;
456 }
457
init_frame_desc(struct ceph_frame_desc * desc,int tag,const int * lens,int len_cnt)458 static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
459 const int *lens, int len_cnt)
460 {
461 int i;
462
463 memset(desc, 0, sizeof(*desc));
464
465 desc->fd_tag = tag;
466 desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
467 BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
468 for (i = 0; i < desc->fd_seg_cnt; i++) {
469 desc->fd_lens[i] = lens[i];
470 desc->fd_aligns[i] = frame_aligns[i];
471 }
472 }
473
474 /*
475 * Preamble crc covers everything up to itself (28 bytes) and
476 * is calculated and verified irrespective of the connection mode
477 * (i.e. even if the frame is encrypted).
478 */
encode_preamble(const struct ceph_frame_desc * desc,void * p)479 static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
480 {
481 void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
482 void *start = p;
483 int i;
484
485 memset(p, 0, CEPH_PREAMBLE_LEN);
486
487 ceph_encode_8(&p, desc->fd_tag);
488 ceph_encode_8(&p, desc->fd_seg_cnt);
489 for (i = 0; i < desc->fd_seg_cnt; i++) {
490 ceph_encode_32(&p, desc->fd_lens[i]);
491 ceph_encode_16(&p, desc->fd_aligns[i]);
492 }
493
494 put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
495 }
496
decode_preamble(void * p,struct ceph_frame_desc * desc)497 static int decode_preamble(void *p, struct ceph_frame_desc *desc)
498 {
499 void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
500 u32 crc, expected_crc;
501 int i;
502
503 crc = crc32c(0, p, crcp - p);
504 expected_crc = get_unaligned_le32(crcp);
505 if (crc != expected_crc) {
506 pr_err("bad preamble crc, calculated %u, expected %u\n",
507 crc, expected_crc);
508 return -EBADMSG;
509 }
510
511 memset(desc, 0, sizeof(*desc));
512
513 desc->fd_tag = ceph_decode_8(&p);
514 desc->fd_seg_cnt = ceph_decode_8(&p);
515 if (desc->fd_seg_cnt < 1 ||
516 desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
517 pr_err("bad segment count %d\n", desc->fd_seg_cnt);
518 return -EINVAL;
519 }
520 for (i = 0; i < desc->fd_seg_cnt; i++) {
521 desc->fd_lens[i] = ceph_decode_32(&p);
522 desc->fd_aligns[i] = ceph_decode_16(&p);
523 }
524
525 /*
526 * This would fire for FRAME_TAG_WAIT (it has one empty
527 * segment), but we should never get it as client.
528 */
529 if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
530 pr_err("last segment empty\n");
531 return -EINVAL;
532 }
533
534 if (desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
535 pr_err("control segment too big %d\n", desc->fd_lens[0]);
536 return -EINVAL;
537 }
538 if (desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
539 pr_err("front segment too big %d\n", desc->fd_lens[1]);
540 return -EINVAL;
541 }
542 if (desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
543 pr_err("middle segment too big %d\n", desc->fd_lens[2]);
544 return -EINVAL;
545 }
546 if (desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
547 pr_err("data segment too big %d\n", desc->fd_lens[3]);
548 return -EINVAL;
549 }
550
551 return 0;
552 }
553
encode_epilogue_plain(struct ceph_connection * con,bool aborted)554 static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
555 {
556 con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
557 FRAME_LATE_STATUS_COMPLETE;
558 cpu_to_le32s(&con->v2.out_epil.front_crc);
559 cpu_to_le32s(&con->v2.out_epil.middle_crc);
560 cpu_to_le32s(&con->v2.out_epil.data_crc);
561 }
562
encode_epilogue_secure(struct ceph_connection * con,bool aborted)563 static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
564 {
565 memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
566 con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
567 FRAME_LATE_STATUS_COMPLETE;
568 }
569
decode_epilogue(void * p,u32 * front_crc,u32 * middle_crc,u32 * data_crc)570 static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
571 u32 *data_crc)
572 {
573 u8 late_status;
574
575 late_status = ceph_decode_8(&p);
576 if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
577 FRAME_LATE_STATUS_COMPLETE) {
578 /* we should never get an aborted message as client */
579 pr_err("bad late_status 0x%x\n", late_status);
580 return -EINVAL;
581 }
582
583 if (front_crc && middle_crc && data_crc) {
584 *front_crc = ceph_decode_32(&p);
585 *middle_crc = ceph_decode_32(&p);
586 *data_crc = ceph_decode_32(&p);
587 }
588
589 return 0;
590 }
591
fill_header(struct ceph_msg_header * hdr,const struct ceph_msg_header2 * hdr2,int front_len,int middle_len,int data_len,const struct ceph_entity_name * peer_name)592 static void fill_header(struct ceph_msg_header *hdr,
593 const struct ceph_msg_header2 *hdr2,
594 int front_len, int middle_len, int data_len,
595 const struct ceph_entity_name *peer_name)
596 {
597 hdr->seq = hdr2->seq;
598 hdr->tid = hdr2->tid;
599 hdr->type = hdr2->type;
600 hdr->priority = hdr2->priority;
601 hdr->version = hdr2->version;
602 hdr->front_len = cpu_to_le32(front_len);
603 hdr->middle_len = cpu_to_le32(middle_len);
604 hdr->data_len = cpu_to_le32(data_len);
605 hdr->data_off = hdr2->data_off;
606 hdr->src = *peer_name;
607 hdr->compat_version = hdr2->compat_version;
608 hdr->reserved = 0;
609 hdr->crc = 0;
610 }
611
fill_header2(struct ceph_msg_header2 * hdr2,const struct ceph_msg_header * hdr,u64 ack_seq)612 static void fill_header2(struct ceph_msg_header2 *hdr2,
613 const struct ceph_msg_header *hdr, u64 ack_seq)
614 {
615 hdr2->seq = hdr->seq;
616 hdr2->tid = hdr->tid;
617 hdr2->type = hdr->type;
618 hdr2->priority = hdr->priority;
619 hdr2->version = hdr->version;
620 hdr2->data_pre_padding_len = 0;
621 hdr2->data_off = hdr->data_off;
622 hdr2->ack_seq = cpu_to_le64(ack_seq);
623 hdr2->flags = 0;
624 hdr2->compat_version = hdr->compat_version;
625 hdr2->reserved = 0;
626 }
627
verify_control_crc(struct ceph_connection * con)628 static int verify_control_crc(struct ceph_connection *con)
629 {
630 int ctrl_len = con->v2.in_desc.fd_lens[0];
631 u32 crc, expected_crc;
632
633 WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
634 WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);
635
636 crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
637 expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
638 if (crc != expected_crc) {
639 pr_err("bad control crc, calculated %u, expected %u\n",
640 crc, expected_crc);
641 return -EBADMSG;
642 }
643
644 return 0;
645 }
646
verify_epilogue_crcs(struct ceph_connection * con,u32 front_crc,u32 middle_crc,u32 data_crc)647 static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
648 u32 middle_crc, u32 data_crc)
649 {
650 if (front_len(con->in_msg)) {
651 con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
652 front_len(con->in_msg));
653 } else {
654 WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
655 con->in_front_crc = -1;
656 }
657
658 if (middle_len(con->in_msg))
659 con->in_middle_crc = crc32c(-1,
660 con->in_msg->middle->vec.iov_base,
661 middle_len(con->in_msg));
662 else if (data_len(con->in_msg))
663 con->in_middle_crc = -1;
664 else
665 con->in_middle_crc = 0;
666
667 if (!data_len(con->in_msg))
668 con->in_data_crc = 0;
669
670 dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
671 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
672
673 if (con->in_front_crc != front_crc) {
674 pr_err("bad front crc, calculated %u, expected %u\n",
675 con->in_front_crc, front_crc);
676 return -EBADMSG;
677 }
678 if (con->in_middle_crc != middle_crc) {
679 pr_err("bad middle crc, calculated %u, expected %u\n",
680 con->in_middle_crc, middle_crc);
681 return -EBADMSG;
682 }
683 if (con->in_data_crc != data_crc) {
684 pr_err("bad data crc, calculated %u, expected %u\n",
685 con->in_data_crc, data_crc);
686 return -EBADMSG;
687 }
688
689 return 0;
690 }
691
setup_crypto(struct ceph_connection * con,const u8 * session_key,int session_key_len,const u8 * con_secret,int con_secret_len)692 static int setup_crypto(struct ceph_connection *con,
693 const u8 *session_key, int session_key_len,
694 const u8 *con_secret, int con_secret_len)
695 {
696 unsigned int noio_flag;
697 int ret;
698
699 dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
700 __func__, con, con->v2.con_mode, session_key_len, con_secret_len);
701 WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);
702
703 if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
704 con->v2.con_mode != CEPH_CON_MODE_SECURE) {
705 pr_err("bad con_mode %d\n", con->v2.con_mode);
706 return -EINVAL;
707 }
708
709 if (!session_key_len) {
710 WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
711 WARN_ON(con_secret_len);
712 return 0; /* auth_none */
713 }
714
715 noio_flag = memalloc_noio_save();
716 con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
717 memalloc_noio_restore(noio_flag);
718 if (IS_ERR(con->v2.hmac_tfm)) {
719 ret = PTR_ERR(con->v2.hmac_tfm);
720 con->v2.hmac_tfm = NULL;
721 pr_err("failed to allocate hmac tfm context: %d\n", ret);
722 return ret;
723 }
724
725 WARN_ON((unsigned long)session_key &
726 crypto_shash_alignmask(con->v2.hmac_tfm));
727 ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
728 session_key_len);
729 if (ret) {
730 pr_err("failed to set hmac key: %d\n", ret);
731 return ret;
732 }
733
734 if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
735 WARN_ON(con_secret_len);
736 return 0; /* auth_x, plain mode */
737 }
738
739 if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
740 pr_err("con_secret too small %d\n", con_secret_len);
741 return -EINVAL;
742 }
743
744 noio_flag = memalloc_noio_save();
745 con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
746 memalloc_noio_restore(noio_flag);
747 if (IS_ERR(con->v2.gcm_tfm)) {
748 ret = PTR_ERR(con->v2.gcm_tfm);
749 con->v2.gcm_tfm = NULL;
750 pr_err("failed to allocate gcm tfm context: %d\n", ret);
751 return ret;
752 }
753
754 WARN_ON((unsigned long)con_secret &
755 crypto_aead_alignmask(con->v2.gcm_tfm));
756 ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
757 if (ret) {
758 pr_err("failed to set gcm key: %d\n", ret);
759 return ret;
760 }
761
762 WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
763 ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
764 if (ret) {
765 pr_err("failed to set gcm tag size: %d\n", ret);
766 return ret;
767 }
768
769 con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
770 if (!con->v2.gcm_req) {
771 pr_err("failed to allocate gcm request\n");
772 return -ENOMEM;
773 }
774
775 crypto_init_wait(&con->v2.gcm_wait);
776 aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
777 crypto_req_done, &con->v2.gcm_wait);
778
779 memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
780 CEPH_GCM_IV_LEN);
781 memcpy(&con->v2.out_gcm_nonce,
782 con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
783 CEPH_GCM_IV_LEN);
784 return 0; /* auth_x, secure mode */
785 }
786
hmac_sha256(struct ceph_connection * con,const struct kvec * kvecs,int kvec_cnt,u8 * hmac)787 static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
788 int kvec_cnt, u8 *hmac)
789 {
790 SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm); /* tfm arg is ignored */
791 int ret;
792 int i;
793
794 dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
795 con->v2.hmac_tfm, kvec_cnt);
796
797 if (!con->v2.hmac_tfm) {
798 memset(hmac, 0, SHA256_DIGEST_SIZE);
799 return 0; /* auth_none */
800 }
801
802 desc->tfm = con->v2.hmac_tfm;
803 ret = crypto_shash_init(desc);
804 if (ret)
805 goto out;
806
807 for (i = 0; i < kvec_cnt; i++) {
808 WARN_ON((unsigned long)kvecs[i].iov_base &
809 crypto_shash_alignmask(con->v2.hmac_tfm));
810 ret = crypto_shash_update(desc, kvecs[i].iov_base,
811 kvecs[i].iov_len);
812 if (ret)
813 goto out;
814 }
815
816 ret = crypto_shash_final(desc, hmac);
817
818 out:
819 shash_desc_zero(desc);
820 return ret; /* auth_x, both plain and secure modes */
821 }
822
gcm_inc_nonce(struct ceph_gcm_nonce * nonce)823 static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
824 {
825 u64 counter;
826
827 counter = le64_to_cpu(nonce->counter);
828 nonce->counter = cpu_to_le64(counter + 1);
829 }
830
gcm_crypt(struct ceph_connection * con,bool encrypt,struct scatterlist * src,struct scatterlist * dst,int src_len)831 static int gcm_crypt(struct ceph_connection *con, bool encrypt,
832 struct scatterlist *src, struct scatterlist *dst,
833 int src_len)
834 {
835 struct ceph_gcm_nonce *nonce;
836 int ret;
837
838 nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;
839
840 aead_request_set_ad(con->v2.gcm_req, 0); /* no AAD */
841 aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
842 ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
843 crypto_aead_decrypt(con->v2.gcm_req),
844 &con->v2.gcm_wait);
845 if (ret)
846 return ret;
847
848 gcm_inc_nonce(nonce);
849 return 0;
850 }
851
get_bvec_at(struct ceph_msg_data_cursor * cursor,struct bio_vec * bv)852 static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
853 struct bio_vec *bv)
854 {
855 struct page *page;
856 size_t off, len;
857
858 WARN_ON(!cursor->total_resid);
859
860 /* skip zero-length data items */
861 while (!cursor->resid)
862 ceph_msg_data_advance(cursor, 0);
863
864 /* get a piece of data, cursor isn't advanced */
865 page = ceph_msg_data_next(cursor, &off, &len);
866
867 bv->bv_page = page;
868 bv->bv_offset = off;
869 bv->bv_len = len;
870 }
871
calc_sg_cnt(void * buf,int buf_len)872 static int calc_sg_cnt(void *buf, int buf_len)
873 {
874 int sg_cnt;
875
876 if (!buf_len)
877 return 0;
878
879 sg_cnt = need_padding(buf_len) ? 1 : 0;
880 if (is_vmalloc_addr(buf)) {
881 WARN_ON(offset_in_page(buf));
882 sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;
883 } else {
884 sg_cnt++;
885 }
886
887 return sg_cnt;
888 }
889
calc_sg_cnt_cursor(struct ceph_msg_data_cursor * cursor)890 static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
891 {
892 int data_len = cursor->total_resid;
893 struct bio_vec bv;
894 int sg_cnt;
895
896 if (!data_len)
897 return 0;
898
899 sg_cnt = need_padding(data_len) ? 1 : 0;
900 do {
901 get_bvec_at(cursor, &bv);
902 sg_cnt++;
903
904 ceph_msg_data_advance(cursor, bv.bv_len);
905 } while (cursor->total_resid);
906
907 return sg_cnt;
908 }
909
init_sgs(struct scatterlist ** sg,void * buf,int buf_len,u8 * pad)910 static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
911 {
912 void *end = buf + buf_len;
913 struct page *page;
914 int len;
915 void *p;
916
917 if (!buf_len)
918 return;
919
920 if (is_vmalloc_addr(buf)) {
921 p = buf;
922 do {
923 page = vmalloc_to_page(p);
924 len = min_t(int, end - p, PAGE_SIZE);
925 WARN_ON(!page || !len || offset_in_page(p));
926 sg_set_page(*sg, page, len, 0);
927 *sg = sg_next(*sg);
928 p += len;
929 } while (p != end);
930 } else {
931 sg_set_buf(*sg, buf, buf_len);
932 *sg = sg_next(*sg);
933 }
934
935 if (need_padding(buf_len)) {
936 sg_set_buf(*sg, pad, padding_len(buf_len));
937 *sg = sg_next(*sg);
938 }
939 }
940
init_sgs_cursor(struct scatterlist ** sg,struct ceph_msg_data_cursor * cursor,u8 * pad)941 static void init_sgs_cursor(struct scatterlist **sg,
942 struct ceph_msg_data_cursor *cursor, u8 *pad)
943 {
944 int data_len = cursor->total_resid;
945 struct bio_vec bv;
946
947 if (!data_len)
948 return;
949
950 do {
951 get_bvec_at(cursor, &bv);
952 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
953 *sg = sg_next(*sg);
954
955 ceph_msg_data_advance(cursor, bv.bv_len);
956 } while (cursor->total_resid);
957
958 if (need_padding(data_len)) {
959 sg_set_buf(*sg, pad, padding_len(data_len));
960 *sg = sg_next(*sg);
961 }
962 }
963
setup_message_sgs(struct sg_table * sgt,struct ceph_msg * msg,u8 * front_pad,u8 * middle_pad,u8 * data_pad,void * epilogue,bool add_tag)964 static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
965 u8 *front_pad, u8 *middle_pad, u8 *data_pad,
966 void *epilogue, bool add_tag)
967 {
968 struct ceph_msg_data_cursor cursor;
969 struct scatterlist *cur_sg;
970 int sg_cnt;
971 int ret;
972
973 if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
974 return 0;
975
976 sg_cnt = 1; /* epilogue + [auth tag] */
977 if (front_len(msg))
978 sg_cnt += calc_sg_cnt(msg->front.iov_base,
979 front_len(msg));
980 if (middle_len(msg))
981 sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
982 middle_len(msg));
983 if (data_len(msg)) {
984 ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
985 sg_cnt += calc_sg_cnt_cursor(&cursor);
986 }
987
988 ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
989 if (ret)
990 return ret;
991
992 cur_sg = sgt->sgl;
993 if (front_len(msg))
994 init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
995 front_pad);
996 if (middle_len(msg))
997 init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
998 middle_pad);
999 if (data_len(msg)) {
1000 ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
1001 init_sgs_cursor(&cur_sg, &cursor, data_pad);
1002 }
1003
1004 WARN_ON(!sg_is_last(cur_sg));
1005 sg_set_buf(cur_sg, epilogue,
1006 CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
1007 return 0;
1008 }
1009
decrypt_preamble(struct ceph_connection * con)1010 static int decrypt_preamble(struct ceph_connection *con)
1011 {
1012 struct scatterlist sg;
1013
1014 sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
1015 return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);
1016 }
1017
decrypt_control_remainder(struct ceph_connection * con)1018 static int decrypt_control_remainder(struct ceph_connection *con)
1019 {
1020 int ctrl_len = con->v2.in_desc.fd_lens[0];
1021 int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1022 int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
1023 struct scatterlist sgs[2];
1024
1025 WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
1026 WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);
1027
1028 sg_init_table(sgs, 2);
1029 sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
1030 sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);
1031
1032 return gcm_crypt(con, false, sgs, sgs,
1033 padded_len(rem_len) + CEPH_GCM_TAG_LEN);
1034 }
1035
decrypt_tail(struct ceph_connection * con)1036 static int decrypt_tail(struct ceph_connection *con)
1037 {
1038 struct sg_table enc_sgt = {};
1039 struct sg_table sgt = {};
1040 int tail_len;
1041 int ret;
1042
1043 tail_len = tail_onwire_len(con->in_msg, true);
1044 ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
1045 con->v2.in_enc_page_cnt, 0, tail_len,
1046 GFP_NOIO);
1047 if (ret)
1048 goto out;
1049
1050 ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
1051 MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
1052 con->v2.in_buf, true);
1053 if (ret)
1054 goto out;
1055
1056 dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
1057 con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
1058 ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);
1059 if (ret)
1060 goto out;
1061
1062 WARN_ON(!con->v2.in_enc_page_cnt);
1063 ceph_release_page_vector(con->v2.in_enc_pages,
1064 con->v2.in_enc_page_cnt);
1065 con->v2.in_enc_pages = NULL;
1066 con->v2.in_enc_page_cnt = 0;
1067
1068 out:
1069 sg_free_table(&sgt);
1070 sg_free_table(&enc_sgt);
1071 return ret;
1072 }
1073
prepare_banner(struct ceph_connection * con)1074 static int prepare_banner(struct ceph_connection *con)
1075 {
1076 int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;
1077 void *buf, *p;
1078
1079 buf = alloc_conn_buf(con, buf_len);
1080 if (!buf)
1081 return -ENOMEM;
1082
1083 p = buf;
1084 ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
1085 ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
1086 ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
1087 ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
1088 WARN_ON(p != buf + buf_len);
1089
1090 add_out_kvec(con, buf, buf_len);
1091 add_out_sign_kvec(con, buf, buf_len);
1092 ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1093 return 0;
1094 }
1095
1096 /*
1097 * base:
1098 * preamble
1099 * control body (ctrl_len bytes)
1100 * space for control crc
1101 *
1102 * extdata (optional):
1103 * control body (extdata_len bytes)
1104 *
1105 * Compute control crc and gather base and extdata into:
1106 *
1107 * preamble
1108 * control body (ctrl_len + extdata_len bytes)
1109 * control crc
1110 *
1111 * Preamble should already be encoded at the start of base.
1112 */
prepare_head_plain(struct ceph_connection * con,void * base,int ctrl_len,void * extdata,int extdata_len,bool to_be_signed)1113 static void prepare_head_plain(struct ceph_connection *con, void *base,
1114 int ctrl_len, void *extdata, int extdata_len,
1115 bool to_be_signed)
1116 {
1117 int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
1118 void *crcp = base + base_len - CEPH_CRC_LEN;
1119 u32 crc;
1120
1121 crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
1122 if (extdata_len)
1123 crc = crc32c(crc, extdata, extdata_len);
1124 put_unaligned_le32(crc, crcp);
1125
1126 if (!extdata_len) {
1127 add_out_kvec(con, base, base_len);
1128 if (to_be_signed)
1129 add_out_sign_kvec(con, base, base_len);
1130 return;
1131 }
1132
1133 add_out_kvec(con, base, crcp - base);
1134 add_out_kvec(con, extdata, extdata_len);
1135 add_out_kvec(con, crcp, CEPH_CRC_LEN);
1136 if (to_be_signed) {
1137 add_out_sign_kvec(con, base, crcp - base);
1138 add_out_sign_kvec(con, extdata, extdata_len);
1139 add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
1140 }
1141 }
1142
prepare_head_secure_small(struct ceph_connection * con,void * base,int ctrl_len)1143 static int prepare_head_secure_small(struct ceph_connection *con,
1144 void *base, int ctrl_len)
1145 {
1146 struct scatterlist sg;
1147 int ret;
1148
1149 /* inline buffer padding? */
1150 if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
1151 memset(CTRL_BODY(base) + ctrl_len, 0,
1152 CEPH_PREAMBLE_INLINE_LEN - ctrl_len);
1153
1154 sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
1155 ret = gcm_crypt(con, true, &sg, &sg,
1156 CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);
1157 if (ret)
1158 return ret;
1159
1160 add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
1161 return 0;
1162 }
1163
1164 /*
1165 * base:
1166 * preamble
1167 * control body (ctrl_len bytes)
1168 * space for padding, if needed
1169 * space for control remainder auth tag
1170 * space for preamble auth tag
1171 *
1172 * Encrypt preamble and the inline portion, then encrypt the remainder
1173 * and gather into:
1174 *
1175 * preamble
1176 * control body (48 bytes)
1177 * preamble auth tag
1178 * control body (ctrl_len - 48 bytes)
1179 * zero padding, if needed
1180 * control remainder auth tag
1181 *
1182 * Preamble should already be encoded at the start of base.
1183 */
prepare_head_secure_big(struct ceph_connection * con,void * base,int ctrl_len)1184 static int prepare_head_secure_big(struct ceph_connection *con,
1185 void *base, int ctrl_len)
1186 {
1187 int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1188 void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
1189 void *rem_tag = rem + padded_len(rem_len);
1190 void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
1191 struct scatterlist sgs[2];
1192 int ret;
1193
1194 sg_init_table(sgs, 2);
1195 sg_set_buf(&sgs[0], base, rem - base);
1196 sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
1197 ret = gcm_crypt(con, true, sgs, sgs, rem - base);
1198 if (ret)
1199 return ret;
1200
1201 /* control remainder padding? */
1202 if (need_padding(rem_len))
1203 memset(rem + rem_len, 0, padding_len(rem_len));
1204
1205 sg_init_one(&sgs[0], rem, pmbl_tag - rem);
1206 ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);
1207 if (ret)
1208 return ret;
1209
1210 add_out_kvec(con, base, rem - base);
1211 add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
1212 add_out_kvec(con, rem, pmbl_tag - rem);
1213 return 0;
1214 }
1215
__prepare_control(struct ceph_connection * con,int tag,void * base,int ctrl_len,void * extdata,int extdata_len,bool to_be_signed)1216 static int __prepare_control(struct ceph_connection *con, int tag,
1217 void *base, int ctrl_len, void *extdata,
1218 int extdata_len, bool to_be_signed)
1219 {
1220 int total_len = ctrl_len + extdata_len;
1221 struct ceph_frame_desc desc;
1222 int ret;
1223
1224 dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
1225 total_len, ctrl_len, extdata_len);
1226
1227 /* extdata may be vmalloc'ed but not base */
1228 if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))
1229 return -EINVAL;
1230
1231 init_frame_desc(&desc, tag, &total_len, 1);
1232 encode_preamble(&desc, base);
1233
1234 if (con_secure(con)) {
1235 if (WARN_ON(extdata_len || to_be_signed))
1236 return -EINVAL;
1237
1238 if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
1239 /* fully inlined, inline buffer may need padding */
1240 ret = prepare_head_secure_small(con, base, ctrl_len);
1241 else
1242 /* partially inlined, inline buffer is full */
1243 ret = prepare_head_secure_big(con, base, ctrl_len);
1244 if (ret)
1245 return ret;
1246 } else {
1247 prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,
1248 to_be_signed);
1249 }
1250
1251 ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1252 return 0;
1253 }
1254
prepare_control(struct ceph_connection * con,int tag,void * base,int ctrl_len)1255 static int prepare_control(struct ceph_connection *con, int tag,
1256 void *base, int ctrl_len)
1257 {
1258 return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
1259 }
1260
prepare_hello(struct ceph_connection * con)1261 static int prepare_hello(struct ceph_connection *con)
1262 {
1263 void *buf, *p;
1264 int ctrl_len;
1265
1266 ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
1267 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1268 if (!buf)
1269 return -ENOMEM;
1270
1271 p = CTRL_BODY(buf);
1272 ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
1273 ceph_encode_entity_addr(&p, &con->peer_addr);
1274 WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1275
1276 return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,
1277 NULL, 0, true);
1278 }
1279
1280 /* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
1281 #define AUTH_BUF_LEN (512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)
1282
prepare_auth_request(struct ceph_connection * con)1283 static int prepare_auth_request(struct ceph_connection *con)
1284 {
1285 void *authorizer, *authorizer_copy;
1286 int ctrl_len, authorizer_len;
1287 void *buf;
1288 int ret;
1289
1290 ctrl_len = AUTH_BUF_LEN;
1291 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1292 if (!buf)
1293 return -ENOMEM;
1294
1295 mutex_unlock(&con->mutex);
1296 ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
1297 &authorizer, &authorizer_len);
1298 mutex_lock(&con->mutex);
1299 if (con->state != CEPH_CON_S_V2_HELLO) {
1300 dout("%s con %p state changed to %d\n", __func__, con,
1301 con->state);
1302 return -EAGAIN;
1303 }
1304
1305 dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);
1306 if (ret)
1307 return ret;
1308
1309 authorizer_copy = alloc_conn_buf(con, authorizer_len);
1310 if (!authorizer_copy)
1311 return -ENOMEM;
1312
1313 memcpy(authorizer_copy, authorizer, authorizer_len);
1314
1315 return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
1316 authorizer_copy, authorizer_len, true);
1317 }
1318
prepare_auth_request_more(struct ceph_connection * con,void * reply,int reply_len)1319 static int prepare_auth_request_more(struct ceph_connection *con,
1320 void *reply, int reply_len)
1321 {
1322 int ctrl_len, authorizer_len;
1323 void *authorizer;
1324 void *buf;
1325 int ret;
1326
1327 ctrl_len = AUTH_BUF_LEN;
1328 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1329 if (!buf)
1330 return -ENOMEM;
1331
1332 mutex_unlock(&con->mutex);
1333 ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
1334 CTRL_BODY(buf), &ctrl_len,
1335 &authorizer, &authorizer_len);
1336 mutex_lock(&con->mutex);
1337 if (con->state != CEPH_CON_S_V2_AUTH) {
1338 dout("%s con %p state changed to %d\n", __func__, con,
1339 con->state);
1340 return -EAGAIN;
1341 }
1342
1343 dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);
1344 if (ret)
1345 return ret;
1346
1347 return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
1348 ctrl_len, authorizer, authorizer_len, true);
1349 }
1350
prepare_auth_signature(struct ceph_connection * con)1351 static int prepare_auth_signature(struct ceph_connection *con)
1352 {
1353 void *buf;
1354 int ret;
1355
1356 buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
1357 con_secure(con)));
1358 if (!buf)
1359 return -ENOMEM;
1360
1361 ret = hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
1362 CTRL_BODY(buf));
1363 if (ret)
1364 return ret;
1365
1366 return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
1367 SHA256_DIGEST_SIZE);
1368 }
1369
prepare_client_ident(struct ceph_connection * con)1370 static int prepare_client_ident(struct ceph_connection *con)
1371 {
1372 struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1373 struct ceph_client *client = from_msgr(con->msgr);
1374 u64 global_id = ceph_client_gid(client);
1375 void *buf, *p;
1376 int ctrl_len;
1377
1378 WARN_ON(con->v2.server_cookie);
1379 WARN_ON(con->v2.connect_seq);
1380 WARN_ON(con->v2.peer_global_seq);
1381
1382 if (!con->v2.client_cookie) {
1383 do {
1384 get_random_bytes(&con->v2.client_cookie,
1385 sizeof(con->v2.client_cookie));
1386 } while (!con->v2.client_cookie);
1387 dout("%s con %p generated cookie 0x%llx\n", __func__, con,
1388 con->v2.client_cookie);
1389 } else {
1390 dout("%s con %p cookie already set 0x%llx\n", __func__, con,
1391 con->v2.client_cookie);
1392 }
1393
1394 dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
1395 __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1396 ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
1397 global_id, con->v2.global_seq, client->supported_features,
1398 client->required_features, con->v2.client_cookie);
1399
1400 ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
1401 ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
1402 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1403 if (!buf)
1404 return -ENOMEM;
1405
1406 p = CTRL_BODY(buf);
1407 ceph_encode_8(&p, 2); /* addrvec marker */
1408 ceph_encode_32(&p, 1); /* addr_cnt */
1409 ceph_encode_entity_addr(&p, my_addr);
1410 ceph_encode_entity_addr(&p, &con->peer_addr);
1411 ceph_encode_64(&p, global_id);
1412 ceph_encode_64(&p, con->v2.global_seq);
1413 ceph_encode_64(&p, client->supported_features);
1414 ceph_encode_64(&p, client->required_features);
1415 ceph_encode_64(&p, 0); /* flags */
1416 ceph_encode_64(&p, con->v2.client_cookie);
1417 WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1418
1419 return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
1420 }
1421
prepare_session_reconnect(struct ceph_connection * con)1422 static int prepare_session_reconnect(struct ceph_connection *con)
1423 {
1424 struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1425 void *buf, *p;
1426 int ctrl_len;
1427
1428 WARN_ON(!con->v2.client_cookie);
1429 WARN_ON(!con->v2.server_cookie);
1430 WARN_ON(!con->v2.connect_seq);
1431 WARN_ON(!con->v2.peer_global_seq);
1432
1433 dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
1434 __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1435 con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
1436 con->v2.connect_seq, con->in_seq);
1437
1438 ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
1439 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1440 if (!buf)
1441 return -ENOMEM;
1442
1443 p = CTRL_BODY(buf);
1444 ceph_encode_8(&p, 2); /* entity_addrvec_t marker */
1445 ceph_encode_32(&p, 1); /* my_addrs len */
1446 ceph_encode_entity_addr(&p, my_addr);
1447 ceph_encode_64(&p, con->v2.client_cookie);
1448 ceph_encode_64(&p, con->v2.server_cookie);
1449 ceph_encode_64(&p, con->v2.global_seq);
1450 ceph_encode_64(&p, con->v2.connect_seq);
1451 ceph_encode_64(&p, con->in_seq);
1452 WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1453
1454 return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
1455 }
1456
prepare_keepalive2(struct ceph_connection * con)1457 static int prepare_keepalive2(struct ceph_connection *con)
1458 {
1459 struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
1460 struct timespec64 now;
1461
1462 ktime_get_real_ts64(&now);
1463 dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
1464 now.tv_nsec);
1465
1466 ceph_encode_timespec64(ts, &now);
1467
1468 reset_out_kvecs(con);
1469 return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
1470 sizeof(struct ceph_timespec));
1471 }
1472
prepare_ack(struct ceph_connection * con)1473 static int prepare_ack(struct ceph_connection *con)
1474 {
1475 void *p;
1476
1477 dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1478 con->in_seq_acked, con->in_seq);
1479 con->in_seq_acked = con->in_seq;
1480
1481 p = CTRL_BODY(con->v2.out_buf);
1482 ceph_encode_64(&p, con->in_seq_acked);
1483
1484 reset_out_kvecs(con);
1485 return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
1486 }
1487
prepare_epilogue_plain(struct ceph_connection * con,bool aborted)1488 static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
1489 {
1490 dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
1491 con->out_msg, aborted, con->v2.out_epil.front_crc,
1492 con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);
1493
1494 encode_epilogue_plain(con, aborted);
1495 add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);
1496 }
1497
1498 /*
1499 * For "used" empty segments, crc is -1. For unused (trailing)
1500 * segments, crc is 0.
1501 */
prepare_message_plain(struct ceph_connection * con)1502 static void prepare_message_plain(struct ceph_connection *con)
1503 {
1504 struct ceph_msg *msg = con->out_msg;
1505
1506 prepare_head_plain(con, con->v2.out_buf,
1507 sizeof(struct ceph_msg_header2), NULL, 0, false);
1508
1509 if (!front_len(msg) && !middle_len(msg)) {
1510 if (!data_len(msg)) {
1511 /*
1512 * Empty message: once the head is written,
1513 * we are done -- there is no epilogue.
1514 */
1515 con->v2.out_state = OUT_S_FINISH_MESSAGE;
1516 return;
1517 }
1518
1519 con->v2.out_epil.front_crc = -1;
1520 con->v2.out_epil.middle_crc = -1;
1521 con->v2.out_state = OUT_S_QUEUE_DATA;
1522 return;
1523 }
1524
1525 if (front_len(msg)) {
1526 con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
1527 front_len(msg));
1528 add_out_kvec(con, msg->front.iov_base, front_len(msg));
1529 } else {
1530 /* middle (at least) is there, checked above */
1531 con->v2.out_epil.front_crc = -1;
1532 }
1533
1534 if (middle_len(msg)) {
1535 con->v2.out_epil.middle_crc =
1536 crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
1537 add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
1538 } else {
1539 con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
1540 }
1541
1542 if (data_len(msg)) {
1543 con->v2.out_state = OUT_S_QUEUE_DATA;
1544 } else {
1545 con->v2.out_epil.data_crc = 0;
1546 prepare_epilogue_plain(con, false);
1547 con->v2.out_state = OUT_S_FINISH_MESSAGE;
1548 }
1549 }
1550
1551 /*
1552 * Unfortunately the kernel crypto API doesn't support streaming
1553 * (piecewise) operation for AEAD algorithms, so we can't get away
1554 * with a fixed size buffer and a couple sgs. Instead, we have to
1555 * allocate pages for the entire tail of the message (currently up
1556 * to ~32M) and two sgs arrays (up to ~256K each)...
1557 */
prepare_message_secure(struct ceph_connection * con)1558 static int prepare_message_secure(struct ceph_connection *con)
1559 {
1560 void *zerop = page_address(ceph_zero_page);
1561 struct sg_table enc_sgt = {};
1562 struct sg_table sgt = {};
1563 struct page **enc_pages;
1564 int enc_page_cnt;
1565 int tail_len;
1566 int ret;
1567
1568 ret = prepare_head_secure_small(con, con->v2.out_buf,
1569 sizeof(struct ceph_msg_header2));
1570 if (ret)
1571 return ret;
1572
1573 tail_len = tail_onwire_len(con->out_msg, true);
1574 if (!tail_len) {
1575 /*
1576 * Empty message: once the head is written,
1577 * we are done -- there is no epilogue.
1578 */
1579 con->v2.out_state = OUT_S_FINISH_MESSAGE;
1580 return 0;
1581 }
1582
1583 encode_epilogue_secure(con, false);
1584 ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
1585 &con->v2.out_epil, false);
1586 if (ret)
1587 goto out;
1588
1589 enc_page_cnt = calc_pages_for(0, tail_len);
1590 enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
1591 if (IS_ERR(enc_pages)) {
1592 ret = PTR_ERR(enc_pages);
1593 goto out;
1594 }
1595
1596 WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
1597 con->v2.out_enc_pages = enc_pages;
1598 con->v2.out_enc_page_cnt = enc_page_cnt;
1599 con->v2.out_enc_resid = tail_len;
1600 con->v2.out_enc_i = 0;
1601
1602 ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
1603 0, tail_len, GFP_NOIO);
1604 if (ret)
1605 goto out;
1606
1607 ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
1608 tail_len - CEPH_GCM_TAG_LEN);
1609 if (ret)
1610 goto out;
1611
1612 dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
1613 con->out_msg, sgt.orig_nents, enc_page_cnt);
1614 con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;
1615
1616 out:
1617 sg_free_table(&sgt);
1618 sg_free_table(&enc_sgt);
1619 return ret;
1620 }
1621
prepare_message(struct ceph_connection * con)1622 static int prepare_message(struct ceph_connection *con)
1623 {
1624 int lens[] = {
1625 sizeof(struct ceph_msg_header2),
1626 front_len(con->out_msg),
1627 middle_len(con->out_msg),
1628 data_len(con->out_msg)
1629 };
1630 struct ceph_frame_desc desc;
1631 int ret;
1632
1633 dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
1634 con->out_msg, lens[0], lens[1], lens[2], lens[3]);
1635
1636 if (con->in_seq > con->in_seq_acked) {
1637 dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1638 con->in_seq_acked, con->in_seq);
1639 con->in_seq_acked = con->in_seq;
1640 }
1641
1642 reset_out_kvecs(con);
1643 init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
1644 encode_preamble(&desc, con->v2.out_buf);
1645 fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,
1646 con->in_seq_acked);
1647
1648 if (con_secure(con)) {
1649 ret = prepare_message_secure(con);
1650 if (ret)
1651 return ret;
1652 } else {
1653 prepare_message_plain(con);
1654 }
1655
1656 ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1657 return 0;
1658 }
1659
prepare_read_banner_prefix(struct ceph_connection * con)1660 static int prepare_read_banner_prefix(struct ceph_connection *con)
1661 {
1662 void *buf;
1663
1664 buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);
1665 if (!buf)
1666 return -ENOMEM;
1667
1668 reset_in_kvecs(con);
1669 add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1670 add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1671 con->state = CEPH_CON_S_V2_BANNER_PREFIX;
1672 return 0;
1673 }
1674
prepare_read_banner_payload(struct ceph_connection * con,int payload_len)1675 static int prepare_read_banner_payload(struct ceph_connection *con,
1676 int payload_len)
1677 {
1678 void *buf;
1679
1680 buf = alloc_conn_buf(con, payload_len);
1681 if (!buf)
1682 return -ENOMEM;
1683
1684 reset_in_kvecs(con);
1685 add_in_kvec(con, buf, payload_len);
1686 add_in_sign_kvec(con, buf, payload_len);
1687 con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;
1688 return 0;
1689 }
1690
prepare_read_preamble(struct ceph_connection * con)1691 static void prepare_read_preamble(struct ceph_connection *con)
1692 {
1693 reset_in_kvecs(con);
1694 add_in_kvec(con, con->v2.in_buf,
1695 con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
1696 CEPH_PREAMBLE_PLAIN_LEN);
1697 con->v2.in_state = IN_S_HANDLE_PREAMBLE;
1698 }
1699
prepare_read_control(struct ceph_connection * con)1700 static int prepare_read_control(struct ceph_connection *con)
1701 {
1702 int ctrl_len = con->v2.in_desc.fd_lens[0];
1703 int head_len;
1704 void *buf;
1705
1706 reset_in_kvecs(con);
1707 if (con->state == CEPH_CON_S_V2_HELLO ||
1708 con->state == CEPH_CON_S_V2_AUTH) {
1709 head_len = head_onwire_len(ctrl_len, false);
1710 buf = alloc_conn_buf(con, head_len);
1711 if (!buf)
1712 return -ENOMEM;
1713
1714 /* preserve preamble */
1715 memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);
1716
1717 add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
1718 add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
1719 add_in_sign_kvec(con, buf, head_len);
1720 } else {
1721 if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
1722 buf = alloc_conn_buf(con, ctrl_len);
1723 if (!buf)
1724 return -ENOMEM;
1725
1726 add_in_kvec(con, buf, ctrl_len);
1727 } else {
1728 add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
1729 }
1730 add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
1731 }
1732 con->v2.in_state = IN_S_HANDLE_CONTROL;
1733 return 0;
1734 }
1735
prepare_read_control_remainder(struct ceph_connection * con)1736 static int prepare_read_control_remainder(struct ceph_connection *con)
1737 {
1738 int ctrl_len = con->v2.in_desc.fd_lens[0];
1739 int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1740 void *buf;
1741
1742 buf = alloc_conn_buf(con, ctrl_len);
1743 if (!buf)
1744 return -ENOMEM;
1745
1746 memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);
1747
1748 reset_in_kvecs(con);
1749 add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
1750 add_in_kvec(con, con->v2.in_buf,
1751 padding_len(rem_len) + CEPH_GCM_TAG_LEN);
1752 con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
1753 return 0;
1754 }
1755
prepare_read_data(struct ceph_connection * con)1756 static int prepare_read_data(struct ceph_connection *con)
1757 {
1758 struct bio_vec bv;
1759
1760 con->in_data_crc = -1;
1761 ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
1762 data_len(con->in_msg));
1763
1764 get_bvec_at(&con->v2.in_cursor, &bv);
1765 if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1766 if (unlikely(!con->bounce_page)) {
1767 con->bounce_page = alloc_page(GFP_NOIO);
1768 if (!con->bounce_page) {
1769 pr_err("failed to allocate bounce page\n");
1770 return -ENOMEM;
1771 }
1772 }
1773
1774 bv.bv_page = con->bounce_page;
1775 bv.bv_offset = 0;
1776 }
1777 set_in_bvec(con, &bv);
1778 con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
1779 return 0;
1780 }
1781
prepare_read_data_cont(struct ceph_connection * con)1782 static void prepare_read_data_cont(struct ceph_connection *con)
1783 {
1784 struct bio_vec bv;
1785
1786 if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1787 con->in_data_crc = crc32c(con->in_data_crc,
1788 page_address(con->bounce_page),
1789 con->v2.in_bvec.bv_len);
1790
1791 get_bvec_at(&con->v2.in_cursor, &bv);
1792 memcpy_to_page(bv.bv_page, bv.bv_offset,
1793 page_address(con->bounce_page),
1794 con->v2.in_bvec.bv_len);
1795 } else {
1796 con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
1797 con->v2.in_bvec.bv_page,
1798 con->v2.in_bvec.bv_offset,
1799 con->v2.in_bvec.bv_len);
1800 }
1801
1802 ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
1803 if (con->v2.in_cursor.total_resid) {
1804 get_bvec_at(&con->v2.in_cursor, &bv);
1805 if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1806 bv.bv_page = con->bounce_page;
1807 bv.bv_offset = 0;
1808 }
1809 set_in_bvec(con, &bv);
1810 WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
1811 return;
1812 }
1813
1814 /*
1815 * We've read all data. Prepare to read epilogue.
1816 */
1817 reset_in_kvecs(con);
1818 add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1819 con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1820 }
1821
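/*
 * Queue the tail of a plain message: front and middle are read
 * straight into the message buffers, followed by the data segment
 * (if any) and the plain epilogue carrying the segment crcs.
 */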
1822 static int prepare_read_tail_plain(struct ceph_connection *con)
1823 {
1824 struct ceph_msg *msg = con->in_msg;
1825
1826 if (!front_len(msg) && !middle_len(msg)) {
1827 WARN_ON(!data_len(msg));
1828 return prepare_read_data(con);
1829 }
1830
1831 reset_in_kvecs(con);
1832 if (front_len(msg)) {
1833 add_in_kvec(con, msg->front.iov_base, front_len(msg));
1834 WARN_ON(msg->front.iov_len != front_len(msg));
1835 }
1836 if (middle_len(msg)) {
1837 add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
1838 WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
1839 }
1840
1841 if (data_len(msg)) {
1842 con->v2.in_state = IN_S_PREPARE_READ_DATA;
1843 } else {
1844 add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1845 con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1846 }
1847 return 0;
1848 }
1849
1850 static void prepare_read_enc_page(struct ceph_connection *con)
1851 {
1852 struct bio_vec bv;
1853
1854 dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
1855 con->v2.in_enc_resid);
1856 WARN_ON(!con->v2.in_enc_resid);
1857
1858 bv.bv_page = con->v2.in_enc_pages[con->v2.in_enc_i];
1859 bv.bv_offset = 0;
1860 bv.bv_len = min(con->v2.in_enc_resid, (int)PAGE_SIZE);
1861
1862 set_in_bvec(con, &bv);
1863 con->v2.in_enc_i++;
1864 con->v2.in_enc_resid -= bv.bv_len;
1865
1866 if (con->v2.in_enc_resid) {
1867 con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;
1868 return;
1869 }
1870
1871 /*
1872 * We are set to read the last piece of ciphertext (ending
1873 * with epilogue) + auth tag.
1874 */
1875 WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
1876 con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1877 }
1878
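/*
 * Secure mode: the entire tail (front, middle, data, epilogue and
 * auth tag) arrives as ciphertext.  Read it into a temporary page
 * vector, one page at a time, to be decrypted by decrypt_tail()
 * when the epilogue is handled.
 */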
1879 static int prepare_read_tail_secure(struct ceph_connection *con)
1880 {
1881 struct page **enc_pages;
1882 int enc_page_cnt;
1883 int tail_len;
1884
1885 tail_len = tail_onwire_len(con->in_msg, true);
1886 WARN_ON(!tail_len);
1887
1888 enc_page_cnt = calc_pages_for(0, tail_len);
1889 enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
1890 if (IS_ERR(enc_pages))
1891 return PTR_ERR(enc_pages);
1892
1893 WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
1894 con->v2.in_enc_pages = enc_pages;
1895 con->v2.in_enc_page_cnt = enc_page_cnt;
1896 con->v2.in_enc_resid = tail_len;
1897 con->v2.in_enc_i = 0;
1898
1899 prepare_read_enc_page(con);
1900 return 0;
1901 }
1902
1903 static void __finish_skip(struct ceph_connection *con)
1904 {
1905 con->in_seq++;
1906 prepare_read_preamble(con);
1907 }
1908
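/*
 * Skip the incoming message: discard the remaining tail bytes,
 * if any, and finish up via __finish_skip().
 */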
1909 static void prepare_skip_message(struct ceph_connection *con)
1910 {
1911 struct ceph_frame_desc *desc = &con->v2.in_desc;
1912 int tail_len;
1913
1914 dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
1915 desc->fd_lens[2], desc->fd_lens[3]);
1916
1917 tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
1918 desc->fd_lens[3], con_secure(con));
1919 if (!tail_len) {
1920 __finish_skip(con);
1921 } else {
1922 set_in_skip(con, tail_len);
1923 con->v2.in_state = IN_S_FINISH_SKIP;
1924 }
1925 }
1926
1927 static int process_banner_prefix(struct ceph_connection *con)
1928 {
1929 int payload_len;
1930 void *p;
1931
1932 WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);
1933
1934 p = con->v2.in_kvecs[0].iov_base;
1935 if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
1936 if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
1937 con->error_msg = "server is speaking msgr1 protocol";
1938 else
1939 con->error_msg = "protocol error, bad banner";
1940 return -EINVAL;
1941 }
1942
1943 p += CEPH_BANNER_V2_LEN;
1944 payload_len = ceph_decode_16(&p);
1945 dout("%s con %p payload_len %d\n", __func__, con, payload_len);
1946
1947 return prepare_read_banner_payload(con, payload_len);
1948 }
1949
1950 static int process_banner_payload(struct ceph_connection *con)
1951 {
1952 void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
1953 u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
1954 u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
1955 u64 server_feat, server_req_feat;
1956 void *p;
1957 int ret;
1958
1959 p = con->v2.in_kvecs[0].iov_base;
1960 ceph_decode_64_safe(&p, end, server_feat, bad);
1961 ceph_decode_64_safe(&p, end, server_req_feat, bad);
1962
1963 dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
1964 __func__, con, server_feat, server_req_feat);
1965
1966 if (req_feat & ~server_feat) {
1967 pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
1968 server_feat, req_feat & ~server_feat);
1969 con->error_msg = "missing required protocol features";
1970 return -EINVAL;
1971 }
1972 if (server_req_feat & ~feat) {
1973 pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
1974 feat, server_req_feat & ~feat);
1975 con->error_msg = "missing required protocol features";
1976 return -EINVAL;
1977 }
1978
1979 /* no reset_out_kvecs() as our banner may still be pending */
1980 ret = prepare_hello(con);
1981 if (ret) {
1982 pr_err("prepare_hello failed: %d\n", ret);
1983 return ret;
1984 }
1985
1986 con->state = CEPH_CON_S_V2_HELLO;
1987 prepare_read_preamble(con);
1988 return 0;
1989
1990 bad:
1991 pr_err("failed to decode banner payload\n");
1992 return -EINVAL;
1993 }
1994
1995 static int process_hello(struct ceph_connection *con, void *p, void *end)
1996 {
1997 struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1998 struct ceph_entity_addr addr_for_me;
1999 u8 entity_type;
2000 int ret;
2001
2002 if (con->state != CEPH_CON_S_V2_HELLO) {
2003 con->error_msg = "protocol error, unexpected hello";
2004 return -EINVAL;
2005 }
2006
2007 ceph_decode_8_safe(&p, end, entity_type, bad);
2008 ret = ceph_decode_entity_addr(&p, end, &addr_for_me);
2009 if (ret) {
2010 pr_err("failed to decode addr_for_me: %d\n", ret);
2011 return ret;
2012 }
2013
2014 dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con,
2015 entity_type, ceph_pr_addr(&addr_for_me));
2016
2017 if (entity_type != con->peer_name.type) {
2018 pr_err("bad peer type, want %d, got %d\n",
2019 con->peer_name.type, entity_type);
2020 con->error_msg = "wrong peer at address";
2021 return -EINVAL;
2022 }
2023
2024 /*
2025 * Set our address to the address our first peer (i.e. monitor)
2026 * sees that we are connecting from. If we are behind some sort
2027 * of NAT and want to be identified by some private (not NATed)
2028 * address, the ip option should be used.
2029 */
2030 if (ceph_addr_is_blank(my_addr)) {
2031 memcpy(&my_addr->in_addr, &addr_for_me.in_addr,
2032 sizeof(my_addr->in_addr));
2033 ceph_addr_set_port(my_addr, 0);
2034 dout("%s con %p set my addr %s, as seen by peer %s\n",
2035 __func__, con, ceph_pr_addr(my_addr),
2036 ceph_pr_addr(&con->peer_addr));
2037 } else {
2038 dout("%s con %p my addr already set %s\n",
2039 __func__, con, ceph_pr_addr(my_addr));
2040 }
2041
2042 WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr));
2043 WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY);
2044 WARN_ON(!my_addr->nonce);
2045
2046 /* no reset_out_kvecs() as our hello may still be pending */
2047 ret = prepare_auth_request(con);
2048 if (ret) {
2049 if (ret != -EAGAIN)
2050 pr_err("prepare_auth_request failed: %d\n", ret);
2051 return ret;
2052 }
2053
2054 con->state = CEPH_CON_S_V2_AUTH;
2055 return 0;
2056
2057 bad:
2058 pr_err("failed to decode hello\n");
2059 return -EINVAL;
2060 }
2061
2062 static int process_auth_bad_method(struct ceph_connection *con,
2063 void *p, void *end)
2064 {
2065 int allowed_protos[8], allowed_modes[8];
2066 int allowed_proto_cnt, allowed_mode_cnt;
2067 int used_proto, result;
2068 int ret;
2069 int i;
2070
2071 if (con->state != CEPH_CON_S_V2_AUTH) {
2072 con->error_msg = "protocol error, unexpected auth_bad_method";
2073 return -EINVAL;
2074 }
2075
2076 ceph_decode_32_safe(&p, end, used_proto, bad);
2077 ceph_decode_32_safe(&p, end, result, bad);
2078 dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto,
2079 result);
2080
2081 ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad);
2082 if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) {
2083 pr_err("allowed_protos too big %d\n", allowed_proto_cnt);
2084 return -EINVAL;
2085 }
2086 for (i = 0; i < allowed_proto_cnt; i++) {
2087 ceph_decode_32_safe(&p, end, allowed_protos[i], bad);
2088 dout("%s con %p allowed_protos[%d] %d\n", __func__, con,
2089 i, allowed_protos[i]);
2090 }
2091
2092 ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad);
2093 if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) {
2094 pr_err("allowed_modes too big %d\n", allowed_mode_cnt);
2095 return -EINVAL;
2096 }
2097 for (i = 0; i < allowed_mode_cnt; i++) {
2098 ceph_decode_32_safe(&p, end, allowed_modes[i], bad);
2099 dout("%s con %p allowed_modes[%d] %d\n", __func__, con,
2100 i, allowed_modes[i]);
2101 }
2102
2103 mutex_unlock(&con->mutex);
2104 ret = con->ops->handle_auth_bad_method(con, used_proto, result,
2105 allowed_protos,
2106 allowed_proto_cnt,
2107 allowed_modes,
2108 allowed_mode_cnt);
2109 mutex_lock(&con->mutex);
2110 if (con->state != CEPH_CON_S_V2_AUTH) {
2111 dout("%s con %p state changed to %d\n", __func__, con,
2112 con->state);
2113 return -EAGAIN;
2114 }
2115
2116 dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret);
2117 return ret;
2118
2119 bad:
2120 pr_err("failed to decode auth_bad_method\n");
2121 return -EINVAL;
2122 }
2123
2124 static int process_auth_reply_more(struct ceph_connection *con,
2125 void *p, void *end)
2126 {
2127 int payload_len;
2128 int ret;
2129
2130 if (con->state != CEPH_CON_S_V2_AUTH) {
2131 con->error_msg = "protocol error, unexpected auth_reply_more";
2132 return -EINVAL;
2133 }
2134
2135 ceph_decode_32_safe(&p, end, payload_len, bad);
2136 ceph_decode_need(&p, end, payload_len, bad);
2137
2138 dout("%s con %p payload_len %d\n", __func__, con, payload_len);
2139
2140 reset_out_kvecs(con);
2141 ret = prepare_auth_request_more(con, p, payload_len);
2142 if (ret) {
2143 if (ret != -EAGAIN)
2144 pr_err("prepare_auth_request_more failed: %d\n", ret);
2145 return ret;
2146 }
2147
2148 return 0;
2149
2150 bad:
2151 pr_err("failed to decode auth_reply_more\n");
2152 return -EINVAL;
2153 }
2154
2155 /*
2156 * Align session_key and con_secret to avoid GFP_ATOMIC allocation
2157 * inside crypto_shash_setkey() and crypto_aead_setkey() called from
2158 * setup_crypto(). __aligned(16) isn't guaranteed to work for stack
2159 * objects, so do it by hand.
2160 */
2161 static int process_auth_done(struct ceph_connection *con, void *p, void *end)
2162 {
2163 u8 session_key_buf[CEPH_KEY_LEN + 16];
2164 u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
2165 u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
2166 u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
2167 int session_key_len, con_secret_len;
2168 int payload_len;
2169 u64 global_id;
2170 int ret;
2171
2172 if (con->state != CEPH_CON_S_V2_AUTH) {
2173 con->error_msg = "protocol error, unexpected auth_done";
2174 return -EINVAL;
2175 }
2176
2177 ceph_decode_64_safe(&p, end, global_id, bad);
2178 ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
2179 ceph_decode_32_safe(&p, end, payload_len, bad);
2180
2181 dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
2182 __func__, con, global_id, con->v2.con_mode, payload_len);
2183
2184 mutex_unlock(&con->mutex);
2185 session_key_len = 0;
2186 con_secret_len = 0;
2187 ret = con->ops->handle_auth_done(con, global_id, p, payload_len,
2188 session_key, &session_key_len,
2189 con_secret, &con_secret_len);
2190 mutex_lock(&con->mutex);
2191 if (con->state != CEPH_CON_S_V2_AUTH) {
2192 dout("%s con %p state changed to %d\n", __func__, con,
2193 con->state);
2194 ret = -EAGAIN;
2195 goto out;
2196 }
2197
2198 dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
2199 if (ret)
2200 goto out;
2201
2202 ret = setup_crypto(con, session_key, session_key_len, con_secret,
2203 con_secret_len);
2204 if (ret)
2205 goto out;
2206
2207 reset_out_kvecs(con);
2208 ret = prepare_auth_signature(con);
2209 if (ret) {
2210 pr_err("prepare_auth_signature failed: %d\n", ret);
2211 goto out;
2212 }
2213
2214 con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
2215
2216 out:
2217 memzero_explicit(session_key_buf, sizeof(session_key_buf));
2218 memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
2219 return ret;
2220
2221 bad:
2222 pr_err("failed to decode auth_done\n");
2223 return -EINVAL;
2224 }
2225
2226 static int process_auth_signature(struct ceph_connection *con,
2227 void *p, void *end)
2228 {
2229 u8 hmac[SHA256_DIGEST_SIZE];
2230 int ret;
2231
2232 if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) {
2233 con->error_msg = "protocol error, unexpected auth_signature";
2234 return -EINVAL;
2235 }
2236
2237 ret = hmac_sha256(con, con->v2.out_sign_kvecs,
2238 con->v2.out_sign_kvec_cnt, hmac);
2239 if (ret)
2240 return ret;
2241
2242 ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
2243 if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
2244 con->error_msg = "integrity error, bad auth signature";
2245 return -EBADMSG;
2246 }
2247
2248 dout("%s con %p auth signature ok\n", __func__, con);
2249
2250 /* no reset_out_kvecs() as our auth_signature may still be pending */
2251 if (!con->v2.server_cookie) {
2252 ret = prepare_client_ident(con);
2253 if (ret) {
2254 pr_err("prepare_client_ident failed: %d\n", ret);
2255 return ret;
2256 }
2257
2258 con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2259 } else {
2260 ret = prepare_session_reconnect(con);
2261 if (ret) {
2262 pr_err("prepare_session_reconnect failed: %d\n", ret);
2263 return ret;
2264 }
2265
2266 con->state = CEPH_CON_S_V2_SESSION_RECONNECT;
2267 }
2268
2269 return 0;
2270
2271 bad:
2272 pr_err("failed to decode auth_signature\n");
2273 return -EINVAL;
2274 }
2275
2276 static int process_server_ident(struct ceph_connection *con,
2277 void *p, void *end)
2278 {
2279 struct ceph_client *client = from_msgr(con->msgr);
2280 u64 features, required_features;
2281 struct ceph_entity_addr addr;
2282 u64 global_seq;
2283 u64 global_id;
2284 u64 cookie;
2285 u64 flags;
2286 int ret;
2287
2288 if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2289 con->error_msg = "protocol error, unexpected server_ident";
2290 return -EINVAL;
2291 }
2292
2293 ret = ceph_decode_entity_addrvec(&p, end, true, &addr);
2294 if (ret) {
2295 pr_err("failed to decode server addrs: %d\n", ret);
2296 return ret;
2297 }
2298
2299 ceph_decode_64_safe(&p, end, global_id, bad);
2300 ceph_decode_64_safe(&p, end, global_seq, bad);
2301 ceph_decode_64_safe(&p, end, features, bad);
2302 ceph_decode_64_safe(&p, end, required_features, bad);
2303 ceph_decode_64_safe(&p, end, flags, bad);
2304 ceph_decode_64_safe(&p, end, cookie, bad);
2305
2306 dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n",
2307 __func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce),
2308 global_id, global_seq, features, required_features, flags, cookie);
2309
2310 /* is this who we intended to talk to? */
2311 if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) {
2312 pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n",
2313 ceph_pr_addr(&con->peer_addr),
2314 le32_to_cpu(con->peer_addr.nonce),
2315 ceph_pr_addr(&addr), le32_to_cpu(addr.nonce));
2316 con->error_msg = "wrong peer at address";
2317 return -EINVAL;
2318 }
2319
2320 if (client->required_features & ~features) {
2321 pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
2322 features, client->required_features & ~features);
2323 con->error_msg = "missing required protocol features";
2324 return -EINVAL;
2325 }
2326
2327 /*
2328 * Both name->type and name->num are set in ceph_con_open() but
2329 * name->num may be bogus in the initial monmap. name->type is
2330 * verified in process_hello().
2331 */
2332 WARN_ON(!con->peer_name.type);
2333 con->peer_name.num = cpu_to_le64(global_id);
2334 con->v2.peer_global_seq = global_seq;
2335 con->peer_features = features;
2336 WARN_ON(required_features & ~client->supported_features);
2337 con->v2.server_cookie = cookie;
2338
2339 if (flags & CEPH_MSG_CONNECT_LOSSY) {
2340 ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
2341 WARN_ON(con->v2.server_cookie);
2342 } else {
2343 WARN_ON(!con->v2.server_cookie);
2344 }
2345
2346 clear_in_sign_kvecs(con);
2347 clear_out_sign_kvecs(con);
2348 free_conn_bufs(con);
2349 con->delay = 0; /* reset backoff memory */
2350
2351 con->state = CEPH_CON_S_OPEN;
2352 con->v2.out_state = OUT_S_GET_NEXT;
2353 return 0;
2354
2355 bad:
2356 pr_err("failed to decode server_ident\n");
2357 return -EINVAL;
2358 }
2359
2360 static int process_ident_missing_features(struct ceph_connection *con,
2361 void *p, void *end)
2362 {
2363 struct ceph_client *client = from_msgr(con->msgr);
2364 u64 missing_features;
2365
2366 if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2367 con->error_msg = "protocol error, unexpected ident_missing_features";
2368 return -EINVAL;
2369 }
2370
2371 ceph_decode_64_safe(&p, end, missing_features, bad);
2372 pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2373 client->supported_features, missing_features);
2374 con->error_msg = "missing required protocol features";
2375 return -EINVAL;
2376
2377 bad:
2378 pr_err("failed to decode ident_missing_features\n");
2379 return -EINVAL;
2380 }
2381
2382 static int process_session_reconnect_ok(struct ceph_connection *con,
2383 void *p, void *end)
2384 {
2385 u64 seq;
2386
2387 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2388 con->error_msg = "protocol error, unexpected session_reconnect_ok";
2389 return -EINVAL;
2390 }
2391
2392 ceph_decode_64_safe(&p, end, seq, bad);
2393
2394 dout("%s con %p seq %llu\n", __func__, con, seq);
2395 ceph_con_discard_requeued(con, seq);
2396
2397 clear_in_sign_kvecs(con);
2398 clear_out_sign_kvecs(con);
2399 free_conn_bufs(con);
2400 con->delay = 0; /* reset backoff memory */
2401
2402 con->state = CEPH_CON_S_OPEN;
2403 con->v2.out_state = OUT_S_GET_NEXT;
2404 return 0;
2405
2406 bad:
2407 pr_err("failed to decode session_reconnect_ok\n");
2408 return -EINVAL;
2409 }
2410
2411 static int process_session_retry(struct ceph_connection *con,
2412 void *p, void *end)
2413 {
2414 u64 connect_seq;
2415 int ret;
2416
2417 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2418 con->error_msg = "protocol error, unexpected session_retry";
2419 return -EINVAL;
2420 }
2421
2422 ceph_decode_64_safe(&p, end, connect_seq, bad);
2423
2424 dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq);
2425 WARN_ON(connect_seq <= con->v2.connect_seq);
2426 con->v2.connect_seq = connect_seq + 1;
2427
2428 free_conn_bufs(con);
2429
2430 reset_out_kvecs(con);
2431 ret = prepare_session_reconnect(con);
2432 if (ret) {
2433 pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret);
2434 return ret;
2435 }
2436
2437 return 0;
2438
2439 bad:
2440 pr_err("failed to decode session_retry\n");
2441 return -EINVAL;
2442 }
2443
2444 static int process_session_retry_global(struct ceph_connection *con,
2445 void *p, void *end)
2446 {
2447 u64 global_seq;
2448 int ret;
2449
2450 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2451 con->error_msg = "protocol error, unexpected session_retry_global";
2452 return -EINVAL;
2453 }
2454
2455 ceph_decode_64_safe(&p, end, global_seq, bad);
2456
2457 dout("%s con %p global_seq %llu\n", __func__, con, global_seq);
2458 WARN_ON(global_seq <= con->v2.global_seq);
2459 con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq);
2460
2461 free_conn_bufs(con);
2462
2463 reset_out_kvecs(con);
2464 ret = prepare_session_reconnect(con);
2465 if (ret) {
2466 pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret);
2467 return ret;
2468 }
2469
2470 return 0;
2471
2472 bad:
2473 pr_err("failed to decode session_retry_global\n");
2474 return -EINVAL;
2475 }
2476
2477 static int process_session_reset(struct ceph_connection *con,
2478 void *p, void *end)
2479 {
2480 bool full;
2481 int ret;
2482
2483 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2484 con->error_msg = "protocol error, unexpected session_reset";
2485 return -EINVAL;
2486 }
2487
2488 ceph_decode_8_safe(&p, end, full, bad);
2489 if (!full) {
2490 con->error_msg = "protocol error, bad session_reset";
2491 return -EINVAL;
2492 }
2493
2494 pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name),
2495 ceph_pr_addr(&con->peer_addr));
2496 ceph_con_reset_session(con);
2497
2498 mutex_unlock(&con->mutex);
2499 if (con->ops->peer_reset)
2500 con->ops->peer_reset(con);
2501 mutex_lock(&con->mutex);
2502 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2503 dout("%s con %p state changed to %d\n", __func__, con,
2504 con->state);
2505 return -EAGAIN;
2506 }
2507
2508 free_conn_bufs(con);
2509
2510 reset_out_kvecs(con);
2511 ret = prepare_client_ident(con);
2512 if (ret) {
2513 pr_err("prepare_client_ident (rst) failed: %d\n", ret);
2514 return ret;
2515 }
2516
2517 con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2518 return 0;
2519
2520 bad:
2521 pr_err("failed to decode session_reset\n");
2522 return -EINVAL;
2523 }
2524
2525 static int process_keepalive2_ack(struct ceph_connection *con,
2526 void *p, void *end)
2527 {
2528 if (con->state != CEPH_CON_S_OPEN) {
2529 con->error_msg = "protocol error, unexpected keepalive2_ack";
2530 return -EINVAL;
2531 }
2532
2533 ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
2534 ceph_decode_timespec64(&con->last_keepalive_ack, p);
2535
2536 dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
2537 con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);
2538
2539 return 0;
2540
2541 bad:
2542 pr_err("failed to decode keepalive2_ack\n");
2543 return -EINVAL;
2544 }
2545
2546 static int process_ack(struct ceph_connection *con, void *p, void *end)
2547 {
2548 u64 seq;
2549
2550 if (con->state != CEPH_CON_S_OPEN) {
2551 con->error_msg = "protocol error, unexpected ack";
2552 return -EINVAL;
2553 }
2554
2555 ceph_decode_64_safe(&p, end, seq, bad);
2556
2557 dout("%s con %p seq %llu\n", __func__, con, seq);
2558 ceph_con_discard_sent(con, seq);
2559 return 0;
2560
2561 bad:
2562 pr_err("failed to decode ack\n");
2563 return -EINVAL;
2564 }
2565
2566 static int process_control(struct ceph_connection *con, void *p, void *end)
2567 {
2568 int tag = con->v2.in_desc.fd_tag;
2569 int ret;
2570
2571 dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p));
2572
2573 switch (tag) {
2574 case FRAME_TAG_HELLO:
2575 ret = process_hello(con, p, end);
2576 break;
2577 case FRAME_TAG_AUTH_BAD_METHOD:
2578 ret = process_auth_bad_method(con, p, end);
2579 break;
2580 case FRAME_TAG_AUTH_REPLY_MORE:
2581 ret = process_auth_reply_more(con, p, end);
2582 break;
2583 case FRAME_TAG_AUTH_DONE:
2584 ret = process_auth_done(con, p, end);
2585 break;
2586 case FRAME_TAG_AUTH_SIGNATURE:
2587 ret = process_auth_signature(con, p, end);
2588 break;
2589 case FRAME_TAG_SERVER_IDENT:
2590 ret = process_server_ident(con, p, end);
2591 break;
2592 case FRAME_TAG_IDENT_MISSING_FEATURES:
2593 ret = process_ident_missing_features(con, p, end);
2594 break;
2595 case FRAME_TAG_SESSION_RECONNECT_OK:
2596 ret = process_session_reconnect_ok(con, p, end);
2597 break;
2598 case FRAME_TAG_SESSION_RETRY:
2599 ret = process_session_retry(con, p, end);
2600 break;
2601 case FRAME_TAG_SESSION_RETRY_GLOBAL:
2602 ret = process_session_retry_global(con, p, end);
2603 break;
2604 case FRAME_TAG_SESSION_RESET:
2605 ret = process_session_reset(con, p, end);
2606 break;
2607 case FRAME_TAG_KEEPALIVE2_ACK:
2608 ret = process_keepalive2_ack(con, p, end);
2609 break;
2610 case FRAME_TAG_ACK:
2611 ret = process_ack(con, p, end);
2612 break;
2613 default:
2614 pr_err("bad tag %d\n", tag);
2615 con->error_msg = "protocol error, bad tag";
2616 return -EINVAL;
2617 }
2618 if (ret) {
2619 dout("%s con %p error %d\n", __func__, con, ret);
2620 return ret;
2621 }
2622
2623 prepare_read_preamble(con);
2624 return 0;
2625 }
2626
2627 /*
2628 * Return:
2629 * 1 - con->in_msg set, read message
2630 * 0 - skip message
2631 * <0 - error
2632 */
2633 static int process_message_header(struct ceph_connection *con,
2634 void *p, void *end)
2635 {
2636 struct ceph_frame_desc *desc = &con->v2.in_desc;
2637 struct ceph_msg_header2 *hdr2 = p;
2638 struct ceph_msg_header hdr;
2639 int skip;
2640 int ret;
2641 u64 seq;
2642
2643 /* verify seq# */
2644 seq = le64_to_cpu(hdr2->seq);
2645 if ((s64)seq - (s64)con->in_seq < 1) {
2646 pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n",
2647 ENTITY_NAME(con->peer_name),
2648 ceph_pr_addr(&con->peer_addr),
2649 seq, con->in_seq + 1);
2650 return 0;
2651 }
2652 if ((s64)seq - (s64)con->in_seq > 1) {
2653 pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1);
2654 con->error_msg = "bad message sequence # for incoming message";
2655 return -EBADE;
2656 }
2657
2658 ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq));
2659
2660 fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2],
2661 desc->fd_lens[3], &con->peer_name);
2662 ret = ceph_con_in_msg_alloc(con, &hdr, &skip);
2663 if (ret)
2664 return ret;
2665
2666 WARN_ON(!con->in_msg ^ skip);
2667 if (skip)
2668 return 0;
2669
2670 WARN_ON(!con->in_msg);
2671 WARN_ON(con->in_msg->con != con);
2672 return 1;
2673 }
2674
2675 static int process_message(struct ceph_connection *con)
2676 {
2677 ceph_con_process_message(con);
2678
2679 /*
2680 * We could have been closed by ceph_con_close() because
2681 * ceph_con_process_message() temporarily drops con->mutex.
2682 */
2683 if (con->state != CEPH_CON_S_OPEN) {
2684 dout("%s con %p state changed to %d\n", __func__, con,
2685 con->state);
2686 return -EAGAIN;
2687 }
2688
2689 prepare_read_preamble(con);
2690 return 0;
2691 }
2692
2693 static int __handle_control(struct ceph_connection *con, void *p)
2694 {
2695 void *end = p + con->v2.in_desc.fd_lens[0];
2696 struct ceph_msg *msg;
2697 int ret;
2698
2699 if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
2700 return process_control(con, p, end);
2701
2702 ret = process_message_header(con, p, end);
2703 if (ret < 0)
2704 return ret;
2705 if (ret == 0) {
2706 prepare_skip_message(con);
2707 return 0;
2708 }
2709
2710 msg = con->in_msg; /* set in process_message_header() */
2711 if (front_len(msg)) {
2712 WARN_ON(front_len(msg) > msg->front_alloc_len);
2713 msg->front.iov_len = front_len(msg);
2714 } else {
2715 msg->front.iov_len = 0;
2716 }
2717 if (middle_len(msg)) {
2718 WARN_ON(middle_len(msg) > msg->middle->alloc_len);
2719 msg->middle->vec.iov_len = middle_len(msg);
2720 } else if (msg->middle) {
2721 msg->middle->vec.iov_len = 0;
2722 }
2723
2724 if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
2725 return process_message(con);
2726
2727 if (con_secure(con))
2728 return prepare_read_tail_secure(con);
2729
2730 return prepare_read_tail_plain(con);
2731 }
2732
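/*
 * Every frame starts with a preamble describing its tag and up to
 * four segments: fd_lens[0] is the control part (ceph_msg_header2
 * for MESSAGE frames), fd_lens[1..3] are front, middle and data.
 * Roughly, a plain-mode MESSAGE frame looks like:
 *
 *   preamble | control | crc | front | middle | data | epilogue
 *
 * In secure mode the preamble block is encrypted and small control
 * segments ride inline in it.
 */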
2733 static int handle_preamble(struct ceph_connection *con)
2734 {
2735 struct ceph_frame_desc *desc = &con->v2.in_desc;
2736 int ret;
2737
2738 if (con_secure(con)) {
2739 ret = decrypt_preamble(con);
2740 if (ret) {
2741 if (ret == -EBADMSG)
2742 con->error_msg = "integrity error, bad preamble auth tag";
2743 return ret;
2744 }
2745 }
2746
2747 ret = decode_preamble(con->v2.in_buf, desc);
2748 if (ret) {
2749 if (ret == -EBADMSG)
2750 con->error_msg = "integrity error, bad crc";
2751 else
2752 con->error_msg = "protocol error, bad preamble";
2753 return ret;
2754 }
2755
2756 dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__,
2757 con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0],
2758 desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]);
2759
2760 if (!con_secure(con))
2761 return prepare_read_control(con);
2762
2763 if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN)
2764 return prepare_read_control_remainder(con);
2765
2766 return __handle_control(con, CTRL_BODY(con->v2.in_buf));
2767 }
2768
2769 static int handle_control(struct ceph_connection *con)
2770 {
2771 int ctrl_len = con->v2.in_desc.fd_lens[0];
2772 void *buf;
2773 int ret;
2774
2775 WARN_ON(con_secure(con));
2776
2777 ret = verify_control_crc(con);
2778 if (ret) {
2779 con->error_msg = "integrity error, bad crc";
2780 return ret;
2781 }
2782
2783 if (con->state == CEPH_CON_S_V2_AUTH) {
2784 buf = alloc_conn_buf(con, ctrl_len);
2785 if (!buf)
2786 return -ENOMEM;
2787
2788 memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len);
2789 return __handle_control(con, buf);
2790 }
2791
2792 return __handle_control(con, con->v2.in_kvecs[0].iov_base);
2793 }
2794
2795 static int handle_control_remainder(struct ceph_connection *con)
2796 {
2797 int ret;
2798
2799 WARN_ON(!con_secure(con));
2800
2801 ret = decrypt_control_remainder(con);
2802 if (ret) {
2803 if (ret == -EBADMSG)
2804 con->error_msg = "integrity error, bad control remainder auth tag";
2805 return ret;
2806 }
2807
2808 return __handle_control(con, con->v2.in_kvecs[0].iov_base -
2809 CEPH_PREAMBLE_INLINE_LEN);
2810 }
2811
2812 static int handle_epilogue(struct ceph_connection *con)
2813 {
2814 u32 front_crc, middle_crc, data_crc;
2815 int ret;
2816
2817 if (con_secure(con)) {
2818 ret = decrypt_tail(con);
2819 if (ret) {
2820 if (ret == -EBADMSG)
2821 con->error_msg = "integrity error, bad epilogue auth tag";
2822 return ret;
2823 }
2824
2825 /* just late_status */
2826 ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL);
2827 if (ret) {
2828 con->error_msg = "protocol error, bad epilogue";
2829 return ret;
2830 }
2831 } else {
2832 ret = decode_epilogue(con->v2.in_buf, &front_crc,
2833 &middle_crc, &data_crc);
2834 if (ret) {
2835 con->error_msg = "protocol error, bad epilogue";
2836 return ret;
2837 }
2838
2839 ret = verify_epilogue_crcs(con, front_crc, middle_crc,
2840 data_crc);
2841 if (ret) {
2842 con->error_msg = "integrity error, bad crc";
2843 return ret;
2844 }
2845 }
2846
2847 return process_message(con);
2848 }
2849
2850 static void finish_skip(struct ceph_connection *con)
2851 {
2852 dout("%s con %p\n", __func__, con);
2853
2854 if (con_secure(con))
2855 gcm_inc_nonce(&con->v2.in_gcm_nonce);
2856
2857 __finish_skip(con);
2858 }
2859
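/*
 * Advance the incoming state machine by one step: process whatever
 * was just read and queue the next chunk of kvecs/bvecs into
 * in_iter.  Returns 1 once in_iter has been repopulated, negative
 * error otherwise.
 */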
2860 static int populate_in_iter(struct ceph_connection *con)
2861 {
2862 int ret;
2863
2864 dout("%s con %p state %d in_state %d\n", __func__, con, con->state,
2865 con->v2.in_state);
2866 WARN_ON(iov_iter_count(&con->v2.in_iter));
2867
2868 if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) {
2869 ret = process_banner_prefix(con);
2870 } else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) {
2871 ret = process_banner_payload(con);
2872 } else if ((con->state >= CEPH_CON_S_V2_HELLO &&
2873 con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) ||
2874 con->state == CEPH_CON_S_OPEN) {
2875 switch (con->v2.in_state) {
2876 case IN_S_HANDLE_PREAMBLE:
2877 ret = handle_preamble(con);
2878 break;
2879 case IN_S_HANDLE_CONTROL:
2880 ret = handle_control(con);
2881 break;
2882 case IN_S_HANDLE_CONTROL_REMAINDER:
2883 ret = handle_control_remainder(con);
2884 break;
2885 case IN_S_PREPARE_READ_DATA:
2886 ret = prepare_read_data(con);
2887 break;
2888 case IN_S_PREPARE_READ_DATA_CONT:
2889 prepare_read_data_cont(con);
2890 ret = 0;
2891 break;
2892 case IN_S_PREPARE_READ_ENC_PAGE:
2893 prepare_read_enc_page(con);
2894 ret = 0;
2895 break;
2896 case IN_S_HANDLE_EPILOGUE:
2897 ret = handle_epilogue(con);
2898 break;
2899 case IN_S_FINISH_SKIP:
2900 finish_skip(con);
2901 ret = 0;
2902 break;
2903 default:
2904 WARN(1, "bad in_state %d", con->v2.in_state);
2905 return -EINVAL;
2906 }
2907 } else {
2908 WARN(1, "bad state %d", con->state);
2909 return -EINVAL;
2910 }
2911 if (ret) {
2912 dout("%s con %p error %d\n", __func__, con, ret);
2913 return ret;
2914 }
2915
2916 if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
2917 return -ENODATA;
2918 dout("%s con %p populated %zu\n", __func__, con,
2919 iov_iter_count(&con->v2.in_iter));
2920 return 1;
2921 }
2922
2923 int ceph_con_v2_try_read(struct ceph_connection *con)
2924 {
2925 int ret;
2926
2927 dout("%s con %p state %d need %zu\n", __func__, con, con->state,
2928 iov_iter_count(&con->v2.in_iter));
2929
2930 if (con->state == CEPH_CON_S_PREOPEN)
2931 return 0;
2932
2933 /*
2934 * We should always have something pending here. If not,
2935 * avoid calling populate_in_iter() as if we had read something
2936 * (ceph_tcp_recv() would immediately return 1).
2937 */
2938 if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
2939 return -ENODATA;
2940
2941 for (;;) {
2942 ret = ceph_tcp_recv(con);
2943 if (ret <= 0)
2944 return ret;
2945
2946 ret = populate_in_iter(con);
2947 if (ret <= 0) {
2948 if (ret && ret != -EAGAIN && !con->error_msg)
2949 con->error_msg = "read processing error";
2950 return ret;
2951 }
2952 }
2953 }
2954
2955 static void queue_data(struct ceph_connection *con)
2956 {
2957 struct bio_vec bv;
2958
2959 con->v2.out_epil.data_crc = -1;
2960 ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg,
2961 data_len(con->out_msg));
2962
2963 get_bvec_at(&con->v2.out_cursor, &bv);
2964 set_out_bvec(con, &bv, true);
2965 con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
2966 }
2967
2968 static void queue_data_cont(struct ceph_connection *con)
2969 {
2970 struct bio_vec bv;
2971
2972 con->v2.out_epil.data_crc = ceph_crc32c_page(
2973 con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
2974 con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len);
2975
2976 ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len);
2977 if (con->v2.out_cursor.total_resid) {
2978 get_bvec_at(&con->v2.out_cursor, &bv);
2979 set_out_bvec(con, &bv, true);
2980 WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT);
2981 return;
2982 }
2983
2984 /*
2985 * We've written all data. Queue epilogue. Once it's written,
2986 * we are done.
2987 */
2988 reset_out_kvecs(con);
2989 prepare_epilogue_plain(con, false);
2990 con->v2.out_state = OUT_S_FINISH_MESSAGE;
2991 }
2992
2993 static void queue_enc_page(struct ceph_connection *con)
2994 {
2995 struct bio_vec bv;
2996
2997 dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i,
2998 con->v2.out_enc_resid);
2999 WARN_ON(!con->v2.out_enc_resid);
3000
3001 bv.bv_page = con->v2.out_enc_pages[con->v2.out_enc_i];
3002 bv.bv_offset = 0;
3003 bv.bv_len = min(con->v2.out_enc_resid, (int)PAGE_SIZE);
3004
3005 set_out_bvec(con, &bv, false);
3006 con->v2.out_enc_i++;
3007 con->v2.out_enc_resid -= bv.bv_len;
3008
3009 if (con->v2.out_enc_resid) {
3010 WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE);
3011 return;
3012 }
3013
3014 /*
3015 * We've queued the last piece of ciphertext (ending with
3016 * epilogue) + auth tag. Once it's written, we are done.
3017 */
3018 WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt);
3019 con->v2.out_state = OUT_S_FINISH_MESSAGE;
3020 }
3021
3022 static void queue_zeros(struct ceph_connection *con)
3023 {
3024 dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);
3025
3026 if (con->v2.out_zero) {
3027 set_out_bvec_zero(con);
3028 con->v2.out_zero -= con->v2.out_bvec.bv_len;
3029 con->v2.out_state = OUT_S_QUEUE_ZEROS;
3030 return;
3031 }
3032
3033 /*
3034 * We've zero-filled everything up to epilogue. Queue epilogue
3035 * with late_status set to ABORTED and crcs adjusted for zeros.
3036 * Once it's written, we are done patching up for the revoke.
3037 */
3038 reset_out_kvecs(con);
3039 prepare_epilogue_plain(con, true);
3040 con->v2.out_state = OUT_S_FINISH_MESSAGE;
3041 }
3042
3043 static void finish_message(struct ceph_connection *con)
3044 {
3045 dout("%s con %p msg %p\n", __func__, con, con->out_msg);
3046
3047 /* we end up here in both plain and secure modes */
3048 if (con->v2.out_enc_pages) {
3049 WARN_ON(!con->v2.out_enc_page_cnt);
3050 ceph_release_page_vector(con->v2.out_enc_pages,
3051 con->v2.out_enc_page_cnt);
3052 con->v2.out_enc_pages = NULL;
3053 con->v2.out_enc_page_cnt = 0;
3054 }
3055 /* message may have been revoked */
3056 if (con->out_msg) {
3057 ceph_msg_put(con->out_msg);
3058 con->out_msg = NULL;
3059 }
3060
3061 con->v2.out_state = OUT_S_GET_NEXT;
3062 }
3063
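/*
 * Pick the next thing to write: either step the out_state machine
 * for the message currently going out, or start something new -
 * a keepalive2, the next message from out_queue or a bare ack.
 * Returns 1 if out_iter was populated, 0 if there is nothing to
 * send (WRITE_PENDING is cleared), negative error otherwise.
 */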
3064 static int populate_out_iter(struct ceph_connection *con)
3065 {
3066 int ret;
3067
3068 dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
3069 con->v2.out_state);
3070 WARN_ON(iov_iter_count(&con->v2.out_iter));
3071
3072 if (con->state != CEPH_CON_S_OPEN) {
3073 WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX ||
3074 con->state > CEPH_CON_S_V2_SESSION_RECONNECT);
3075 goto nothing_pending;
3076 }
3077
3078 switch (con->v2.out_state) {
3079 case OUT_S_QUEUE_DATA:
3080 WARN_ON(!con->out_msg);
3081 queue_data(con);
3082 goto populated;
3083 case OUT_S_QUEUE_DATA_CONT:
3084 WARN_ON(!con->out_msg);
3085 queue_data_cont(con);
3086 goto populated;
3087 case OUT_S_QUEUE_ENC_PAGE:
3088 queue_enc_page(con);
3089 goto populated;
3090 case OUT_S_QUEUE_ZEROS:
3091 WARN_ON(con->out_msg); /* revoked */
3092 queue_zeros(con);
3093 goto populated;
3094 case OUT_S_FINISH_MESSAGE:
3095 finish_message(con);
3096 break;
3097 case OUT_S_GET_NEXT:
3098 break;
3099 default:
3100 WARN(1, "bad out_state %d", con->v2.out_state);
3101 return -EINVAL;
3102 }
3103
3104 WARN_ON(con->v2.out_state != OUT_S_GET_NEXT);
3105 if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
3106 ret = prepare_keepalive2(con);
3107 if (ret) {
3108 pr_err("prepare_keepalive2 failed: %d\n", ret);
3109 return ret;
3110 }
3111 } else if (!list_empty(&con->out_queue)) {
3112 ceph_con_get_out_msg(con);
3113 ret = prepare_message(con);
3114 if (ret) {
3115 pr_err("prepare_message failed: %d\n", ret);
3116 return ret;
3117 }
3118 } else if (con->in_seq > con->in_seq_acked) {
3119 ret = prepare_ack(con);
3120 if (ret) {
3121 pr_err("prepare_ack failed: %d\n", ret);
3122 return ret;
3123 }
3124 } else {
3125 goto nothing_pending;
3126 }
3127
3128 populated:
3129 if (WARN_ON(!iov_iter_count(&con->v2.out_iter)))
3130 return -ENODATA;
3131 dout("%s con %p populated %zu\n", __func__, con,
3132 iov_iter_count(&con->v2.out_iter));
3133 return 1;
3134
3135 nothing_pending:
3136 WARN_ON(iov_iter_count(&con->v2.out_iter));
3137 dout("%s con %p nothing pending\n", __func__, con);
3138 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
3139 return 0;
3140 }
3141
3142 int ceph_con_v2_try_write(struct ceph_connection *con)
3143 {
3144 int ret;
3145
3146 dout("%s con %p state %d have %zu\n", __func__, con, con->state,
3147 iov_iter_count(&con->v2.out_iter));
3148
3149 /* open the socket first? */
3150 if (con->state == CEPH_CON_S_PREOPEN) {
3151 WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2);
3152
3153 /*
3154 * Always bump global_seq. Bump connect_seq only if
3155 * there is a session (i.e. we are reconnecting and will
3156 * send session_reconnect instead of client_ident).
3157 */
3158 con->v2.global_seq = ceph_get_global_seq(con->msgr, 0);
3159 if (con->v2.server_cookie)
3160 con->v2.connect_seq++;
3161
3162 ret = prepare_read_banner_prefix(con);
3163 if (ret) {
3164 pr_err("prepare_read_banner_prefix failed: %d\n", ret);
3165 con->error_msg = "connect error";
3166 return ret;
3167 }
3168
3169 reset_out_kvecs(con);
3170 ret = prepare_banner(con);
3171 if (ret) {
3172 pr_err("prepare_banner failed: %d\n", ret);
3173 con->error_msg = "connect error";
3174 return ret;
3175 }
3176
3177 ret = ceph_tcp_connect(con);
3178 if (ret) {
3179 pr_err("ceph_tcp_connect failed: %d\n", ret);
3180 con->error_msg = "connect error";
3181 return ret;
3182 }
3183 }
3184
3185 if (!iov_iter_count(&con->v2.out_iter)) {
3186 ret = populate_out_iter(con);
3187 if (ret <= 0) {
3188 if (ret && ret != -EAGAIN && !con->error_msg)
3189 con->error_msg = "write processing error";
3190 return ret;
3191 }
3192 }
3193
3194 tcp_sock_set_cork(con->sock->sk, true);
3195 for (;;) {
3196 ret = ceph_tcp_send(con);
3197 if (ret <= 0)
3198 break;
3199
3200 ret = populate_out_iter(con);
3201 if (ret <= 0) {
3202 if (ret && ret != -EAGAIN && !con->error_msg)
3203 con->error_msg = "write processing error";
3204 break;
3205 }
3206 }
3207
3208 tcp_sock_set_cork(con->sock->sk, false);
3209 return ret;
3210 }
3211
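/* Extend @crc over @zero_len zero bytes, a page of zeros at a time. */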
3212 static u32 crc32c_zeros(u32 crc, int zero_len)
3213 {
3214 int len;
3215
3216 while (zero_len) {
3217 len = min(zero_len, (int)PAGE_SIZE);
3218 crc = crc32c(crc, page_address(ceph_zero_page), len);
3219 zero_len -= len;
3220 }
3221
3222 return crc;
3223 }
3224
3225 static void prepare_zero_front(struct ceph_connection *con, int resid)
3226 {
3227 int sent;
3228
3229 WARN_ON(!resid || resid > front_len(con->out_msg));
3230 sent = front_len(con->out_msg) - resid;
3231 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3232
3233 if (sent) {
3234 con->v2.out_epil.front_crc =
3235 crc32c(-1, con->out_msg->front.iov_base, sent);
3236 con->v2.out_epil.front_crc =
3237 crc32c_zeros(con->v2.out_epil.front_crc, resid);
3238 } else {
3239 con->v2.out_epil.front_crc = crc32c_zeros(-1, resid);
3240 }
3241
3242 con->v2.out_iter.count -= resid;
3243 out_zero_add(con, resid);
3244 }
3245
3246 static void prepare_zero_middle(struct ceph_connection *con, int resid)
3247 {
3248 int sent;
3249
3250 WARN_ON(!resid || resid > middle_len(con->out_msg));
3251 sent = middle_len(con->out_msg) - resid;
3252 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3253
3254 if (sent) {
3255 con->v2.out_epil.middle_crc =
3256 crc32c(-1, con->out_msg->middle->vec.iov_base, sent);
3257 con->v2.out_epil.middle_crc =
3258 crc32c_zeros(con->v2.out_epil.middle_crc, resid);
3259 } else {
3260 con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid);
3261 }
3262
3263 con->v2.out_iter.count -= resid;
3264 out_zero_add(con, resid);
3265 }
3266
3267 static void prepare_zero_data(struct ceph_connection *con)
3268 {
3269 dout("%s con %p\n", __func__, con);
3270 con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg));
3271 out_zero_add(con, data_len(con->out_msg));
3272 }
3273
3274 static void revoke_at_queue_data(struct ceph_connection *con)
3275 {
3276 int boundary;
3277 int resid;
3278
3279 WARN_ON(!data_len(con->out_msg));
3280 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3281 resid = iov_iter_count(&con->v2.out_iter);
3282
3283 boundary = front_len(con->out_msg) + middle_len(con->out_msg);
3284 if (resid > boundary) {
3285 resid -= boundary;
3286 WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3287 dout("%s con %p was sending head\n", __func__, con);
3288 if (front_len(con->out_msg))
3289 prepare_zero_front(con, front_len(con->out_msg));
3290 if (middle_len(con->out_msg))
3291 prepare_zero_middle(con, middle_len(con->out_msg));
3292 prepare_zero_data(con);
3293 WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3294 con->v2.out_state = OUT_S_QUEUE_ZEROS;
3295 return;
3296 }
3297
3298 boundary = middle_len(con->out_msg);
3299 if (resid > boundary) {
3300 resid -= boundary;
3301 dout("%s con %p was sending front\n", __func__, con);
3302 prepare_zero_front(con, resid);
3303 if (middle_len(con->out_msg))
3304 prepare_zero_middle(con, middle_len(con->out_msg));
3305 prepare_zero_data(con);
3306 queue_zeros(con);
3307 return;
3308 }
3309
3310 WARN_ON(!resid);
3311 dout("%s con %p was sending middle\n", __func__, con);
3312 prepare_zero_middle(con, resid);
3313 prepare_zero_data(con);
3314 queue_zeros(con);
3315 }
3316
3317 static void revoke_at_queue_data_cont(struct ceph_connection *con)
3318 {
3319 int sent, resid; /* current piece of data */
3320
3321 WARN_ON(!data_len(con->out_msg));
3322 WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
3323 resid = iov_iter_count(&con->v2.out_iter);
3324 WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
3325 sent = con->v2.out_bvec.bv_len - resid;
3326 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3327
3328 if (sent) {
3329 con->v2.out_epil.data_crc = ceph_crc32c_page(
3330 con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
3331 con->v2.out_bvec.bv_offset, sent);
3332 ceph_msg_data_advance(&con->v2.out_cursor, sent);
3333 }
3334 WARN_ON(resid > con->v2.out_cursor.total_resid);
3335 con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc,
3336 con->v2.out_cursor.total_resid);
3337
3338 con->v2.out_iter.count -= resid;
3339 out_zero_add(con, con->v2.out_cursor.total_resid);
3340 queue_zeros(con);
3341 }
3342
3343 static void revoke_at_finish_message(struct ceph_connection *con)
3344 {
3345 int boundary;
3346 int resid;
3347
3348 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3349 resid = iov_iter_count(&con->v2.out_iter);
3350
3351 if (!front_len(con->out_msg) && !middle_len(con->out_msg) &&
3352 !data_len(con->out_msg)) {
3353 WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
3354 dout("%s con %p was sending head (empty message) - noop\n",
3355 __func__, con);
3356 return;
3357 }
3358
3359 boundary = front_len(con->out_msg) + middle_len(con->out_msg) +
3360 CEPH_EPILOGUE_PLAIN_LEN;
3361 if (resid > boundary) {
3362 resid -= boundary;
3363 WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3364 dout("%s con %p was sending head\n", __func__, con);
3365 if (front_len(con->out_msg))
3366 prepare_zero_front(con, front_len(con->out_msg));
3367 if (middle_len(con->out_msg))
3368 prepare_zero_middle(con, middle_len(con->out_msg));
3369 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3370 WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3371 con->v2.out_state = OUT_S_QUEUE_ZEROS;
3372 return;
3373 }
3374
3375 boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN;
3376 if (resid > boundary) {
3377 resid -= boundary;
3378 dout("%s con %p was sending front\n", __func__, con);
3379 prepare_zero_front(con, resid);
3380 if (middle_len(con->out_msg))
3381 prepare_zero_middle(con, middle_len(con->out_msg));
3382 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3383 queue_zeros(con);
3384 return;
3385 }
3386
3387 boundary = CEPH_EPILOGUE_PLAIN_LEN;
3388 if (resid > boundary) {
3389 resid -= boundary;
3390 dout("%s con %p was sending middle\n", __func__, con);
3391 prepare_zero_middle(con, resid);
3392 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3393 queue_zeros(con);
3394 return;
3395 }
3396
3397 WARN_ON(!resid);
3398 dout("%s con %p was sending epilogue - noop\n", __func__, con);
3399 }
3400
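/*
 * Revoke con->out_msg while (part of) its frame is queued for
 * sending.  In plain mode, zero-fill what hasn't been sent yet and
 * finish the frame with an epilogue marked ABORTED so that the peer
 * can tell the message was aborted.  In secure mode the frame was
 * encrypted in one go and must go out as queued, so there is
 * nothing to patch up.
 */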
3401 void ceph_con_v2_revoke(struct ceph_connection *con)
3402 {
3403 WARN_ON(con->v2.out_zero);
3404
3405 if (con_secure(con)) {
3406 WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE &&
3407 con->v2.out_state != OUT_S_FINISH_MESSAGE);
3408 dout("%s con %p secure - noop\n", __func__, con);
3409 return;
3410 }
3411
3412 switch (con->v2.out_state) {
3413 case OUT_S_QUEUE_DATA:
3414 revoke_at_queue_data(con);
3415 break;
3416 case OUT_S_QUEUE_DATA_CONT:
3417 revoke_at_queue_data_cont(con);
3418 break;
3419 case OUT_S_FINISH_MESSAGE:
3420 revoke_at_finish_message(con);
3421 break;
3422 default:
3423 WARN(1, "bad out_state %d", con->v2.out_state);
3424 break;
3425 }
3426 }
3427
3428 static void revoke_at_prepare_read_data(struct ceph_connection *con)
3429 {
3430 int remaining;
3431 int resid;
3432
3433 WARN_ON(con_secure(con));
3434 WARN_ON(!data_len(con->in_msg));
3435 WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
3436 resid = iov_iter_count(&con->v2.in_iter);
3437 WARN_ON(!resid);
3438
3439 remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
3440 dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
3441 remaining);
3442 con->v2.in_iter.count -= resid;
3443 set_in_skip(con, resid + remaining);
3444 con->v2.in_state = IN_S_FINISH_SKIP;
3445 }
3446
3447 static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
3448 {
3449 int recved, resid; /* current piece of data */
3450 int remaining;
3451
3452 WARN_ON(con_secure(con));
3453 WARN_ON(!data_len(con->in_msg));
3454 WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3455 resid = iov_iter_count(&con->v2.in_iter);
3456 WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
3457 recved = con->v2.in_bvec.bv_len - resid;
3458 dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid);
3459
3460 if (recved)
3461 ceph_msg_data_advance(&con->v2.in_cursor, recved);
3462 WARN_ON(resid > con->v2.in_cursor.total_resid);
3463
3464 remaining = CEPH_EPILOGUE_PLAIN_LEN;
3465 dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
3466 con->v2.in_cursor.total_resid, remaining);
3467 con->v2.in_iter.count -= resid;
3468 set_in_skip(con, con->v2.in_cursor.total_resid + remaining);
3469 con->v2.in_state = IN_S_FINISH_SKIP;
3470 }
3471
3472 static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
3473 {
3474 int resid; /* current enc page (not necessarily data) */
3475
3476 WARN_ON(!con_secure(con));
3477 WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3478 resid = iov_iter_count(&con->v2.in_iter);
3479 WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
3480
3481 dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid,
3482 con->v2.in_enc_resid);
3483 con->v2.in_iter.count -= resid;
3484 set_in_skip(con, resid + con->v2.in_enc_resid);
3485 con->v2.in_state = IN_S_FINISH_SKIP;
3486 }
3487
3488 static void revoke_at_handle_epilogue(struct ceph_connection *con)
3489 {
3490 int resid;
3491
3492 resid = iov_iter_count(&con->v2.in_iter);
3493 WARN_ON(!resid);
3494
3495 dout("%s con %p resid %d\n", __func__, con, resid);
3496 con->v2.in_iter.count -= resid;
3497 set_in_skip(con, resid);
3498 con->v2.in_state = IN_S_FINISH_SKIP;
3499 }
3500
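/*
 * Revoke con->in_msg while its tail is being read.  Account for what
 * has already been consumed and switch to skipping (discarding) the
 * rest of the frame.
 */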
3501 void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
3502 {
3503 switch (con->v2.in_state) {
3504 case IN_S_PREPARE_READ_DATA:
3505 revoke_at_prepare_read_data(con);
3506 break;
3507 case IN_S_PREPARE_READ_DATA_CONT:
3508 revoke_at_prepare_read_data_cont(con);
3509 break;
3510 case IN_S_PREPARE_READ_ENC_PAGE:
3511 revoke_at_prepare_read_enc_page(con);
3512 break;
3513 case IN_S_HANDLE_EPILOGUE:
3514 revoke_at_handle_epilogue(con);
3515 break;
3516 default:
3517 WARN(1, "bad in_state %d", con->v2.in_state);
3518 break;
3519 }
3520 }
3521
3522 bool ceph_con_v2_opened(struct ceph_connection *con)
3523 {
3524 return con->v2.peer_global_seq;
3525 }
3526
3527 void ceph_con_v2_reset_session(struct ceph_connection *con)
3528 {
3529 con->v2.client_cookie = 0;
3530 con->v2.server_cookie = 0;
3531 con->v2.global_seq = 0;
3532 con->v2.connect_seq = 0;
3533 con->v2.peer_global_seq = 0;
3534 }
3535
3536 void ceph_con_v2_reset_protocol(struct ceph_connection *con)
3537 {
3538 iov_iter_truncate(&con->v2.in_iter, 0);
3539 iov_iter_truncate(&con->v2.out_iter, 0);
3540 con->v2.out_zero = 0;
3541
3542 clear_in_sign_kvecs(con);
3543 clear_out_sign_kvecs(con);
3544 free_conn_bufs(con);
3545
3546 if (con->v2.in_enc_pages) {
3547 WARN_ON(!con->v2.in_enc_page_cnt);
3548 ceph_release_page_vector(con->v2.in_enc_pages,
3549 con->v2.in_enc_page_cnt);
3550 con->v2.in_enc_pages = NULL;
3551 con->v2.in_enc_page_cnt = 0;
3552 }
3553 if (con->v2.out_enc_pages) {
3554 WARN_ON(!con->v2.out_enc_page_cnt);
3555 ceph_release_page_vector(con->v2.out_enc_pages,
3556 con->v2.out_enc_page_cnt);
3557 con->v2.out_enc_pages = NULL;
3558 con->v2.out_enc_page_cnt = 0;
3559 }
3560
3561 con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
3562 memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
3563 memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
3564
3565 if (con->v2.hmac_tfm) {
3566 crypto_free_shash(con->v2.hmac_tfm);
3567 con->v2.hmac_tfm = NULL;
3568 }
3569 if (con->v2.gcm_req) {
3570 aead_request_free(con->v2.gcm_req);
3571 con->v2.gcm_req = NULL;
3572 }
3573 if (con->v2.gcm_tfm) {
3574 crypto_free_aead(con->v2.gcm_tfm);
3575 con->v2.gcm_tfm = NULL;
3576 }
3577 }
3578