/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#ifndef RNBD_PROTO_H
#define RNBD_PROTO_H

#include <linux/types.h>
#include <linux/blk-mq.h>
#include <linux/limits.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib.h>

#define RNBD_PROTO_VER_MAJOR 2
#define RNBD_PROTO_VER_MINOR 0

/* The default port number the RTRS server is listening on. */
#define RTRS_PORT 1234

/**
 * enum rnbd_msg_type - RNBD message types
 * @RNBD_MSG_SESS_INFO: initial session info from client to server
 * @RNBD_MSG_SESS_INFO_RSP: initial session info from server to client
 * @RNBD_MSG_OPEN: open (map) device request
 * @RNBD_MSG_OPEN_RSP: response to an @RNBD_MSG_OPEN
 * @RNBD_MSG_IO: block IO request operation
 * @RNBD_MSG_CLOSE: close (unmap) device request
 */
enum rnbd_msg_type {
	RNBD_MSG_SESS_INFO,
	RNBD_MSG_SESS_INFO_RSP,
	RNBD_MSG_OPEN,
	RNBD_MSG_OPEN_RSP,
	RNBD_MSG_IO,
	RNBD_MSG_CLOSE,
};

/**
 * struct rnbd_msg_hdr - header of RNBD messages
 * @type:	message type, valid values see: enum rnbd_msg_type
 * @__padding:	padding to 4 bytes
 */
struct rnbd_msg_hdr {
	__le16		type;
	__le16		__padding;
};
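
/*
 * Example (an illustrative sketch, not part of the protocol definition):
 * all multi-byte fields of the messages below are little endian
 * (__le16/__le32/__le64), so a header is filled and parsed with the usual
 * cpu_to_le*()/le*_to_cpu() helpers, e.g.:
 *
 *	struct rnbd_msg_hdr hdr = {
 *		.type = cpu_to_le16(RNBD_MSG_OPEN),
 *	};
 *
 * and on the receiving side le16_to_cpu(hdr.type) is compared against
 * enum rnbd_msg_type.
 */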

/**
 * enum rnbd_access_mode - access mode of a mapped device
 * @RNBD_ACCESS_RO: read-only access, a device may be mapped RO many times
 * @RNBD_ACCESS_RW: read-write access, a device may be mapped RW only once
 * @RNBD_ACCESS_MIGRATION: one additional RW mapping on top of an existing
 *	RW mapping (a second RW export can be required, for example, for
 *	VM migration)
 */
enum rnbd_access_mode {
	RNBD_ACCESS_RO,
	RNBD_ACCESS_RW,
	RNBD_ACCESS_MIGRATION,
};

static const __maybe_unused struct {
	enum rnbd_access_mode mode;
	const char *str;
} rnbd_access_modes[] = {
	[RNBD_ACCESS_RO] = {RNBD_ACCESS_RO, "ro"},
	[RNBD_ACCESS_RW] = {RNBD_ACCESS_RW, "rw"},
	[RNBD_ACCESS_MIGRATION] = {RNBD_ACCESS_MIGRATION, "migration"},
};
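
/*
 * Example (an illustrative sketch): the table above is indexed by the
 * access mode value itself, so a mode can be turned into a human readable
 * string with a bounds-checked lookup; compare rnbd_access_mode_str()
 * declared at the end of this file:
 *
 *	if (mode < ARRAY_SIZE(rnbd_access_modes))
 *		pr_debug("access mode: %s\n", rnbd_access_modes[mode].str);
 */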

/**
 * struct rnbd_msg_sess_info - initial session info from client to server
 * @hdr: message header
 * @ver: RNBD protocol version
 */
struct rnbd_msg_sess_info {
	struct rnbd_msg_hdr hdr;
	u8		ver;
	u8		reserved[31];
};

/**
 * struct rnbd_msg_sess_info_rsp - initial session info from server to client
 * @hdr: message header
 * @ver: RNBD protocol version
 */
struct rnbd_msg_sess_info_rsp {
	struct rnbd_msg_hdr hdr;
	u8		ver;
	u8		reserved[31];
};
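
/*
 * Example (an illustrative sketch, assuming the single ver byte carries the
 * major protocol version): a client could announce its version like this
 * and check the ver field of the RNBD_MSG_SESS_INFO_RSP answer for
 * compatibility; how a mismatch is handled is client/server policy, not
 * part of this header:
 *
 *	struct rnbd_msg_sess_info msg = {
 *		.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO),
 *		.ver	  = RNBD_PROTO_VER_MAJOR,
 *	};
 */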

/**
 * struct rnbd_msg_open - request to open a remote device.
 * @hdr: message header
 * @access_mode: the mode to open the remote device, valid values see:
 *		 enum rnbd_access_mode
 * @dev_name: device path on remote side
 */
struct rnbd_msg_open {
	struct rnbd_msg_hdr hdr;
	u8		access_mode;
	u8		resv1;
	s8		dev_name[NAME_MAX];
	u8		reserved[3];
};
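
/*
 * Example (an illustrative sketch, "nvme0n1" is a made-up device path):
 * mapping a remote device read-only could be requested with:
 *
 *	struct rnbd_msg_open msg = {
 *		.hdr.type    = cpu_to_le16(RNBD_MSG_OPEN),
 *		.access_mode = RNBD_ACCESS_RO,
 *	};
 *
 *	strscpy(msg.dev_name, "nvme0n1", sizeof(msg.dev_name));
 *
 * The server answers with a struct rnbd_msg_open_rsp whose device_id is
 * then used in rnbd_msg_io and rnbd_msg_close messages.
 */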

/**
 * struct rnbd_msg_close - request to close a remote device.
 * @hdr: message header
 * @device_id: device_id on server side to identify the device
 */
struct rnbd_msg_close {
	struct rnbd_msg_hdr hdr;
	__le32		device_id;
};

/* Cache policy bits reported in rnbd_msg_open_rsp.cache_policy */
enum rnbd_cache_policy {
	RNBD_FUA	= 1 << 0,
	RNBD_WRITEBACK	= 1 << 1,
};

/**
 * struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
 * @hdr: message header
 * @device_id: device_id on server side to identify the device
 * @nsectors: number of sectors in the usual 512b unit
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 * @max_write_same_sectors: max sectors for WRITE SAME in the 512b unit
 * @max_discard_sectors: max. sectors that can be discarded at once in the
 *			 512b unit.
 * @discard_granularity: size of the internal discard allocation unit in bytes
 * @discard_alignment: offset from the internal allocation alignment in bytes
 * @physical_block_size: physical block size the device supports in bytes
 * @logical_block_size: logical block size the device supports in bytes
 * @max_segments: max segments the hardware supports in one transfer
 * @secure_discard: whether secure discard is supported
 * @obsolete_rotational: obsolete, not in use
 * @cache_policy: whether write-back caching and/or FUA are supported, see
 *		  enum rnbd_cache_policy
 */
struct rnbd_msg_open_rsp {
	struct rnbd_msg_hdr	hdr;
	__le32			device_id;
	__le64			nsectors;
	__le32			max_hw_sectors;
	__le32			max_write_same_sectors;
	__le32			max_discard_sectors;
	__le32			discard_granularity;
	__le32			discard_alignment;
	__le16			physical_block_size;
	__le16			logical_block_size;
	__le16			max_segments;
	__le16			secure_discard;
	u8			obsolete_rotational;
	u8			cache_policy;
	u8			reserved[10];
};
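
/*
 * Example (an illustrative sketch, rsp being a pointer to a received
 * struct rnbd_msg_open_rsp): the client can derive the device size and
 * cache mode from this response; the size in bytes is
 * le64_to_cpu(rsp->nsectors) * 512, since nsectors is in the usual 512b
 * unit, and the cache bits are tested against enum rnbd_cache_policy:
 *
 *	bool wb  = rsp->cache_policy & RNBD_WRITEBACK;
 *	bool fua = rsp->cache_policy & RNBD_FUA;
 */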

/**
 * struct rnbd_msg_io - message for I/O read/write
 * @hdr: message header
 * @device_id: device_id on server side to find the right device
 * @sector: bi_sector attribute from struct bio
 * @rw: valid values are defined in enum rnbd_io_flags
 * @bi_size: number of bytes for I/O read/write
 * @prio: priority
 */
struct rnbd_msg_io {
	struct rnbd_msg_hdr hdr;
	__le32		device_id;
	__le64		sector;
	__le32		rw;
	__le32		bi_size;
	__le16		prio;
};
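
/*
 * Example (an illustrative sketch, device_id being the value returned in
 * rnbd_msg_open_rsp): on the client side a struct request *rq can be
 * translated into this message with the helper rq_to_rnbd_flags() defined
 * further down; the exact prio encoding is left to the implementation:
 *
 *	struct rnbd_msg_io io = {
 *		.hdr.type  = cpu_to_le16(RNBD_MSG_IO),
 *		.device_id = cpu_to_le32(device_id),
 *		.sector	   = cpu_to_le64(blk_rq_pos(rq)),
 *		.rw	   = cpu_to_le32(rq_to_rnbd_flags(rq)),
 *		.bi_size   = cpu_to_le32(blk_rq_bytes(rq)),
 *		.prio	   = cpu_to_le16(req_get_ioprio(rq)),
 *	};
 */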

#define RNBD_OP_BITS 8
#define RNBD_OP_MASK ((1 << RNBD_OP_BITS) - 1)

/**
 * enum rnbd_io_flags - RNBD request types from rq_flag_bits
 * @RNBD_OP_READ: read sectors from the device
 * @RNBD_OP_WRITE: write sectors to the device
 * @RNBD_OP_FLUSH: flush the volatile write cache
 * @RNBD_OP_DISCARD: discard sectors
 * @RNBD_OP_SECURE_ERASE: securely erase sectors
 * @RNBD_OP_WRITE_SAME: write the same sectors many times
 *
 * @RNBD_F_SYNC: request is sync (sync write or read)
 * @RNBD_F_FUA: forced unit access
 */
enum rnbd_io_flags {

	/* Operations */
	RNBD_OP_READ		= 0,
	RNBD_OP_WRITE		= 1,
	RNBD_OP_FLUSH		= 2,
	RNBD_OP_DISCARD		= 3,
	RNBD_OP_SECURE_ERASE	= 4,
	RNBD_OP_WRITE_SAME	= 5,

	/* Flags */
	RNBD_F_SYNC	= 1 << (RNBD_OP_BITS + 0),
	RNBD_F_FUA	= 1 << (RNBD_OP_BITS + 1),
};

static inline u32 rnbd_op(u32 flags)
{
	return flags & RNBD_OP_MASK;
}

static inline u32 rnbd_flags(u32 flags)
{
	return flags & ~RNBD_OP_MASK;
}
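
/*
 * Example (an illustrative sketch): an operation and its modifier flags are
 * packed into one 32-bit value; the low RNBD_OP_BITS bits hold the
 * operation, the upper bits hold the flags:
 *
 *	u32 opf = RNBD_OP_WRITE | RNBD_F_FUA;
 *
 *	WARN_ON(rnbd_op(opf) != RNBD_OP_WRITE);
 *	WARN_ON(rnbd_flags(opf) != RNBD_F_FUA);
 */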

static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
{
	blk_opf_t bio_opf;

	switch (rnbd_op(rnbd_opf)) {
	case RNBD_OP_READ:
		bio_opf = REQ_OP_READ;
		break;
	case RNBD_OP_WRITE:
		bio_opf = REQ_OP_WRITE;
		break;
	case RNBD_OP_FLUSH:
		bio_opf = REQ_OP_WRITE | REQ_PREFLUSH;
		break;
	case RNBD_OP_DISCARD:
		bio_opf = REQ_OP_DISCARD;
		break;
	case RNBD_OP_SECURE_ERASE:
		bio_opf = REQ_OP_SECURE_ERASE;
		break;
	default:
		WARN(1, "Unknown RNBD type: %d (flags %d)\n",
		     rnbd_op(rnbd_opf), rnbd_opf);
		bio_opf = 0;
	}

	if (rnbd_opf & RNBD_F_SYNC)
		bio_opf |= REQ_SYNC;

	if (rnbd_opf & RNBD_F_FUA)
		bio_opf |= REQ_FUA;

	return bio_opf;
}
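
/*
 * Example (an illustrative sketch, msg being a pointer to a received
 * struct rnbd_msg_io): a server can turn the rw field of an incoming I/O
 * message back into block layer flags with:
 *
 *	blk_opf_t opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
 *
 * Note that the round trip is not exact: a REQ_OP_FLUSH request is sent as
 * RNBD_OP_FLUSH by rq_to_rnbd_flags() below and comes back as
 * REQ_OP_WRITE | REQ_PREFLUSH here.
 */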

static inline u32 rq_to_rnbd_flags(struct request *rq)
{
	u32 rnbd_opf;

	switch (req_op(rq)) {
	case REQ_OP_READ:
		rnbd_opf = RNBD_OP_READ;
		break;
	case REQ_OP_WRITE:
		rnbd_opf = RNBD_OP_WRITE;
		break;
	case REQ_OP_DISCARD:
		rnbd_opf = RNBD_OP_DISCARD;
		break;
	case REQ_OP_SECURE_ERASE:
		rnbd_opf = RNBD_OP_SECURE_ERASE;
		break;
	case REQ_OP_FLUSH:
		rnbd_opf = RNBD_OP_FLUSH;
		break;
	default:
		WARN(1, "Unknown request type %d (flags %llu)\n",
		     (__force u32)req_op(rq),
		     (__force unsigned long long)rq->cmd_flags);
		rnbd_opf = 0;
	}

	if (op_is_sync(rq->cmd_flags))
		rnbd_opf |= RNBD_F_SYNC;

	if (op_is_flush(rq->cmd_flags))
		rnbd_opf |= RNBD_F_FUA;

	return rnbd_opf;
}

const char *rnbd_access_mode_str(enum rnbd_access_mode mode);

#endif /* RNBD_PROTO_H */