1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3 * This file contains main functions related to the iSCSI Target Core Driver.
4 *
5 * (c) Copyright 2007-2013 Datera, Inc.
6 *
7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
8 *
9 ******************************************************************************/
10
11 #include <crypto/hash.h>
12 #include <linux/string.h>
13 #include <linux/kthread.h>
14 #include <linux/completion.h>
15 #include <linux/module.h>
16 #include <linux/vmalloc.h>
17 #include <linux/idr.h>
18 #include <linux/delay.h>
19 #include <linux/sched/signal.h>
20 #include <asm/unaligned.h>
21 #include <linux/inet.h>
22 #include <net/ipv6.h>
23 #include <scsi/scsi_proto.h>
24 #include <scsi/iscsi_proto.h>
25 #include <scsi/scsi_tcq.h>
26 #include <target/target_core_base.h>
27 #include <target/target_core_fabric.h>
28
29 #include <target/iscsi/iscsi_target_core.h>
30 #include "iscsi_target_parameters.h"
31 #include "iscsi_target_seq_pdu_list.h"
32 #include "iscsi_target_datain_values.h"
33 #include "iscsi_target_erl0.h"
34 #include "iscsi_target_erl1.h"
35 #include "iscsi_target_erl2.h"
36 #include "iscsi_target_login.h"
37 #include "iscsi_target_tmr.h"
38 #include "iscsi_target_tpg.h"
39 #include "iscsi_target_util.h"
40 #include "iscsi_target.h"
41 #include "iscsi_target_device.h"
42 #include <target/iscsi/iscsi_target_stat.h>
43
44 #include <target/iscsi/iscsi_transport.h>
45
46 static LIST_HEAD(g_tiqn_list);
47 static LIST_HEAD(g_np_list);
48 static DEFINE_SPINLOCK(tiqn_lock);
49 static DEFINE_MUTEX(np_lock);
50
51 static struct idr tiqn_idr;
52 DEFINE_IDA(sess_ida);
53 struct mutex auth_id_lock;
54
55 struct iscsit_global *iscsit_global;
56
57 struct kmem_cache *lio_qr_cache;
58 struct kmem_cache *lio_dr_cache;
59 struct kmem_cache *lio_ooo_cache;
60 struct kmem_cache *lio_r2t_cache;
61
62 static int iscsit_handle_immediate_data(struct iscsit_cmd *,
63 struct iscsi_scsi_req *, u32);
64
iscsit_get_tiqn_for_login(unsigned char * buf)65 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
66 {
67 struct iscsi_tiqn *tiqn = NULL;
68
69 spin_lock(&tiqn_lock);
70 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
71 if (!strcmp(tiqn->tiqn, buf)) {
72
73 spin_lock(&tiqn->tiqn_state_lock);
74 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
75 tiqn->tiqn_access_count++;
76 spin_unlock(&tiqn->tiqn_state_lock);
77 spin_unlock(&tiqn_lock);
78 return tiqn;
79 }
80 spin_unlock(&tiqn->tiqn_state_lock);
81 }
82 }
83 spin_unlock(&tiqn_lock);
84
85 return NULL;
86 }
87
iscsit_set_tiqn_shutdown(struct iscsi_tiqn * tiqn)88 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
89 {
90 spin_lock(&tiqn->tiqn_state_lock);
91 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
92 tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
93 spin_unlock(&tiqn->tiqn_state_lock);
94 return 0;
95 }
96 spin_unlock(&tiqn->tiqn_state_lock);
97
98 return -1;
99 }
100
iscsit_put_tiqn_for_login(struct iscsi_tiqn * tiqn)101 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
102 {
103 spin_lock(&tiqn->tiqn_state_lock);
104 tiqn->tiqn_access_count--;
105 spin_unlock(&tiqn->tiqn_state_lock);
106 }
107
108 /*
109 * Note that IQN formatting is expected to be done in userspace, and
110 * no explict IQN format checks are done here.
111 */
iscsit_add_tiqn(unsigned char * buf)112 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
113 {
114 struct iscsi_tiqn *tiqn = NULL;
115 int ret;
116
117 if (strlen(buf) >= ISCSI_IQN_LEN) {
118 pr_err("Target IQN exceeds %d bytes\n",
119 ISCSI_IQN_LEN);
120 return ERR_PTR(-EINVAL);
121 }
122
123 tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
124 if (!tiqn)
125 return ERR_PTR(-ENOMEM);
126
127 sprintf(tiqn->tiqn, "%s", buf);
128 INIT_LIST_HEAD(&tiqn->tiqn_list);
129 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
130 spin_lock_init(&tiqn->tiqn_state_lock);
131 spin_lock_init(&tiqn->tiqn_tpg_lock);
132 spin_lock_init(&tiqn->sess_err_stats.lock);
133 spin_lock_init(&tiqn->login_stats.lock);
134 spin_lock_init(&tiqn->logout_stats.lock);
135
136 tiqn->tiqn_state = TIQN_STATE_ACTIVE;
137
138 idr_preload(GFP_KERNEL);
139 spin_lock(&tiqn_lock);
140
141 ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
142 if (ret < 0) {
143 pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
144 spin_unlock(&tiqn_lock);
145 idr_preload_end();
146 kfree(tiqn);
147 return ERR_PTR(ret);
148 }
149 tiqn->tiqn_index = ret;
150 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
151
152 spin_unlock(&tiqn_lock);
153 idr_preload_end();
154
155 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
156
157 return tiqn;
158
159 }
160
iscsit_wait_for_tiqn(struct iscsi_tiqn * tiqn)161 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
162 {
163 /*
164 * Wait for accesses to said struct iscsi_tiqn to end.
165 */
166 spin_lock(&tiqn->tiqn_state_lock);
167 while (tiqn->tiqn_access_count != 0) {
168 spin_unlock(&tiqn->tiqn_state_lock);
169 msleep(10);
170 spin_lock(&tiqn->tiqn_state_lock);
171 }
172 spin_unlock(&tiqn->tiqn_state_lock);
173 }
174
iscsit_del_tiqn(struct iscsi_tiqn * tiqn)175 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
176 {
177 /*
178 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
179 * while holding tiqn->tiqn_state_lock. This means that all subsequent
180 * attempts to access this struct iscsi_tiqn will fail from both transport
181 * fabric and control code paths.
182 */
183 if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
184 pr_err("iscsit_set_tiqn_shutdown() failed\n");
185 return;
186 }
187
188 iscsit_wait_for_tiqn(tiqn);
189
190 spin_lock(&tiqn_lock);
191 list_del(&tiqn->tiqn_list);
192 idr_remove(&tiqn_idr, tiqn->tiqn_index);
193 spin_unlock(&tiqn_lock);
194
195 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
196 tiqn->tiqn);
197 kfree(tiqn);
198 }
199
iscsit_access_np(struct iscsi_np * np,struct iscsi_portal_group * tpg)200 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
201 {
202 int ret;
203 /*
204 * Determine if the network portal is accepting storage traffic.
205 */
206 spin_lock_bh(&np->np_thread_lock);
207 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
208 spin_unlock_bh(&np->np_thread_lock);
209 return -1;
210 }
211 spin_unlock_bh(&np->np_thread_lock);
212 /*
213 * Determine if the portal group is accepting storage traffic.
214 */
215 spin_lock_bh(&tpg->tpg_state_lock);
216 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
217 spin_unlock_bh(&tpg->tpg_state_lock);
218 return -1;
219 }
220 spin_unlock_bh(&tpg->tpg_state_lock);
221
222 /*
223 * Here we serialize access across the TIQN+TPG Tuple.
224 */
225 ret = down_interruptible(&tpg->np_login_sem);
226 if (ret != 0)
227 return -1;
228
229 spin_lock_bh(&tpg->tpg_state_lock);
230 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
231 spin_unlock_bh(&tpg->tpg_state_lock);
232 up(&tpg->np_login_sem);
233 return -1;
234 }
235 spin_unlock_bh(&tpg->tpg_state_lock);
236
237 return 0;
238 }
239
iscsit_login_kref_put(struct kref * kref)240 void iscsit_login_kref_put(struct kref *kref)
241 {
242 struct iscsi_tpg_np *tpg_np = container_of(kref,
243 struct iscsi_tpg_np, tpg_np_kref);
244
245 complete(&tpg_np->tpg_np_comp);
246 }
247
iscsit_deaccess_np(struct iscsi_np * np,struct iscsi_portal_group * tpg,struct iscsi_tpg_np * tpg_np)248 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
249 struct iscsi_tpg_np *tpg_np)
250 {
251 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
252
253 up(&tpg->np_login_sem);
254
255 if (tpg_np)
256 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
257
258 if (tiqn)
259 iscsit_put_tiqn_for_login(tiqn);
260
261 return 0;
262 }
263
iscsit_check_np_match(struct sockaddr_storage * sockaddr,struct iscsi_np * np,int network_transport)264 bool iscsit_check_np_match(
265 struct sockaddr_storage *sockaddr,
266 struct iscsi_np *np,
267 int network_transport)
268 {
269 struct sockaddr_in *sock_in, *sock_in_e;
270 struct sockaddr_in6 *sock_in6, *sock_in6_e;
271 bool ip_match = false;
272 u16 port, port_e;
273
274 if (sockaddr->ss_family == AF_INET6) {
275 sock_in6 = (struct sockaddr_in6 *)sockaddr;
276 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
277
278 if (!memcmp(&sock_in6->sin6_addr.in6_u,
279 &sock_in6_e->sin6_addr.in6_u,
280 sizeof(struct in6_addr)))
281 ip_match = true;
282
283 port = ntohs(sock_in6->sin6_port);
284 port_e = ntohs(sock_in6_e->sin6_port);
285 } else {
286 sock_in = (struct sockaddr_in *)sockaddr;
287 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
288
289 if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
290 ip_match = true;
291
292 port = ntohs(sock_in->sin_port);
293 port_e = ntohs(sock_in_e->sin_port);
294 }
295
296 if (ip_match && (port_e == port) &&
297 (np->np_network_transport == network_transport))
298 return true;
299
300 return false;
301 }
302
iscsit_get_np(struct sockaddr_storage * sockaddr,int network_transport)303 static struct iscsi_np *iscsit_get_np(
304 struct sockaddr_storage *sockaddr,
305 int network_transport)
306 {
307 struct iscsi_np *np;
308 bool match;
309
310 lockdep_assert_held(&np_lock);
311
312 list_for_each_entry(np, &g_np_list, np_list) {
313 spin_lock_bh(&np->np_thread_lock);
314 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
315 spin_unlock_bh(&np->np_thread_lock);
316 continue;
317 }
318
319 match = iscsit_check_np_match(sockaddr, np, network_transport);
320 if (match) {
321 /*
322 * Increment the np_exports reference count now to
323 * prevent iscsit_del_np() below from being called
324 * while iscsi_tpg_add_network_portal() is called.
325 */
326 np->np_exports++;
327 spin_unlock_bh(&np->np_thread_lock);
328 return np;
329 }
330 spin_unlock_bh(&np->np_thread_lock);
331 }
332
333 return NULL;
334 }
335
iscsit_add_np(struct sockaddr_storage * sockaddr,int network_transport)336 struct iscsi_np *iscsit_add_np(
337 struct sockaddr_storage *sockaddr,
338 int network_transport)
339 {
340 struct iscsi_np *np;
341 int ret;
342
343 mutex_lock(&np_lock);
344
345 /*
346 * Locate the existing struct iscsi_np if already active..
347 */
348 np = iscsit_get_np(sockaddr, network_transport);
349 if (np) {
350 mutex_unlock(&np_lock);
351 return np;
352 }
353
354 np = kzalloc(sizeof(*np), GFP_KERNEL);
355 if (!np) {
356 mutex_unlock(&np_lock);
357 return ERR_PTR(-ENOMEM);
358 }
359
360 np->np_flags |= NPF_IP_NETWORK;
361 np->np_network_transport = network_transport;
362 spin_lock_init(&np->np_thread_lock);
363 init_completion(&np->np_restart_comp);
364 INIT_LIST_HEAD(&np->np_list);
365
366 timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0);
367
368 ret = iscsi_target_setup_login_socket(np, sockaddr);
369 if (ret != 0) {
370 kfree(np);
371 mutex_unlock(&np_lock);
372 return ERR_PTR(ret);
373 }
374
375 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
376 if (IS_ERR(np->np_thread)) {
377 pr_err("Unable to create kthread: iscsi_np\n");
378 ret = PTR_ERR(np->np_thread);
379 kfree(np);
380 mutex_unlock(&np_lock);
381 return ERR_PTR(ret);
382 }
383 /*
384 * Increment the np_exports reference count now to prevent
385 * iscsit_del_np() below from being run while a new call to
386 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
387 * active. We don't need to hold np->np_thread_lock at this
388 * point because iscsi_np has not been added to g_np_list yet.
389 */
390 np->np_exports = 1;
391 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
392
393 list_add_tail(&np->np_list, &g_np_list);
394 mutex_unlock(&np_lock);
395
396 pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
397 &np->np_sockaddr, np->np_transport->name);
398
399 return np;
400 }
401
iscsit_reset_np_thread(struct iscsi_np * np,struct iscsi_tpg_np * tpg_np,struct iscsi_portal_group * tpg,bool shutdown)402 int iscsit_reset_np_thread(
403 struct iscsi_np *np,
404 struct iscsi_tpg_np *tpg_np,
405 struct iscsi_portal_group *tpg,
406 bool shutdown)
407 {
408 spin_lock_bh(&np->np_thread_lock);
409 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
410 spin_unlock_bh(&np->np_thread_lock);
411 return 0;
412 }
413 np->np_thread_state = ISCSI_NP_THREAD_RESET;
414 atomic_inc(&np->np_reset_count);
415
416 if (np->np_thread) {
417 spin_unlock_bh(&np->np_thread_lock);
418 send_sig(SIGINT, np->np_thread, 1);
419 wait_for_completion(&np->np_restart_comp);
420 spin_lock_bh(&np->np_thread_lock);
421 }
422 spin_unlock_bh(&np->np_thread_lock);
423
424 if (tpg_np && shutdown) {
425 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
426
427 wait_for_completion(&tpg_np->tpg_np_comp);
428 }
429
430 return 0;
431 }
432
iscsit_free_np(struct iscsi_np * np)433 static void iscsit_free_np(struct iscsi_np *np)
434 {
435 if (np->np_socket)
436 sock_release(np->np_socket);
437 }
438
iscsit_del_np(struct iscsi_np * np)439 int iscsit_del_np(struct iscsi_np *np)
440 {
441 spin_lock_bh(&np->np_thread_lock);
442 np->np_exports--;
443 if (np->np_exports) {
444 np->enabled = true;
445 spin_unlock_bh(&np->np_thread_lock);
446 return 0;
447 }
448 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
449 spin_unlock_bh(&np->np_thread_lock);
450
451 if (np->np_thread) {
452 /*
453 * We need to send the signal to wakeup Linux/Net
454 * which may be sleeping in sock_accept()..
455 */
456 send_sig(SIGINT, np->np_thread, 1);
457 kthread_stop(np->np_thread);
458 np->np_thread = NULL;
459 }
460
461 np->np_transport->iscsit_free_np(np);
462
463 mutex_lock(&np_lock);
464 list_del(&np->np_list);
465 mutex_unlock(&np_lock);
466
467 pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
468 &np->np_sockaddr, np->np_transport->name);
469
470 iscsit_put_transport(np->np_transport);
471 kfree(np);
472 return 0;
473 }
474
475 static void iscsit_get_rx_pdu(struct iscsit_conn *);
476
iscsit_queue_rsp(struct iscsit_conn * conn,struct iscsit_cmd * cmd)477 int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
478 {
479 return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
480 }
481 EXPORT_SYMBOL(iscsit_queue_rsp);
482
iscsit_aborted_task(struct iscsit_conn * conn,struct iscsit_cmd * cmd)483 void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
484 {
485 spin_lock_bh(&conn->cmd_lock);
486 if (!list_empty(&cmd->i_conn_node))
487 list_del_init(&cmd->i_conn_node);
488 spin_unlock_bh(&conn->cmd_lock);
489
490 __iscsit_free_cmd(cmd, true);
491 }
492 EXPORT_SYMBOL(iscsit_aborted_task);
493
494 static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
495 u32, u32, const void *, void *);
496 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
497
498 static int
iscsit_xmit_nondatain_pdu(struct iscsit_conn * conn,struct iscsit_cmd * cmd,const void * data_buf,u32 data_buf_len)499 iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
500 const void *data_buf, u32 data_buf_len)
501 {
502 struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
503 struct kvec *iov;
504 u32 niov = 0, tx_size = ISCSI_HDR_LEN;
505 int ret;
506
507 iov = &cmd->iov_misc[0];
508 iov[niov].iov_base = cmd->pdu;
509 iov[niov++].iov_len = ISCSI_HDR_LEN;
510
511 if (conn->conn_ops->HeaderDigest) {
512 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
513
514 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
515 ISCSI_HDR_LEN, 0, NULL,
516 header_digest);
517
518 iov[0].iov_len += ISCSI_CRC_LEN;
519 tx_size += ISCSI_CRC_LEN;
520 pr_debug("Attaching CRC32C HeaderDigest"
521 " to opcode 0x%x 0x%08x\n",
522 hdr->opcode, *header_digest);
523 }
524
525 if (data_buf_len) {
526 u32 padding = ((-data_buf_len) & 3);
527
528 iov[niov].iov_base = (void *)data_buf;
529 iov[niov++].iov_len = data_buf_len;
530 tx_size += data_buf_len;
531
532 if (padding != 0) {
533 iov[niov].iov_base = &cmd->pad_bytes;
534 iov[niov++].iov_len = padding;
535 tx_size += padding;
536 pr_debug("Attaching %u additional"
537 " padding bytes.\n", padding);
538 }
539
540 if (conn->conn_ops->DataDigest) {
541 iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
542 data_buf, data_buf_len,
543 padding, &cmd->pad_bytes,
544 &cmd->data_crc);
545
546 iov[niov].iov_base = &cmd->data_crc;
547 iov[niov++].iov_len = ISCSI_CRC_LEN;
548 tx_size += ISCSI_CRC_LEN;
549 pr_debug("Attached DataDigest for %u"
550 " bytes opcode 0x%x, CRC 0x%08x\n",
551 data_buf_len, hdr->opcode, cmd->data_crc);
552 }
553 }
554
555 cmd->iov_misc_count = niov;
556 cmd->tx_size = tx_size;
557
558 ret = iscsit_send_tx_data(cmd, conn, 1);
559 if (ret < 0) {
560 iscsit_tx_thread_wait_for_tcp(conn);
561 return ret;
562 }
563
564 return 0;
565 }
566
567 static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
568 u32 data_offset, u32 data_length);
569 static void iscsit_unmap_iovec(struct iscsit_cmd *);
570 static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
571 u32, u32, u32, u8 *);
572 static int
iscsit_xmit_datain_pdu(struct iscsit_conn * conn,struct iscsit_cmd * cmd,const struct iscsi_datain * datain)573 iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
574 const struct iscsi_datain *datain)
575 {
576 struct kvec *iov;
577 u32 iov_count = 0, tx_size = 0;
578 int ret, iov_ret;
579
580 iov = &cmd->iov_data[0];
581 iov[iov_count].iov_base = cmd->pdu;
582 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
583 tx_size += ISCSI_HDR_LEN;
584
585 if (conn->conn_ops->HeaderDigest) {
586 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
587
588 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
589 ISCSI_HDR_LEN, 0, NULL,
590 header_digest);
591
592 iov[0].iov_len += ISCSI_CRC_LEN;
593 tx_size += ISCSI_CRC_LEN;
594
595 pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
596 *header_digest);
597 }
598
599 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
600 cmd->orig_iov_data_count - (iov_count + 2),
601 datain->offset, datain->length);
602 if (iov_ret < 0)
603 return -1;
604
605 iov_count += iov_ret;
606 tx_size += datain->length;
607
608 cmd->padding = ((-datain->length) & 3);
609 if (cmd->padding) {
610 iov[iov_count].iov_base = cmd->pad_bytes;
611 iov[iov_count++].iov_len = cmd->padding;
612 tx_size += cmd->padding;
613
614 pr_debug("Attaching %u padding bytes\n", cmd->padding);
615 }
616
617 if (conn->conn_ops->DataDigest) {
618 cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
619 cmd, datain->offset,
620 datain->length,
621 cmd->padding,
622 cmd->pad_bytes);
623
624 iov[iov_count].iov_base = &cmd->data_crc;
625 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
626 tx_size += ISCSI_CRC_LEN;
627
628 pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
629 datain->length + cmd->padding, cmd->data_crc);
630 }
631
632 cmd->iov_data_count = iov_count;
633 cmd->tx_size = tx_size;
634
635 ret = iscsit_fe_sendpage_sg(cmd, conn);
636
637 iscsit_unmap_iovec(cmd);
638
639 if (ret < 0) {
640 iscsit_tx_thread_wait_for_tcp(conn);
641 return ret;
642 }
643
644 return 0;
645 }
646
iscsit_xmit_pdu(struct iscsit_conn * conn,struct iscsit_cmd * cmd,struct iscsi_datain_req * dr,const void * buf,u32 buf_len)647 static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
648 struct iscsi_datain_req *dr, const void *buf,
649 u32 buf_len)
650 {
651 if (dr)
652 return iscsit_xmit_datain_pdu(conn, cmd, buf);
653 else
654 return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
655 }
656
iscsit_get_sup_prot_ops(struct iscsit_conn * conn)657 static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn)
658 {
659 return TARGET_PROT_NORMAL;
660 }
661
662 static struct iscsit_transport iscsi_target_transport = {
663 .name = "iSCSI/TCP",
664 .transport_type = ISCSI_TCP,
665 .rdma_shutdown = false,
666 .owner = NULL,
667 .iscsit_setup_np = iscsit_setup_np,
668 .iscsit_accept_np = iscsit_accept_np,
669 .iscsit_free_np = iscsit_free_np,
670 .iscsit_get_login_rx = iscsit_get_login_rx,
671 .iscsit_put_login_tx = iscsit_put_login_tx,
672 .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
673 .iscsit_immediate_queue = iscsit_immediate_queue,
674 .iscsit_response_queue = iscsit_response_queue,
675 .iscsit_queue_data_in = iscsit_queue_rsp,
676 .iscsit_queue_status = iscsit_queue_rsp,
677 .iscsit_aborted_task = iscsit_aborted_task,
678 .iscsit_xmit_pdu = iscsit_xmit_pdu,
679 .iscsit_get_rx_pdu = iscsit_get_rx_pdu,
680 .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
681 };
682
iscsi_target_init_module(void)683 static int __init iscsi_target_init_module(void)
684 {
685 int ret = 0, size;
686
687 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
688 iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
689 if (!iscsit_global)
690 return -1;
691
692 spin_lock_init(&iscsit_global->ts_bitmap_lock);
693 mutex_init(&auth_id_lock);
694 idr_init(&tiqn_idr);
695
696 ret = target_register_template(&iscsi_ops);
697 if (ret)
698 goto out;
699
700 size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
701 iscsit_global->ts_bitmap = vzalloc(size);
702 if (!iscsit_global->ts_bitmap)
703 goto configfs_out;
704
705 if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
706 pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
707 goto bitmap_out;
708 }
709 cpumask_setall(iscsit_global->allowed_cpumask);
710
711 lio_qr_cache = kmem_cache_create("lio_qr_cache",
712 sizeof(struct iscsi_queue_req),
713 __alignof__(struct iscsi_queue_req), 0, NULL);
714 if (!lio_qr_cache) {
715 pr_err("Unable to kmem_cache_create() for"
716 " lio_qr_cache\n");
717 goto cpumask_out;
718 }
719
720 lio_dr_cache = kmem_cache_create("lio_dr_cache",
721 sizeof(struct iscsi_datain_req),
722 __alignof__(struct iscsi_datain_req), 0, NULL);
723 if (!lio_dr_cache) {
724 pr_err("Unable to kmem_cache_create() for"
725 " lio_dr_cache\n");
726 goto qr_out;
727 }
728
729 lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
730 sizeof(struct iscsi_ooo_cmdsn),
731 __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
732 if (!lio_ooo_cache) {
733 pr_err("Unable to kmem_cache_create() for"
734 " lio_ooo_cache\n");
735 goto dr_out;
736 }
737
738 lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
739 sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
740 0, NULL);
741 if (!lio_r2t_cache) {
742 pr_err("Unable to kmem_cache_create() for"
743 " lio_r2t_cache\n");
744 goto ooo_out;
745 }
746
747 iscsit_register_transport(&iscsi_target_transport);
748
749 if (iscsit_load_discovery_tpg() < 0)
750 goto r2t_out;
751
752 return ret;
753 r2t_out:
754 iscsit_unregister_transport(&iscsi_target_transport);
755 kmem_cache_destroy(lio_r2t_cache);
756 ooo_out:
757 kmem_cache_destroy(lio_ooo_cache);
758 dr_out:
759 kmem_cache_destroy(lio_dr_cache);
760 qr_out:
761 kmem_cache_destroy(lio_qr_cache);
762 cpumask_out:
763 free_cpumask_var(iscsit_global->allowed_cpumask);
764 bitmap_out:
765 vfree(iscsit_global->ts_bitmap);
766 configfs_out:
767 /* XXX: this probably wants it to be it's own unwind step.. */
768 if (iscsit_global->discovery_tpg)
769 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
770 target_unregister_template(&iscsi_ops);
771 out:
772 kfree(iscsit_global);
773 return -ENOMEM;
774 }
775
iscsi_target_cleanup_module(void)776 static void __exit iscsi_target_cleanup_module(void)
777 {
778 iscsit_release_discovery_tpg();
779 iscsit_unregister_transport(&iscsi_target_transport);
780 kmem_cache_destroy(lio_qr_cache);
781 kmem_cache_destroy(lio_dr_cache);
782 kmem_cache_destroy(lio_ooo_cache);
783 kmem_cache_destroy(lio_r2t_cache);
784
785 /*
786 * Shutdown discovery sessions and disable discovery TPG
787 */
788 if (iscsit_global->discovery_tpg)
789 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
790
791 target_unregister_template(&iscsi_ops);
792
793 free_cpumask_var(iscsit_global->allowed_cpumask);
794 vfree(iscsit_global->ts_bitmap);
795 kfree(iscsit_global);
796 }
797
iscsit_add_reject(struct iscsit_conn * conn,u8 reason,unsigned char * buf)798 int iscsit_add_reject(
799 struct iscsit_conn *conn,
800 u8 reason,
801 unsigned char *buf)
802 {
803 struct iscsit_cmd *cmd;
804
805 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
806 if (!cmd)
807 return -1;
808
809 cmd->iscsi_opcode = ISCSI_OP_REJECT;
810 cmd->reject_reason = reason;
811
812 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
813 if (!cmd->buf_ptr) {
814 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
815 iscsit_free_cmd(cmd, false);
816 return -1;
817 }
818
819 spin_lock_bh(&conn->cmd_lock);
820 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
821 spin_unlock_bh(&conn->cmd_lock);
822
823 cmd->i_state = ISTATE_SEND_REJECT;
824 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
825
826 return -1;
827 }
828 EXPORT_SYMBOL(iscsit_add_reject);
829
iscsit_add_reject_from_cmd(struct iscsit_cmd * cmd,u8 reason,bool add_to_conn,unsigned char * buf)830 static int iscsit_add_reject_from_cmd(
831 struct iscsit_cmd *cmd,
832 u8 reason,
833 bool add_to_conn,
834 unsigned char *buf)
835 {
836 struct iscsit_conn *conn;
837 const bool do_put = cmd->se_cmd.se_tfo != NULL;
838
839 if (!cmd->conn) {
840 pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
841 cmd->init_task_tag);
842 return -1;
843 }
844 conn = cmd->conn;
845
846 cmd->iscsi_opcode = ISCSI_OP_REJECT;
847 cmd->reject_reason = reason;
848
849 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
850 if (!cmd->buf_ptr) {
851 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
852 iscsit_free_cmd(cmd, false);
853 return -1;
854 }
855
856 if (add_to_conn) {
857 spin_lock_bh(&conn->cmd_lock);
858 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
859 spin_unlock_bh(&conn->cmd_lock);
860 }
861
862 cmd->i_state = ISTATE_SEND_REJECT;
863 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
864 /*
865 * Perform the kref_put now if se_cmd has already been setup by
866 * scsit_setup_scsi_cmd()
867 */
868 if (do_put) {
869 pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
870 target_put_sess_cmd(&cmd->se_cmd);
871 }
872 return -1;
873 }
874
iscsit_add_reject_cmd(struct iscsit_cmd * cmd,u8 reason,unsigned char * buf)875 static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason,
876 unsigned char *buf)
877 {
878 return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
879 }
880
iscsit_reject_cmd(struct iscsit_cmd * cmd,u8 reason,unsigned char * buf)881 int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf)
882 {
883 return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
884 }
885 EXPORT_SYMBOL(iscsit_reject_cmd);
886
887 /*
888 * Map some portion of the allocated scatterlist to an iovec, suitable for
889 * kernel sockets to copy data in/out.
890 */
iscsit_map_iovec(struct iscsit_cmd * cmd,struct kvec * iov,int nvec,u32 data_offset,u32 data_length)891 static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
892 u32 data_offset, u32 data_length)
893 {
894 u32 i = 0, orig_data_length = data_length;
895 struct scatterlist *sg;
896 unsigned int page_off;
897
898 /*
899 * We know each entry in t_data_sg contains a page.
900 */
901 u32 ent = data_offset / PAGE_SIZE;
902
903 if (!data_length)
904 return 0;
905
906 if (ent >= cmd->se_cmd.t_data_nents) {
907 pr_err("Initial page entry out-of-bounds\n");
908 goto overflow;
909 }
910
911 sg = &cmd->se_cmd.t_data_sg[ent];
912 page_off = (data_offset % PAGE_SIZE);
913
914 cmd->first_data_sg = sg;
915 cmd->first_data_sg_off = page_off;
916
917 while (data_length) {
918 u32 cur_len;
919
920 if (WARN_ON_ONCE(!sg || i >= nvec))
921 goto overflow;
922
923 cur_len = min_t(u32, data_length, sg->length - page_off);
924
925 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
926 iov[i].iov_len = cur_len;
927
928 data_length -= cur_len;
929 page_off = 0;
930 sg = sg_next(sg);
931 i++;
932 }
933
934 cmd->kmapped_nents = i;
935
936 return i;
937
938 overflow:
939 pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
940 data_offset, orig_data_length, i, nvec);
941 for_each_sg(cmd->se_cmd.t_data_sg, sg,
942 cmd->se_cmd.t_data_nents, i) {
943 pr_err("[%d] off %d len %d\n",
944 i, sg->offset, sg->length);
945 }
946 return -1;
947 }
948
iscsit_unmap_iovec(struct iscsit_cmd * cmd)949 static void iscsit_unmap_iovec(struct iscsit_cmd *cmd)
950 {
951 u32 i;
952 struct scatterlist *sg;
953
954 sg = cmd->first_data_sg;
955
956 for (i = 0; i < cmd->kmapped_nents; i++)
957 kunmap(sg_page(&sg[i]));
958 }
959
iscsit_ack_from_expstatsn(struct iscsit_conn * conn,u32 exp_statsn)960 static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn)
961 {
962 LIST_HEAD(ack_list);
963 struct iscsit_cmd *cmd, *cmd_p;
964
965 conn->exp_statsn = exp_statsn;
966
967 if (conn->sess->sess_ops->RDMAExtensions)
968 return;
969
970 spin_lock_bh(&conn->cmd_lock);
971 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
972 spin_lock(&cmd->istate_lock);
973 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
974 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
975 cmd->i_state = ISTATE_REMOVE;
976 spin_unlock(&cmd->istate_lock);
977 list_move_tail(&cmd->i_conn_node, &ack_list);
978 continue;
979 }
980 spin_unlock(&cmd->istate_lock);
981 }
982 spin_unlock_bh(&conn->cmd_lock);
983
984 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
985 list_del_init(&cmd->i_conn_node);
986 iscsit_free_cmd(cmd, false);
987 }
988 }
989
iscsit_allocate_iovecs(struct iscsit_cmd * cmd)990 static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd)
991 {
992 u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
993
994 iov_count += ISCSI_IOV_DATA_BUFFER;
995 cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
996 if (!cmd->iov_data)
997 return -ENOMEM;
998
999 cmd->orig_iov_data_count = iov_count;
1000 return 0;
1001 }
1002
iscsit_setup_scsi_cmd(struct iscsit_conn * conn,struct iscsit_cmd * cmd,unsigned char * buf)1003 int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1004 unsigned char *buf)
1005 {
1006 int data_direction, payload_length;
1007 struct iscsi_ecdb_ahdr *ecdb_ahdr;
1008 struct iscsi_scsi_req *hdr;
1009 int iscsi_task_attr;
1010 unsigned char *cdb;
1011 int sam_task_attr;
1012
1013 atomic_long_inc(&conn->sess->cmd_pdus);
1014
1015 hdr = (struct iscsi_scsi_req *) buf;
1016 payload_length = ntoh24(hdr->dlength);
1017
1018 /* FIXME; Add checks for AdditionalHeaderSegment */
1019
1020 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
1021 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1022 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
1023 " not set. Bad iSCSI Initiator.\n");
1024 return iscsit_add_reject_cmd(cmd,
1025 ISCSI_REASON_BOOKMARK_INVALID, buf);
1026 }
1027
1028 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
1029 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
1030 /*
1031 * From RFC-3720 Section 10.3.1:
1032 *
1033 * "Either or both of R and W MAY be 1 when either the
1034 * Expected Data Transfer Length and/or Bidirectional Read
1035 * Expected Data Transfer Length are 0"
1036 *
1037 * For this case, go ahead and clear the unnecssary bits
1038 * to avoid any confusion with ->data_direction.
1039 */
1040 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
1041 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
1042
1043 pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
1044 " set when Expected Data Transfer Length is 0 for"
1045 " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
1046 }
1047
1048 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
1049 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
1050 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
1051 " MUST be set if Expected Data Transfer Length is not 0."
1052 " Bad iSCSI Initiator\n");
1053 return iscsit_add_reject_cmd(cmd,
1054 ISCSI_REASON_BOOKMARK_INVALID, buf);
1055 }
1056
1057 if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
1058 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
1059 pr_err("Bidirectional operations not supported!\n");
1060 return iscsit_add_reject_cmd(cmd,
1061 ISCSI_REASON_BOOKMARK_INVALID, buf);
1062 }
1063
1064 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1065 pr_err("Illegally set Immediate Bit in iSCSI Initiator"
1066 " Scsi Command PDU.\n");
1067 return iscsit_add_reject_cmd(cmd,
1068 ISCSI_REASON_BOOKMARK_INVALID, buf);
1069 }
1070
1071 if (payload_length && !conn->sess->sess_ops->ImmediateData) {
1072 pr_err("ImmediateData=No but DataSegmentLength=%u,"
1073 " protocol error.\n", payload_length);
1074 return iscsit_add_reject_cmd(cmd,
1075 ISCSI_REASON_PROTOCOL_ERROR, buf);
1076 }
1077
1078 if ((be32_to_cpu(hdr->data_length) == payload_length) &&
1079 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
1080 pr_err("Expected Data Transfer Length and Length of"
1081 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
1082 " bit is not set protocol error\n");
1083 return iscsit_add_reject_cmd(cmd,
1084 ISCSI_REASON_PROTOCOL_ERROR, buf);
1085 }
1086
1087 if (payload_length > be32_to_cpu(hdr->data_length)) {
1088 pr_err("DataSegmentLength: %u is greater than"
1089 " EDTL: %u, protocol error.\n", payload_length,
1090 hdr->data_length);
1091 return iscsit_add_reject_cmd(cmd,
1092 ISCSI_REASON_PROTOCOL_ERROR, buf);
1093 }
1094
1095 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1096 pr_err("DataSegmentLength: %u is greater than"
1097 " MaxXmitDataSegmentLength: %u, protocol error.\n",
1098 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1099 return iscsit_add_reject_cmd(cmd,
1100 ISCSI_REASON_PROTOCOL_ERROR, buf);
1101 }
1102
1103 if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
1104 pr_err("DataSegmentLength: %u is greater than"
1105 " FirstBurstLength: %u, protocol error.\n",
1106 payload_length, conn->sess->sess_ops->FirstBurstLength);
1107 return iscsit_add_reject_cmd(cmd,
1108 ISCSI_REASON_BOOKMARK_INVALID, buf);
1109 }
1110
1111 cdb = hdr->cdb;
1112
1113 if (hdr->hlength) {
1114 ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
1115 if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
1116 pr_err("Additional Header Segment type %d not supported!\n",
1117 ecdb_ahdr->ahstype);
1118 return iscsit_add_reject_cmd(cmd,
1119 ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
1120 }
1121
1122 cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
1123 GFP_KERNEL);
1124 if (cdb == NULL)
1125 return iscsit_add_reject_cmd(cmd,
1126 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1127 memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
1128 memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
1129 be16_to_cpu(ecdb_ahdr->ahslength) - 1);
1130 }
1131
1132 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
1133 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
1134 DMA_NONE;
1135
1136 cmd->data_direction = data_direction;
1137 iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
1138 /*
1139 * Figure out the SAM Task Attribute for the incoming SCSI CDB
1140 */
1141 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
1142 (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
1143 sam_task_attr = TCM_SIMPLE_TAG;
1144 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
1145 sam_task_attr = TCM_ORDERED_TAG;
1146 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
1147 sam_task_attr = TCM_HEAD_TAG;
1148 else if (iscsi_task_attr == ISCSI_ATTR_ACA)
1149 sam_task_attr = TCM_ACA_TAG;
1150 else {
1151 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
1152 " TCM_SIMPLE_TAG\n", iscsi_task_attr);
1153 sam_task_attr = TCM_SIMPLE_TAG;
1154 }
1155
1156 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
1157 cmd->i_state = ISTATE_NEW_CMD;
1158 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1159 cmd->immediate_data = (payload_length) ? 1 : 0;
1160 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
1161 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
1162 if (cmd->unsolicited_data)
1163 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
1164
1165 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1166 if (hdr->flags & ISCSI_FLAG_CMD_READ)
1167 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
1168 else
1169 cmd->targ_xfer_tag = 0xFFFFFFFF;
1170 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1171 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1172 cmd->first_burst_len = payload_length;
1173
1174 if (!conn->sess->sess_ops->RDMAExtensions &&
1175 cmd->data_direction == DMA_FROM_DEVICE) {
1176 struct iscsi_datain_req *dr;
1177
1178 dr = iscsit_allocate_datain_req();
1179 if (!dr) {
1180 if (cdb != hdr->cdb)
1181 kfree(cdb);
1182 return iscsit_add_reject_cmd(cmd,
1183 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1184 }
1185
1186 iscsit_attach_datain_req(cmd, dr);
1187 }
1188
1189 /*
1190 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
1191 */
1192 __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
1193 conn->sess->se_sess, be32_to_cpu(hdr->data_length),
1194 cmd->data_direction, sam_task_attr,
1195 cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
1196
1197 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
1198 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
1199 hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
1200 conn->cid);
1201
1202 target_get_sess_cmd(&cmd->se_cmd, true);
1203
1204 cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
1205 cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
1206 GFP_KERNEL);
1207
1208 if (cdb != hdr->cdb)
1209 kfree(cdb);
1210
1211 if (cmd->sense_reason) {
1212 if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
1213 return iscsit_add_reject_cmd(cmd,
1214 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1215 }
1216
1217 goto attach_cmd;
1218 }
1219
1220 cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
1221 if (cmd->sense_reason)
1222 goto attach_cmd;
1223
1224 cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
1225 if (cmd->sense_reason)
1226 goto attach_cmd;
1227
1228 if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
1229 return iscsit_add_reject_cmd(cmd,
1230 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1231 }
1232
1233 attach_cmd:
1234 spin_lock_bh(&conn->cmd_lock);
1235 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1236 spin_unlock_bh(&conn->cmd_lock);
1237 /*
1238 * Check if we need to delay processing because of ALUA
1239 * Active/NonOptimized primary access state..
1240 */
1241 core_alua_check_nonop_delay(&cmd->se_cmd);
1242
1243 return 0;
1244 }
1245 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
1246
iscsit_set_unsolicited_dataout(struct iscsit_cmd * cmd)1247 void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd)
1248 {
1249 iscsit_set_dataout_sequence_values(cmd);
1250
1251 spin_lock_bh(&cmd->dataout_timeout_lock);
1252 iscsit_start_dataout_timer(cmd, cmd->conn);
1253 spin_unlock_bh(&cmd->dataout_timeout_lock);
1254 }
1255 EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
1256
iscsit_process_scsi_cmd(struct iscsit_conn * conn,struct iscsit_cmd * cmd,struct iscsi_scsi_req * hdr)1257 int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1258 struct iscsi_scsi_req *hdr)
1259 {
1260 int cmdsn_ret = 0;
1261 /*
1262 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1263 * the Immediate Bit is not set, and no Immediate
1264 * Data is attached.
1265 *
1266 * A PDU/CmdSN carrying Immediate Data can only
1267 * be processed after the DataCRC has passed.
1268 * If the DataCRC fails, the CmdSN MUST NOT
1269 * be acknowledged. (See below)
1270 */
1271 if (!cmd->immediate_data) {
1272 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1273 (unsigned char *)hdr, hdr->cmdsn);
1274 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1275 return -1;
1276 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1277 target_put_sess_cmd(&cmd->se_cmd);
1278 return 0;
1279 }
1280 }
1281
1282 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1283
1284 /*
1285 * If no Immediate Data is attached, it's OK to return now.
1286 */
1287 if (!cmd->immediate_data) {
1288 if (!cmd->sense_reason && cmd->unsolicited_data)
1289 iscsit_set_unsolicited_dataout(cmd);
1290 if (!cmd->sense_reason)
1291 return 0;
1292
1293 target_put_sess_cmd(&cmd->se_cmd);
1294 return 0;
1295 }
1296
1297 /*
1298 * Early CHECK_CONDITIONs with ImmediateData never make it to command
1299 * execution. These exceptions are processed in CmdSN order using
1300 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
1301 */
1302 if (cmd->sense_reason)
1303 return 1;
1304 /*
1305 * Call directly into transport_generic_new_cmd() to perform
1306 * the backend memory allocation.
1307 */
1308 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
1309 if (cmd->sense_reason)
1310 return 1;
1311
1312 return 0;
1313 }
1314 EXPORT_SYMBOL(iscsit_process_scsi_cmd);
1315
1316 static int
iscsit_get_immediate_data(struct iscsit_cmd * cmd,struct iscsi_scsi_req * hdr,bool dump_payload)1317 iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
1318 bool dump_payload)
1319 {
1320 int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1321 int rc;
1322
1323 /*
1324 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
1325 */
1326 if (dump_payload) {
1327 u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
1328 cmd->first_burst_len);
1329
1330 pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
1331 cmd->se_cmd.data_length, cmd->write_data_done,
1332 cmd->first_burst_len, length);
1333 rc = iscsit_dump_data_payload(cmd->conn, length, 1);
1334 pr_debug("Finished dumping immediate data\n");
1335 if (rc < 0)
1336 immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
1337 } else {
1338 immed_ret = iscsit_handle_immediate_data(cmd, hdr,
1339 cmd->first_burst_len);
1340 }
1341
1342 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1343 /*
1344 * A PDU/CmdSN carrying Immediate Data passed
1345 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1346 * Immediate Bit is not set.
1347 */
1348 cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
1349 (unsigned char *)hdr, hdr->cmdsn);
1350 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1351 return -1;
1352
1353 if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1354 target_put_sess_cmd(&cmd->se_cmd);
1355
1356 return 0;
1357 } else if (cmd->unsolicited_data)
1358 iscsit_set_unsolicited_dataout(cmd);
1359
1360 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1361 /*
1362 * Immediate Data failed DataCRC and ERL>=1,
1363 * silently drop this PDU and let the initiator
1364 * plug the CmdSN gap.
1365 *
1366 * FIXME: Send Unsolicited NOPIN with reserved
1367 * TTT here to help the initiator figure out
1368 * the missing CmdSN, although they should be
1369 * intelligent enough to determine the missing
1370 * CmdSN and issue a retry to plug the sequence.
1371 */
1372 cmd->i_state = ISTATE_REMOVE;
1373 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
1374 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1375 return -1;
1376
1377 return 0;
1378 }
1379
1380 static int
iscsit_handle_scsi_cmd(struct iscsit_conn * conn,struct iscsit_cmd * cmd,unsigned char * buf)1381 iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1382 unsigned char *buf)
1383 {
1384 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1385 int rc, immed_data;
1386 bool dump_payload = false;
1387
1388 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1389 if (rc < 0)
1390 return 0;
1391 /*
1392 * Allocation iovecs needed for struct socket operations for
1393 * traditional iSCSI block I/O.
1394 */
1395 if (iscsit_allocate_iovecs(cmd) < 0) {
1396 return iscsit_reject_cmd(cmd,
1397 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1398 }
1399 immed_data = cmd->immediate_data;
1400
1401 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1402 if (rc < 0)
1403 return rc;
1404 else if (rc > 0)
1405 dump_payload = true;
1406
1407 if (!immed_data)
1408 return 0;
1409
1410 return iscsit_get_immediate_data(cmd, hdr, dump_payload);
1411 }
1412
iscsit_do_crypto_hash_sg(struct ahash_request * hash,struct iscsit_cmd * cmd,u32 data_offset,u32 data_length,u32 padding,u8 * pad_bytes)1413 static u32 iscsit_do_crypto_hash_sg(
1414 struct ahash_request *hash,
1415 struct iscsit_cmd *cmd,
1416 u32 data_offset,
1417 u32 data_length,
1418 u32 padding,
1419 u8 *pad_bytes)
1420 {
1421 u32 data_crc;
1422 struct scatterlist *sg;
1423 unsigned int page_off;
1424
1425 crypto_ahash_init(hash);
1426
1427 sg = cmd->first_data_sg;
1428 page_off = cmd->first_data_sg_off;
1429
1430 if (data_length && page_off) {
1431 struct scatterlist first_sg;
1432 u32 len = min_t(u32, data_length, sg->length - page_off);
1433
1434 sg_init_table(&first_sg, 1);
1435 sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
1436
1437 ahash_request_set_crypt(hash, &first_sg, NULL, len);
1438 crypto_ahash_update(hash);
1439
1440 data_length -= len;
1441 sg = sg_next(sg);
1442 }
1443
1444 while (data_length) {
1445 u32 cur_len = min_t(u32, data_length, sg->length);
1446
1447 ahash_request_set_crypt(hash, sg, NULL, cur_len);
1448 crypto_ahash_update(hash);
1449
1450 data_length -= cur_len;
1451 /* iscsit_map_iovec has already checked for invalid sg pointers */
1452 sg = sg_next(sg);
1453 }
1454
1455 if (padding) {
1456 struct scatterlist pad_sg;
1457
1458 sg_init_one(&pad_sg, pad_bytes, padding);
1459 ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
1460 padding);
1461 crypto_ahash_finup(hash);
1462 } else {
1463 ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
1464 crypto_ahash_final(hash);
1465 }
1466
1467 return data_crc;
1468 }
1469
iscsit_do_crypto_hash_buf(struct ahash_request * hash,const void * buf,u32 payload_length,u32 padding,const void * pad_bytes,void * data_crc)1470 static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
1471 const void *buf, u32 payload_length, u32 padding,
1472 const void *pad_bytes, void *data_crc)
1473 {
1474 struct scatterlist sg[2];
1475
1476 sg_init_table(sg, ARRAY_SIZE(sg));
1477 sg_set_buf(sg, buf, payload_length);
1478 if (padding)
1479 sg_set_buf(sg + 1, pad_bytes, padding);
1480
1481 ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
1482
1483 crypto_ahash_digest(hash);
1484 }
1485
1486 int
__iscsit_check_dataout_hdr(struct iscsit_conn * conn,void * buf,struct iscsit_cmd * cmd,u32 payload_length,bool * success)1487 __iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
1488 struct iscsit_cmd *cmd, u32 payload_length,
1489 bool *success)
1490 {
1491 struct iscsi_data *hdr = buf;
1492 struct se_cmd *se_cmd;
1493 int rc;
1494
1495 /* iSCSI write */
1496 atomic_long_add(payload_length, &conn->sess->rx_data_octets);
1497
1498 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1499 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1500 hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
1501 payload_length, conn->cid);
1502
1503 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
1504 pr_err("Command ITT: 0x%08x received DataOUT after"
1505 " last DataOUT received, dumping payload\n",
1506 cmd->init_task_tag);
1507 return iscsit_dump_data_payload(conn, payload_length, 1);
1508 }
1509
1510 if (cmd->data_direction != DMA_TO_DEVICE) {
1511 pr_err("Command ITT: 0x%08x received DataOUT for a"
1512 " NON-WRITE command.\n", cmd->init_task_tag);
1513 return iscsit_dump_data_payload(conn, payload_length, 1);
1514 }
1515 se_cmd = &cmd->se_cmd;
1516 iscsit_mod_dataout_timer(cmd);
1517
1518 if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
1519 pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
1520 be32_to_cpu(hdr->offset), payload_length,
1521 cmd->se_cmd.data_length);
1522 return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
1523 }
1524
1525 if (cmd->unsolicited_data) {
1526 int dump_unsolicited_data = 0;
1527
1528 if (conn->sess->sess_ops->InitialR2T) {
1529 pr_err("Received unexpected unsolicited data"
1530 " while InitialR2T=Yes, protocol error.\n");
1531 transport_send_check_condition_and_sense(&cmd->se_cmd,
1532 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
1533 return -1;
1534 }
1535 /*
1536 * Special case for dealing with Unsolicited DataOUT
1537 * and Unsupported SAM WRITE Opcodes and SE resource allocation
1538 * failures;
1539 */
1540
1541 /* Something's amiss if we're not in WRITE_PENDING state... */
1542 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
1543 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
1544 dump_unsolicited_data = 1;
1545
1546 if (dump_unsolicited_data) {
1547 /*
1548 * Check if a delayed TASK_ABORTED status needs to
1549 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1550 * received with the unsolicited data out.
1551 */
1552 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1553 iscsit_stop_dataout_timer(cmd);
1554
1555 return iscsit_dump_data_payload(conn, payload_length, 1);
1556 }
1557 } else {
1558 /*
1559 * For the normal solicited data path:
1560 *
1561 * Check for a delayed TASK_ABORTED status and dump any
1562 * incoming data out payload if one exists. Also, when the
1563 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
1564 * data out sequence, we decrement outstanding_r2ts. Once
1565 * outstanding_r2ts reaches zero, go ahead and send the delayed
1566 * TASK_ABORTED status.
1567 */
1568 if (se_cmd->transport_state & CMD_T_ABORTED) {
1569 if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
1570 --cmd->outstanding_r2ts < 1)
1571 iscsit_stop_dataout_timer(cmd);
1572
1573 return iscsit_dump_data_payload(conn, payload_length, 1);
1574 }
1575 }
1576 /*
1577 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1578 * within-command recovery checks before receiving the payload.
1579 */
1580 rc = iscsit_check_pre_dataout(cmd, buf);
1581 if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
1582 return 0;
1583 else if (rc == DATAOUT_CANNOT_RECOVER)
1584 return -1;
1585 *success = true;
1586 return 0;
1587 }
1588 EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
1589
1590 int
iscsit_check_dataout_hdr(struct iscsit_conn * conn,void * buf,struct iscsit_cmd ** out_cmd)1591 iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
1592 struct iscsit_cmd **out_cmd)
1593 {
1594 struct iscsi_data *hdr = buf;
1595 struct iscsit_cmd *cmd;
1596 u32 payload_length = ntoh24(hdr->dlength);
1597 int rc;
1598 bool success = false;
1599
1600 if (!payload_length) {
1601 pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
1602 return 0;
1603 }
1604
1605 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1606 pr_err_ratelimited("DataSegmentLength: %u is greater than"
1607 " MaxXmitDataSegmentLength: %u\n", payload_length,
1608 conn->conn_ops->MaxXmitDataSegmentLength);
1609 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
1610 }
1611
1612 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
1613 if (!cmd)
1614 return 0;
1615
1616 rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
1617
1618 if (success)
1619 *out_cmd = cmd;
1620
1621 return rc;
1622 }
1623 EXPORT_SYMBOL(iscsit_check_dataout_hdr);
1624
1625 static int
iscsit_get_dataout(struct iscsit_conn * conn,struct iscsit_cmd * cmd,struct iscsi_data * hdr)1626 iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1627 struct iscsi_data *hdr)
1628 {
1629 struct kvec *iov;
1630 u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
1631 u32 payload_length;
1632 int iov_ret, data_crc_failed = 0;
1633
1634 payload_length = min_t(u32, cmd->se_cmd.data_length,
1635 ntoh24(hdr->dlength));
1636 rx_size += payload_length;
1637 iov = &cmd->iov_data[0];
1638
1639 iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
1640 be32_to_cpu(hdr->offset), payload_length);
1641 if (iov_ret < 0)
1642 return -1;
1643
1644 iov_count += iov_ret;
1645
1646 padding = ((-payload_length) & 3);
1647 if (padding != 0) {
1648 iov[iov_count].iov_base = cmd->pad_bytes;
1649 iov[iov_count++].iov_len = padding;
1650 rx_size += padding;
1651 pr_debug("Receiving %u padding bytes.\n", padding);
1652 }
1653
1654 if (conn->conn_ops->DataDigest) {
1655 iov[iov_count].iov_base = &checksum;
1656 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1657 rx_size += ISCSI_CRC_LEN;
1658 }
1659
1660 WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
1661 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1662
1663 iscsit_unmap_iovec(cmd);
1664
1665 if (rx_got != rx_size)
1666 return -1;
1667
1668 if (conn->conn_ops->DataDigest) {
1669 u32 data_crc;
1670
1671 data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
1672 be32_to_cpu(hdr->offset),
1673 payload_length, padding,
1674 cmd->pad_bytes);
1675
1676 if (checksum != data_crc) {
1677 pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1678 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1679 " does not match computed 0x%08x\n",
1680 hdr->itt, hdr->offset, payload_length,
1681 hdr->datasn, checksum, data_crc);
1682 data_crc_failed = 1;
1683 } else {
1684 pr_debug("Got CRC32C DataDigest 0x%08x for"
1685 " %u bytes of Data Out\n", checksum,
1686 payload_length);
1687 }
1688 }
1689
1690 return data_crc_failed;
1691 }
1692
1693 int
iscsit_check_dataout_payload(struct iscsit_cmd * cmd,struct iscsi_data * hdr,bool data_crc_failed)1694 iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr,
1695 bool data_crc_failed)
1696 {
1697 struct iscsit_conn *conn = cmd->conn;
1698 int rc, ooo_cmdsn;
1699 /*
1700 * Increment post receive data and CRC values or perform
1701 * within-command recovery.
1702 */
1703 rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
1704 if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
1705 return 0;
1706 else if (rc == DATAOUT_SEND_R2T) {
1707 iscsit_set_dataout_sequence_values(cmd);
1708 conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
1709 } else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
1710 /*
1711 * Handle extra special case for out of order
1712 * Unsolicited Data Out.
1713 */
1714 spin_lock_bh(&cmd->istate_lock);
1715 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
1716 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1717 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1718 spin_unlock_bh(&cmd->istate_lock);
1719
1720 iscsit_stop_dataout_timer(cmd);
1721 if (ooo_cmdsn)
1722 return 0;
1723 target_execute_cmd(&cmd->se_cmd);
1724 return 0;
1725 } else /* DATAOUT_CANNOT_RECOVER */
1726 return -1;
1727
1728 return 0;
1729 }
1730 EXPORT_SYMBOL(iscsit_check_dataout_payload);
1731
iscsit_handle_data_out(struct iscsit_conn * conn,unsigned char * buf)1732 static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf)
1733 {
1734 struct iscsit_cmd *cmd = NULL;
1735 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1736 int rc;
1737 bool data_crc_failed = false;
1738
1739 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1740 if (rc < 0)
1741 return 0;
1742 else if (!cmd)
1743 return 0;
1744
1745 rc = iscsit_get_dataout(conn, cmd, hdr);
1746 if (rc < 0)
1747 return rc;
1748 else if (rc > 0)
1749 data_crc_failed = true;
1750
1751 return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
1752 }
1753
iscsit_setup_nop_out(struct iscsit_conn * conn,struct iscsit_cmd * cmd,struct iscsi_nopout * hdr)1754 int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1755 struct iscsi_nopout *hdr)
1756 {
1757 u32 payload_length = ntoh24(hdr->dlength);
1758
1759 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1760 pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
1761 if (!cmd)
1762 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1763 (unsigned char *)hdr);
1764
1765 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1766 (unsigned char *)hdr);
1767 }
1768
1769 if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1770 pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1771 " not set, protocol error.\n");
1772 if (!cmd)
1773 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1774 (unsigned char *)hdr);
1775
1776 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1777 (unsigned char *)hdr);
1778 }
1779
1780 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1781 pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1782 " greater than MaxXmitDataSegmentLength: %u, protocol"
1783 " error.\n", payload_length,
1784 conn->conn_ops->MaxXmitDataSegmentLength);
1785 if (!cmd)
1786 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1787 (unsigned char *)hdr);
1788
1789 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1790 (unsigned char *)hdr);
1791 }
1792
1793 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
1794 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1795 hdr->itt == RESERVED_ITT ? "Response" : "Request",
1796 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1797 payload_length);
1798 /*
1799 * This is not a response to a Unsolicited NopIN, which means
1800 * it can either be a NOPOUT ping request (with a valid ITT),
1801 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
1802 	 * Either way, make sure we allocate a struct iscsit_cmd, as both
1803 * can contain ping data.
1804 */
1805 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1806 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
1807 cmd->i_state = ISTATE_SEND_NOPIN;
1808 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1809 1 : 0);
1810 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1811 cmd->targ_xfer_tag = 0xFFFFFFFF;
1812 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1813 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1814 cmd->data_direction = DMA_NONE;
1815 }
1816
1817 return 0;
1818 }
1819 EXPORT_SYMBOL(iscsit_setup_nop_out);
1820
1821 int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1822 struct iscsi_nopout *hdr)
1823 {
1824 struct iscsit_cmd *cmd_p = NULL;
1825 int cmdsn_ret = 0;
1826 /*
1827 	 * The initiator is expecting a NopIN ping reply.
1828 */
1829 if (hdr->itt != RESERVED_ITT) {
1830 if (!cmd)
1831 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1832 (unsigned char *)hdr);
1833
1834 spin_lock_bh(&conn->cmd_lock);
1835 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1836 spin_unlock_bh(&conn->cmd_lock);
1837
1838 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1839
1840 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1841 iscsit_add_cmd_to_response_queue(cmd, conn,
1842 cmd->i_state);
1843 return 0;
1844 }
1845
1846 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1847 (unsigned char *)hdr, hdr->cmdsn);
1848 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1849 return 0;
1850 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1851 return -1;
1852
1853 return 0;
1854 }
1855 /*
1856 	 * This was a response to an unsolicited NOPIN ping.
1857 */
1858 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
1859 cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
1860 if (!cmd_p)
1861 return -EINVAL;
1862
1863 iscsit_stop_nopin_response_timer(conn);
1864
1865 cmd_p->i_state = ISTATE_REMOVE;
1866 iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
1867
1868 iscsit_start_nopin_timer(conn);
1869 return 0;
1870 }
1871 /*
1872 	 * Otherwise, the initiator is not expecting a NOPIN in response.
1873 * Just ignore for now.
1874 */
1875
1876 if (cmd)
1877 iscsit_free_cmd(cmd, false);
1878
1879 return 0;
1880 }
1881 EXPORT_SYMBOL(iscsit_process_nop_out);
1882
1883 static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1884 unsigned char *buf)
1885 {
1886 unsigned char *ping_data = NULL;
1887 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1888 struct kvec *iov = NULL;
1889 u32 payload_length = ntoh24(hdr->dlength);
1890 int ret;
1891
1892 ret = iscsit_setup_nop_out(conn, cmd, hdr);
1893 if (ret < 0)
1894 return 0;
1895 /*
1896 * Handle NOP-OUT payload for traditional iSCSI sockets
1897 */
1898 if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1899 u32 checksum, data_crc, padding = 0;
1900 int niov = 0, rx_got, rx_size = payload_length;
1901
1902 ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1903 if (!ping_data) {
1904 ret = -1;
1905 goto out;
1906 }
1907
1908 iov = &cmd->iov_misc[0];
1909 iov[niov].iov_base = ping_data;
1910 iov[niov++].iov_len = payload_length;
1911
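		/*
		 * Editor's note (illustration): iSCSI data segments are padded
		 * to a 4-byte boundary on the wire, and ((-payload_length) & 3)
		 * computes that pad.  For example, payload_length = 13 gives
		 * padding = 3, while payload_length = 16 gives padding = 0.
		 * The pad bytes are received into cmd->pad_bytes and dropped.
		 */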
1912 padding = ((-payload_length) & 3);
1913 if (padding != 0) {
1914 pr_debug("Receiving %u additional bytes"
1915 " for padding.\n", padding);
1916 iov[niov].iov_base = &cmd->pad_bytes;
1917 iov[niov++].iov_len = padding;
1918 rx_size += padding;
1919 }
1920 if (conn->conn_ops->DataDigest) {
1921 iov[niov].iov_base = &checksum;
1922 iov[niov++].iov_len = ISCSI_CRC_LEN;
1923 rx_size += ISCSI_CRC_LEN;
1924 }
1925
1926 WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
1927 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
1928 if (rx_got != rx_size) {
1929 ret = -1;
1930 goto out;
1931 }
1932
1933 if (conn->conn_ops->DataDigest) {
1934 iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
1935 payload_length, padding,
1936 cmd->pad_bytes, &data_crc);
1937
1938 if (checksum != data_crc) {
1939 pr_err("Ping data CRC32C DataDigest"
1940 " 0x%08x does not match computed 0x%08x\n",
1941 checksum, data_crc);
1942 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1943 pr_err("Unable to recover from"
1944 " NOPOUT Ping DataCRC failure while in"
1945 " ERL=0.\n");
1946 ret = -1;
1947 goto out;
1948 } else {
1949 /*
1950 * Silently drop this PDU and let the
1951 * initiator plug the CmdSN gap.
1952 */
1953 pr_debug("Dropping NOPOUT"
1954 " Command CmdSN: 0x%08x due to"
1955 " DataCRC error.\n", hdr->cmdsn);
1956 ret = 0;
1957 goto out;
1958 }
1959 } else {
1960 pr_debug("Got CRC32C DataDigest"
1961 " 0x%08x for %u bytes of ping data.\n",
1962 checksum, payload_length);
1963 }
1964 }
1965
1966 ping_data[payload_length] = '\0';
1967 /*
1968 * Attach ping data to struct iscsit_cmd->buf_ptr.
1969 */
1970 cmd->buf_ptr = ping_data;
1971 cmd->buf_ptr_size = payload_length;
1972
1973 pr_debug("Got %u bytes of NOPOUT ping"
1974 " data.\n", payload_length);
1975 pr_debug("Ping Data: \"%s\"\n", ping_data);
1976 }
1977
1978 return iscsit_process_nop_out(conn, cmd, hdr);
1979 out:
1980 if (cmd)
1981 iscsit_free_cmd(cmd, false);
1982
1983 kfree(ping_data);
1984 return ret;
1985 }
1986
1987 static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
1988 {
1989 switch (iscsi_tmf) {
1990 case ISCSI_TM_FUNC_ABORT_TASK:
1991 return TMR_ABORT_TASK;
1992 case ISCSI_TM_FUNC_ABORT_TASK_SET:
1993 return TMR_ABORT_TASK_SET;
1994 case ISCSI_TM_FUNC_CLEAR_ACA:
1995 return TMR_CLEAR_ACA;
1996 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1997 return TMR_CLEAR_TASK_SET;
1998 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1999 return TMR_LUN_RESET;
2000 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
2001 return TMR_TARGET_WARM_RESET;
2002 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
2003 return TMR_TARGET_COLD_RESET;
2004 default:
2005 return TMR_UNKNOWN;
2006 }
2007 }
2008
2009 int
2010 iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2011 unsigned char *buf)
2012 {
2013 struct se_tmr_req *se_tmr;
2014 struct iscsi_tmr_req *tmr_req;
2015 struct iscsi_tm *hdr;
2016 int out_of_order_cmdsn = 0, ret;
2017 u8 function, tcm_function = TMR_UNKNOWN;
2018
2019 hdr = (struct iscsi_tm *) buf;
2020 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2021 function = hdr->flags;
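	/*
	 * Editor's note: the Final (F) bit was just cleared from hdr->flags,
	 * so what remains is the 7-bit Task Management Function code from the
	 * iSCSI TM Request PDU (e.g. ABORT_TASK, LUN_RESET, TASK_REASSIGN).
	 */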
2022
2023 pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
2024 " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
2025 " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
2026 hdr->rtt, hdr->refcmdsn, conn->cid);
2027
2028 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2029 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2030 hdr->rtt != RESERVED_ITT)) {
2031 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
2032 hdr->rtt = RESERVED_ITT;
2033 }
2034
2035 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
2036 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2037 pr_err("Task Management Request TASK_REASSIGN not"
2038 " issued as immediate command, bad iSCSI Initiator"
2039 			" implementation\n");
2040 return iscsit_add_reject_cmd(cmd,
2041 ISCSI_REASON_PROTOCOL_ERROR, buf);
2042 }
2043 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2044 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
2045 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
2046
2047 cmd->data_direction = DMA_NONE;
2048 cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
2049 if (!cmd->tmr_req) {
2050 return iscsit_add_reject_cmd(cmd,
2051 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2052 buf);
2053 }
2054
2055 __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
2056 conn->sess->se_sess, 0, DMA_NONE,
2057 TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
2058 scsilun_to_int(&hdr->lun));
2059
2060 target_get_sess_cmd(&cmd->se_cmd, true);
2061
2062 /*
2063 * TASK_REASSIGN for ERL=2 / connection stays inside of
2064 * LIO-Target $FABRIC_MOD
2065 */
2066 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2067 tcm_function = iscsit_convert_tmf(function);
2068 if (tcm_function == TMR_UNKNOWN) {
2069 pr_err("Unknown iSCSI TMR Function:"
2070 " 0x%02x\n", function);
2071 return iscsit_add_reject_cmd(cmd,
2072 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2073 }
2074 }
2075 ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
2076 GFP_KERNEL);
2077 if (ret < 0)
2078 return iscsit_add_reject_cmd(cmd,
2079 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2080
2081 cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
2082
2083 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
2084 cmd->i_state = ISTATE_SEND_TASKMGTRSP;
2085 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2086 cmd->init_task_tag = hdr->itt;
2087 cmd->targ_xfer_tag = 0xFFFFFFFF;
2088 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2089 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2090 se_tmr = cmd->se_cmd.se_tmr_req;
2091 tmr_req = cmd->tmr_req;
2092 /*
2093 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
2094 */
2095 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2096 ret = transport_lookup_tmr_lun(&cmd->se_cmd);
2097 if (ret < 0) {
2098 se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
2099 goto attach;
2100 }
2101 }
2102
2103 switch (function) {
2104 case ISCSI_TM_FUNC_ABORT_TASK:
2105 se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
2106 if (se_tmr->response)
2107 goto attach;
2108 break;
2109 case ISCSI_TM_FUNC_ABORT_TASK_SET:
2110 case ISCSI_TM_FUNC_CLEAR_ACA:
2111 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
2112 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
2113 break;
2114 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
2115 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
2116 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2117 goto attach;
2118 }
2119 break;
2120 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
2121 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
2122 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2123 goto attach;
2124 }
2125 break;
2126 case ISCSI_TM_FUNC_TASK_REASSIGN:
2127 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
2128 /*
2129 * Perform sanity checks on the ExpDataSN only if the
2130 * TASK_REASSIGN was successful.
2131 */
2132 if (se_tmr->response)
2133 break;
2134
2135 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
2136 return iscsit_add_reject_cmd(cmd,
2137 ISCSI_REASON_BOOKMARK_INVALID, buf);
2138 break;
2139 default:
2140 pr_err("Unknown TMR function: 0x%02x, protocol"
2141 " error.\n", function);
2142 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
2143 goto attach;
2144 }
2145
2146 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2147 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
2148 se_tmr->call_transport = 1;
2149 attach:
2150 spin_lock_bh(&conn->cmd_lock);
2151 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2152 spin_unlock_bh(&conn->cmd_lock);
2153
2154 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2155 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2156 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
2157 out_of_order_cmdsn = 1;
2158 } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2159 target_put_sess_cmd(&cmd->se_cmd);
2160 return 0;
2161 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2162 return -1;
2163 }
2164 }
2165 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2166
2167 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2168 return 0;
2169 /*
2170 * Found the referenced task, send to transport for processing.
2171 */
2172 if (se_tmr->call_transport)
2173 return transport_generic_handle_tmr(&cmd->se_cmd);
2174
2175 /*
2176 * Could not find the referenced LUN, task, or Task Management
2177 * command not authorized or supported. Change state and
2178 * let the tx_thread send the response.
2179 *
2180 * For connection recovery, this is also the default action for
2181 * TMR TASK_REASSIGN.
2182 */
2183 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2184 target_put_sess_cmd(&cmd->se_cmd);
2185 return 0;
2186 }
2187 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
2188
2189 /* #warning FIXME: Support Text Command parameters besides SendTargets */
2190 int
2191 iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2192 struct iscsi_text *hdr)
2193 {
2194 u32 payload_length = ntoh24(hdr->dlength);
2195
2196 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
2197 pr_err("Unable to accept text parameter length: %u"
2198 			" greater than MaxXmitDataSegmentLength %u.\n",
2199 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
2200 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2201 (unsigned char *)hdr);
2202 }
2203
2204 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
2205 (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
2206 pr_err("Multi sequence text commands currently not supported\n");
2207 return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
2208 (unsigned char *)hdr);
2209 }
2210
2211 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
2212 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
2213 hdr->exp_statsn, payload_length);
2214
2215 cmd->iscsi_opcode = ISCSI_OP_TEXT;
2216 cmd->i_state = ISTATE_SEND_TEXTRSP;
2217 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2218 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2219 cmd->targ_xfer_tag = 0xFFFFFFFF;
2220 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2221 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2222 cmd->data_direction = DMA_NONE;
2223 kfree(cmd->text_in_ptr);
2224 cmd->text_in_ptr = NULL;
2225
2226 return 0;
2227 }
2228 EXPORT_SYMBOL(iscsit_setup_text_cmd);
2229
2230 int
2231 iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2232 struct iscsi_text *hdr)
2233 {
2234 unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
2235 int cmdsn_ret;
2236
2237 if (!text_in) {
2238 cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
2239 if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
2240 pr_err("Unable to locate text_in buffer for sendtargets"
2241 " discovery\n");
2242 goto reject;
2243 }
2244 goto empty_sendtargets;
2245 }
2246 if (strncmp("SendTargets=", text_in, 12) != 0) {
2247 pr_err("Received Text Data that is not"
2248 " SendTargets, cannot continue.\n");
2249 goto reject;
2250 }
2251 /* '=' confirmed in strncmp */
2252 text_ptr = strchr(text_in, '=');
2253 BUG_ON(!text_ptr);
2254 if (!strncmp("=All", text_ptr, 5)) {
2255 cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
2256 } else if (!strncmp("=iqn.", text_ptr, 5) ||
2257 !strncmp("=eui.", text_ptr, 5)) {
2258 cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
2259 } else {
2260 pr_err("Unable to locate valid SendTargets%s value\n",
2261 text_ptr);
2262 goto reject;
2263 }
2264
2265 spin_lock_bh(&conn->cmd_lock);
2266 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2267 spin_unlock_bh(&conn->cmd_lock);
2268
2269 empty_sendtargets:
2270 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2271
2272 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2273 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
2274 (unsigned char *)hdr, hdr->cmdsn);
2275 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2276 return -1;
2277
2278 return 0;
2279 }
2280
2281 return iscsit_execute_cmd(cmd, 0);
2282
2283 reject:
2284 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2285 (unsigned char *)hdr);
2286 }
2287 EXPORT_SYMBOL(iscsit_process_text_cmd);
2288
2289 static int
2290 iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2291 unsigned char *buf)
2292 {
2293 struct iscsi_text *hdr = (struct iscsi_text *)buf;
2294 char *text_in = NULL;
2295 u32 payload_length = ntoh24(hdr->dlength);
2296 int rx_size, rc;
2297
2298 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
2299 if (rc < 0)
2300 return 0;
2301
2302 rx_size = payload_length;
2303 if (payload_length) {
2304 u32 checksum = 0, data_crc = 0;
2305 u32 padding = 0;
2306 int niov = 0, rx_got;
2307 struct kvec iov[2];
2308
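		/*
		 * Editor's note (illustration): the receive size is rounded up
		 * to the next 4-byte boundary so the pad bytes land in the same
		 * buffer, e.g. payload_length = 13 gives rx_size = 16 and
		 * padding = rx_size - payload_length = 3.
		 */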
2309 rx_size = ALIGN(payload_length, 4);
2310 text_in = kzalloc(rx_size, GFP_KERNEL);
2311 if (!text_in)
2312 goto reject;
2313
2314 cmd->text_in_ptr = text_in;
2315
2316 memset(iov, 0, sizeof(iov));
2317 iov[niov].iov_base = text_in;
2318 iov[niov++].iov_len = rx_size;
2319
2320 padding = rx_size - payload_length;
2321 if (padding)
2322 pr_debug("Receiving %u additional bytes"
2323 " for padding.\n", padding);
2324 if (conn->conn_ops->DataDigest) {
2325 iov[niov].iov_base = &checksum;
2326 iov[niov++].iov_len = ISCSI_CRC_LEN;
2327 rx_size += ISCSI_CRC_LEN;
2328 }
2329
2330 WARN_ON_ONCE(niov > ARRAY_SIZE(iov));
2331 rx_got = rx_data(conn, &iov[0], niov, rx_size);
2332 if (rx_got != rx_size)
2333 goto reject;
2334
2335 if (conn->conn_ops->DataDigest) {
2336 iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
2337 text_in, rx_size, 0, NULL,
2338 &data_crc);
2339
2340 if (checksum != data_crc) {
2341 pr_err("Text data CRC32C DataDigest"
2342 " 0x%08x does not match computed"
2343 " 0x%08x\n", checksum, data_crc);
2344 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2345 pr_err("Unable to recover from"
2346 " Text Data digest failure while in"
2347 " ERL=0.\n");
2348 goto reject;
2349 } else {
2350 /*
2351 * Silently drop this PDU and let the
2352 * initiator plug the CmdSN gap.
2353 */
2354 pr_debug("Dropping Text"
2355 " Command CmdSN: 0x%08x due to"
2356 " DataCRC error.\n", hdr->cmdsn);
2357 kfree(text_in);
2358 return 0;
2359 }
2360 } else {
2361 pr_debug("Got CRC32C DataDigest"
2362 " 0x%08x for %u bytes of text data.\n",
2363 checksum, payload_length);
2364 }
2365 }
2366 text_in[payload_length - 1] = '\0';
2367 pr_debug("Successfully read %d bytes of text"
2368 " data.\n", payload_length);
2369 }
2370
2371 return iscsit_process_text_cmd(conn, cmd, hdr);
2372
2373 reject:
2374 kfree(cmd->text_in_ptr);
2375 cmd->text_in_ptr = NULL;
2376 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
2377 }
2378
2379 int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2380 {
2381 struct iscsit_conn *conn_p;
2382 struct iscsit_session *sess = conn->sess;
2383
2384 pr_debug("Received logout request CLOSESESSION on CID: %hu"
2385 " for SID: %u.\n", conn->cid, conn->sess->sid);
2386
2387 atomic_set(&sess->session_logout, 1);
2388 atomic_set(&conn->conn_logout_remove, 1);
2389 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2390
2391 iscsit_inc_conn_usage_count(conn);
2392 iscsit_inc_session_usage_count(sess);
2393
2394 spin_lock_bh(&sess->conn_lock);
2395 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2396 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2397 continue;
2398
2399 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2400 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2401 }
2402 spin_unlock_bh(&sess->conn_lock);
2403
2404 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2405
2406 return 0;
2407 }
2408
2409 int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2410 {
2411 struct iscsit_conn *l_conn;
2412 struct iscsit_session *sess = conn->sess;
2413
2414 pr_debug("Received logout request CLOSECONNECTION for CID:"
2415 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2416
2417 /*
2418 * A Logout Request with a CLOSECONNECTION reason code for a CID
2419 * can arrive on a connection with a differing CID.
2420 */
2421 if (conn->cid == cmd->logout_cid) {
2422 spin_lock_bh(&conn->state_lock);
2423 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2424 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2425
2426 atomic_set(&conn->conn_logout_remove, 1);
2427 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2428 iscsit_inc_conn_usage_count(conn);
2429
2430 spin_unlock_bh(&conn->state_lock);
2431 } else {
2432 /*
2433 * Handle all different cid CLOSECONNECTION requests in
2434 		 * iscsit_logout_post_handler_diffcid() so as to give enough
2435 		 * time for any non-immediate command's CmdSN to be
2436 * acknowledged on the connection in question.
2437 *
2438 * Here we simply make sure the CID is still around.
2439 */
2440 l_conn = iscsit_get_conn_from_cid(sess,
2441 cmd->logout_cid);
2442 if (!l_conn) {
2443 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2444 iscsit_add_cmd_to_response_queue(cmd, conn,
2445 cmd->i_state);
2446 return 0;
2447 }
2448
2449 iscsit_dec_conn_usage_count(l_conn);
2450 }
2451
2452 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2453
2454 return 0;
2455 }
2456
2457 int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2458 {
2459 struct iscsit_session *sess = conn->sess;
2460
2461 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2462 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2463
2464 if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2465 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2466 " while ERL!=2.\n");
2467 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2468 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2469 return 0;
2470 }
2471
2472 if (conn->cid == cmd->logout_cid) {
2473 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2474 " with CID: %hu on CID: %hu, implementation error.\n",
2475 cmd->logout_cid, conn->cid);
2476 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2477 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2478 return 0;
2479 }
2480
2481 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2482
2483 return 0;
2484 }
2485
2486 int
2487 iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2488 unsigned char *buf)
2489 {
2490 int cmdsn_ret, logout_remove = 0;
2491 u8 reason_code = 0;
2492 struct iscsi_logout *hdr;
2493 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2494
2495 hdr = (struct iscsi_logout *) buf;
2496 reason_code = (hdr->flags & 0x7f);
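	/*
	 * Editor's note: the Logout reason code occupies the low seven bits of
	 * the flags byte; bit 7 is the Final (F) bit, which is always set for
	 * Logout Requests, hence the 0x7f mask.
	 */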
2497
2498 if (tiqn) {
2499 spin_lock(&tiqn->logout_stats.lock);
2500 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
2501 tiqn->logout_stats.normal_logouts++;
2502 else
2503 tiqn->logout_stats.abnormal_logouts++;
2504 spin_unlock(&tiqn->logout_stats.lock);
2505 }
2506
2507 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
2508 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
2509 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
2510 hdr->cid, conn->cid);
2511
2512 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2513 pr_err("Received logout request on connection that"
2514 " is not in logged in state, ignoring request.\n");
2515 iscsit_free_cmd(cmd, false);
2516 return 0;
2517 }
2518
2519 cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
2520 cmd->i_state = ISTATE_SEND_LOGOUTRSP;
2521 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2522 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2523 cmd->targ_xfer_tag = 0xFFFFFFFF;
2524 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2525 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2526 cmd->logout_cid = be16_to_cpu(hdr->cid);
2527 cmd->logout_reason = reason_code;
2528 cmd->data_direction = DMA_NONE;
2529
2530 /*
2531 * We need to sleep in these cases (by returning 1) until the Logout
2532 * Response gets sent in the tx thread.
2533 */
2534 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2535 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2536 be16_to_cpu(hdr->cid) == conn->cid))
2537 logout_remove = 1;
2538
2539 spin_lock_bh(&conn->cmd_lock);
2540 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2541 spin_unlock_bh(&conn->cmd_lock);
2542
2543 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2544 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2545
2546 /*
2547 * Immediate commands are executed, well, immediately.
2548 * Non-Immediate Logout Commands are executed in CmdSN order.
2549 */
2550 if (cmd->immediate_cmd) {
2551 int ret = iscsit_execute_cmd(cmd, 0);
2552
2553 if (ret < 0)
2554 return ret;
2555 } else {
2556 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2557 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
2558 logout_remove = 0;
2559 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2560 return -1;
2561 }
2562
2563 return logout_remove;
2564 }
2565 EXPORT_SYMBOL(iscsit_handle_logout_cmd);
2566
2567 int iscsit_handle_snack(
2568 struct iscsit_conn *conn,
2569 unsigned char *buf)
2570 {
2571 struct iscsi_snack *hdr;
2572
2573 hdr = (struct iscsi_snack *) buf;
2574 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2575
2576 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2577 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2578 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2579 hdr->begrun, hdr->runlength, conn->cid);
2580
2581 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2582 pr_err("Initiator sent SNACK request while in"
2583 " ErrorRecoveryLevel=0.\n");
2584 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2585 buf);
2586 }
2587 /*
2588 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
2589 	 * call from inside iscsit_handle_recovery_datain_or_r2t().
2590 */
2591 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2592 case 0:
2593 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2594 hdr->itt,
2595 be32_to_cpu(hdr->ttt),
2596 be32_to_cpu(hdr->begrun),
2597 be32_to_cpu(hdr->runlength));
2598 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2599 return iscsit_handle_status_snack(conn, hdr->itt,
2600 be32_to_cpu(hdr->ttt),
2601 be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
2602 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2603 return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
2604 be32_to_cpu(hdr->begrun),
2605 be32_to_cpu(hdr->runlength));
2606 case ISCSI_FLAG_SNACK_TYPE_RDATA:
2607 /* FIXME: Support R-Data SNACK */
2608 pr_err("R-Data SNACK Not Supported.\n");
2609 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2610 buf);
2611 default:
2612 pr_err("Unknown SNACK type 0x%02x, protocol"
2613 " error.\n", hdr->flags & 0x0f);
2614 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2615 buf);
2616 }
2617
2618 return 0;
2619 }
2620 EXPORT_SYMBOL(iscsit_handle_snack);
2621
2622 static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn)
2623 {
2624 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2625 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2626 wait_for_completion_interruptible_timeout(
2627 &conn->rx_half_close_comp,
2628 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
2629 }
2630 }
2631
2632 static int iscsit_handle_immediate_data(
2633 struct iscsit_cmd *cmd,
2634 struct iscsi_scsi_req *hdr,
2635 u32 length)
2636 {
2637 int iov_ret, rx_got = 0, rx_size = 0;
2638 u32 checksum, iov_count = 0, padding = 0;
2639 struct iscsit_conn *conn = cmd->conn;
2640 struct kvec *iov;
2641 void *overflow_buf = NULL;
2642
2643 BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
2644 rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
2645 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data,
2646 cmd->orig_iov_data_count - 2,
2647 cmd->write_data_done, rx_size);
2648 if (iov_ret < 0)
2649 return IMMEDIATE_DATA_CANNOT_RECOVER;
2650
2651 iov_count = iov_ret;
2652 iov = &cmd->iov_data[0];
2653 if (rx_size < length) {
2654 /*
2655 * Special case: length of immediate data exceeds the data
2656 * buffer size derived from the CDB.
2657 */
2658 overflow_buf = kmalloc(length - rx_size, GFP_KERNEL);
2659 if (!overflow_buf) {
2660 iscsit_unmap_iovec(cmd);
2661 return IMMEDIATE_DATA_CANNOT_RECOVER;
2662 }
2663 cmd->overflow_buf = overflow_buf;
2664 iov[iov_count].iov_base = overflow_buf;
2665 iov[iov_count].iov_len = length - rx_size;
2666 iov_count++;
2667 rx_size = length;
2668 }
2669
2670 padding = ((-length) & 3);
2671 if (padding != 0) {
2672 iov[iov_count].iov_base = cmd->pad_bytes;
2673 iov[iov_count++].iov_len = padding;
2674 rx_size += padding;
2675 }
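	/*
	 * Editor's note: the receive vector being assembled here is, in order,
	 * the iovecs mapped onto the command's data SGL, an optional overflow
	 * buffer for immediate data beyond the CDB-derived length, up to 3 pad
	 * bytes, and (if DataDigest was negotiated) a 4-byte CRC32C added just
	 * below.
	 */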
2676
2677 if (conn->conn_ops->DataDigest) {
2678 iov[iov_count].iov_base = &checksum;
2679 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2680 rx_size += ISCSI_CRC_LEN;
2681 }
2682
2683 WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
2684 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
2685
2686 iscsit_unmap_iovec(cmd);
2687
2688 if (rx_got != rx_size) {
2689 iscsit_rx_thread_wait_for_tcp(conn);
2690 return IMMEDIATE_DATA_CANNOT_RECOVER;
2691 }
2692
2693 if (conn->conn_ops->DataDigest) {
2694 u32 data_crc;
2695
2696 data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
2697 cmd->write_data_done, length, padding,
2698 cmd->pad_bytes);
2699
2700 if (checksum != data_crc) {
2701 pr_err("ImmediateData CRC32C DataDigest 0x%08x"
2702 " does not match computed 0x%08x\n", checksum,
2703 data_crc);
2704
2705 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2706 pr_err("Unable to recover from"
2707 " Immediate Data digest failure while"
2708 " in ERL=0.\n");
2709 iscsit_reject_cmd(cmd,
2710 ISCSI_REASON_DATA_DIGEST_ERROR,
2711 (unsigned char *)hdr);
2712 return IMMEDIATE_DATA_CANNOT_RECOVER;
2713 } else {
2714 iscsit_reject_cmd(cmd,
2715 ISCSI_REASON_DATA_DIGEST_ERROR,
2716 (unsigned char *)hdr);
2717 return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
2718 }
2719 } else {
2720 pr_debug("Got CRC32C DataDigest 0x%08x for"
2721 " %u bytes of Immediate Data\n", checksum,
2722 length);
2723 }
2724 }
2725
2726 cmd->write_data_done += length;
2727
2728 if (cmd->write_data_done == cmd->se_cmd.data_length) {
2729 spin_lock_bh(&cmd->istate_lock);
2730 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
2731 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
2732 spin_unlock_bh(&cmd->istate_lock);
2733 }
2734
2735 return IMMEDIATE_DATA_NORMAL_OPERATION;
2736 }
2737
2738 /* #warning iscsit_build_conn_drop_async_message() only sends out on connections
2739 with active network interface */
2740 static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn)
2741 {
2742 struct iscsit_cmd *cmd;
2743 struct iscsit_conn *conn_p;
2744 bool found = false;
2745
2746 lockdep_assert_held(&conn->sess->conn_lock);
2747
2748 /*
2749 	 * Only send an Asynchronous Message on connections whose network
2750 * interface is still functional.
2751 */
2752 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2753 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2754 iscsit_inc_conn_usage_count(conn_p);
2755 found = true;
2756 break;
2757 }
2758 }
2759
2760 if (!found)
2761 return;
2762
2763 cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
2764 if (!cmd) {
2765 iscsit_dec_conn_usage_count(conn_p);
2766 return;
2767 }
2768
2769 cmd->logout_cid = conn->cid;
2770 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2771 cmd->i_state = ISTATE_SEND_ASYNCMSG;
2772
2773 spin_lock_bh(&conn_p->cmd_lock);
2774 list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
2775 spin_unlock_bh(&conn_p->cmd_lock);
2776
2777 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2778 iscsit_dec_conn_usage_count(conn_p);
2779 }
2780
2781 static int iscsit_send_conn_drop_async_message(
2782 struct iscsit_cmd *cmd,
2783 struct iscsit_conn *conn)
2784 {
2785 struct iscsi_async *hdr;
2786
2787 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2788
2789 hdr = (struct iscsi_async *) cmd->pdu;
2790 hdr->opcode = ISCSI_OP_ASYNC_EVENT;
2791 hdr->flags = ISCSI_FLAG_CMD_FINAL;
2792 cmd->init_task_tag = RESERVED_ITT;
2793 cmd->targ_xfer_tag = 0xFFFFFFFF;
2794 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2795 cmd->stat_sn = conn->stat_sn++;
2796 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2797 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2798 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2799 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
2800 hdr->param1 = cpu_to_be16(cmd->logout_cid);
2801 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2802 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2803
2804 pr_debug("Sending Connection Dropped Async Message StatSN:"
2805 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2806 cmd->logout_cid, conn->cid);
2807
2808 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2809 }
2810
2811 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn)
2812 {
2813 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2814 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2815 wait_for_completion_interruptible_timeout(
2816 &conn->tx_half_close_comp,
2817 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
2818 }
2819 }
2820
2821 void
2822 iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
2823 struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
2824 bool set_statsn)
2825 {
2826 hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
2827 hdr->flags = datain->flags;
2828 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2829 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2830 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2831 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2832 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2833 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2834 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2835 }
2836 }
2837 hton24(hdr->dlength, datain->length);
2838 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2839 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2840 (struct scsi_lun *)&hdr->lun);
2841 else
2842 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2843
2844 hdr->itt = cmd->init_task_tag;
2845
2846 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2847 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2848 else
2849 hdr->ttt = cpu_to_be32(0xFFFFFFFF);
2850 if (set_statsn)
2851 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2852 else
2853 hdr->statsn = cpu_to_be32(0xFFFFFFFF);
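	/*
	 * Editor's note: when no status is piggybacked on this Data-In PDU the
	 * StatSN field is filled with the reserved value 0xFFFFFFFF and is not
	 * meaningful to the initiator.
	 */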
2854
2855 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2856 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2857 hdr->datasn = cpu_to_be32(datain->data_sn);
2858 hdr->offset = cpu_to_be32(datain->offset);
2859
2860 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2861 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2862 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2863 ntohl(hdr->offset), datain->length, conn->cid);
2864 }
2865 EXPORT_SYMBOL(iscsit_build_datain_pdu);
2866
2867 static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2868 {
2869 struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
2870 struct iscsi_datain datain;
2871 struct iscsi_datain_req *dr;
2872 int eodr = 0, ret;
2873 bool set_statsn = false;
2874
2875 memset(&datain, 0, sizeof(struct iscsi_datain));
2876 dr = iscsit_get_datain_values(cmd, &datain);
2877 if (!dr) {
2878 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
2879 cmd->init_task_tag);
2880 return -1;
2881 }
2882 /*
2883 * Be paranoid and double check the logic for now.
2884 */
2885 if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
2886 pr_err("Command ITT: 0x%08x, datain.offset: %u and"
2887 " datain.length: %u exceeds cmd->data_length: %u\n",
2888 cmd->init_task_tag, datain.offset, datain.length,
2889 cmd->se_cmd.data_length);
2890 return -1;
2891 }
2892
2893 atomic_long_add(datain.length, &conn->sess->tx_data_octets);
2894 /*
2895 	 * Special case for successful execution with both DATAIN
2896 * and Sense Data.
2897 */
2898 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
2899 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2900 datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
2901 else {
2902 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
2903 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
2904 iscsit_increment_maxcmdsn(cmd, conn->sess);
2905 cmd->stat_sn = conn->stat_sn++;
2906 set_statsn = true;
2907 } else if (dr->dr_complete ==
2908 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
2909 set_statsn = true;
2910 }
2911
2912 iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
2913
2914 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
2915 if (ret < 0)
2916 return ret;
2917
2918 if (dr->dr_complete) {
2919 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2920 2 : 1;
2921 iscsit_free_datain_req(cmd, dr);
2922 }
2923
2924 return eodr;
2925 }
2926
2927 int
2928 iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
2929 struct iscsi_logout_rsp *hdr)
2930 {
2931 struct iscsit_conn *logout_conn = NULL;
2932 struct iscsi_conn_recovery *cr = NULL;
2933 struct iscsit_session *sess = conn->sess;
2934 /*
2935 * The actual shutting down of Sessions and/or Connections
2936 * for CLOSESESSION and CLOSECONNECTION Logout Requests
2937 	 * is done in iscsit_logout_post_handler().
2938 */
2939 switch (cmd->logout_reason) {
2940 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
2941 pr_debug("iSCSI session logout successful, setting"
2942 " logout response to ISCSI_LOGOUT_SUCCESS.\n");
2943 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2944 break;
2945 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
2946 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
2947 break;
2948 /*
2949 * For CLOSECONNECTION logout requests carrying
2950 * a matching logout CID -> local CID, the reference
2951 * for the local CID will have been incremented in
2952 		 * iscsit_logout_closeconnection().
2953 *
2954 * For CLOSECONNECTION logout requests carrying
2955 * a different CID than the connection it arrived
2956 * on, the connection responding to cmd->logout_cid
2957 * is stopped in iscsit_logout_post_handler_diffcid().
2958 */
2959
2960 pr_debug("iSCSI CID: %hu logout on CID: %hu"
2961 " successful.\n", cmd->logout_cid, conn->cid);
2962 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2963 break;
2964 case ISCSI_LOGOUT_REASON_RECOVERY:
2965 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
2966 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
2967 break;
2968 /*
2969 * If the connection is still active from our point of view
2970 * force connection recovery to occur.
2971 */
2972 logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
2973 cmd->logout_cid);
2974 if (logout_conn) {
2975 iscsit_connection_reinstatement_rcfr(logout_conn);
2976 iscsit_dec_conn_usage_count(logout_conn);
2977 }
2978
2979 cr = iscsit_get_inactive_connection_recovery_entry(
2980 conn->sess, cmd->logout_cid);
2981 if (!cr) {
2982 pr_err("Unable to locate CID: %hu for"
2983 " REMOVECONNFORRECOVERY Logout Request.\n",
2984 cmd->logout_cid);
2985 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2986 break;
2987 }
2988
2989 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
2990
2991 pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
2992 " for recovery for CID: %hu on CID: %hu successful.\n",
2993 cmd->logout_cid, conn->cid);
2994 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2995 break;
2996 default:
2997 pr_err("Unknown cmd->logout_reason: 0x%02x\n",
2998 cmd->logout_reason);
2999 return -1;
3000 }
3001
3002 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
3003 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3004 hdr->response = cmd->logout_response;
3005 hdr->itt = cmd->init_task_tag;
3006 cmd->stat_sn = conn->stat_sn++;
3007 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3008
3009 iscsit_increment_maxcmdsn(cmd, conn->sess);
3010 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3011 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3012
3013 pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
3014 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
3015 cmd->init_task_tag, cmd->stat_sn, hdr->response,
3016 cmd->logout_cid, conn->cid);
3017
3018 return 0;
3019 }
3020 EXPORT_SYMBOL(iscsit_build_logout_rsp);
3021
3022 static int
3023 iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3024 {
3025 int rc;
3026
3027 rc = iscsit_build_logout_rsp(cmd, conn,
3028 (struct iscsi_logout_rsp *)&cmd->pdu[0]);
3029 if (rc < 0)
3030 return rc;
3031
3032 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3033 }
3034
3035 void
3036 iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3037 struct iscsi_nopin *hdr, bool nopout_response)
3038 {
3039 hdr->opcode = ISCSI_OP_NOOP_IN;
3040 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3041 hton24(hdr->dlength, cmd->buf_ptr_size);
3042 if (nopout_response)
3043 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
3044 hdr->itt = cmd->init_task_tag;
3045 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3046 cmd->stat_sn = (nopout_response) ? conn->stat_sn++ :
3047 conn->stat_sn;
3048 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3049
3050 if (nopout_response)
3051 iscsit_increment_maxcmdsn(cmd, conn->sess);
3052
3053 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3054 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3055
3056 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
3057 " StatSN: 0x%08x, Length %u\n", (nopout_response) ?
3058 "Solicited" : "Unsolicited", cmd->init_task_tag,
3059 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
3060 }
3061 EXPORT_SYMBOL(iscsit_build_nopin_rsp);
3062
3063 /*
3064 * Unsolicited NOPIN, either requesting a response or not.
3065 */
3066 static int iscsit_send_unsolicited_nopin(
3067 struct iscsit_cmd *cmd,
3068 struct iscsit_conn *conn,
3069 int want_response)
3070 {
3071 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3072 int ret;
3073
3074 iscsit_build_nopin_rsp(cmd, conn, hdr, false);
3075
3076 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
3077 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
3078
3079 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3080 if (ret < 0)
3081 return ret;
3082
3083 spin_lock_bh(&cmd->istate_lock);
3084 cmd->i_state = want_response ?
3085 ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
3086 spin_unlock_bh(&cmd->istate_lock);
3087
3088 return 0;
3089 }
3090
3091 static int
3092 iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3093 {
3094 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3095
3096 iscsit_build_nopin_rsp(cmd, conn, hdr, true);
3097
3098 /*
3099 * NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr.
3100 * NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size.
3101 */
3102 pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
3103
3104 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3105 cmd->buf_ptr,
3106 cmd->buf_ptr_size);
3107 }
3108
3109 static int iscsit_send_r2t(
3110 struct iscsit_cmd *cmd,
3111 struct iscsit_conn *conn)
3112 {
3113 struct iscsi_r2t *r2t;
3114 struct iscsi_r2t_rsp *hdr;
3115 int ret;
3116
3117 r2t = iscsit_get_r2t_from_list(cmd);
3118 if (!r2t)
3119 return -1;
3120
3121 hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
3122 memset(hdr, 0, ISCSI_HDR_LEN);
3123 hdr->opcode = ISCSI_OP_R2T;
3124 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3125 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
3126 (struct scsi_lun *)&hdr->lun);
3127 hdr->itt = cmd->init_task_tag;
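	/*
	 * Editor's note: the Target Transfer Tag either comes from the
	 * transport (e.g. an offload driver supplying its own tags through
	 * iscsit_get_r2t_ttt) or is generated per session below; the initiator
	 * echoes it back in the Data-Out PDUs answering this R2T.
	 */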
3128 if (conn->conn_transport->iscsit_get_r2t_ttt)
3129 conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
3130 else
3131 r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
3132 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
3133 hdr->statsn = cpu_to_be32(conn->stat_sn);
3134 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3135 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3136 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
3137 hdr->data_offset = cpu_to_be32(r2t->offset);
3138 hdr->data_length = cpu_to_be32(r2t->xfer_len);
3139
3140 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
3141 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
3142 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
3143 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
3144 r2t->offset, r2t->xfer_len, conn->cid);
3145
3146 spin_lock_bh(&cmd->r2t_lock);
3147 r2t->sent_r2t = 1;
3148 spin_unlock_bh(&cmd->r2t_lock);
3149
3150 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3151 if (ret < 0) {
3152 return ret;
3153 }
3154
3155 spin_lock_bh(&cmd->dataout_timeout_lock);
3156 iscsit_start_dataout_timer(cmd, conn);
3157 spin_unlock_bh(&cmd->dataout_timeout_lock);
3158
3159 return 0;
3160 }
3161
3162 /*
3163 * @recovery: If called from iscsi_task_reassign_complete_write() for
3164 * connection recovery.
3165 */
3166 int iscsit_build_r2ts_for_cmd(
3167 struct iscsit_conn *conn,
3168 struct iscsit_cmd *cmd,
3169 bool recovery)
3170 {
3171 int first_r2t = 1;
3172 u32 offset = 0, xfer_len = 0;
3173
3174 spin_lock_bh(&cmd->r2t_lock);
3175 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
3176 spin_unlock_bh(&cmd->r2t_lock);
3177 return 0;
3178 }
3179
3180 if (conn->sess->sess_ops->DataSequenceInOrder &&
3181 !recovery)
3182 cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
3183
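	/*
	 * Editor's note (illustration, DataSequenceInOrder case): the write is
	 * carved into MaxBurstLength-sized R2Ts starting at r2t_offset.  For
	 * example, data_length = 192k with MaxBurstLength = 64k yields R2Ts at
	 * offsets 0, 64k and 128k, bounded by MaxOutstandingR2T per pass
	 * through the loop below.
	 */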
3184 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
3185 if (conn->sess->sess_ops->DataSequenceInOrder) {
3186 offset = cmd->r2t_offset;
3187
3188 if (first_r2t && recovery) {
3189 int new_data_end = offset +
3190 conn->sess->sess_ops->MaxBurstLength -
3191 cmd->next_burst_len;
3192
3193 if (new_data_end > cmd->se_cmd.data_length)
3194 xfer_len = cmd->se_cmd.data_length - offset;
3195 else
3196 xfer_len =
3197 conn->sess->sess_ops->MaxBurstLength -
3198 cmd->next_burst_len;
3199 } else {
3200 int new_data_end = offset +
3201 conn->sess->sess_ops->MaxBurstLength;
3202
3203 if (new_data_end > cmd->se_cmd.data_length)
3204 xfer_len = cmd->se_cmd.data_length - offset;
3205 else
3206 xfer_len = conn->sess->sess_ops->MaxBurstLength;
3207 }
3208
3209 if ((s32)xfer_len < 0) {
3210 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3211 break;
3212 }
3213
3214 cmd->r2t_offset += xfer_len;
3215
3216 if (cmd->r2t_offset == cmd->se_cmd.data_length)
3217 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3218 } else {
3219 struct iscsi_seq *seq;
3220
3221 seq = iscsit_get_seq_holder_for_r2t(cmd);
3222 if (!seq) {
3223 spin_unlock_bh(&cmd->r2t_lock);
3224 return -1;
3225 }
3226
3227 offset = seq->offset;
3228 xfer_len = seq->xfer_len;
3229
3230 if (cmd->seq_send_order == cmd->seq_count)
3231 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3232 }
3233 cmd->outstanding_r2ts++;
3234 first_r2t = 0;
3235
3236 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
3237 spin_unlock_bh(&cmd->r2t_lock);
3238 return -1;
3239 }
3240
3241 if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
3242 break;
3243 }
3244 spin_unlock_bh(&cmd->r2t_lock);
3245
3246 return 0;
3247 }
3248 EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
3249
3250 void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3251 bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
3252 {
3253 if (inc_stat_sn)
3254 cmd->stat_sn = conn->stat_sn++;
3255
3256 atomic_long_inc(&conn->sess->rsp_pdus);
3257
3258 memset(hdr, 0, ISCSI_HDR_LEN);
3259 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
3260 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3261 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3262 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3263 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3264 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3265 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3266 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3267 }
3268 hdr->response = cmd->iscsi_response;
3269 hdr->cmd_status = cmd->se_cmd.scsi_status;
3270 hdr->itt = cmd->init_task_tag;
3271 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3272
3273 iscsit_increment_maxcmdsn(cmd, conn->sess);
3274 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3275 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3276
3277 pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3278 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3279 		cmd->init_task_tag, cmd->stat_sn, cmd->iscsi_response,
3280 cmd->se_cmd.scsi_status, conn->cid);
3281 }
3282 EXPORT_SYMBOL(iscsit_build_rsp_pdu);
3283
3284 static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3285 {
3286 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
3287 bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
3288 void *data_buf = NULL;
3289 u32 padding = 0, data_buf_len = 0;
3290
3291 iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
3292
3293 /*
3294 * Attach SENSE DATA payload to iSCSI Response PDU
3295 */
3296 if (cmd->se_cmd.sense_buffer &&
3297 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3298 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
3299 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
3300 cmd->se_cmd.scsi_sense_length += sizeof (__be16);
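		/*
		 * Editor's note: iSCSI sense data is framed as a 2-byte
		 * SenseLength followed by the SCSI sense bytes, padded to a
		 * 4-byte boundary.  dlength below covers length + sense, while
		 * the transmitted data_buf_len also includes the pad.
		 */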
3301
3302 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
3303 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3304 data_buf = cmd->sense_buffer;
3305 data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
3306
3307 if (padding) {
3308 memset(cmd->sense_buffer +
3309 cmd->se_cmd.scsi_sense_length, 0, padding);
3310 pr_debug("Adding %u bytes of padding to"
3311 " SENSE.\n", padding);
3312 }
3313
3314 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
3315 " Response PDU\n",
3316 cmd->se_cmd.scsi_sense_length);
3317 }
3318
3319 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
3320 data_buf_len);
3321 }
3322
3323 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3324 {
3325 switch (se_tmr->response) {
3326 case TMR_FUNCTION_COMPLETE:
3327 return ISCSI_TMF_RSP_COMPLETE;
3328 case TMR_TASK_DOES_NOT_EXIST:
3329 return ISCSI_TMF_RSP_NO_TASK;
3330 case TMR_LUN_DOES_NOT_EXIST:
3331 return ISCSI_TMF_RSP_NO_LUN;
3332 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3333 return ISCSI_TMF_RSP_NOT_SUPPORTED;
3334 case TMR_FUNCTION_REJECTED:
3335 default:
3336 return ISCSI_TMF_RSP_REJECTED;
3337 }
3338 }
3339
3340 void
3341 iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3342 struct iscsi_tm_rsp *hdr)
3343 {
3344 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3345
3346 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3347 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3348 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
3349 hdr->itt = cmd->init_task_tag;
3350 cmd->stat_sn = conn->stat_sn++;
3351 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3352
3353 iscsit_increment_maxcmdsn(cmd, conn->sess);
3354 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3355 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3356
3357 pr_debug("Built Task Management Response ITT: 0x%08x,"
3358 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3359 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3360 }
3361 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
3362
3363 static int
3364 iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3365 {
3366 struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3367
3368 iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3369
3370 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3371 }
3372
3373 #define SENDTARGETS_BUF_LIMIT 32768U
3374
3375 static int
3376 iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
3377 enum iscsit_transport_type network_transport,
3378 int skip_bytes, bool *completed)
3379 {
3380 char *payload = NULL;
3381 struct iscsit_conn *conn = cmd->conn;
3382 struct iscsi_portal_group *tpg;
3383 struct iscsi_tiqn *tiqn;
3384 struct iscsi_tpg_np *tpg_np;
3385 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3386 int target_name_printed;
3387 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3388 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
3389 bool active;
3390
3391 buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
3392 SENDTARGETS_BUF_LIMIT);
3393
3394 payload = kzalloc(buffer_len, GFP_KERNEL);
3395 if (!payload)
3396 return -ENOMEM;
3397
3398 /*
3399 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
3400 * explicit case..
3401 */
3402 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
3403 text_ptr = strchr(text_in, '=');
3404 if (!text_ptr) {
3405 pr_err("Unable to locate '=' string in text_in:"
3406 " %s\n", text_in);
3407 kfree(payload);
3408 return -EINVAL;
3409 }
3410 /*
3411 * Skip over '=' character..
3412 */
3413 text_ptr += 1;
3414 }
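	/*
	 * Editor's note: skip_bytes is the number of payload bytes already
	 * returned in earlier Text Responses (the caller passes
	 * cmd->read_data_done).  The walk below regenerates the same
	 * TargetName/TargetAddress list and discards the first skip_bytes of
	 * it, so a continued SendTargets exchange resumes exactly where the
	 * previous response ended.
	 */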
3415
3416 spin_lock(&tiqn_lock);
3417 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3418 if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
3419 strcmp(tiqn->tiqn, text_ptr)) {
3420 continue;
3421 }
3422
3423 target_name_printed = 0;
3424
3425 spin_lock(&tiqn->tiqn_tpg_lock);
3426 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3427
3428 /* If demo_mode_discovery=0 and generate_node_acls=0
3429 			 * (demo mode disabled) do not return
3430 * TargetName+TargetAddress unless a NodeACL exists.
3431 */
3432
3433 if ((tpg->tpg_attrib.generate_node_acls == 0) &&
3434 (tpg->tpg_attrib.demo_mode_discovery == 0) &&
3435 (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
3436 cmd->conn->sess->sess_ops->InitiatorName))) {
3437 continue;
3438 }
3439
3440 spin_lock(&tpg->tpg_state_lock);
3441 active = (tpg->tpg_state == TPG_STATE_ACTIVE);
3442 spin_unlock(&tpg->tpg_state_lock);
3443
3444 if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
3445 continue;
3446
3447 spin_lock(&tpg->tpg_np_lock);
3448 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3449 tpg_np_list) {
3450 struct iscsi_np *np = tpg_np->tpg_np;
3451 struct sockaddr_storage *sockaddr;
3452
3453 if (np->np_network_transport != network_transport)
3454 continue;
3455
3456 if (!target_name_printed) {
3457 len = sprintf(buf, "TargetName=%s",
3458 tiqn->tiqn);
3459 len += 1;
3460
3461 if ((len + payload_len) > buffer_len) {
3462 spin_unlock(&tpg->tpg_np_lock);
3463 spin_unlock(&tiqn->tiqn_tpg_lock);
3464 end_of_buf = 1;
3465 goto eob;
3466 }
3467
3468 if (skip_bytes && len <= skip_bytes) {
3469 skip_bytes -= len;
3470 } else {
3471 memcpy(payload + payload_len, buf, len);
3472 payload_len += len;
3473 target_name_printed = 1;
3474 if (len > skip_bytes)
3475 skip_bytes = 0;
3476 }
3477 }
3478
3479 if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr))
3480 sockaddr = &conn->local_sockaddr;
3481 else
3482 sockaddr = &np->np_sockaddr;
3483
3484 len = sprintf(buf, "TargetAddress="
3485 "%pISpc,%hu",
3486 sockaddr,
3487 tpg->tpgt);
3488 len += 1;
3489
3490 if ((len + payload_len) > buffer_len) {
3491 spin_unlock(&tpg->tpg_np_lock);
3492 spin_unlock(&tiqn->tiqn_tpg_lock);
3493 end_of_buf = 1;
3494 goto eob;
3495 }
3496
3497 if (skip_bytes && len <= skip_bytes) {
3498 skip_bytes -= len;
3499 } else {
3500 memcpy(payload + payload_len, buf, len);
3501 payload_len += len;
3502 if (len > skip_bytes)
3503 skip_bytes = 0;
3504 }
3505 }
3506 spin_unlock(&tpg->tpg_np_lock);
3507 }
3508 spin_unlock(&tiqn->tiqn_tpg_lock);
3509 eob:
3510 if (end_of_buf) {
3511 *completed = false;
3512 break;
3513 }
3514
3515 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
3516 break;
3517 }
3518 spin_unlock(&tiqn_lock);
3519
3520 cmd->buf_ptr = payload;
3521
3522 return payload_len;
3523 }
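/*
 * Note on multi-PDU SendTargets responses (based on the code above and in
 * iscsit_build_text_rsp() below): when the TargetName/TargetAddress list
 * does not fit in one MaxRecvDataSegmentLength-sized buffer, *completed is
 * left false and only the part that fits is returned.  The caller then sets
 * ISCSI_FLAG_TEXT_CONTINUE and advances cmd->read_data_done, which comes
 * back in here as skip_bytes so the keys already sent are skipped on the
 * next pass instead of being re-sent.
 */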
3524
3525 int
3526 iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3527 struct iscsi_text_rsp *hdr,
3528 enum iscsit_transport_type network_transport)
3529 {
3530 int text_length, padding;
3531 bool completed = true;
3532
3533 text_length = iscsit_build_sendtargets_response(cmd, network_transport,
3534 cmd->read_data_done,
3535 &completed);
3536 if (text_length < 0)
3537 return text_length;
3538
3539 if (completed) {
3540 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3541 } else {
3542 hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
3543 cmd->read_data_done += text_length;
3544 if (cmd->targ_xfer_tag == 0xFFFFFFFF)
3545 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
3546 }
3547 hdr->opcode = ISCSI_OP_TEXT_RSP;
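/*
 * ((-text_length) & 3) below rounds the Text payload up to a 4-byte
 * boundary, e.g. text_length = 5 -> padding = 3, text_length = 8 -> padding = 0.
 */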
3548 padding = ((-text_length) & 3);
3549 hton24(hdr->dlength, text_length);
3550 hdr->itt = cmd->init_task_tag;
3551 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3552 cmd->stat_sn = conn->stat_sn++;
3553 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3554
3555 iscsit_increment_maxcmdsn(cmd, conn->sess);
3556 /*
3557 * Reset maxcmdsn_inc in multi-part text payload exchanges to
3558 * correctly increment MaxCmdSN for each response answering a
3559 	 * non-immediate text request with a valid CmdSN.
3560 */
3561 cmd->maxcmdsn_inc = 0;
3562 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3563 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3564
3565 pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
3566 " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
3567 cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
3568 !!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
3569 !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
3570
3571 return text_length + padding;
3572 }
3573 EXPORT_SYMBOL(iscsit_build_text_rsp);
3574
3575 static int iscsit_send_text_rsp(
3576 struct iscsit_cmd *cmd,
3577 struct iscsit_conn *conn)
3578 {
3579 struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
3580 int text_length;
3581
3582 text_length = iscsit_build_text_rsp(cmd, conn, hdr,
3583 conn->conn_transport->transport_type);
3584 if (text_length < 0)
3585 return text_length;
3586
3587 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3588 cmd->buf_ptr,
3589 text_length);
3590 }
3591
3592 void
3593 iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3594 struct iscsi_reject *hdr)
3595 {
3596 hdr->opcode = ISCSI_OP_REJECT;
3597 hdr->reason = cmd->reject_reason;
3598 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3599 hton24(hdr->dlength, ISCSI_HDR_LEN);
3600 hdr->ffffffff = cpu_to_be32(0xffffffff);
3601 cmd->stat_sn = conn->stat_sn++;
3602 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3603 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3604 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3605
3606 }
3607 EXPORT_SYMBOL(iscsit_build_reject);
3608
3609 static int iscsit_send_reject(
3610 struct iscsit_cmd *cmd,
3611 struct iscsit_conn *conn)
3612 {
3613 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3614
3615 iscsit_build_reject(cmd, conn, hdr);
3616
3617 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3618 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3619
3620 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3621 cmd->buf_ptr,
3622 ISCSI_HDR_LEN);
3623 }
3624
3625 void iscsit_thread_get_cpumask(struct iscsit_conn *conn)
3626 {
3627 int ord, cpu;
3628 cpumask_var_t conn_allowed_cpumask;
3629
3630 /*
3631 * bitmap_id is assigned from iscsit_global->ts_bitmap from
3632 * within iscsit_start_kthreads()
3633 *
3634 	 * Here we use bitmap_id to determine which CPU this
3635 * iSCSI connection's RX/TX threads will be scheduled to
3636 * execute upon.
3637 */
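/*
 * Example: with bitmap_id == 5 and four usable CPUs, ord == 1 and the
 * second CPU in the iterated mask is picked, so connections are spread
 * round-robin across the allowed online CPUs.
 */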
3638 if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
3639 ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
3640 for_each_online_cpu(cpu) {
3641 if (ord-- == 0) {
3642 cpumask_set_cpu(cpu, conn->conn_cpumask);
3643 return;
3644 }
3645 }
3646 } else {
3647 cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
3648 cpu_online_mask);
3649
3650 cpumask_clear(conn->conn_cpumask);
3651 ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
3652 for_each_cpu(cpu, conn_allowed_cpumask) {
3653 if (ord-- == 0) {
3654 cpumask_set_cpu(cpu, conn->conn_cpumask);
3655 free_cpumask_var(conn_allowed_cpumask);
3656 return;
3657 }
3658 }
3659 free_cpumask_var(conn_allowed_cpumask);
3660 }
3661 /*
3662 * This should never be reached..
3663 */
3664 dump_stack();
3665 cpumask_setall(conn->conn_cpumask);
3666 }
3667
3668 static void iscsit_thread_reschedule(struct iscsit_conn *conn)
3669 {
3670 /*
3671 	 * If iscsit_global->allowed_cpumask has been modified, recompute this
3672 	 * connection's RX/TX thread cpumask and update conn->allowed_cpumask.
3673 */
3674 if (!cpumask_equal(iscsit_global->allowed_cpumask,
3675 conn->allowed_cpumask)) {
3676 iscsit_thread_get_cpumask(conn);
3677 conn->conn_tx_reset_cpumask = 1;
3678 conn->conn_rx_reset_cpumask = 1;
3679 cpumask_copy(conn->allowed_cpumask,
3680 iscsit_global->allowed_cpumask);
3681 }
3682 }
3683
3684 void iscsit_thread_check_cpumask(
3685 struct iscsit_conn *conn,
3686 struct task_struct *p,
3687 int mode)
3688 {
3689 /*
3690 	 * The TX and RX threads may call iscsit_thread_check_cpumask()
3691 * at the same time. The RX thread might be faster and return from
3692 * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
3693 * Then the TX thread sets it back to 1.
3694 * The next time the RX thread loops, it sees conn_rx_reset_cpumask
3695 * set to 1 and calls set_cpus_allowed_ptr() again and set it to 0.
3696 */
3697 iscsit_thread_reschedule(conn);
3698
3699 /*
3700 * mode == 1 signals iscsi_target_tx_thread() usage.
3701 * mode == 0 signals iscsi_target_rx_thread() usage.
3702 */
3703 if (mode == 1) {
3704 if (!conn->conn_tx_reset_cpumask)
3705 return;
3706 } else {
3707 if (!conn->conn_rx_reset_cpumask)
3708 return;
3709 }
3710
3711 /*
3712 * Update the CPU mask for this single kthread so that
3713 * both TX and RX kthreads are scheduled to run on the
3714 * same CPU.
3715 */
3716 set_cpus_allowed_ptr(p, conn->conn_cpumask);
3717 if (mode == 1)
3718 conn->conn_tx_reset_cpumask = 0;
3719 else
3720 conn->conn_rx_reset_cpumask = 0;
3721 }
3722 EXPORT_SYMBOL(iscsit_thread_check_cpumask);
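/*
 * Illustrative usage sketch (this simply mirrors iscsi_target_tx_thread()
 * and iscsit_get_rx_pdu() below): each per-connection kthread calls
 *
 *	iscsit_thread_check_cpumask(conn, current, 1);	(TX thread, mode 1)
 *	iscsit_thread_check_cpumask(conn, current, 0);	(RX thread, mode 0)
 *
 * at the top of every loop iteration, so a change to
 * iscsit_global->allowed_cpumask takes effect on the next iteration without
 * restarting the threads.
 */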
3723
3724 int
3725 iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
3726 {
3727 int ret;
3728
3729 switch (state) {
3730 case ISTATE_SEND_R2T:
3731 ret = iscsit_send_r2t(cmd, conn);
3732 if (ret < 0)
3733 goto err;
3734 break;
3735 case ISTATE_REMOVE:
3736 spin_lock_bh(&conn->cmd_lock);
3737 list_del_init(&cmd->i_conn_node);
3738 spin_unlock_bh(&conn->cmd_lock);
3739
3740 iscsit_free_cmd(cmd, false);
3741 break;
3742 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3743 iscsit_mod_nopin_response_timer(conn);
3744 ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
3745 if (ret < 0)
3746 goto err;
3747 break;
3748 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3749 ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
3750 if (ret < 0)
3751 goto err;
3752 break;
3753 default:
3754 pr_err("Unknown Opcode: 0x%02x ITT:"
3755 " 0x%08x, i_state: %d on CID: %hu\n",
3756 cmd->iscsi_opcode, cmd->init_task_tag, state,
3757 conn->cid);
3758 goto err;
3759 }
3760
3761 return 0;
3762
3763 err:
3764 return -1;
3765 }
3766 EXPORT_SYMBOL(iscsit_immediate_queue);
3767
3768 static int
3769 iscsit_handle_immediate_queue(struct iscsit_conn *conn)
3770 {
3771 struct iscsit_transport *t = conn->conn_transport;
3772 struct iscsi_queue_req *qr;
3773 struct iscsit_cmd *cmd;
3774 u8 state;
3775 int ret;
3776
3777 while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
3778 atomic_set(&conn->check_immediate_queue, 0);
3779 cmd = qr->cmd;
3780 state = qr->state;
3781 kmem_cache_free(lio_qr_cache, qr);
3782
3783 ret = t->iscsit_immediate_queue(conn, cmd, state);
3784 if (ret < 0)
3785 return ret;
3786 }
3787
3788 return 0;
3789 }
3790
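/*
 * Return contract, as consumed by iscsit_handle_response_queue() and the
 * TX thread below: 1 means the immediate queue should be serviced before
 * sending further responses, 0 means the response was sent normally,
 * -ECONNRESET means the logout post-handler has started tearing the
 * connection down, and any other negative value is a transport failure.
 */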
3791 int
3792 iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
3793 {
3794 int ret;
3795
3796 check_rsp_state:
3797 switch (state) {
3798 case ISTATE_SEND_DATAIN:
3799 ret = iscsit_send_datain(cmd, conn);
3800 if (ret < 0)
3801 goto err;
3802 else if (!ret)
3803 /* more drs */
3804 goto check_rsp_state;
3805 else if (ret == 1) {
3806 /* all done */
3807 spin_lock_bh(&cmd->istate_lock);
3808 cmd->i_state = ISTATE_SENT_STATUS;
3809 spin_unlock_bh(&cmd->istate_lock);
3810
3811 if (atomic_read(&conn->check_immediate_queue))
3812 return 1;
3813
3814 return 0;
3815 } else if (ret == 2) {
3816 /* Still must send status,
3817 SCF_TRANSPORT_TASK_SENSE was set */
3818 spin_lock_bh(&cmd->istate_lock);
3819 cmd->i_state = ISTATE_SEND_STATUS;
3820 spin_unlock_bh(&cmd->istate_lock);
3821 state = ISTATE_SEND_STATUS;
3822 goto check_rsp_state;
3823 }
3824
3825 break;
3826 case ISTATE_SEND_STATUS:
3827 case ISTATE_SEND_STATUS_RECOVERY:
3828 ret = iscsit_send_response(cmd, conn);
3829 break;
3830 case ISTATE_SEND_LOGOUTRSP:
3831 ret = iscsit_send_logout(cmd, conn);
3832 break;
3833 case ISTATE_SEND_ASYNCMSG:
3834 ret = iscsit_send_conn_drop_async_message(
3835 cmd, conn);
3836 break;
3837 case ISTATE_SEND_NOPIN:
3838 ret = iscsit_send_nopin(cmd, conn);
3839 break;
3840 case ISTATE_SEND_REJECT:
3841 ret = iscsit_send_reject(cmd, conn);
3842 break;
3843 case ISTATE_SEND_TASKMGTRSP:
3844 ret = iscsit_send_task_mgt_rsp(cmd, conn);
3845 if (ret != 0)
3846 break;
3847 ret = iscsit_tmr_post_handler(cmd, conn);
3848 if (ret != 0)
3849 iscsit_fall_back_to_erl0(conn->sess);
3850 break;
3851 case ISTATE_SEND_TEXTRSP:
3852 ret = iscsit_send_text_rsp(cmd, conn);
3853 break;
3854 default:
3855 pr_err("Unknown Opcode: 0x%02x ITT:"
3856 " 0x%08x, i_state: %d on CID: %hu\n",
3857 cmd->iscsi_opcode, cmd->init_task_tag,
3858 state, conn->cid);
3859 goto err;
3860 }
3861 if (ret < 0)
3862 goto err;
3863
3864 switch (state) {
3865 case ISTATE_SEND_LOGOUTRSP:
3866 if (!iscsit_logout_post_handler(cmd, conn))
3867 return -ECONNRESET;
3868 fallthrough;
3869 case ISTATE_SEND_STATUS:
3870 case ISTATE_SEND_ASYNCMSG:
3871 case ISTATE_SEND_NOPIN:
3872 case ISTATE_SEND_STATUS_RECOVERY:
3873 case ISTATE_SEND_TEXTRSP:
3874 case ISTATE_SEND_TASKMGTRSP:
3875 case ISTATE_SEND_REJECT:
3876 spin_lock_bh(&cmd->istate_lock);
3877 cmd->i_state = ISTATE_SENT_STATUS;
3878 spin_unlock_bh(&cmd->istate_lock);
3879 break;
3880 default:
3881 pr_err("Unknown Opcode: 0x%02x ITT:"
3882 " 0x%08x, i_state: %d on CID: %hu\n",
3883 cmd->iscsi_opcode, cmd->init_task_tag,
3884 cmd->i_state, conn->cid);
3885 goto err;
3886 }
3887
3888 if (atomic_read(&conn->check_immediate_queue))
3889 return 1;
3890
3891 return 0;
3892
3893 err:
3894 return -1;
3895 }
3896 EXPORT_SYMBOL(iscsit_response_queue);
3897
3898 static int iscsit_handle_response_queue(struct iscsit_conn *conn)
3899 {
3900 struct iscsit_transport *t = conn->conn_transport;
3901 struct iscsi_queue_req *qr;
3902 struct iscsit_cmd *cmd;
3903 u8 state;
3904 int ret;
3905
3906 while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
3907 cmd = qr->cmd;
3908 state = qr->state;
3909 kmem_cache_free(lio_qr_cache, qr);
3910
3911 ret = t->iscsit_response_queue(conn, cmd, state);
3912 if (ret == 1 || ret < 0)
3913 return ret;
3914 }
3915
3916 return 0;
3917 }
3918
3919 int iscsi_target_tx_thread(void *arg)
3920 {
3921 int ret = 0;
3922 struct iscsit_conn *conn = arg;
3923 bool conn_freed = false;
3924
3925 /*
3926 * Allow ourselves to be interrupted by SIGINT so that a
3927 * connection recovery / failure event can be triggered externally.
3928 */
3929 allow_signal(SIGINT);
3930
3931 while (!kthread_should_stop()) {
3932 /*
3933 * Ensure that both TX and RX per connection kthreads
3934 * are scheduled to run on the same CPU.
3935 */
3936 iscsit_thread_check_cpumask(conn, current, 1);
3937
3938 wait_event_interruptible(conn->queues_wq,
3939 !iscsit_conn_all_queues_empty(conn));
3940
3941 if (signal_pending(current))
3942 goto transport_err;
3943
3944 get_immediate:
3945 ret = iscsit_handle_immediate_queue(conn);
3946 if (ret < 0)
3947 goto transport_err;
3948
3949 ret = iscsit_handle_response_queue(conn);
3950 if (ret == 1) {
3951 goto get_immediate;
3952 } else if (ret == -ECONNRESET) {
3953 conn_freed = true;
3954 goto out;
3955 } else if (ret < 0) {
3956 goto transport_err;
3957 }
3958 }
3959
3960 transport_err:
3961 /*
3962 * Avoid the normal connection failure code-path if this connection
3963 * is still within LOGIN mode, and iscsi_np process context is
3964 * responsible for cleaning up the early connection failure.
3965 */
3966 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
3967 iscsit_take_action_for_connection_exit(conn, &conn_freed);
3968 out:
3969 if (!conn_freed) {
3970 while (!kthread_should_stop()) {
3971 msleep(100);
3972 }
3973 }
3974 return 0;
3975 }
3976
3977 static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf)
3978 {
3979 struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
3980 struct iscsit_cmd *cmd;
3981 int ret = 0;
3982
3983 switch (hdr->opcode & ISCSI_OPCODE_MASK) {
3984 case ISCSI_OP_SCSI_CMD:
3985 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3986 if (!cmd)
3987 goto reject;
3988
3989 ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
3990 break;
3991 case ISCSI_OP_SCSI_DATA_OUT:
3992 ret = iscsit_handle_data_out(conn, buf);
3993 break;
3994 case ISCSI_OP_NOOP_OUT:
3995 cmd = NULL;
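/*
 * Only a NOP-OUT carrying the reserved TTT 0xFFFFFFFF (an
 * initiator-initiated ping) needs a new command descriptor here; a
 * NOP-OUT with any other TTT is a reply to a target NOP-IN and is
 * handled without allocating a new command.
 */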
3996 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
3997 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3998 if (!cmd)
3999 goto reject;
4000 }
4001 ret = iscsit_handle_nop_out(conn, cmd, buf);
4002 break;
4003 case ISCSI_OP_SCSI_TMFUNC:
4004 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4005 if (!cmd)
4006 goto reject;
4007
4008 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
4009 break;
4010 case ISCSI_OP_TEXT:
4011 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
4012 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
4013 if (!cmd)
4014 goto reject;
4015 } else {
4016 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4017 if (!cmd)
4018 goto reject;
4019 }
4020
4021 ret = iscsit_handle_text_cmd(conn, cmd, buf);
4022 break;
4023 case ISCSI_OP_LOGOUT:
4024 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4025 if (!cmd)
4026 goto reject;
4027
4028 ret = iscsit_handle_logout_cmd(conn, cmd, buf);
4029 if (ret > 0)
4030 wait_for_completion_timeout(&conn->conn_logout_comp,
4031 SECONDS_FOR_LOGOUT_COMP * HZ);
4032 break;
4033 case ISCSI_OP_SNACK:
4034 ret = iscsit_handle_snack(conn, buf);
4035 break;
4036 default:
4037 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
4038 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
4039 pr_err("Cannot recover from unknown"
4040 " opcode while ERL=0, closing iSCSI connection.\n");
4041 return -1;
4042 }
4043 pr_err("Unable to recover from unknown opcode while OFMarker=No,"
4044 " closing iSCSI connection.\n");
4045 ret = -1;
4046 break;
4047 }
4048
4049 return ret;
4050 reject:
4051 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
4052 }
4053
4054 static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
4055 {
4056 bool ret;
4057
4058 spin_lock_bh(&conn->state_lock);
4059 ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
4060 spin_unlock_bh(&conn->state_lock);
4061
4062 return ret;
4063 }
4064
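/*
 * Receive loop for traditional iscsi/tcp: read the 48-byte Basic Header
 * Segment, grow the buffer and read any Additional Header Segments when
 * hdr->hlength is set (hlength counts 4-byte words), then read and verify
 * the CRC32C HeaderDigest if it was negotiated, before dispatching the
 * opcode through iscsi_target_rx_opcode().
 */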
4065 static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
4066 {
4067 int ret;
4068 u8 *buffer, *tmp_buf, opcode;
4069 u32 checksum = 0, digest = 0;
4070 struct iscsi_hdr *hdr;
4071 struct kvec iov;
4072
4073 buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
4074 if (!buffer)
4075 return;
4076
4077 while (!kthread_should_stop()) {
4078 /*
4079 * Ensure that both TX and RX per connection kthreads
4080 * are scheduled to run on the same CPU.
4081 */
4082 iscsit_thread_check_cpumask(conn, current, 0);
4083
4084 memset(&iov, 0, sizeof(struct kvec));
4085
4086 iov.iov_base = buffer;
4087 iov.iov_len = ISCSI_HDR_LEN;
4088
4089 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
4090 if (ret != ISCSI_HDR_LEN) {
4091 iscsit_rx_thread_wait_for_tcp(conn);
4092 break;
4093 }
4094
4095 hdr = (struct iscsi_hdr *) buffer;
4096 if (hdr->hlength) {
4097 iov.iov_len = hdr->hlength * 4;
4098 tmp_buf = krealloc(buffer,
4099 ISCSI_HDR_LEN + iov.iov_len,
4100 GFP_KERNEL);
4101 if (!tmp_buf)
4102 break;
4103
4104 buffer = tmp_buf;
4105 iov.iov_base = &buffer[ISCSI_HDR_LEN];
4106
4107 ret = rx_data(conn, &iov, 1, iov.iov_len);
4108 if (ret != iov.iov_len) {
4109 iscsit_rx_thread_wait_for_tcp(conn);
4110 break;
4111 }
4112 }
4113
4114 if (conn->conn_ops->HeaderDigest) {
4115 iov.iov_base = &digest;
4116 iov.iov_len = ISCSI_CRC_LEN;
4117
4118 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
4119 if (ret != ISCSI_CRC_LEN) {
4120 iscsit_rx_thread_wait_for_tcp(conn);
4121 break;
4122 }
4123
4124 iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
4125 ISCSI_HDR_LEN, 0, NULL,
4126 &checksum);
4127
4128 if (digest != checksum) {
4129 pr_err("HeaderDigest CRC32C failed,"
4130 " received 0x%08x, computed 0x%08x\n",
4131 digest, checksum);
4132 /*
4133 * Set the PDU to 0xff so it will intentionally
4134 * hit default in the switch below.
4135 */
4136 memset(buffer, 0xff, ISCSI_HDR_LEN);
4137 atomic_long_inc(&conn->sess->conn_digest_errors);
4138 } else {
4139 pr_debug("Got HeaderDigest CRC32C"
4140 " 0x%08x\n", checksum);
4141 }
4142 }
4143
4144 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
4145 break;
4146
4147 opcode = buffer[0] & ISCSI_OPCODE_MASK;
4148
4149 if (conn->sess->sess_ops->SessionType &&
4150 ((!(opcode & ISCSI_OP_TEXT)) ||
4151 (!(opcode & ISCSI_OP_LOGOUT)))) {
4152 pr_err("Received illegal iSCSI Opcode: 0x%02x"
4153 " while in Discovery Session, rejecting.\n", opcode);
4154 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
4155 buffer);
4156 break;
4157 }
4158
4159 ret = iscsi_target_rx_opcode(conn, buffer);
4160 if (ret < 0)
4161 break;
4162 }
4163
4164 kfree(buffer);
4165 }
4166
4167 int iscsi_target_rx_thread(void *arg)
4168 {
4169 int rc;
4170 struct iscsit_conn *conn = arg;
4171 bool conn_freed = false;
4172
4173 /*
4174 * Allow ourselves to be interrupted by SIGINT so that a
4175 * connection recovery / failure event can be triggered externally.
4176 */
4177 allow_signal(SIGINT);
4178 /*
4179 * Wait for iscsi_post_login_handler() to complete before allowing
4180 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4181 */
4182 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4183 if (rc < 0 || iscsi_target_check_conn_state(conn))
4184 goto out;
4185
4186 if (!conn->conn_transport->iscsit_get_rx_pdu)
4187 return 0;
4188
4189 conn->conn_transport->iscsit_get_rx_pdu(conn);
4190
4191 if (!signal_pending(current))
4192 atomic_set(&conn->transport_failed, 1);
4193 iscsit_take_action_for_connection_exit(conn, &conn_freed);
4194
4195 out:
4196 if (!conn_freed) {
4197 while (!kthread_should_stop()) {
4198 msleep(100);
4199 }
4200 }
4201
4202 return 0;
4203 }
4204
4205 static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
4206 {
4207 LIST_HEAD(tmp_list);
4208 struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL;
4209 struct iscsit_session *sess = conn->sess;
4210 /*
4211 * We expect this function to only ever be called from either RX or TX
4212 * thread context via iscsit_close_connection() once the other context
4213 	 * has been reset and returned to its sleeping pre-handler state.
4214 */
4215 spin_lock_bh(&conn->cmd_lock);
4216 list_splice_init(&conn->conn_cmd_list, &tmp_list);
4217
4218 list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
4219 struct se_cmd *se_cmd = &cmd->se_cmd;
4220
4221 if (se_cmd->se_tfo != NULL) {
4222 spin_lock_irq(&se_cmd->t_state_lock);
4223 if (se_cmd->transport_state & CMD_T_ABORTED) {
4224 /*
4225 * LIO's abort path owns the cleanup for this,
4226 * so put it back on the list and let
4227 * aborted_task handle it.
4228 */
4229 list_move_tail(&cmd->i_conn_node,
4230 &conn->conn_cmd_list);
4231 } else {
4232 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
4233 }
4234 spin_unlock_irq(&se_cmd->t_state_lock);
4235 }
4236 }
4237 spin_unlock_bh(&conn->cmd_lock);
4238
4239 list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
4240 list_del_init(&cmd->i_conn_node);
4241
4242 iscsit_increment_maxcmdsn(cmd, sess);
4243 iscsit_free_cmd(cmd, true);
4244
4245 }
4246 }
4247
4248 static void iscsit_stop_timers_for_cmds(
4249 struct iscsit_conn *conn)
4250 {
4251 struct iscsit_cmd *cmd;
4252
4253 spin_lock_bh(&conn->cmd_lock);
4254 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
4255 if (cmd->data_direction == DMA_TO_DEVICE)
4256 iscsit_stop_dataout_timer(cmd);
4257 }
4258 spin_unlock_bh(&conn->cmd_lock);
4259 }
4260
4261 int iscsit_close_connection(
4262 struct iscsit_conn *conn)
4263 {
4264 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
4265 struct iscsit_session *sess = conn->sess;
4266
4267 pr_debug("Closing iSCSI connection CID %hu on SID:"
4268 " %u\n", conn->cid, sess->sid);
4269 /*
4270 * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
4271 * case just in case the RX Thread in iscsi_target_rx_opcode() is
4272 * sleeping and the logout response never got sent because the
4273 * connection failed.
4274 *
4275 * However for iser-target, isert_wait4logout() is using conn_logout_comp
4276 * to signal logout response TX interrupt completion. Go ahead and skip
4277 * this for iser since isert_rx_opcode() does not wait on logout failure,
4278 * and to avoid iscsit_conn pointer dereference in iser-target code.
4279 */
4280 if (!conn->conn_transport->rdma_shutdown)
4281 complete(&conn->conn_logout_comp);
4282
4283 if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
4284 if (conn->tx_thread &&
4285 cmpxchg(&conn->tx_thread_active, true, false)) {
4286 send_sig(SIGINT, conn->tx_thread, 1);
4287 kthread_stop(conn->tx_thread);
4288 }
4289 } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
4290 if (conn->rx_thread &&
4291 cmpxchg(&conn->rx_thread_active, true, false)) {
4292 send_sig(SIGINT, conn->rx_thread, 1);
4293 kthread_stop(conn->rx_thread);
4294 }
4295 }
4296
4297 spin_lock(&iscsit_global->ts_bitmap_lock);
4298 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
4299 get_order(1));
4300 spin_unlock(&iscsit_global->ts_bitmap_lock);
4301
4302 iscsit_stop_timers_for_cmds(conn);
4303 iscsit_stop_nopin_response_timer(conn);
4304 iscsit_stop_nopin_timer(conn);
4305
4306 if (conn->conn_transport->iscsit_wait_conn)
4307 conn->conn_transport->iscsit_wait_conn(conn);
4308
4309 /*
4310 * During Connection recovery drop unacknowledged out of order
4311 * commands for this connection, and prepare the other commands
4312 * for reallegiance.
4313 *
4314 * During normal operation clear the out of order commands (but
4315 * do not free the struct iscsi_ooo_cmdsn's) and release all
4316 * struct iscsit_cmds.
4317 */
4318 if (atomic_read(&conn->connection_recovery)) {
4319 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
4320 iscsit_prepare_cmds_for_reallegiance(conn);
4321 } else {
4322 iscsit_clear_ooo_cmdsns_for_conn(conn);
4323 iscsit_release_commands_from_conn(conn);
4324 }
4325 iscsit_free_queue_reqs_for_conn(conn);
4326
4327 /*
4328 * Handle decrementing session or connection usage count if
4329 * a logout response was not able to be sent because the
4330 * connection failed. Fall back to Session Recovery here.
4331 */
4332 if (atomic_read(&conn->conn_logout_remove)) {
4333 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
4334 iscsit_dec_conn_usage_count(conn);
4335 iscsit_dec_session_usage_count(sess);
4336 }
4337 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
4338 iscsit_dec_conn_usage_count(conn);
4339
4340 atomic_set(&conn->conn_logout_remove, 0);
4341 atomic_set(&sess->session_reinstatement, 0);
4342 atomic_set(&sess->session_fall_back_to_erl0, 1);
4343 }
4344
4345 spin_lock_bh(&sess->conn_lock);
4346 list_del(&conn->conn_list);
4347
4348 /*
4349 * Attempt to let the Initiator know this connection failed by
4350 	 * sending a Connection Dropped Async Message on another
4351 * active connection.
4352 */
4353 if (atomic_read(&conn->connection_recovery))
4354 iscsit_build_conn_drop_async_message(conn);
4355
4356 spin_unlock_bh(&sess->conn_lock);
4357
4358 /*
4359 * If connection reinstatement is being performed on this connection,
4360 * up the connection reinstatement semaphore that is being blocked on
4361 * in iscsit_cause_connection_reinstatement().
4362 */
4363 spin_lock_bh(&conn->state_lock);
4364 if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
4365 spin_unlock_bh(&conn->state_lock);
4366 complete(&conn->conn_wait_comp);
4367 wait_for_completion(&conn->conn_post_wait_comp);
4368 spin_lock_bh(&conn->state_lock);
4369 }
4370
4371 /*
4372 * If connection reinstatement is being performed on this connection
4373 * by receiving a REMOVECONNFORRECOVERY logout request, up the
4374 * connection wait rcfr semaphore that is being blocked on
4375 	 * in iscsit_connection_reinstatement_rcfr().
4376 */
4377 if (atomic_read(&conn->connection_wait_rcfr)) {
4378 spin_unlock_bh(&conn->state_lock);
4379 complete(&conn->conn_wait_rcfr_comp);
4380 wait_for_completion(&conn->conn_post_wait_comp);
4381 spin_lock_bh(&conn->state_lock);
4382 }
4383 atomic_set(&conn->connection_reinstatement, 1);
4384 spin_unlock_bh(&conn->state_lock);
4385
4386 /*
4387 * If any other processes are accessing this connection pointer we
4388 * must wait until they have completed.
4389 */
4390 iscsit_check_conn_usage_count(conn);
4391
4392 ahash_request_free(conn->conn_tx_hash);
4393 if (conn->conn_rx_hash) {
4394 struct crypto_ahash *tfm;
4395
4396 tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
4397 ahash_request_free(conn->conn_rx_hash);
4398 crypto_free_ahash(tfm);
4399 }
4400
4401 if (conn->sock)
4402 sock_release(conn->sock);
4403
4404 if (conn->conn_transport->iscsit_free_conn)
4405 conn->conn_transport->iscsit_free_conn(conn);
4406
4407 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4408 conn->conn_state = TARG_CONN_STATE_FREE;
4409 iscsit_free_conn(conn);
4410
4411 spin_lock_bh(&sess->conn_lock);
4412 atomic_dec(&sess->nconn);
4413 pr_debug("Decremented iSCSI connection count to %d from node:"
4414 " %s\n", atomic_read(&sess->nconn),
4415 sess->sess_ops->InitiatorName);
4416 /*
4417 	 * Make sure that if one connection fails in a non-ERL=2 iSCSI
4418 	 * Session, they all fail.
4419 */
4420 if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
4421 !atomic_read(&sess->session_logout))
4422 atomic_set(&sess->session_fall_back_to_erl0, 1);
4423
4424 /*
4425 * If this was not the last connection in the session, and we are
4426 * performing session reinstatement or falling back to ERL=0, call
4427 * iscsit_stop_session() without sleeping to shutdown the other
4428 * active connections.
4429 */
4430 if (atomic_read(&sess->nconn)) {
4431 if (!atomic_read(&sess->session_reinstatement) &&
4432 !atomic_read(&sess->session_fall_back_to_erl0)) {
4433 spin_unlock_bh(&sess->conn_lock);
4434 return 0;
4435 }
4436 if (!atomic_read(&sess->session_stop_active)) {
4437 atomic_set(&sess->session_stop_active, 1);
4438 spin_unlock_bh(&sess->conn_lock);
4439 iscsit_stop_session(sess, 0, 0);
4440 return 0;
4441 }
4442 spin_unlock_bh(&sess->conn_lock);
4443 return 0;
4444 }
4445
4446 /*
4447 * If this was the last connection in the session and one of the
4448 * following is occurring:
4449 *
4450 	 * Session Reinstatement is not being performed and we are falling back
4451 	 * to ERL=0: call iscsit_close_session().
4452 *
4453 * Session Logout was requested. iscsit_close_session() will be called
4454 * elsewhere.
4455 *
4456 * Session Continuation is not being performed, start the Time2Retain
4457 * handler and check if sleep_on_sess_wait_sem is active.
4458 */
4459 if (!atomic_read(&sess->session_reinstatement) &&
4460 atomic_read(&sess->session_fall_back_to_erl0)) {
4461 spin_unlock_bh(&sess->conn_lock);
4462 complete_all(&sess->session_wait_comp);
4463 iscsit_close_session(sess, true);
4464
4465 return 0;
4466 } else if (atomic_read(&sess->session_logout)) {
4467 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4468 sess->session_state = TARG_SESS_STATE_FREE;
4469
4470 if (atomic_read(&sess->session_close)) {
4471 spin_unlock_bh(&sess->conn_lock);
4472 complete_all(&sess->session_wait_comp);
4473 iscsit_close_session(sess, true);
4474 } else {
4475 spin_unlock_bh(&sess->conn_lock);
4476 }
4477
4478 return 0;
4479 } else {
4480 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4481 sess->session_state = TARG_SESS_STATE_FAILED;
4482
4483 if (!atomic_read(&sess->session_continuation))
4484 iscsit_start_time2retain_handler(sess);
4485
4486 if (atomic_read(&sess->session_close)) {
4487 spin_unlock_bh(&sess->conn_lock);
4488 complete_all(&sess->session_wait_comp);
4489 iscsit_close_session(sess, true);
4490 } else {
4491 spin_unlock_bh(&sess->conn_lock);
4492 }
4493
4494 return 0;
4495 }
4496 }
4497
4498 /*
4499 * If the iSCSI Session for the iSCSI Initiator Node exists,
4500 * forcefully shutdown the iSCSI NEXUS.
4501 */
4502 int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
4503 {
4504 struct iscsi_portal_group *tpg = sess->tpg;
4505 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4506
4507 if (atomic_read(&sess->nconn)) {
4508 pr_err("%d connection(s) still exist for iSCSI session"
4509 " to %s\n", atomic_read(&sess->nconn),
4510 sess->sess_ops->InitiatorName);
4511 BUG();
4512 }
4513
4514 spin_lock_bh(&se_tpg->session_lock);
4515 atomic_set(&sess->session_logout, 1);
4516 atomic_set(&sess->session_reinstatement, 1);
4517 iscsit_stop_time2retain_timer(sess);
4518 spin_unlock_bh(&se_tpg->session_lock);
4519
4520 /*
4521 * transport_deregister_session_configfs() will clear the
4522 	 * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process context
4523 	 * can set it again with __transport_register_session() in
4524 	 * iscsi_post_login_handler() after the iscsit_stop_session()
4525 * completes in iscsi_np context.
4526 */
4527 transport_deregister_session_configfs(sess->se_sess);
4528
4529 /*
4530 * If any other processes are accessing this session pointer we must
4531 * wait until they have completed. If we are in an interrupt (the
4532 	 * time2retain handler) and hold an active session usage count, we
4533 * restart the timer and exit.
4534 */
4535 if (iscsit_check_session_usage_count(sess, can_sleep)) {
4536 atomic_set(&sess->session_logout, 0);
4537 iscsit_start_time2retain_handler(sess);
4538 return 0;
4539 }
4540
4541 transport_deregister_session(sess->se_sess);
4542
4543 if (sess->sess_ops->ErrorRecoveryLevel == 2)
4544 iscsit_free_connection_recovery_entries(sess);
4545
4546 iscsit_free_all_ooo_cmdsns(sess);
4547
4548 spin_lock_bh(&se_tpg->session_lock);
4549 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4550 sess->session_state = TARG_SESS_STATE_FREE;
4551 pr_debug("Released iSCSI session from node: %s\n",
4552 sess->sess_ops->InitiatorName);
4553 tpg->nsessions--;
4554 if (tpg->tpg_tiqn)
4555 tpg->tpg_tiqn->tiqn_nsessions--;
4556
4557 pr_debug("Decremented number of active iSCSI Sessions on"
4558 " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
4559
4560 ida_free(&sess_ida, sess->session_index);
4561 kfree(sess->sess_ops);
4562 sess->sess_ops = NULL;
4563 spin_unlock_bh(&se_tpg->session_lock);
4564
4565 kfree(sess);
4566 return 0;
4567 }
4568
4569 static void iscsit_logout_post_handler_closesession(
4570 struct iscsit_conn *conn)
4571 {
4572 struct iscsit_session *sess = conn->sess;
4573 int sleep = 1;
4574 /*
4575 * Traditional iscsi/tcp will invoke this logic from TX thread
4576 * context during session logout, so clear tx_thread_active and
4577 	 * sleep if iscsit_close_connection() has not already occurred.
4578 	 *
4579 	 * Since iser-target invokes this logic from its own workqueue,
4580 * always sleep waiting for RX/TX thread shutdown to complete
4581 * within iscsit_close_connection().
4582 */
4583 if (!conn->conn_transport->rdma_shutdown) {
4584 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4585 if (!sleep)
4586 return;
4587 }
4588
4589 atomic_set(&conn->conn_logout_remove, 0);
4590 complete(&conn->conn_logout_comp);
4591
4592 iscsit_dec_conn_usage_count(conn);
4593 atomic_set(&sess->session_close, 1);
4594 iscsit_stop_session(sess, sleep, sleep);
4595 iscsit_dec_session_usage_count(sess);
4596 }
4597
4598 static void iscsit_logout_post_handler_samecid(
4599 struct iscsit_conn *conn)
4600 {
4601 int sleep = 1;
4602
4603 if (!conn->conn_transport->rdma_shutdown) {
4604 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4605 if (!sleep)
4606 return;
4607 }
4608
4609 atomic_set(&conn->conn_logout_remove, 0);
4610 complete(&conn->conn_logout_comp);
4611
4612 iscsit_cause_connection_reinstatement(conn, sleep);
4613 iscsit_dec_conn_usage_count(conn);
4614 }
4615
4616 static void iscsit_logout_post_handler_diffcid(
4617 struct iscsit_conn *conn,
4618 u16 cid)
4619 {
4620 struct iscsit_conn *l_conn;
4621 struct iscsit_session *sess = conn->sess;
4622 bool conn_found = false;
4623
4624 if (!sess)
4625 return;
4626
4627 spin_lock_bh(&sess->conn_lock);
4628 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4629 if (l_conn->cid == cid) {
4630 iscsit_inc_conn_usage_count(l_conn);
4631 conn_found = true;
4632 break;
4633 }
4634 }
4635 spin_unlock_bh(&sess->conn_lock);
4636
4637 if (!conn_found)
4638 return;
4639
4640 if (l_conn->sock)
4641 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4642
4643 spin_lock_bh(&l_conn->state_lock);
4644 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4645 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4646 spin_unlock_bh(&l_conn->state_lock);
4647
4648 iscsit_cause_connection_reinstatement(l_conn, 1);
4649 iscsit_dec_conn_usage_count(l_conn);
4650 }
4651
4652 /*
4653 * Return of 0 causes the TX thread to restart.
4654 */
4655 int iscsit_logout_post_handler(
4656 struct iscsit_cmd *cmd,
4657 struct iscsit_conn *conn)
4658 {
4659 int ret = 0;
4660
4661 switch (cmd->logout_reason) {
4662 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4663 switch (cmd->logout_response) {
4664 case ISCSI_LOGOUT_SUCCESS:
4665 case ISCSI_LOGOUT_CLEANUP_FAILED:
4666 default:
4667 iscsit_logout_post_handler_closesession(conn);
4668 break;
4669 }
4670 break;
4671 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4672 if (conn->cid == cmd->logout_cid) {
4673 switch (cmd->logout_response) {
4674 case ISCSI_LOGOUT_SUCCESS:
4675 case ISCSI_LOGOUT_CLEANUP_FAILED:
4676 default:
4677 iscsit_logout_post_handler_samecid(conn);
4678 break;
4679 }
4680 } else {
4681 switch (cmd->logout_response) {
4682 case ISCSI_LOGOUT_SUCCESS:
4683 iscsit_logout_post_handler_diffcid(conn,
4684 cmd->logout_cid);
4685 break;
4686 case ISCSI_LOGOUT_CID_NOT_FOUND:
4687 case ISCSI_LOGOUT_CLEANUP_FAILED:
4688 default:
4689 break;
4690 }
4691 ret = 1;
4692 }
4693 break;
4694 case ISCSI_LOGOUT_REASON_RECOVERY:
4695 switch (cmd->logout_response) {
4696 case ISCSI_LOGOUT_SUCCESS:
4697 case ISCSI_LOGOUT_CID_NOT_FOUND:
4698 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4699 case ISCSI_LOGOUT_CLEANUP_FAILED:
4700 default:
4701 break;
4702 }
4703 ret = 1;
4704 break;
4705 default:
4706 break;
4707
4708 }
4709 return ret;
4710 }
4711 EXPORT_SYMBOL(iscsit_logout_post_handler);
4712
4713 void iscsit_fail_session(struct iscsit_session *sess)
4714 {
4715 struct iscsit_conn *conn;
4716
4717 spin_lock_bh(&sess->conn_lock);
4718 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
4719 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
4720 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
4721 }
4722 spin_unlock_bh(&sess->conn_lock);
4723
4724 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4725 sess->session_state = TARG_SESS_STATE_FAILED;
4726 }
4727
4728 void iscsit_stop_session(
4729 struct iscsit_session *sess,
4730 int session_sleep,
4731 int connection_sleep)
4732 {
4733 u16 conn_count = atomic_read(&sess->nconn);
4734 struct iscsit_conn *conn, *conn_tmp = NULL;
4735 int is_last;
4736
4737 spin_lock_bh(&sess->conn_lock);
4738
4739 if (connection_sleep) {
4740 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4741 conn_list) {
4742 if (conn_count == 0)
4743 break;
4744
4745 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4746 is_last = 1;
4747 } else {
4748 iscsit_inc_conn_usage_count(conn_tmp);
4749 is_last = 0;
4750 }
4751 iscsit_inc_conn_usage_count(conn);
4752
4753 spin_unlock_bh(&sess->conn_lock);
4754 iscsit_cause_connection_reinstatement(conn, 1);
4755 spin_lock_bh(&sess->conn_lock);
4756
4757 iscsit_dec_conn_usage_count(conn);
4758 if (is_last == 0)
4759 iscsit_dec_conn_usage_count(conn_tmp);
4760 conn_count--;
4761 }
4762 } else {
4763 list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
4764 iscsit_cause_connection_reinstatement(conn, 0);
4765 }
4766
4767 if (session_sleep && atomic_read(&sess->nconn)) {
4768 spin_unlock_bh(&sess->conn_lock);
4769 wait_for_completion(&sess->session_wait_comp);
4770 } else
4771 spin_unlock_bh(&sess->conn_lock);
4772 }
4773
4774 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4775 {
4776 struct iscsit_session *sess;
4777 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4778 struct se_session *se_sess, *se_sess_tmp;
4779 LIST_HEAD(free_list);
4780 int session_count = 0;
4781
4782 spin_lock_bh(&se_tpg->session_lock);
4783 if (tpg->nsessions && !force) {
4784 spin_unlock_bh(&se_tpg->session_lock);
4785 return -1;
4786 }
4787
4788 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
4789 sess_list) {
4790 sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
4791
4792 spin_lock(&sess->conn_lock);
4793 if (atomic_read(&sess->session_fall_back_to_erl0) ||
4794 atomic_read(&sess->session_logout) ||
4795 atomic_read(&sess->session_close) ||
4796 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
4797 spin_unlock(&sess->conn_lock);
4798 continue;
4799 }
4800 iscsit_inc_session_usage_count(sess);
4801 atomic_set(&sess->session_reinstatement, 1);
4802 atomic_set(&sess->session_fall_back_to_erl0, 1);
4803 atomic_set(&sess->session_close, 1);
4804 spin_unlock(&sess->conn_lock);
4805
4806 list_move_tail(&se_sess->sess_list, &free_list);
4807 }
4808 spin_unlock_bh(&se_tpg->session_lock);
4809
4810 list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4811 sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
4812
4813 list_del_init(&se_sess->sess_list);
4814 iscsit_stop_session(sess, 1, 1);
4815 iscsit_dec_session_usage_count(sess);
4816 session_count++;
4817 }
4818
4819 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4820 " Group: %hu\n", session_count, tpg->tpgt);
4821 return 0;
4822 }
4823
4824 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
4825 MODULE_VERSION("4.1.x");
4826 MODULE_AUTHOR("nab@Linux-iSCSI.org");
4827 MODULE_LICENSE("GPL");
4828
4829 module_init(iscsi_target_init_module);
4830 module_exit(iscsi_target_cleanup_module);
4831