/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"

/*	core_clear_initiator_node_from_tpg():
 *
 *	Release all mapped LUN access for a node ACL, and remove the
 *	matching struct se_lun_acl entry from each active LUN's ACL list.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *found;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entry's device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		/*
		 * Locate the matching LUN ACL.  Track the match in a separate
		 * pointer: if the loop completes without a match, the list
		 * cursor does not point at a valid entry and must not be used.
		 */
		found = NULL;
		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry(acl, &lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun)) {
				found = acl;
				break;
			}
		}

		if (!found) {
			printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&found->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(found);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	Must be called with spin_lock_bh(&tpg->acl_node_lock) held.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
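
/*
 * A minimal usage sketch (hypothetical caller): this locked variant is
 * intended for paths that already hold the ACL node lock, e.g.:
 *
 *	spin_lock_bh(&tpg->acl_node_lock);
 *	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *	if (acl)
 *		acl->dynamic_node_acl = 0;
 *	spin_unlock_bh(&tpg->acl_node_lock);
 */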

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locate an explicit (non-dynamic) node ACL by initiator name,
 *	taking and releasing the ACL node lock internally.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_bh(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *	Export every active LUN in the TPG to a newly created demo-mode
 *	node ACL, applying the fabric's demo-mode write protect policy.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in a LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. READ-ONLY.
		 */
		if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode; TYPE_DISK devices stay read-only.
			 */
			if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/*	core_set_queue_depth_for_node():
 *
 *	Sanity check a node ACL's queue depth, falling back to a depth
 *	of 1 if the fabric reported zero.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

/*	core_create_device_list_for_node():
 *
 *	Allocate and initialize the per-node device list used to track
 *	mapped LUN entries for a node ACL.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!nacl->device_list) {
		printk(KERN_ERR "Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/*	core_tpg_check_initiator_node_acl():
 *
 *	Look up a node ACL for a connecting initiator, generating a
 *	dynamic demo-mode ACL if none exists and demo mode is enabled.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
		return NULL;

	acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	TPG_TFO(tpg)->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_bh(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

	printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d"
		" for %s Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
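
/*
 * Usage sketch (hypothetical fabric code, not part of this file): a
 * fabric module typically calls this from its session login path, e.g.:
 *
 *	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
 *			&tpg->se_tpg, (unsigned char *)initiator_name);
 *	if (!se_sess->se_node_acl) {
 *		transport_free_session(se_sess);
 *		return -EPERM;
 *	}
 */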

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i, ret;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *	Add an explicit node ACL for an initiator, replacing an existing
 *	dynamic demo-mode ACL of the same name if one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
				TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
			spin_unlock_bh(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returns
			 * a pointer to the existing demo mode node ACL.
			 */
			if (se_nacl)
				TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		printk(KERN_ERR "ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n",  TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	if (!se_nacl) {
		printk(KERN_ERR "struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	TPG_TFO(tpg)->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

done:
	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
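
/*
 * Usage sketch (hypothetical, names are illustrative): a fabric module's
 * configfs ->fabric_make_nodeacl() callback would allocate its own ACL
 * container and hand the embedded se_node_acl to this function, e.g.:
 *
 *	nacl = kzalloc(sizeof(*nacl), GFP_KERNEL);
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, &nacl->se_node_acl,
 *			name, TPG_TFO(se_tpg)->tpg_get_default_depth(se_tpg));
 *	if (IS_ERR(se_nacl))
 *		return ERR_CAST(se_nacl);
 */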

/*	core_tpg_del_initiator_node_acl():
 *
 *	Remove a node ACL from its TPG, shutting down any active sessions
 *	for the initiator and releasing the ACL's mapped LUN resources.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;

	spin_lock_bh(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		TPG_TFO(tpg)->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
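
/*
 * Usage sketch (hypothetical): the matching configfs ->fabric_drop_nodeacl()
 * callback would pair this call with the fabric's own release, e.g.:
 *
 *	core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
 *	kfree(nacl);
 */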

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the TCQ queue depth for an existing node ACL, forcing
 *	session reinstatement when an active session exists and force=1.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		printk(KERN_ERR "Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			printk(KERN_ERR "Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				TPG_TFO(tpg)->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_bh(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_bh(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * The user has requested to change the queue depth for an Initiator
	 * Node.  Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to apply the requested queue depth.
	 *
	 * Finally call TPG_TFO(tpg)->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from TPG_TFO(tpg)->shutdown_session() called above.
		 */
		if (init_sess)
			TPG_TFO(tpg)->close_session(init_sess);

		spin_lock_bh(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_bh(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		TPG_TFO(tpg)->close_session(init_sess);

	printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg));

	spin_lock_bh(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_bh(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
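
/*
 * Usage sketch (hypothetical): a fabric module would typically wire this
 * into a configfs attribute store handler for its node ACL group, e.g.:
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			nacl->initiatorname, new_depth, 1);
 *	if (ret < 0)
 *		return ret;
 */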

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = se_global->g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		printk(KERN_ERR "Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			/*
			 * se_tpg is embedded in a fabric-owned structure and
			 * was not allocated here; only release the LUN list.
			 */
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&se_global->se_tpg_lock);
	list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);

	printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
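
/*
 * Usage sketch (hypothetical fabric module, names are illustrative): a
 * fabric's configfs ->fabric_make_tpg() callback registers the embedded
 * se_portal_group roughly as follows:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *			my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0) {
 *		kfree(my_tpg);
 *		return ERR_PTR(ret);
 *	}
 */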

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		TPG_TFO(se_tpg)->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
		TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));

	spin_lock_bh(&se_global->se_tpg_lock);
	list_del(&se_tpg->se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_bh(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_bh(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_bh(&se_tpg->acl_node_lock);
	}
	spin_unlock_bh(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			TPG_TFO(tpg)->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	if (core_dev_export(lun_ptr, tpg, lun) < 0)
		return -1;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
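
/*
 * A minimal sketch of the intended pre/post pairing (hypothetical caller,
 * along the lines of core_dev_add_lun()):
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *
 *	if (core_tpg_post_addlun(tpg, lun,
 *			TRANSPORT_LUNFLAGS_READ_WRITE, dev) < 0)
 *		return ERR_PTR(-EINVAL);
 */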

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun,
	int *ret)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
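
/*
 * And the matching teardown pairing (hypothetical caller, along the lines
 * of core_dev_del_lun()):
 *
 *	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *
 *	core_tpg_post_dellun(tpg, lun);
 */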