/*
 * target_core_base.h
 *
 * Core data structures, enums and constants shared between TCM
 * (target core mod) and its fabric / subsystem plugins.
 */
#ifndef TARGET_CORE_BASE_H
#define TARGET_CORE_BASE_H

#include <linux/in.h>
#include <linux/configfs.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>

#define TARGET_CORE_MOD_VERSION	"v4.0.0-rc7-ml"
#define SHUTDOWN_SIGS	(sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))

/* Used by transport_generic_allocate_iovecs() */
#define TRANSPORT_IOV_DATA_BUFFER	5
/* Maximum Number of LUNs per Target Portal Group */
#define TRANSPORT_MAX_LUNS_PER_TPG	256
/*
 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
 *
 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
 * 16-byte CDBs by default and require an extra allocation for
 * 32-byte CDBs because of legacy issues.
 *
 * Within TCM Core there are no such legacy limitations, so we go ahead
 * and use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
 * within all TCM Core and subsystem plugin code.
 */
#define TCM_MAX_COMMAND_SIZE	32
/*
 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
 * defined 96, but the real limit is 252 (or 260 including the header)
 */
#define TRANSPORT_SENSE_BUFFER	SCSI_SENSE_BUFFERSIZE
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET	2
#define SPC_ASC_KEY_OFFSET	12
#define SPC_ASCQ_KEY_OFFSET	13
#define TRANSPORT_IQN_LEN	224
/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
#define LU_GROUP_NAME_BUF	256
/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
#define TG_PT_GROUP_NAME_BUF	256
/* Used to parse VPD into struct t10_vpd */
#define VPD_TMP_BUF_SIZE	128
/* Used by transport_generic_cmd_sequencer() */
#define READ_BLOCK_LEN		6
#define READ_CAP_LEN		8
#define READ_POSITION_LEN	20
#define INQUIRY_LEN		36
/* Used by transport_get_inquiry_vpd_serial() */
#define INQUIRY_VPD_SERIAL_LEN	254
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN	254

/* struct se_hba->hba_flags */
enum hba_flags_table {
	HBA_FLAGS_INTERNAL_USE	= 0x01,
	HBA_FLAGS_PSCSI_MODE	= 0x02,
};

/* struct se_lun->lun_status */
enum transport_lun_status_table {
	TRANSPORT_LUN_STATUS_FREE	= 0,
	TRANSPORT_LUN_STATUS_ACTIVE	= 1,
};

/* struct se_portal_group->se_tpg_type */
enum transport_tpg_type_table {
	TRANSPORT_TPG_TYPE_NORMAL	= 0,
	TRANSPORT_TPG_TYPE_DISCOVERY	= 1,
};

/* Used for generate timer flags */
enum timer_flags_table {
	TF_RUNNING	= 0x01,
	TF_STOP		= 0x02,
};

/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
	TRANSPORT_NO_STATE		= 0,
	TRANSPORT_NEW_CMD		= 1,
	TRANSPORT_DEFERRED_CMD		= 2,
	TRANSPORT_WRITE_PENDING		= 3,
	TRANSPORT_PROCESS_WRITE		= 4,
	TRANSPORT_PROCESSING		= 5,
	TRANSPORT_COMPLETE_OK		= 6,
	TRANSPORT_COMPLETE_FAILURE	= 7,
	TRANSPORT_COMPLETE_TIMEOUT	= 8,
	TRANSPORT_PROCESS_TMR		= 9,
	TRANSPORT_TMR_COMPLETE		= 10,
	TRANSPORT_ISTATE_PROCESSING	= 11,
	TRANSPORT_ISTATE_PROCESSED	= 12,
	TRANSPORT_KILL			= 13,
	TRANSPORT_REMOVE		= 14,
	TRANSPORT_FREE			= 15,
	TRANSPORT_NEW_CMD_MAP		= 16,
};

/* Used for struct se_cmd->se_cmd_flags */
enum se_cmd_flags_table {
	SCF_SUPPORTED_SAM_OPCODE		= 0x00000001,
	SCF_TRANSPORT_TASK_SENSE		= 0x00000002,
	SCF_EMULATED_TASK_SENSE			= 0x00000004,
	SCF_SCSI_DATA_SG_IO_CDB			= 0x00000008,
	SCF_SCSI_CONTROL_SG_IO_CDB		= 0x00000010,
	SCF_SCSI_CONTROL_NONSG_IO_CDB		= 0x00000020,
	SCF_SCSI_NON_DATA_CDB			= 0x00000040,
	SCF_SCSI_CDB_EXCEPTION			= 0x00000080,
	SCF_SCSI_RESERVATION_CONFLICT		= 0x00000100,
	SCF_CMD_PASSTHROUGH_NOALLOC		= 0x00000200,
	SCF_SE_CMD_FAILED			= 0x00000400,
	SCF_SE_LUN_CMD				= 0x00000800,
	SCF_SE_ALLOW_EOO			= 0x00001000,
	SCF_SE_DISABLE_ONLINE_CHECK		= 0x00002000,
	SCF_SENT_CHECK_CONDITION		= 0x00004000,
	SCF_OVERFLOW_BIT			= 0x00008000,
	SCF_UNDERFLOW_BIT			= 0x00010000,
	SCF_SENT_DELAYED_TAS			= 0x00020000,
	SCF_ALUA_NON_OPTIMIZED			= 0x00040000,
	SCF_DELAYED_CMD_FROM_SAM_ATTR		= 0x00080000,
	SCF_PASSTHROUGH_SG_TO_MEM		= 0x00100000,
	SCF_PASSTHROUGH_CONTIG_TO_SG		= 0x00200000,
	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC	= 0x00400000,
	SCF_EMULATE_SYNC_CACHE			= 0x00800000,
	SCF_EMULATE_CDB_ASYNC			= 0x01000000,
	SCF_EMULATE_SYNC_UNMAP			= 0x02000000
};

/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
enum transport_lunflags_table {
	TRANSPORT_LUNFLAGS_NO_ACCESS		= 0x00,
	TRANSPORT_LUNFLAGS_INITIATOR_ACCESS	= 0x01,
	TRANSPORT_LUNFLAGS_READ_ONLY		= 0x02,
	TRANSPORT_LUNFLAGS_READ_WRITE		= 0x04,
};

/* struct se_device->dev_status */
enum transport_device_status_table {
	TRANSPORT_DEVICE_ACTIVATED		= 0x01,
	TRANSPORT_DEVICE_DEACTIVATED		= 0x02,
	TRANSPORT_DEVICE_QUEUE_FULL		= 0x04,
	TRANSPORT_DEVICE_SHUTDOWN		= 0x08,
	TRANSPORT_DEVICE_OFFLINE_ACTIVATED	= 0x10,
	TRANSPORT_DEVICE_OFFLINE_DEACTIVATED	= 0x20,
};

/*
 * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
 * to signal which ASC/ASCQ sense payload should be built.
 */
enum tcm_sense_reason_table {
	TCM_NON_EXISTENT_LUN			= 0x01,
	TCM_UNSUPPORTED_SCSI_OPCODE		= 0x02,
	TCM_INCORRECT_AMOUNT_OF_DATA		= 0x03,
	TCM_UNEXPECTED_UNSOLICITED_DATA		= 0x04,
	TCM_SERVICE_CRC_ERROR			= 0x05,
	TCM_SNACK_REJECTED			= 0x06,
	TCM_SECTOR_COUNT_TOO_MANY		= 0x07,
	TCM_INVALID_CDB_FIELD			= 0x08,
	TCM_INVALID_PARAMETER_LIST		= 0x09,
	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE	= 0x0a,
	TCM_UNKNOWN_MODE_PAGE			= 0x0b,
	TCM_WRITE_PROTECTED			= 0x0c,
	TCM_CHECK_CONDITION_ABORT_CMD		= 0x0d,
	TCM_CHECK_CONDITION_UNIT_ATTENTION	= 0x0e,
	TCM_CHECK_CONDITION_NOT_READY		= 0x0f,
};

struct se_obj {
	atomic_t obj_access_count;
} ____cacheline_aligned;

/*
 * Used by TCM Core internally to signal if ALUA emulation is enabled or
 * disabled, or running in with TCM/pSCSI passthrough mode
 */
typedef enum {
	SPC_ALUA_PASSTHROUGH,
	SPC2_ALUA_DISABLED,
	SPC3_ALUA_EMULATED
} t10_alua_index_t;

/*
 * Used by TCM Core internally to signal if SAM Task Attribute emulation
 * is enabled or disabled, or running in with TCM/pSCSI passthrough mode
 */
typedef enum {
	SAM_TASK_ATTR_PASSTHROUGH,
	SAM_TASK_ATTR_UNTAGGED,
	SAM_TASK_ATTR_EMULATED
} t10_task_attr_index_t;

/*
 * Used for target SCSI statistics
 */
typedef enum {
	SCSI_INST_INDEX,
	SCSI_DEVICE_INDEX,
	SCSI_AUTH_INTR_INDEX,
	SCSI_INDEX_TYPE_MAX
} scsi_index_t;

struct scsi_index_table {
	spinlock_t	lock;
	u32		scsi_mib_index[SCSI_INDEX_TYPE_MAX];
} ____cacheline_aligned;

struct se_cmd;

struct t10_alua {
	t10_alua_index_t alua_type;
	/* ALUA Target Port Group ID */
	u16	alua_tg_pt_gps_counter;
	u32	alua_tg_pt_gps_count;
	spinlock_t tg_pt_gps_lock;
	struct se_subsystem_dev *t10_sub_dev;
	/* Used for default ALUA Target Port Group */
	struct t10_alua_tg_pt_gp *default_tg_pt_gp;
	/* Used for default ALUA Target Port Group ConfigFS group */
	struct config_group alua_tg_pt_gps_group;
	int	(*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
	struct list_head tg_pt_gps_list;
} ____cacheline_aligned;

struct t10_alua_lu_gp {
	u16	lu_gp_id;
	int	lu_gp_valid_id;
	u32	lu_gp_members;
	atomic_t lu_gp_shutdown;
	atomic_t lu_gp_ref_cnt;
	spinlock_t lu_gp_lock;
	struct config_group lu_gp_group;
	struct list_head lu_gp_list;
	struct list_head lu_gp_mem_list;
} ____cacheline_aligned;

struct t10_alua_lu_gp_member {
	bool lu_gp_assoc;
	atomic_t lu_gp_mem_ref_cnt;
	spinlock_t lu_gp_mem_lock;
	struct t10_alua_lu_gp *lu_gp;
	struct se_device *lu_gp_mem_dev;
	struct list_head lu_gp_mem_list;
} ____cacheline_aligned;

struct t10_alua_tg_pt_gp {
	u16	tg_pt_gp_id;
	int	tg_pt_gp_valid_id;
	int	tg_pt_gp_alua_access_status;
	int	tg_pt_gp_alua_access_type;
	int	tg_pt_gp_nonop_delay_msecs;
	int	tg_pt_gp_trans_delay_msecs;
	int	tg_pt_gp_pref;
	int	tg_pt_gp_write_metadata;
	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
#define ALUA_MD_BUF_LEN	1024
	u32	tg_pt_gp_md_buf_len;
	u32	tg_pt_gp_members;
	atomic_t tg_pt_gp_alua_access_state;
	atomic_t tg_pt_gp_ref_cnt;
	spinlock_t tg_pt_gp_lock;
	struct mutex tg_pt_gp_md_mutex;
	struct se_subsystem_dev *tg_pt_gp_su_dev;
	struct config_group tg_pt_gp_group;
	struct list_head tg_pt_gp_list;
	struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;

struct t10_alua_tg_pt_gp_member {
	bool tg_pt_gp_assoc;
	atomic_t tg_pt_gp_mem_ref_cnt;
	spinlock_t tg_pt_gp_mem_lock;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct se_port *tg_pt;
	struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;

struct t10_vpd {
	unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
	int protocol_identifier_set;
	u32 protocol_identifier;
	u32 device_identifier_code_set;
	u32 association;
	u32 device_identifier_type;
	struct list_head vpd_list;
} ____cacheline_aligned;

struct t10_wwn {
	unsigned char vendor[8];
	unsigned char model[16];
	unsigned char revision[4];
	unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN];
	spinlock_t t10_vpd_lock;
	struct se_subsystem_dev *t10_sub_dev;
	struct config_group t10_wwn_group;
	struct list_head t10_vpd_list;
} ____cacheline_aligned;


/*
 * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
 * emulation is enabled or disabled, or running in with TCM/pSCSI passthrough
 * mode
 */
typedef enum {
	SPC_PASSTHROUGH,
	SPC2_RESERVATIONS,
	SPC3_PERSISTENT_RESERVATIONS
} t10_reservations_index_t;

struct t10_pr_registration {
	/* Used for fabrics that contain WWN+ISID */
#define PR_REG_ISID_LEN	16
	/* PR_REG_ISID_LEN + ',i,0x' */
#define PR_REG_ISID_ID_LEN	(PR_REG_ISID_LEN + 5)
	char pr_reg_isid[PR_REG_ISID_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_IPORT_LEN	256
	unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_TPORT_LEN	256
	unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
	/* For writing out live meta data */
	unsigned char *pr_aptpl_buf;
	u16 pr_aptpl_rpti;
	u16 pr_reg_tpgt;
	/* Reservation affects all target ports */
	int pr_reg_all_tg_pt;
	/* Activate Persistence across Target Power Loss */
	int pr_reg_aptpl;
	int pr_res_holder;
	int pr_res_type;
	int pr_res_scope;
	/* Used for fabric initiator WWPNs using a ISID */
	bool isid_present_at_reg;
	u32 pr_res_mapped_lun;
	u32 pr_aptpl_target_lun;
	u32 pr_res_generation;
	u64 pr_reg_bin_isid;
	u64 pr_res_key;
	atomic_t pr_res_holders;
	struct se_node_acl *pr_reg_nacl;
	struct se_dev_entry *pr_reg_deve;
	struct se_lun *pr_reg_tg_pt_lun;
	struct list_head pr_reg_list;
	struct list_head pr_reg_abort_list;
	struct list_head pr_reg_aptpl_list;
	struct list_head pr_reg_atp_list;
	struct list_head pr_reg_atp_mem_list;
} ____cacheline_aligned;

/*
 * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
 * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
 * core_setup_reservations()
 */
struct t10_reservation_ops {
	int (*t10_reservation_check)(struct se_cmd *, u32 *);
	int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
	int (*t10_pr_register)(struct se_cmd *);
	int (*t10_pr_clear)(struct se_cmd *);
};

struct t10_reservation_template {
	/* Reservation affects all target ports */
	int pr_all_tg_pt;
	/* Activate Persistence across Target Power Loss enabled
	 * for SCSI device */
	int pr_aptpl_active;
	/* Used by struct t10_reservation_template->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN	8192
	u32 pr_aptpl_buf_len;
	u32 pr_generation;
	t10_reservations_index_t res_type;
	spinlock_t registration_lock;
	spinlock_t aptpl_reg_lock;
	/*
	 * This will always be set by one individual I_T Nexus.
	 * However with all_tg_pt=1, other I_T Nexus from the
	 * same initiator can access PR reg/res info on a different
	 * target port.
	 *
	 * There is also the 'All Registrants' case, where there is
	 * a single *pr_res_holder of the reservation, but all
	 * registrations are considered reservation holders.
	 */
	struct se_node_acl *pr_res_holder;
	struct list_head registration_list;
	struct list_head aptpl_reg_list;
	struct t10_reservation_ops pr_ops;
} ____cacheline_aligned;

struct se_queue_req {
	int state;
	void *cmd;
	struct list_head qr_list;
} ____cacheline_aligned;

struct se_queue_obj {
	atomic_t queue_cnt;
	spinlock_t cmd_queue_lock;
	struct list_head qobj_list;
	wait_queue_head_t thread_wq;
} ____cacheline_aligned;

/*
 * Used one per struct se_cmd to hold all extra struct se_task
 * metadata. This structure is setup and allocated in
 * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
 */
struct se_transport_task {
	unsigned char *t_task_cdb;
	unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
	unsigned long long t_task_lba;
	int t_tasks_failed;
	int t_tasks_fua;
	bool t_tasks_bidi;
	u32 t_task_cdbs;
	u32 t_tasks_check;
	u32 t_tasks_no;
	u32 t_tasks_sectors;
	u32 t_tasks_se_num;
	u32 t_tasks_se_bidi_num;
	u32 t_tasks_sg_chained_no;
	atomic_t t_fe_count;
	atomic_t t_se_count;
	atomic_t t_task_cdbs_left;
	atomic_t t_task_cdbs_ex_left;
	atomic_t t_task_cdbs_timeout_left;
	atomic_t t_task_cdbs_sent;
	atomic_t t_transport_aborted;
	atomic_t t_transport_active;
	atomic_t t_transport_complete;
	atomic_t t_transport_queue_active;
	atomic_t t_transport_sent;
	atomic_t t_transport_stop;
	atomic_t t_transport_timeout;
	atomic_t transport_dev_active;
	atomic_t transport_lun_active;
	atomic_t transport_lun_fe_stop;
	atomic_t transport_lun_stop;
	spinlock_t t_state_lock;
	struct completion t_transport_stop_comp;
	struct completion transport_lun_fe_stop_comp;
	struct completion transport_lun_stop_comp;
	struct scatterlist *t_tasks_sg_chained;
	struct scatterlist t_tasks_sg_bounce;
	void *t_task_buf;
	/*
	 * Used for pre-registered fabric SGL passthrough WRITE and READ
	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
	 * and other HW target mode fabric modules.
	 */
	struct scatterlist *t_task_pt_sgl;
	struct list_head *t_mem_list;
	/* Used for BIDI READ */
	struct list_head *t_mem_bidi_list;
	struct list_head t_task_list;
} ____cacheline_aligned;

struct se_task {
	unsigned char task_sense;
	struct scatterlist *task_sg;
	struct scatterlist *task_sg_bidi;
	u8 task_scsi_status;
	u8 task_flags;
	int task_error_status;
	int task_state_flags;
	bool task_padded_sg;
	unsigned long long task_lba;
	u32 task_no;
	u32 task_sectors;
	u32 task_size;
	u32 task_sg_num;
	u32 task_sg_offset;
	enum dma_data_direction task_data_direction;
	struct se_cmd *task_se_cmd;
	struct se_device *se_dev;
	struct completion task_stop_comp;
	atomic_t task_active;
	atomic_t task_execute_queue;
	atomic_t task_timeout;
	atomic_t task_sent;
	atomic_t task_stop;
	atomic_t task_state_active;
	struct timer_list task_timer;
	struct se_device *se_obj_ptr;
	struct list_head t_list;
	struct list_head t_execute_list;
	struct list_head t_state_list;
} ____cacheline_aligned;

#define TASK_CMD(task)	((task)->task_se_cmd)
#define TASK_DEV(task)	((task)->se_dev)

struct se_cmd {
	/* SAM response code being sent to initiator */
	u8	scsi_status;
	u8	scsi_asc;
	u8	scsi_ascq;
	u8	scsi_sense_reason;
	u16	scsi_sense_length;
	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
	int	alua_nonop_delay;
	/* See include/linux/dma-mapping.h */
	enum dma_data_direction data_direction;
	/* For SAM Task Attribute */
	int	sam_task_attr;
	/* Transport protocol dependent state, see transport_state_table */
	enum transport_state_table t_state;
	/* Transport protocol dependent state for out of order CmdSNs */
	int	deferred_t_state;
	/* Transport specific error status */
	int	transport_error_status;
	/* See se_cmd_flags_table */
	u32	se_cmd_flags;
	u32	se_ordered_id;
	/* Total size in bytes associated with command */
	u32	data_length;
	/* SCSI Presented Data Transfer Length */
	u32	cmd_spdtl;
	u32	residual_count;
	u32	orig_fe_lun;
	/* Persistent Reservation key */
	u64	pr_res_key;
	atomic_t transport_sent;
	/* Used for sense data */
	void	*sense_buffer;
	struct list_head se_delayed_list;
	struct list_head se_ordered_list;
	struct list_head se_lun_list;
	struct se_device *se_dev;
	struct se_dev_entry *se_deve;
	struct se_device *se_obj_ptr;
	struct se_device *se_orig_obj_ptr;
	struct se_lun *se_lun;
	/* Only used for internal passthrough and legacy TCM fabric modules */
	struct se_session *se_sess;
	struct se_tmr_req *se_tmr_req;
	/* t_task is setup to t_task_backstore in transport_init_se_cmd() */
	struct se_transport_task *t_task;
	struct se_transport_task t_task_backstore;
	struct target_core_fabric_ops *se_tfo;
	int (*transport_emulate_cdb)(struct se_cmd *);
	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
	void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
	void (*transport_complete_callback)(struct se_cmd *);
} ____cacheline_aligned;

#define T_TASK(cmd)	((cmd)->t_task)
#define CMD_TFO(cmd)	((cmd)->se_tfo)

struct se_tmr_req {
	/* Task Management function to be performed */
	u8	function;
	/* Task Management response to send */
	u8	response;
	int	call_transport;
	/* Reference to ITT that Task Mgmt should be performed */
	u32	ref_task_tag;
	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
	u64	ref_task_lun;
	void	*fabric_tmr_ptr;
	struct se_cmd *task_cmd;
	struct se_cmd *ref_cmd;
	struct se_device *tmr_dev;
	struct se_lun *tmr_lun;
	struct list_head tmr_list;
} ____cacheline_aligned;

struct se_ua {
	u8	ua_asc;
	u8	ua_ascq;
	struct se_node_acl *ua_nacl;
	struct list_head ua_dev_list;
	struct list_head ua_nacl_list;
} ____cacheline_aligned;

struct se_node_acl {
	char	initiatorname[TRANSPORT_IQN_LEN];
	/* Used to signal demo mode created ACL, disabled by default */
	bool	dynamic_node_acl;
	u32	queue_depth;
	u32	acl_index;
	u64	num_cmds;
	u64	read_bytes;
	u64	write_bytes;
	spinlock_t stats_lock;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t acl_pr_ref_count;
	struct se_dev_entry *device_list;
	struct se_session *nacl_sess;
	struct se_portal_group *se_tpg;
	spinlock_t device_list_lock;
	spinlock_t nacl_sess_lock;
	struct config_group acl_group;
	struct config_group acl_attrib_group;
	struct config_group acl_auth_group;
	struct config_group acl_param_group;
	struct config_group acl_fabric_stat_group;
	struct config_group *acl_default_groups[5];
	struct list_head acl_list;
	struct list_head acl_sess_list;
} ____cacheline_aligned;

struct se_session {
	u64	sess_bin_isid;
	struct se_node_acl *se_node_acl;
	struct se_portal_group *se_tpg;
	void	*fabric_sess_ptr;
	struct list_head sess_list;
	struct list_head sess_acl_list;
} ____cacheline_aligned;

#define SE_SESS(cmd)		((cmd)->se_sess)
#define SE_NODE_ACL(sess)	((sess)->se_node_acl)

struct se_device;
struct se_transform_info;
struct scatterlist;

struct se_ml_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_auth_intr_group;
	struct config_group scsi_att_intr_port_group;
};

struct se_lun_acl {
	char	initiatorname[TRANSPORT_IQN_LEN];
	u32	mapped_lun;
	struct se_node_acl *se_lun_nacl;
	struct se_lun *se_lun;
	struct list_head lacl_list;
	struct config_group se_lun_group;
	struct se_ml_stat_grps ml_stat_grps;
} ____cacheline_aligned;

#define ML_STAT_GRPS(lacl)	(&(lacl)->ml_stat_grps)

struct se_dev_entry {
	bool	def_pr_registered;
	/* See transport_lunflags_table */
	u32	lun_flags;
	u32	deve_cmds;
	u32	mapped_lun;
	u32	average_bytes;
	u32	last_byte_count;
	u32	total_cmds;
	u32	total_bytes;
	u64	pr_res_key;
	u64	creation_time;
	u32	attach_count;
	u64	read_bytes;
	u64	write_bytes;
	atomic_t ua_count;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t pr_ref_count;
	struct se_lun_acl *se_lun_acl;
	spinlock_t ua_lock;
	struct se_lun *se_lun;
	struct list_head alua_port_list;
	struct list_head ua_list;
} ____cacheline_aligned;

struct se_dev_limits {
	/* Max supported HW queue depth */
	u32	hw_queue_depth;
	/* Max supported virtual queue depth */
	u32	queue_depth;
	/* From include/linux/blkdev.h for the other HW/SW limits. */
	struct queue_limits limits;
} ____cacheline_aligned;

struct se_dev_attrib {
	int	emulate_dpo;
	int	emulate_fua_write;
	int	emulate_fua_read;
	int	emulate_write_cache;
	int	emulate_ua_intlck_ctrl;
	int	emulate_tas;
	int	emulate_tpu;
	int	emulate_tpws;
	int	emulate_reservations;
	int	emulate_alua;
	int	enforce_pr_isids;
	u32	hw_block_size;
	u32	block_size;
	u32	hw_max_sectors;
	u32	max_sectors;
	u32	optimal_sectors;
	u32	hw_queue_depth;
	u32	queue_depth;
	u32	task_timeout;
	u32	max_unmap_lba_count;
	u32	max_unmap_block_desc_count;
	u32	unmap_granularity;
	u32	unmap_granularity_alignment;
	struct se_subsystem_dev *da_sub_dev;
	struct config_group da_group;
} ____cacheline_aligned;

struct se_dev_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_dev_group;
	struct config_group scsi_tgt_dev_group;
	struct config_group scsi_lu_group;
};

struct se_subsystem_dev {
	/* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */
#define SE_DEV_ALIAS_LEN	512
	unsigned char	se_dev_alias[SE_DEV_ALIAS_LEN];
	/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
#define SE_UDEV_PATH_LEN	512
	unsigned char	se_dev_udev_path[SE_UDEV_PATH_LEN];
	u32		su_dev_flags;
	struct se_hba *se_dev_hba;
	struct se_device *se_dev_ptr;
	struct se_dev_attrib se_dev_attrib;
	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
	struct t10_alua	t10_alua;
	/* T10 Inquiry and VPD WWN Information */
	struct t10_wwn	t10_wwn;
	/* T10 SPC-2 + SPC-3 Reservations */
	struct t10_reservation_template t10_reservation;
	spinlock_t      se_dev_lock;
	void            *se_dev_su_ptr;
	struct list_head g_se_dev_list;
	struct config_group se_dev_group;
	/* For T10 Reservations */
	struct config_group se_dev_pr_group;
	/* For target_core_stat.c groups */
	struct se_dev_stat_grps dev_stat_grps;
} ____cacheline_aligned;

#define T10_ALUA(su_dev)	(&(su_dev)->t10_alua)
#define T10_RES(su_dev)		(&(su_dev)->t10_reservation)
#define T10_PR_OPS(su_dev)	(&(su_dev)->t10_reservation.pr_ops)
#define DEV_STAT_GRP(dev)	(&(dev)->dev_stat_grps)

struct se_device {
	/* Set to 1 if thread is NOT sleeping on thread_sem */
	u8	thread_active;
	u8	dev_status_timer_flags;
	/* RELATIVE TARGET PORT IDENTIFIER Counter */
	u16	dev_rpti_counter;
	/* Used for SAM Task Attribute ordering */
	u32	dev_cur_ordered_id;
	u32	dev_flags;
	u32	dev_port_count;
	/* See transport_device_status_table */
	u32	dev_status;
	u32	dev_tcq_window_closed;
	/* Physical device queue depth */
	u32	queue_depth;
	/* Used for SPC-2 reservations enforce of ISIDs */
	u64	dev_res_bin_isid;
	t10_task_attr_index_t dev_task_attr_type;
	/* Pointer to transport specific device structure */
	void 	*dev_ptr;
	u32	dev_index;
	u64	creation_time;
	u32	num_resets;
	u64	num_cmds;
	u64	read_bytes;
	u64	write_bytes;
	spinlock_t stats_lock;
	/* Active commands on this virtual SE device */
	atomic_t active_cmds;
	atomic_t simple_cmds;
	atomic_t depth_left;
	atomic_t dev_ordered_id;
	atomic_t dev_tur_active;
	atomic_t execute_tasks;
	atomic_t dev_status_thr_count;
	atomic_t dev_hoq_count;
	atomic_t dev_ordered_sync;
	struct se_obj dev_obj;
	struct se_obj dev_access_obj;
	struct se_obj dev_export_obj;
	struct se_queue_obj *dev_queue_obj;
	struct se_queue_obj *dev_status_queue_obj;
	spinlock_t delayed_cmd_lock;
	spinlock_t ordered_cmd_lock;
	spinlock_t execute_task_lock;
	spinlock_t state_task_lock;
	spinlock_t dev_alua_lock;
	spinlock_t dev_reservation_lock;
	spinlock_t dev_state_lock;
	spinlock_t dev_status_lock;
	spinlock_t dev_status_thr_lock;
	spinlock_t se_port_lock;
	spinlock_t se_tmr_lock;
	/* Used for legacy SPC-2 reservations */
	struct se_node_acl *dev_reserved_node_acl;
	/* Used for ALUA Logical Unit Group membership */
	struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
	/* Used for SPC-3 Persistent Reservations */
	struct t10_pr_registration *dev_pr_res_holder;
	struct list_head dev_sep_list;
	struct list_head dev_tmr_list;
	struct timer_list dev_status_timer;
	/* Pointer to descriptor for processing thread */
	struct task_struct *process_thread;
	pid_t process_thread_pid;
	struct task_struct *dev_mgmt_thread;
	struct list_head delayed_cmd_list;
	struct list_head ordered_cmd_list;
	struct list_head execute_task_list;
	struct list_head state_task_list;
	/* Pointer to associated SE HBA */
	struct se_hba *se_hba;
	struct se_subsystem_dev *se_sub_dev;
	/* Pointer to template of function pointers for transport */
	struct se_subsystem_api *transport;
	/* Linked list for struct se_hba struct se_device list */
	struct list_head dev_list;
	/* Linked list for struct se_global->g_se_dev_list */
	struct list_head g_se_dev_list;
} ____cacheline_aligned;

#define SE_DEV(cmd)		((cmd)->se_lun->lun_se_dev)
#define SU_DEV(dev)		((dev)->se_sub_dev)
#define DEV_ATTRIB(dev)		(&(dev)->se_sub_dev->se_dev_attrib)
#define DEV_T10_WWN(dev)	(&(dev)->se_sub_dev->t10_wwn)

struct se_hba {
	u16	hba_tpgt;
	u32	hba_id;
	/* See hba_flags_table */
	u32	hba_flags;
	/* Virtual iSCSI devices attached. */
	u32	dev_count;
	u32	hba_index;
	atomic_t load_balance_queue;
	atomic_t left_queue_depth;
	/* Maximum queue depth the HBA can handle. */
	atomic_t max_queue_depth;
	/* Pointer to transport specific host structure. */
	void *hba_ptr;
	/* Linked list for struct se_device */
	struct list_head hba_dev_list;
	struct list_head hba_list;
	spinlock_t device_lock;
	spinlock_t hba_queue_lock;
	struct config_group hba_group;
	struct mutex hba_access_mutex;
	struct se_subsystem_api *transport;
} ____cacheline_aligned;

#define SE_HBA(dev)		((dev)->se_hba)

struct se_port_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_port_group;
	struct config_group scsi_tgt_port_group;
	struct config_group scsi_transport_group;
};

struct se_lun {
	/* See transport_lun_status_table */
	enum transport_lun_status_table lun_status;
	u32	lun_access;
	u32	lun_flags;
	u32	unpacked_lun;
	atomic_t lun_acl_count;
	spinlock_t lun_acl_lock;
	spinlock_t lun_cmd_lock;
	spinlock_t lun_sep_lock;
	struct completion lun_shutdown_comp;
	struct list_head lun_cmd_list;
	struct list_head lun_acl_list;
	struct se_device *lun_se_dev;
	struct se_port *lun_sep;
	struct config_group lun_group;
	struct se_port_stat_grps port_stat_grps;
} ____cacheline_aligned;

#define SE_LUN(cmd)		((cmd)->se_lun)
#define PORT_STAT_GRP(lun)	(&(lun)->port_stat_grps)

struct scsi_port_stats {
	u64	cmd_pdus;
	u64	tx_data_octets;
	u64	rx_data_octets;
} ____cacheline_aligned;

struct se_port {
	/* RELATIVE TARGET PORT IDENTIFIER */
	u16	sep_rtpi;
	int	sep_tg_pt_secondary_stat;
	int	sep_tg_pt_secondary_write_md;
	u32	sep_index;
	struct scsi_port_stats sep_stats;
	/* Used for ALUA Target Port Groups membership */
	atomic_t sep_tg_pt_gp_active;
	atomic_t sep_tg_pt_secondary_offline;
	/* Used for PR ALL_TG_PT=1 */
	atomic_t sep_tg_pt_ref_cnt;
	spinlock_t sep_alua_lock;
	struct mutex sep_tg_pt_md_mutex;
	struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
	struct se_lun *sep_lun;
	struct se_portal_group *sep_tpg;
	struct list_head sep_alua_list;
	struct list_head sep_list;
} ____cacheline_aligned;

struct se_tpg_np {
	struct se_portal_group *tpg_np_parent;
	struct config_group tpg_np_group;
} ____cacheline_aligned;

struct se_portal_group {
	/* Type of target portal group, see transport_tpg_type_table */
	enum transport_tpg_type_table se_tpg_type;
	/* Number of ACLed Initiator Nodes for this TPG */
	u32	num_node_acls;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t tpg_pr_ref_count;
	/* Spinlock for adding/removing ACLed Nodes */
	spinlock_t acl_node_lock;
	/* Spinlock for adding/removing sessions */
	spinlock_t session_lock;
	spinlock_t tpg_lun_lock;
	/* Pointer to $FABRIC_MOD portal group */
	void *se_tpg_fabric_ptr;
	struct list_head se_tpg_list;
	/* linked list for initiator ACL list */
	struct list_head acl_node_list;
	struct se_lun *tpg_lun_list;
	struct se_lun tpg_virt_lun0;
	/* List of TCM sessions associated with this TPG */
	struct list_head tpg_sess_list;
	/* Pointer to $FABRIC_MOD dependent code */
	struct target_core_fabric_ops *se_tpg_tfo;
	struct se_wwn *se_tpg_wwn;
	struct config_group tpg_group;
	struct config_group *tpg_default_groups[6];
	struct config_group tpg_lun_group;
	struct config_group tpg_np_group;
	struct config_group tpg_acl_group;
	struct config_group tpg_attrib_group;
	struct config_group tpg_param_group;
} ____cacheline_aligned;

#define TPG_TFO(se_tpg)	((se_tpg)->se_tpg_tfo)

struct se_wwn {
	struct target_fabric_configfs *wwn_tf;
	struct config_group wwn_group;
	struct config_group *wwn_default_groups[2];
	struct config_group fabric_stat_group;
} ____cacheline_aligned;

struct se_global {
	u16	alua_lu_gps_counter;
	int	g_sub_api_initialized;
	u32	in_shutdown;
	u32	alua_lu_gps_count;
	u32	g_hba_id_counter;
	struct config_group target_core_hbagroup;
	struct config_group alua_group;
	struct config_group alua_lu_gps_group;
	struct list_head g_lu_gps_list;
	struct list_head g_se_tpg_list;
	struct list_head g_hba_list;
	struct list_head g_se_dev_list;
	struct se_hba *g_lun0_hba;
	struct se_subsystem_dev *g_lun0_su_dev;
	struct se_device *g_lun0_dev;
	struct t10_alua_lu_gp *default_lu_gp;
	spinlock_t g_device_lock;
	spinlock_t hba_lock;
	spinlock_t se_tpg_lock;
	spinlock_t lu_gps_lock;
	spinlock_t plugin_class_lock;
} ____cacheline_aligned;

#endif /* TARGET_CORE_BASE_H */