#ifndef TARGET_CORE_BASE_H
#define TARGET_CORE_BASE_H

#include <linux/in.h>
#include <linux/configfs.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>

#define TARGET_CORE_MOD_VERSION	"v4.1.0-rc2-ml"
#define TARGET_CORE_VERSION	TARGET_CORE_MOD_VERSION

/* Maximum Number of LUNs per Target Portal Group */
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG	256
/*
 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
 *
 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
 * 16-byte CDBs by default and require an extra allocation for
 * 32-byte CDBs because of legacy issues.
 *
 * Within TCM Core there are no such legacy limitations, so we go ahead
 * and use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
 * within all TCM Core and subsystem plugin code.
 */
#define TCM_MAX_COMMAND_SIZE	32
/*
 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
 * defined 96, but the real limit is 252 (or 260 including the header)
 */
#define TRANSPORT_SENSE_BUFFER	SCSI_SENSE_BUFFERSIZE
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET	2
#define SPC_ADD_SENSE_LEN_OFFSET	7
#define SPC_ASC_KEY_OFFSET	12
#define SPC_ASCQ_KEY_OFFSET	13
#define TRANSPORT_IQN_LEN	224
/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
#define LU_GROUP_NAME_BUF	256
/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
#define TG_PT_GROUP_NAME_BUF	256
/* Used to parse VPD into struct t10_vpd */
#define VPD_TMP_BUF_SIZE	128
/* Used by transport_generic_cmd_sequencer() */
#define READ_BLOCK_LEN	6
#define READ_CAP_LEN	8
#define READ_POSITION_LEN	20
#define INQUIRY_LEN	36
/* Used by transport_get_inquiry_vpd_serial() */
#define INQUIRY_VPD_SERIAL_LEN	254
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN	254
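
/*
 * Illustrative sketch (not part of the original interface): how the SPC_*
 * sense offsets defined above map onto a fixed-format sense buffer, roughly
 * what transport_send_check_condition_and_sense() builds before handing the
 * buffer back to the fabric.  The helper name is hypothetical.
 *
 *	static void example_fill_sense(unsigned char *buf, u8 key, u8 asc, u8 ascq)
 *	{
 *		buf[0] = 0x70;				// current error, fixed format
 *		buf[SPC_SENSE_KEY_OFFSET] = key;	// e.g. ILLEGAL_REQUEST
 *		buf[SPC_ADD_SENSE_LEN_OFFSET] = 0x0a;	// additional sense length
 *		buf[SPC_ASC_KEY_OFFSET] = asc;
 *		buf[SPC_ASCQ_KEY_OFFSET] = ascq;
 *	}
 */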

/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD	3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT	3	/* In milliseconds */
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG	10	/* In milliseconds */

#define PYX_TRANSPORT_STATUS_INTERVAL	5	/* In seconds */

/*
 * struct se_subsystem_dev->su_dev_flags
 */
#define SDF_FIRMWARE_VPD_UNIT_SERIAL	0x00000001
#define SDF_EMULATED_VPD_UNIT_SERIAL	0x00000002
#define SDF_USING_UDEV_PATH	0x00000004
#define SDF_USING_ALIAS	0x00000008

/*
 * struct se_device->dev_flags
 */
#define DF_READ_ONLY	0x00000001
#define DF_SPC2_RESERVATIONS	0x00000002
#define DF_SPC2_RESERVATIONS_WITH_ISID	0x00000004

/* struct se_dev_attrib sanity values */
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT	0
/* Default max_unmap_block_desc_count */
#define DA_MAX_UNMAP_BLOCK_DESC_COUNT	0
/* Default unmap_granularity */
#define DA_UNMAP_GRANULARITY_DEFAULT	0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
/* Default max transfer length */
#define DA_FABRIC_MAX_SECTORS	8192
/* Emulation for Disable Page Out (DPO) */
#define DA_EMULATE_DPO	0
/* Emulation for Forced Unit Access WRITEs */
#define DA_EMULATE_FUA_WRITE	1
/* Emulation for Forced Unit Access READs */
#define DA_EMULATE_FUA_READ	0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE	0
/* Emulation for UNIT ATTENTION Interlock Control */
#define DA_EMULATE_UA_INTLLCK_CTRL	0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS	1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
#define DA_EMULATE_TPU	0
/*
 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
 * block/blk-lib.c:blkdev_issue_discard()
 */
#define DA_EMULATE_TPWS	0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_RESERVATIONS	0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA	0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS	1
#define DA_STATUS_MAX_SECTORS_MIN	16
#define DA_STATUS_MAX_SECTORS_MAX	8192
/* By default don't report non-rotating (solid state) medium */
#define DA_IS_NONROT	0
/* Queue Algorithm Modifier default for restricted reordering in control mode page */
#define DA_EMULATE_REST_REORD	0

#define SE_INQUIRY_BUF	512
#define SE_MODE_PAGE_BUF	512

/* struct se_hba->hba_flags */
enum hba_flags_table {
	HBA_FLAGS_INTERNAL_USE	= 0x01,
	HBA_FLAGS_PSCSI_MODE	= 0x02,
};

/* struct se_lun->lun_status */
enum transport_lun_status_table {
	TRANSPORT_LUN_STATUS_FREE	= 0,
	TRANSPORT_LUN_STATUS_ACTIVE	= 1,
};

/* struct se_portal_group->se_tpg_type */
enum transport_tpg_type_table {
	TRANSPORT_TPG_TYPE_NORMAL	= 0,
	TRANSPORT_TPG_TYPE_DISCOVERY	= 1,
};

/* struct se_task->task_flags */
enum se_task_flags {
	TF_ACTIVE	= (1 << 0),
	TF_SENT	= (1 << 1),
	TF_REQUEST_STOP	= (1 << 2),
	TF_HAS_SENSE	= (1 << 3),
};

/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
	TRANSPORT_NO_STATE	= 0,
	TRANSPORT_NEW_CMD	= 1,
	TRANSPORT_WRITE_PENDING	= 3,
	TRANSPORT_PROCESS_WRITE	= 4,
	TRANSPORT_PROCESSING	= 5,
	TRANSPORT_COMPLETE	= 6,
	TRANSPORT_PROCESS_TMR	= 9,
	TRANSPORT_ISTATE_PROCESSING	= 11,
	TRANSPORT_NEW_CMD_MAP	= 16,
	TRANSPORT_COMPLETE_QF_WP	= 18,
	TRANSPORT_COMPLETE_QF_OK	= 19,
};

/* Used for struct se_cmd->se_cmd_flags */
enum se_cmd_flags_table {
	SCF_SUPPORTED_SAM_OPCODE	= 0x00000001,
	SCF_TRANSPORT_TASK_SENSE	= 0x00000002,
	SCF_EMULATED_TASK_SENSE	= 0x00000004,
	SCF_SCSI_DATA_SG_IO_CDB	= 0x00000008,
	SCF_SCSI_CONTROL_SG_IO_CDB	= 0x00000010,
	SCF_SCSI_NON_DATA_CDB	= 0x00000020,
	SCF_SCSI_TMR_CDB	= 0x00000040,
	SCF_SCSI_CDB_EXCEPTION	= 0x00000080,
	SCF_SCSI_RESERVATION_CONFLICT	= 0x00000100,
	SCF_FUA	= 0x00000200,
	SCF_SE_LUN_CMD	= 0x00000800,
	SCF_SE_ALLOW_EOO	= 0x00001000,
	SCF_BIDI	= 0x00002000,
	SCF_SENT_CHECK_CONDITION	= 0x00004000,
	SCF_OVERFLOW_BIT	= 0x00008000,
	SCF_UNDERFLOW_BIT	= 0x00010000,
	SCF_SENT_DELAYED_TAS	= 0x00020000,
	SCF_ALUA_NON_OPTIMIZED	= 0x00040000,
	SCF_DELAYED_CMD_FROM_SAM_ATTR	= 0x00080000,
	SCF_UNUSED	= 0x00100000,
	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC	= 0x00200000,
	SCF_ACK_KREF	= 0x00400000,
};
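
/*
 * Illustrative sketch (not part of the original interface): se_cmd_flags is a
 * plain bitmask, so CDB classification and residual handling reduce to simple
 * bit tests on struct se_cmd (defined further below).  The helper name is
 * hypothetical.
 *
 *	static bool example_cmd_has_residual(struct se_cmd *cmd)
 *	{
 *		// residual_count is valid when either bit is set
 *		return (cmd->se_cmd_flags &
 *			(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) != 0;
 *	}
 */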

/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
enum transport_lunflags_table {
	TRANSPORT_LUNFLAGS_NO_ACCESS	= 0x00,
	TRANSPORT_LUNFLAGS_INITIATOR_ACCESS	= 0x01,
	TRANSPORT_LUNFLAGS_READ_ONLY	= 0x02,
	TRANSPORT_LUNFLAGS_READ_WRITE	= 0x04,
};

/* struct se_device->dev_status */
enum transport_device_status_table {
	TRANSPORT_DEVICE_ACTIVATED	= 0x01,
	TRANSPORT_DEVICE_DEACTIVATED	= 0x02,
	TRANSPORT_DEVICE_QUEUE_FULL	= 0x04,
	TRANSPORT_DEVICE_SHUTDOWN	= 0x08,
	TRANSPORT_DEVICE_OFFLINE_ACTIVATED	= 0x10,
	TRANSPORT_DEVICE_OFFLINE_DEACTIVATED	= 0x20,
};

/*
 * Used by transport_send_check_condition_and_sense() and
 * se_cmd->scsi_sense_reason to signal which ASC/ASCQ sense payload
 * should be built.
 */
enum tcm_sense_reason_table {
	TCM_NON_EXISTENT_LUN	= 0x01,
	TCM_UNSUPPORTED_SCSI_OPCODE	= 0x02,
	TCM_INCORRECT_AMOUNT_OF_DATA	= 0x03,
	TCM_UNEXPECTED_UNSOLICITED_DATA	= 0x04,
	TCM_SERVICE_CRC_ERROR	= 0x05,
	TCM_SNACK_REJECTED	= 0x06,
	TCM_SECTOR_COUNT_TOO_MANY	= 0x07,
	TCM_INVALID_CDB_FIELD	= 0x08,
	TCM_INVALID_PARAMETER_LIST	= 0x09,
	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE	= 0x0a,
	TCM_UNKNOWN_MODE_PAGE	= 0x0b,
	TCM_WRITE_PROTECTED	= 0x0c,
	TCM_CHECK_CONDITION_ABORT_CMD	= 0x0d,
	TCM_CHECK_CONDITION_UNIT_ATTENTION	= 0x0e,
	TCM_CHECK_CONDITION_NOT_READY	= 0x0f,
	TCM_RESERVATION_CONFLICT	= 0x10,
	TCM_ADDRESS_OUT_OF_RANGE	= 0x11,
};

enum target_sc_flags_table {
	TARGET_SCF_BIDI_OP	= 0x01,
	TARGET_SCF_ACK_KREF	= 0x02,
};

/* fabric independent task management function values */
enum tcm_tmreq_table {
	TMR_ABORT_TASK	= 1,
	TMR_ABORT_TASK_SET	= 2,
	TMR_CLEAR_ACA	= 3,
	TMR_CLEAR_TASK_SET	= 4,
	TMR_LUN_RESET	= 5,
	TMR_TARGET_WARM_RESET	= 6,
	TMR_TARGET_COLD_RESET	= 7,
	TMR_FABRIC_TMR	= 255,
};

/* fabric independent task management response values */
enum tcm_tmrsp_table {
	TMR_FUNCTION_COMPLETE	= 0,
	TMR_TASK_DOES_NOT_EXIST	= 1,
	TMR_LUN_DOES_NOT_EXIST	= 2,
	TMR_TASK_STILL_ALLEGIANT	= 3,
	TMR_TASK_FAILOVER_NOT_SUPPORTED	= 4,
	TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	= 5,
	TMR_FUNCTION_AUTHORIZATION_FAILED	= 6,
	TMR_FUNCTION_REJECTED	= 255,
};

struct se_obj {
	atomic_t	obj_access_count;
};

/*
 * Used by TCM Core internally to signal if ALUA emulation is enabled or
 * disabled, or running in TCM/pSCSI passthrough mode
 */
typedef enum {
	SPC_ALUA_PASSTHROUGH,
	SPC2_ALUA_DISABLED,
	SPC3_ALUA_EMULATED
} t10_alua_index_t;

/*
 * Used by TCM Core internally to signal if SAM Task Attribute emulation
 * is enabled or disabled, or running in TCM/pSCSI passthrough mode
 */
typedef enum {
	SAM_TASK_ATTR_PASSTHROUGH,
	SAM_TASK_ATTR_UNTAGGED,
	SAM_TASK_ATTR_EMULATED
} t10_task_attr_index_t;

/*
 * Used for target SCSI statistics
 */
typedef enum {
	SCSI_INST_INDEX,
	SCSI_DEVICE_INDEX,
	SCSI_AUTH_INTR_INDEX,
	SCSI_INDEX_TYPE_MAX
} scsi_index_t;

struct se_cmd;
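
/*
 * Illustrative sketch (not part of the original interface): a few of the
 * tcm_sense_reason_table values above and the standard SPC ASC/ASCQ pairs
 * they are translated into when a CHECK CONDITION is built; the real mapping
 * lives in transport_send_check_condition_and_sense().  The helper name is
 * hypothetical.
 *
 *	static void example_reason_to_asc(enum tcm_sense_reason_table reason,
 *					  u8 *asc, u8 *ascq)
 *	{
 *		switch (reason) {
 *		case TCM_NON_EXISTENT_LUN:	// LOGICAL UNIT NOT SUPPORTED
 *			*asc = 0x25; *ascq = 0x00;
 *			break;
 *		case TCM_INVALID_CDB_FIELD:	// INVALID FIELD IN CDB
 *			*asc = 0x24; *ascq = 0x00;
 *			break;
 *		case TCM_WRITE_PROTECTED:	// WRITE PROTECTED
 *			*asc = 0x27; *ascq = 0x00;
 *			break;
 *		default:			// LOGICAL UNIT COMMUNICATION FAILURE
 *			*asc = 0x08; *ascq = 0x00;
 *			break;
 *		}
 *	}
 */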

struct t10_alua {
	t10_alua_index_t	alua_type;
	/* ALUA Target Port Group ID */
	u16	alua_tg_pt_gps_counter;
	u32	alua_tg_pt_gps_count;
	spinlock_t	tg_pt_gps_lock;
	struct se_subsystem_dev	*t10_sub_dev;
	/* Used for default ALUA Target Port Group */
	struct t10_alua_tg_pt_gp	*default_tg_pt_gp;
	/* Used for default ALUA Target Port Group ConfigFS group */
	struct config_group	alua_tg_pt_gps_group;
	int	(*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
	struct list_head	tg_pt_gps_list;
};

struct t10_alua_lu_gp {
	u16	lu_gp_id;
	int	lu_gp_valid_id;
	u32	lu_gp_members;
	atomic_t	lu_gp_ref_cnt;
	spinlock_t	lu_gp_lock;
	struct config_group	lu_gp_group;
	struct list_head	lu_gp_node;
	struct list_head	lu_gp_mem_list;
};

struct t10_alua_lu_gp_member {
	bool	lu_gp_assoc;
	atomic_t	lu_gp_mem_ref_cnt;
	spinlock_t	lu_gp_mem_lock;
	struct t10_alua_lu_gp	*lu_gp;
	struct se_device	*lu_gp_mem_dev;
	struct list_head	lu_gp_mem_list;
};

struct t10_alua_tg_pt_gp {
	u16	tg_pt_gp_id;
	int	tg_pt_gp_valid_id;
	int	tg_pt_gp_alua_access_status;
	int	tg_pt_gp_alua_access_type;
	int	tg_pt_gp_nonop_delay_msecs;
	int	tg_pt_gp_trans_delay_msecs;
	int	tg_pt_gp_pref;
	int	tg_pt_gp_write_metadata;
	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
#define ALUA_MD_BUF_LEN	1024
	u32	tg_pt_gp_md_buf_len;
	u32	tg_pt_gp_members;
	atomic_t	tg_pt_gp_alua_access_state;
	atomic_t	tg_pt_gp_ref_cnt;
	spinlock_t	tg_pt_gp_lock;
	struct mutex	tg_pt_gp_md_mutex;
	struct se_subsystem_dev	*tg_pt_gp_su_dev;
	struct config_group	tg_pt_gp_group;
	struct list_head	tg_pt_gp_list;
	struct list_head	tg_pt_gp_mem_list;
};

struct t10_alua_tg_pt_gp_member {
	bool	tg_pt_gp_assoc;
	atomic_t	tg_pt_gp_mem_ref_cnt;
	spinlock_t	tg_pt_gp_mem_lock;
	struct t10_alua_tg_pt_gp	*tg_pt_gp;
	struct se_port	*tg_pt;
	struct list_head	tg_pt_gp_mem_list;
};

struct t10_vpd {
	unsigned char	device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
	int	protocol_identifier_set;
	u32	protocol_identifier;
	u32	device_identifier_code_set;
	u32	association;
	u32	device_identifier_type;
	struct list_head	vpd_list;
};

struct t10_wwn {
	char	vendor[8];
	char	model[16];
	char	revision[4];
	char	unit_serial[INQUIRY_VPD_SERIAL_LEN];
	spinlock_t	t10_vpd_lock;
	struct se_subsystem_dev	*t10_sub_dev;
	struct config_group	t10_wwn_group;
	struct list_head	t10_vpd_list;
};
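
/*
 * Illustrative sketch (not part of the original interface): how the t10_wwn
 * identification strings above typically land in standard INQUIRY data
 * (vendor at bytes 8-15, model at 16-31, revision at 32-35, space padded per
 * SPC).  The helper name is hypothetical.
 *
 *	static void example_fill_std_inquiry(struct t10_wwn *wwn, unsigned char *buf)
 *	{
 *		memset(&buf[8], 0x20, 28);	// bytes 8-35 default to ASCII spaces
 *		memcpy(&buf[8],  wwn->vendor,   strnlen(wwn->vendor, 8));
 *		memcpy(&buf[16], wwn->model,    strnlen(wwn->model, 16));
 *		memcpy(&buf[32], wwn->revision, strnlen(wwn->revision, 4));
 *	}
 */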

/*
 * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
 * emulation is enabled or disabled, or running in TCM/pSCSI passthrough
 * mode
 */
typedef enum {
	SPC_PASSTHROUGH,
	SPC2_RESERVATIONS,
	SPC3_PERSISTENT_RESERVATIONS
} t10_reservations_index_t;

struct t10_pr_registration {
	/* Used for fabrics that contain WWN+ISID */
#define PR_REG_ISID_LEN	16
	/* PR_REG_ISID_LEN + ',i,0x' */
#define PR_REG_ISID_ID_LEN	(PR_REG_ISID_LEN + 5)
	char	pr_reg_isid[PR_REG_ISID_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_IPORT_LEN	256
	unsigned char	pr_iport[PR_APTPL_MAX_IPORT_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_TPORT_LEN	256
	unsigned char	pr_tport[PR_APTPL_MAX_TPORT_LEN];
	/* For writing out live meta data */
	unsigned char	*pr_aptpl_buf;
	u16	pr_aptpl_rpti;
	u16	pr_reg_tpgt;
	/* Reservation affects all target ports */
	int	pr_reg_all_tg_pt;
	/* Activate Persistence across Target Power Loss */
	int	pr_reg_aptpl;
	int	pr_res_holder;
	int	pr_res_type;
	int	pr_res_scope;
	/* Used for fabric initiator WWPNs using an ISID */
	bool	isid_present_at_reg;
	u32	pr_res_mapped_lun;
	u32	pr_aptpl_target_lun;
	u32	pr_res_generation;
	u64	pr_reg_bin_isid;
	u64	pr_res_key;
	atomic_t	pr_res_holders;
	struct se_node_acl	*pr_reg_nacl;
	struct se_dev_entry	*pr_reg_deve;
	struct se_lun	*pr_reg_tg_pt_lun;
	struct list_head	pr_reg_list;
	struct list_head	pr_reg_abort_list;
	struct list_head	pr_reg_aptpl_list;
	struct list_head	pr_reg_atp_list;
	struct list_head	pr_reg_atp_mem_list;
};

/*
 * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
 * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
 * core_setup_reservations()
 */
struct t10_reservation_ops {
	int (*t10_reservation_check)(struct se_cmd *, u32 *);
	int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
	int (*t10_pr_register)(struct se_cmd *);
	int (*t10_pr_clear)(struct se_cmd *);
};

struct t10_reservation {
	/* Reservation affects all target ports */
	int	pr_all_tg_pt;
	/* Activate Persistence across Target Power Loss enabled
	 * for SCSI device */
	int	pr_aptpl_active;
	/* Used by struct t10_reservation->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN	8192
	u32	pr_aptpl_buf_len;
	u32	pr_generation;
	t10_reservations_index_t	res_type;
	spinlock_t	registration_lock;
	spinlock_t	aptpl_reg_lock;
	/*
	 * This will always be set by one individual I_T Nexus.
	 * However with all_tg_pt=1, other I_T Nexus from the
	 * same initiator can access PR reg/res info on a different
	 * target port.
	 *
	 * There is also the 'All Registrants' case, where there is
	 * a single *pr_res_holder of the reservation, but all
	 * registrations are considered reservation holders.
	 */
	struct se_node_acl	*pr_res_holder;
	struct list_head	registration_list;
	struct list_head	aptpl_reg_list;
	struct t10_reservation_ops	pr_ops;
};

struct se_queue_obj {
	atomic_t	queue_cnt;
	spinlock_t	cmd_queue_lock;
	struct list_head	qobj_list;
	wait_queue_head_t	thread_wq;
};

struct se_task {
	unsigned long long	task_lba;
	u32	task_sectors;
	u32	task_size;
	struct se_cmd	*task_se_cmd;
	struct scatterlist	*task_sg;
	u32	task_sg_nents;
	u16	task_flags;
	u8	task_scsi_status;
	enum dma_data_direction	task_data_direction;
	struct list_head	t_list;
	struct list_head	t_execute_list;
	struct list_head	t_state_list;
	bool	t_state_active;
	struct completion	task_stop_comp;
};

struct se_tmr_req {
	/* Task Management function to be performed */
	u8	function;
	/* Task Management response to send */
	u8	response;
	int	call_transport;
	/* Reference to the ITT that the Task Mgmt should be performed upon */
	u32	ref_task_tag;
	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
	u64	ref_task_lun;
	void	*fabric_tmr_ptr;
	struct se_cmd	*task_cmd;
	struct se_cmd	*ref_cmd;
	struct se_device	*tmr_dev;
	struct se_lun	*tmr_lun;
	struct list_head	tmr_list;
};
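
/*
 * Illustrative sketch (not part of the original interface): the fields a
 * fabric module typically fills in before handing a TMR to the core, with the
 * function code taken from tcm_tmreq_table and the response later read back
 * from tcm_tmrsp_table.  Allocation and submission helpers are omitted;
 * 'tag_from_fabric' and 'fabric_send_tmr_response()' are hypothetical.
 *
 *	tmr->function     = TMR_LUN_RESET;	// from tcm_tmreq_table
 *	tmr->ref_task_tag = tag_from_fabric;	// only used by TMR_ABORT_TASK
 *
 *	// ... core processes the TMR ...
 *
 *	if (tmr->response == TMR_FUNCTION_COMPLETE)	// from tcm_tmrsp_table
 *		fabric_send_tmr_response(tmr->task_cmd);
 */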

struct se_cmd {
	/* SAM response code being sent to initiator */
	u8	scsi_status;
	u8	scsi_asc;
	u8	scsi_ascq;
	u8	scsi_sense_reason;
	u16	scsi_sense_length;
	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
	int	alua_nonop_delay;
	/* See include/linux/dma-mapping.h */
	enum dma_data_direction	data_direction;
	/* For SAM Task Attribute */
	int	sam_task_attr;
	/* Transport protocol dependent state, see transport_state_table */
	enum transport_state_table	t_state;
	/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
	unsigned	check_release:1;
	unsigned	cmd_wait_set:1;
	/* See se_cmd_flags_table */
	u32	se_cmd_flags;
	u32	se_ordered_id;
	/* Total size in bytes associated with command */
	u32	data_length;
	/* SCSI Presented Data Transfer Length */
	u32	cmd_spdtl;
	u32	residual_count;
	u32	orig_fe_lun;
	/* Persistent Reservation key */
	u64	pr_res_key;
	/* Used for sense data */
	void	*sense_buffer;
	struct list_head	se_delayed_node;
	struct list_head	se_lun_node;
	struct list_head	se_qf_node;
	struct se_device	*se_dev;
	struct se_dev_entry	*se_deve;
	struct se_lun	*se_lun;
	/* Only used for internal passthrough and legacy TCM fabric modules */
	struct se_session	*se_sess;
	struct se_tmr_req	*se_tmr_req;
	struct list_head	se_queue_node;
	struct list_head	se_cmd_list;
	struct completion	cmd_wait_comp;
	struct kref	cmd_kref;
	struct target_core_fabric_ops	*se_tfo;
	int	(*execute_task)(struct se_task *);
	void	(*transport_complete_callback)(struct se_cmd *);

	unsigned char	*t_task_cdb;
	unsigned char	__t_task_cdb[TCM_MAX_COMMAND_SIZE];
	unsigned long long	t_task_lba;
	u32	t_tasks_sg_chained_no;
	atomic_t	t_fe_count;
	atomic_t	t_se_count;
	atomic_t	t_task_cdbs_left;
	atomic_t	t_task_cdbs_ex_left;
	atomic_t	t_task_cdbs_sent;
	unsigned int	transport_state;
#define CMD_T_ABORTED	(1 << 0)
#define CMD_T_ACTIVE	(1 << 1)
#define CMD_T_COMPLETE	(1 << 2)
#define CMD_T_QUEUED	(1 << 3)
#define CMD_T_SENT	(1 << 4)
#define CMD_T_STOP	(1 << 5)
#define CMD_T_FAILED	(1 << 6)
#define CMD_T_LUN_STOP	(1 << 7)
#define CMD_T_LUN_FE_STOP	(1 << 8)
#define CMD_T_DEV_ACTIVE	(1 << 9)
	spinlock_t	t_state_lock;
	struct completion	t_transport_stop_comp;
	struct completion	transport_lun_fe_stop_comp;
	struct completion	transport_lun_stop_comp;
	struct scatterlist	*t_tasks_sg_chained;

	struct work_struct	work;

	struct scatterlist	*t_data_sg;
	unsigned int	t_data_nents;
	void	*t_data_vmap;
	struct scatterlist	*t_bidi_data_sg;
	unsigned int	t_bidi_data_nents;

	/* Used for BIDI READ */
	struct list_head	t_task_list;
	u32	t_task_list_num;

};
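
/*
 * Illustrative sketch (not part of the original interface): the CMD_T_* bits
 * above live in se_cmd->transport_state and are read and modified under
 * se_cmd->t_state_lock.  The helper name is hypothetical.
 *
 *	static bool example_cmd_aborted(struct se_cmd *cmd)
 *	{
 *		unsigned long flags;
 *		bool aborted;
 *
 *		spin_lock_irqsave(&cmd->t_state_lock, flags);
 *		aborted = (cmd->transport_state & CMD_T_ABORTED) != 0;
 *		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 *		return aborted;
 *	}
 */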

struct se_ua {
	u8	ua_asc;
	u8	ua_ascq;
	struct se_node_acl	*ua_nacl;
	struct list_head	ua_dev_list;
	struct list_head	ua_nacl_list;
};

struct se_node_acl {
	char	initiatorname[TRANSPORT_IQN_LEN];
	/* Used to signal demo mode created ACL, disabled by default */
	bool	dynamic_node_acl;
	bool	acl_stop:1;
	u32	queue_depth;
	u32	acl_index;
	u64	num_cmds;
	u64	read_bytes;
	u64	write_bytes;
	spinlock_t	stats_lock;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t	acl_pr_ref_count;
	struct se_dev_entry	**device_list;
	struct se_session	*nacl_sess;
	struct se_portal_group	*se_tpg;
	spinlock_t	device_list_lock;
	spinlock_t	nacl_sess_lock;
	struct config_group	acl_group;
	struct config_group	acl_attrib_group;
	struct config_group	acl_auth_group;
	struct config_group	acl_param_group;
	struct config_group	acl_fabric_stat_group;
	struct config_group	*acl_default_groups[5];
	struct list_head	acl_list;
	struct list_head	acl_sess_list;
	struct completion	acl_free_comp;
	struct kref	acl_kref;
};

struct se_session {
	unsigned	sess_tearing_down:1;
	u64	sess_bin_isid;
	struct se_node_acl	*se_node_acl;
	struct se_portal_group	*se_tpg;
	void	*fabric_sess_ptr;
	struct list_head	sess_list;
	struct list_head	sess_acl_list;
	struct list_head	sess_cmd_list;
	struct list_head	sess_wait_list;
	spinlock_t	sess_cmd_lock;
	struct kref	sess_kref;
};

struct se_device;
struct se_transform_info;
struct scatterlist;

struct se_ml_stat_grps {
	struct config_group	stat_group;
	struct config_group	scsi_auth_intr_group;
	struct config_group	scsi_att_intr_port_group;
};

struct se_lun_acl {
	char	initiatorname[TRANSPORT_IQN_LEN];
	u32	mapped_lun;
	struct se_node_acl	*se_lun_nacl;
	struct se_lun	*se_lun;
	struct list_head	lacl_list;
	struct config_group	se_lun_group;
	struct se_ml_stat_grps	ml_stat_grps;
};

struct se_dev_entry {
	bool	def_pr_registered;
	/* See transport_lunflags_table */
	u32	lun_flags;
	u32	deve_cmds;
	u32	mapped_lun;
	u32	average_bytes;
	u32	last_byte_count;
	u32	total_cmds;
	u32	total_bytes;
	u64	pr_res_key;
	u64	creation_time;
	u32	attach_count;
	u64	read_bytes;
	u64	write_bytes;
	atomic_t	ua_count;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t	pr_ref_count;
	struct se_lun_acl	*se_lun_acl;
	spinlock_t	ua_lock;
	struct se_lun	*se_lun;
	struct list_head	alua_port_list;
	struct list_head	ua_list;
};

struct se_dev_limits {
	/* Max supported HW queue depth */
	u32	hw_queue_depth;
	/* Max supported virtual queue depth */
	u32	queue_depth;
	/* From include/linux/blkdev.h for the other HW/SW limits. */
	struct queue_limits	limits;
};

struct se_dev_attrib {
	int	emulate_dpo;
	int	emulate_fua_write;
	int	emulate_fua_read;
	int	emulate_write_cache;
	int	emulate_ua_intlck_ctrl;
	int	emulate_tas;
	int	emulate_tpu;
	int	emulate_tpws;
	int	emulate_reservations;
	int	emulate_alua;
	int	enforce_pr_isids;
	int	is_nonrot;
	int	emulate_rest_reord;
	u32	hw_block_size;
	u32	block_size;
	u32	hw_max_sectors;
	u32	max_sectors;
	u32	fabric_max_sectors;
	u32	optimal_sectors;
	u32	hw_queue_depth;
	u32	queue_depth;
	u32	max_unmap_lba_count;
	u32	max_unmap_block_desc_count;
	u32	unmap_granularity;
	u32	unmap_granularity_alignment;
	struct se_subsystem_dev	*da_sub_dev;
	struct config_group	da_group;
};

struct se_dev_stat_grps {
	struct config_group	stat_group;
	struct config_group	scsi_dev_group;
	struct config_group	scsi_tgt_dev_group;
	struct config_group	scsi_lu_group;
};

struct se_subsystem_dev {
	/* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
#define SE_DEV_ALIAS_LEN	512
	unsigned char	se_dev_alias[SE_DEV_ALIAS_LEN];
	/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
#define SE_UDEV_PATH_LEN	512
	unsigned char	se_dev_udev_path[SE_UDEV_PATH_LEN];
	u32	su_dev_flags;
	struct se_hba	*se_dev_hba;
	struct se_device	*se_dev_ptr;
	struct se_dev_attrib	se_dev_attrib;
	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
	struct t10_alua	t10_alua;
	/* T10 Inquiry and VPD WWN Information */
	struct t10_wwn	t10_wwn;
	/* T10 SPC-2 + SPC-3 Reservations */
	struct t10_reservation	t10_pr;
	spinlock_t	se_dev_lock;
	void	*se_dev_su_ptr;
	struct config_group	se_dev_group;
	/* For T10 Reservations */
	struct config_group	se_dev_pr_group;
	/* For target_core_stat.c groups */
	struct se_dev_stat_grps	dev_stat_grps;
};
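
/*
 * Illustrative sketch (not part of the original interface): the DA_* defaults
 * near the top of this header are what the core seeds se_dev_attrib with when
 * a backend device is configured; the configfs attrib group can override them
 * afterwards.  Only a few fields are shown and the helper name is
 * hypothetical.
 *
 *	static void example_set_default_attribs(struct se_dev_attrib *attrib)
 *	{
 *		attrib->emulate_dpo         = DA_EMULATE_DPO;
 *		attrib->emulate_fua_write   = DA_EMULATE_FUA_WRITE;
 *		attrib->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
 *		attrib->emulate_tas         = DA_EMULATE_TAS;
 *		attrib->enforce_pr_isids    = DA_ENFORCE_PR_ISIDS;
 *		attrib->is_nonrot           = DA_IS_NONROT;
 *		attrib->fabric_max_sectors  = DA_FABRIC_MAX_SECTORS;
 *	}
 */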

struct se_device {
#define SE_DEV_LINK_MAGIC	0xfeeddeef
	u32	dev_link_magic;
	/* RELATIVE TARGET PORT IDENTIFIER Counter */
	u16	dev_rpti_counter;
	/* Used for SAM Task Attribute ordering */
	u32	dev_cur_ordered_id;
	u32	dev_flags;
	u32	dev_port_count;
	/* See transport_device_status_table */
	u32	dev_status;
	/* Physical device queue depth */
	u32	queue_depth;
	/* Used for SPC-2 reservations enforcement of ISIDs */
	u64	dev_res_bin_isid;
	t10_task_attr_index_t	dev_task_attr_type;
	/* Pointer to transport specific device structure */
	void	*dev_ptr;
	u32	dev_index;
	u64	creation_time;
	u32	num_resets;
	u64	num_cmds;
	u64	read_bytes;
	u64	write_bytes;
	spinlock_t	stats_lock;
	/* Active commands on this virtual SE device */
	atomic_t	simple_cmds;
	atomic_t	dev_ordered_id;
	atomic_t	execute_tasks;
	atomic_t	dev_ordered_sync;
	atomic_t	dev_qf_count;
	struct se_obj	dev_obj;
	struct se_obj	dev_access_obj;
	struct se_obj	dev_export_obj;
	struct se_queue_obj	dev_queue_obj;
	spinlock_t	delayed_cmd_lock;
	spinlock_t	execute_task_lock;
	spinlock_t	dev_reservation_lock;
	spinlock_t	dev_status_lock;
	spinlock_t	se_port_lock;
	spinlock_t	se_tmr_lock;
	spinlock_t	qf_cmd_lock;
	/* Used for legacy SPC-2 reservations */
	struct se_node_acl	*dev_reserved_node_acl;
	/* Used for ALUA Logical Unit Group membership */
	struct t10_alua_lu_gp_member	*dev_alua_lu_gp_mem;
	/* Used for SPC-3 Persistent Reservations */
	struct t10_pr_registration	*dev_pr_res_holder;
	struct list_head	dev_sep_list;
	struct list_head	dev_tmr_list;
	/* Pointer to descriptor for processing thread */
	struct task_struct	*process_thread;
	struct work_struct	qf_work_queue;
	struct list_head	delayed_cmd_list;
	struct list_head	execute_task_list;
	struct list_head	state_task_list;
	struct list_head	qf_cmd_list;
	/* Pointer to associated SE HBA */
	struct se_hba	*se_hba;
	struct se_subsystem_dev	*se_sub_dev;
	/* Pointer to template of function pointers for transport */
	struct se_subsystem_api	*transport;
	/* Linked list for struct se_hba's struct se_device list */
	struct list_head	dev_list;
};
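
/*
 * Illustrative sketch (not part of the original interface): SE_DEV_LINK_MAGIC
 * is stamped into dev_link_magic when the se_device is set up and checked
 * again at configfs port link time to catch a LUN being linked against a
 * stale or bogus se_device pointer, e.g.:
 *
 *	if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
 *		pr_err("Bad dev->dev_link_magic, not a valid se_dev_ptr\n");
 *		return -EINVAL;
 *	}
 */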

struct se_hba {
	u16	hba_tpgt;
	u32	hba_id;
	/* See hba_flags_table */
	u32	hba_flags;
	/* Virtual SE devices attached. */
	u32	dev_count;
	u32	hba_index;
	/* Pointer to transport specific host structure. */
	void	*hba_ptr;
	/* Linked list for struct se_device */
	struct list_head	hba_dev_list;
	struct list_head	hba_node;
	spinlock_t	device_lock;
	struct config_group	hba_group;
	struct mutex	hba_access_mutex;
	struct se_subsystem_api	*transport;
};

struct se_port_stat_grps {
	struct config_group	stat_group;
	struct config_group	scsi_port_group;
	struct config_group	scsi_tgt_port_group;
	struct config_group	scsi_transport_group;
};

struct se_lun {
#define SE_LUN_LINK_MAGIC	0xffff7771
	u32	lun_link_magic;
	/* See transport_lun_status_table */
	enum transport_lun_status_table	lun_status;
	u32	lun_access;
	u32	lun_flags;
	u32	unpacked_lun;
	atomic_t	lun_acl_count;
	spinlock_t	lun_acl_lock;
	spinlock_t	lun_cmd_lock;
	spinlock_t	lun_sep_lock;
	struct completion	lun_shutdown_comp;
	struct list_head	lun_cmd_list;
	struct list_head	lun_acl_list;
	struct se_device	*lun_se_dev;
	struct se_port	*lun_sep;
	struct config_group	lun_group;
	struct se_port_stat_grps	port_stat_grps;
};

struct scsi_port_stats {
	u64	cmd_pdus;
	u64	tx_data_octets;
	u64	rx_data_octets;
};

struct se_port {
	/* RELATIVE TARGET PORT IDENTIFIER */
	u16	sep_rtpi;
	int	sep_tg_pt_secondary_stat;
	int	sep_tg_pt_secondary_write_md;
	u32	sep_index;
	struct scsi_port_stats	sep_stats;
	/* Used for ALUA Target Port Groups membership */
	atomic_t	sep_tg_pt_secondary_offline;
	/* Used for PR ALL_TG_PT=1 */
	atomic_t	sep_tg_pt_ref_cnt;
	spinlock_t	sep_alua_lock;
	struct mutex	sep_tg_pt_md_mutex;
	struct t10_alua_tg_pt_gp_member	*sep_alua_tg_pt_gp_mem;
	struct se_lun	*sep_lun;
	struct se_portal_group	*sep_tpg;
	struct list_head	sep_alua_list;
	struct list_head	sep_list;
};

struct se_tpg_np {
	struct se_portal_group	*tpg_np_parent;
	struct config_group	tpg_np_group;
};

struct se_portal_group {
	/* Type of target portal group, see transport_tpg_type_table */
	enum transport_tpg_type_table	se_tpg_type;
	/* Number of ACLed Initiator Nodes for this TPG */
	u32	num_node_acls;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t	tpg_pr_ref_count;
	/* Spinlock for adding/removing ACLed Nodes */
	spinlock_t	acl_node_lock;
	/* Spinlock for adding/removing sessions */
	spinlock_t	session_lock;
	spinlock_t	tpg_lun_lock;
	/* Pointer to $FABRIC_MOD portal group */
	void	*se_tpg_fabric_ptr;
	struct list_head	se_tpg_node;
	/* linked list for initiator ACL list */
	struct list_head	acl_node_list;
	struct se_lun	**tpg_lun_list;
	struct se_lun	tpg_virt_lun0;
	/* List of TCM sessions associated with this TPG */
	struct list_head	tpg_sess_list;
	/* Pointer to $FABRIC_MOD dependent code */
	struct target_core_fabric_ops	*se_tpg_tfo;
	struct se_wwn	*se_tpg_wwn;
	struct config_group	tpg_group;
	struct config_group	*tpg_default_groups[6];
	struct config_group	tpg_lun_group;
	struct config_group	tpg_np_group;
	struct config_group	tpg_acl_group;
	struct config_group	tpg_attrib_group;
	struct config_group	tpg_param_group;
};

struct se_wwn {
	struct target_fabric_configfs	*wwn_tf;
	struct config_group	wwn_group;
	struct config_group	*wwn_default_groups[2];
	struct config_group	fabric_stat_group;
};

#endif /* TARGET_CORE_BASE_H */