/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_

/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe

#define DEVICE_VERSION_1		0x100
#define DEVICE_VERSION_2		0x200

#define IDXD_MMIO_BAR			0
#define IDXD_WQ_BAR			2
#define IDXD_PORTAL_SIZE		PAGE_SIZE

/* MMIO Device BAR0 Registers */
#define IDXD_VER_OFFSET			0x00
#define IDXD_VER_MAJOR_MASK		0xf0
#define IDXD_VER_MINOR_MASK		0x0f
#define GET_IDXD_VER_MAJOR(x)		(((x) & IDXD_VER_MAJOR_MASK) >> 4)
#define GET_IDXD_VER_MINOR(x)		((x) & IDXD_VER_MINOR_MASK)

union gen_cap_reg {
	struct {
		u64 block_on_fault:1;
		u64 overlap_copy:1;
		u64 cache_control_mem:1;
		u64 cache_control_cache:1;
		u64 cmd_cap:1;
		u64 rsvd:3;
		u64 dest_readback:1;
		u64 drain_readback:1;
		u64 rsvd2:6;
		u64 max_xfer_shift:5;
		u64 max_batch_shift:4;
		u64 max_ims_mult:6;
		u64 config_en:1;
		u64 rsvd3:32;
	};
	u64 bits;
} __packed;
#define IDXD_GENCAP_OFFSET		0x10

union wq_cap_reg {
	struct {
		u64 total_wq_size:16;
		u64 num_wqs:8;
		u64 wqcfg_size:4;
		u64 rsvd:20;
		u64 shared_mode:1;
		u64 dedicated_mode:1;
		u64 wq_ats_support:1;
		u64 priority:1;
		u64 occupancy:1;
		u64 occupancy_int:1;
		u64 op_config:1;
		u64 rsvd3:9;
	};
	u64 bits;
} __packed;
#define IDXD_WQCAP_OFFSET		0x20
#define IDXD_WQCFG_MIN			5

union group_cap_reg {
	struct {
		u64 num_groups:8;
		u64 total_rdbufs:8;	/* formerly total_tokens */
		u64 rdbuf_ctrl:1;	/* formerly token_en */
		u64 rdbuf_limit:1;	/* formerly token_limit */
		u64 progress_limit:1;	/* descriptor and batch descriptor */
		u64 rsvd:45;
	};
	u64 bits;
} __packed;
#define IDXD_GRPCAP_OFFSET		0x30

union engine_cap_reg {
	struct {
		u64 num_engines:8;
		u64 rsvd:56;
	};
	u64 bits;
} __packed;

#define IDXD_ENGCAP_OFFSET		0x38

#define IDXD_OPCAP_NOOP			0x0001
#define IDXD_OPCAP_BATCH		0x0002
#define IDXD_OPCAP_MEMMOVE		0x0008
struct opcap {
	u64 bits[4];
};

#define IDXD_MAX_OPCAP_BITS		256U

#define IDXD_OPCAP_OFFSET		0x40

#define IDXD_TABLE_OFFSET		0x60
union offsets_reg {
	struct {
		u64 grpcfg:16;
		u64 wqcfg:16;
		u64 msix_perm:16;
		u64 ims:16;
		u64 perfmon:16;
		u64 rsvd:48;
	};
	u64 bits[2];
} __packed;

#define IDXD_TABLE_MULT			0x100

#define IDXD_GENCFG_OFFSET		0x80
union gencfg_reg {
	struct {
		u32 rdbuf_limit:8;
		u32 rsvd:4;
		u32 user_int_en:1;
		u32 rsvd2:19;
	};
	u32 bits;
} __packed;

#define IDXD_GENCTRL_OFFSET		0x88
union genctrl_reg {
	struct {
		u32 softerr_int_en:1;
		u32 halt_int_en:1;
		u32 rsvd:30;
	};
	u32 bits;
} __packed;

#define IDXD_GENSTATS_OFFSET		0x90
union gensts_reg {
	struct {
		u32 state:2;
		u32 reset_type:2;
		u32 rsvd:28;
	};
	u32 bits;
} __packed;

enum idxd_device_status_state {
	IDXD_DEVICE_STATE_DISABLED = 0,
	IDXD_DEVICE_STATE_ENABLED,
	IDXD_DEVICE_STATE_DRAIN,
	IDXD_DEVICE_STATE_HALT,
};

enum idxd_device_reset_type {
	IDXD_DEVICE_RESET_SOFTWARE = 0,
	IDXD_DEVICE_RESET_FLR,
	IDXD_DEVICE_RESET_WARM,
	IDXD_DEVICE_RESET_COLD,
};

#define IDXD_INTCAUSE_OFFSET		0x98
#define IDXD_INTC_ERR			0x01
#define IDXD_INTC_CMD			0x02
#define IDXD_INTC_OCCUPY		0x04
#define IDXD_INTC_PERFMON_OVFL		0x08
#define IDXD_INTC_HALT_STATE		0x10
#define IDXD_INTC_INT_HANDLE_REVOKED	0x80000000
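/*
 * Usage sketch (illustrative only, not part of the register definitions):
 * an interrupt handler could decode the cause bits and the general status
 * register roughly as below.  @reg_base is an assumed ioremap() mapping of
 * BAR0 (IDXD_MMIO_BAR) and @dev an assumed struct device pointer; neither is
 * defined in this header.
 *
 *	union gensts_reg gensts;
 *	u32 cause;
 *
 *	cause = ioread32(reg_base + IDXD_INTCAUSE_OFFSET);
 *	if (cause & IDXD_INTC_HALT_STATE) {
 *		gensts.bits = ioread32(reg_base + IDXD_GENSTATS_OFFSET);
 *		if (gensts.state == IDXD_DEVICE_STATE_HALT)
 *			dev_err(dev, "device halted, reset_type %u\n",
 *				gensts.reset_type);
 *	}
 */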
#define IDXD_CMD_OFFSET			0xa0
union idxd_command_reg {
	struct {
		u32 operand:20;
		u32 cmd:5;
		u32 rsvd:6;
		u32 int_req:1;
	};
	u32 bits;
} __packed;

enum idxd_cmd {
	IDXD_CMD_ENABLE_DEVICE = 1,
	IDXD_CMD_DISABLE_DEVICE,
	IDXD_CMD_DRAIN_ALL,
	IDXD_CMD_ABORT_ALL,
	IDXD_CMD_RESET_DEVICE,
	IDXD_CMD_ENABLE_WQ,
	IDXD_CMD_DISABLE_WQ,
	IDXD_CMD_DRAIN_WQ,
	IDXD_CMD_ABORT_WQ,
	IDXD_CMD_RESET_WQ,
	IDXD_CMD_DRAIN_PASID,
	IDXD_CMD_ABORT_PASID,
	IDXD_CMD_REQUEST_INT_HANDLE,
	IDXD_CMD_RELEASE_INT_HANDLE,
};

#define CMD_INT_HANDLE_IMS		0x10000

#define IDXD_CMDSTS_OFFSET		0xa8
union cmdsts_reg {
	struct {
		u8 err;
		u16 result;
		u8 rsvd:7;
		u8 active:1;
	};
	u32 bits;
} __packed;
#define IDXD_CMDSTS_ACTIVE		0x80000000
#define IDXD_CMDSTS_ERR_MASK		0xff
#define IDXD_CMDSTS_RES_SHIFT		8

enum idxd_cmdsts_err {
	IDXD_CMDSTS_SUCCESS = 0,
	IDXD_CMDSTS_INVAL_CMD,
	IDXD_CMDSTS_INVAL_WQIDX,
	IDXD_CMDSTS_HW_ERR,
	/* enable device errors */
	IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
	IDXD_CMDSTS_ERR_CONFIG,
	IDXD_CMDSTS_ERR_BUSMASTER_EN,
	IDXD_CMDSTS_ERR_PASID_INVAL,
	IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
	IDXD_CMDSTS_ERR_GRP_CONFIG,
	IDXD_CMDSTS_ERR_GRP_CONFIG2,
	IDXD_CMDSTS_ERR_GRP_CONFIG3,
	IDXD_CMDSTS_ERR_GRP_CONFIG4,
	/* enable wq errors */
	IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
	IDXD_CMDSTS_ERR_WQ_ENABLED,
	IDXD_CMDSTS_ERR_WQ_SIZE,
	IDXD_CMDSTS_ERR_WQ_PRIOR,
	IDXD_CMDSTS_ERR_WQ_MODE,
	IDXD_CMDSTS_ERR_BOF_EN,
	IDXD_CMDSTS_ERR_PASID_EN,
	IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
	IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
	/* disable device errors */
	IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
	/* disable WQ, drain WQ, abort WQ, reset WQ */
	IDXD_CMDSTS_ERR_DEV_NOT_EN,
	/* request interrupt handle */
	IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
	IDXD_CMDSTS_ERR_NO_HANDLE,
};
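/*
 * Usage sketch (illustrative only): issuing a device command and busy-polling
 * for completion with the definitions above.  @reg_base is an assumed
 * ioremap() mapping of BAR0; a real driver would typically serialize command
 * submission and wait for the IDXD_INTC_CMD interrupt instead of polling.
 *
 *	union idxd_command_reg cmd = { .cmd = IDXD_CMD_ENABLE_DEVICE };
 *	union cmdsts_reg sts;
 *
 *	iowrite32(cmd.bits, reg_base + IDXD_CMD_OFFSET);
 *	do {
 *		sts.bits = ioread32(reg_base + IDXD_CMDSTS_OFFSET);
 *	} while (sts.bits & IDXD_CMDSTS_ACTIVE);
 *	if ((sts.bits & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS)
 *		pr_err("idxd command error %#x\n", sts.bits & IDXD_CMDSTS_ERR_MASK);
 */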
#define IDXD_CMDCAP_OFFSET		0xb0

#define IDXD_SWERR_OFFSET		0xc0
#define IDXD_SWERR_VALID		0x00000001
#define IDXD_SWERR_OVERFLOW		0x00000002
#define IDXD_SWERR_ACK			(IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
union sw_err_reg {
	struct {
		u64 valid:1;
		u64 overflow:1;
		u64 desc_valid:1;
		u64 wq_idx_valid:1;
		u64 batch:1;
		u64 fault_rw:1;
		u64 priv:1;
		u64 rsvd:1;
		u64 error:8;
		u64 wq_idx:8;
		u64 rsvd2:8;
		u64 operation:8;
		u64 pasid:20;
		u64 rsvd3:4;

		u64 batch_idx:16;
		u64 rsvd4:16;
		u64 invalid_flags:32;

		u64 fault_addr;

		u64 rsvd5;
	};
	u64 bits[4];
} __packed;

union msix_perm {
	struct {
		u32 rsvd:2;
		u32 ignore:1;
		u32 pasid_en:1;
		u32 rsvd2:8;
		u32 pasid:20;
	};
	u32 bits;
} __packed;

union group_flags {
	struct {
		u64 tc_a:3;
		u64 tc_b:3;
		u64 rsvd:1;
		u64 use_rdbuf_limit:1;
		u64 rdbufs_reserved:8;
		u64 rsvd2:4;
		u64 rdbufs_allowed:8;
		u64 rsvd3:4;
		u64 desc_progress_limit:2;
		u64 rsvd4:2;
		u64 batch_progress_limit:2;
		u64 rsvd5:26;
	};
	u64 bits;
} __packed;

struct grpcfg {
	u64 wqs[4];
	u64 engines;
	union group_flags flags;
} __packed;

union wqcfg {
	struct {
		/* bytes 0-3 */
		u16 wq_size;
		u16 rsvd;

		/* bytes 4-7 */
		u16 wq_thresh;
		u16 rsvd1;

		/* bytes 8-11 */
		u32 mode:1;	/* shared or dedicated */
		u32 bof:1;	/* block on fault */
		u32 wq_ats_disable:1;
		u32 rsvd2:1;
		u32 priority:4;
		u32 pasid:20;
		u32 pasid_en:1;
		u32 priv:1;
		u32 rsvd3:2;

		/* bytes 12-15 */
		u32 max_xfer_shift:5;
		u32 max_batch_shift:4;
		u32 rsvd4:23;

		/* bytes 16-19 */
		u16 occupancy_inth;
		u16 occupancy_table_sel:1;
		u16 rsvd5:15;

		/* bytes 20-23 */
		u16 occupancy_limit;
		u16 occupancy_int_en:1;
		u16 rsvd6:15;

		/* bytes 24-27 */
		u16 occupancy;
		u16 occupancy_int:1;
		u16 rsvd7:12;
		u16 mode_support:1;
		u16 wq_state:2;

		/* bytes 28-31 */
		u32 rsvd8;

		/* bytes 32-63 */
		u64 op_config[4];
	};
	u32 bits[16];
} __packed;

#define WQCFG_PASID_IDX			2
#define WQCFG_PRIVL_IDX			2
#define WQCFG_OCCUP_IDX			6

#define WQCFG_OCCUP_MASK		0xffff

/*
 * This macro calculates the offset into the WQCFG register
 * idxd - struct idxd *
 * n - wq id
 * ofs - the index of the 32b dword for the config register
 *
 * The WQCFG register block is divided into groups per each wq. The n index
 * allows us to move to the register group that's for that particular wq.
 * Each register is 32 bits. The ofs gives us the index of the register to
 * access within that group.
 */
#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
({\
	typeof(_idxd_dev) __idxd_dev = (_idxd_dev);	\
	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs);	\
})

#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
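/*
 * Usage sketch (illustrative only): reading the occupancy field of wq n's
 * configuration with the macros above.  The reg_base, wqcfg_offset and
 * wqcfg_size members of struct idxd are assumed to be provided elsewhere in
 * the driver; they are not defined in this header.
 *
 *	u32 dword = ioread32(idxd->reg_base +
 *			     WQCFG_OFFSET(idxd, n, WQCFG_OCCUP_IDX));
 *	u16 occupancy = dword & WQCFG_OCCUP_MASK;
 */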
#define GRPCFG_SIZE			64
#define GRPWQCFG_STRIDES		4

/*
 * This macro calculates the offset into the GRPCFG register
 * idxd - struct idxd *
 * n - group id
 * ofs - the index of the 64b qword for the config register
 *
 * Each group owns a GRPCFG_SIZE byte block that holds GRPWQCFG (bytes 0-31),
 * GRPENGCFG (bytes 32-39) and GRPFLGCFG (bytes 40-47), matching struct grpcfg.
 * The n index moves to the block for that particular group, and ofs selects
 * the 64-bit GRPWQCFG qword within it.
 */
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
					   (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)

/* Performance monitor registers */
#define IDXD_PERFCAP_OFFSET		0x0
union idxd_perfcap {
	struct {
		u64 num_perf_counter:6;
		u64 rsvd1:2;
		u64 counter_width:8;
		u64 num_event_category:4;
		u64 global_event_category:16;
		u64 filter:8;
		u64 rsvd2:8;
		u64 cap_per_counter:1;
		u64 writeable_counter:1;
		u64 counter_freeze:1;
		u64 overflow_interrupt:1;
		u64 rsvd3:8;
	};
	u64 bits;
} __packed;

#define IDXD_EVNTCAP_OFFSET		0x80
union idxd_evntcap {
	struct {
		u64 events:28;
		u64 rsvd:36;
	};
	u64 bits;
} __packed;

struct idxd_event {
	union {
		struct {
			u32 event_category:4;
			u32 events:28;
		};
		u32 val;
	};
} __packed;

#define IDXD_CNTRCAP_OFFSET		0x800
struct idxd_cntrcap {
	union {
		struct {
			u32 counter_width:8;
			u32 rsvd:20;
			u32 num_events:4;
		};
		u32 val;
	};
	struct idxd_event events[];
} __packed;

#define IDXD_PERFRST_OFFSET		0x10
union idxd_perfrst {
	struct {
		u32 perfrst_config:1;
		u32 perfrst_counter:1;
		u32 rsvd:30;
	};
	u32 val;
} __packed;

#define IDXD_OVFSTATUS_OFFSET		0x30
#define IDXD_PERFFRZ_OFFSET		0x20
#define IDXD_CNTRCFG_OFFSET		0x100
union idxd_cntrcfg {
	struct {
		u64 enable:1;
		u64 interrupt_ovf:1;
		u64 global_freeze_ovf:1;
		u64 rsvd1:5;
		u64 event_category:4;
		u64 rsvd2:20;
		u64 events:28;
		u64 rsvd3:4;
	};
	u64 val;
} __packed;

#define IDXD_FLTCFG_OFFSET		0x300

#define IDXD_CNTRDATA_OFFSET		0x200
union idxd_cntrdata {
	struct {
		u64 event_count_value;
	};
	u64 val;
} __packed;

union event_cfg {
	struct {
		u64 event_cat:4;
		u64 event_enc:28;
	};
	u64 val;
} __packed;

union filter_cfg {
	struct {
		u64 wq:32;
		u64 tc:8;
		u64 pg_sz:4;
		u64 xfer_sz:8;
		u64 eng:8;
	};
	u64 val;
} __packed;

#endif