Lines Matching refs:idxd

36 struct idxd_device *idxd = engine->idxd; in engine_group_id_store() local
45 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in engine_group_id_store()
48 if (id > idxd->max_groups - 1 || id < -1) in engine_group_id_store()
63 engine->group = idxd->groups[id]; in engine_group_id_store()
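
A minimal sketch of the whole handler, reconstructed from the matches above. The confdev_to_engine() accessor, the kstrtol() parse, the id == -1 detach path, and the error codes are assumptions not shown in the listing, and any per-group engine bookkeeping is omitted:

	static ssize_t engine_group_id_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
	{
		struct idxd_engine *engine = confdev_to_engine(dev);	/* assumed accessor */
		struct idxd_device *idxd = engine->idxd;
		long id;

		if (kstrtol(buf, 10, &id) < 0)		/* assumed parse step */
			return -EINVAL;

		/* refuse writes when the device is not driver-configurable */
		if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			return -EPERM;

		/* valid ids run from -1 (no group) to max_groups - 1 */
		if (id > idxd->max_groups - 1 || id < -1)
			return -EINVAL;

		if (id == -1) {				/* assumed: -1 detaches */
			engine->group = NULL;
			return count;
		}

		engine->group = idxd->groups[id];
		return count;
	}

The same ladder, parse the value, gate on IDXD_FLAG_CONFIGURABLE (and usually on idxd->state), bounds-check, then commit, repeats in nearly every *_store handler matched below.
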
102 static void idxd_set_free_rdbufs(struct idxd_device *idxd) in idxd_set_free_rdbufs() argument
106 for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) { in idxd_set_free_rdbufs()
107 struct idxd_group *g = idxd->groups[i]; in idxd_set_free_rdbufs()
112 idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs; in idxd_set_free_rdbufs()
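
The free read-buffer accounting is almost fully visible in the matches; only the per-group accumulation inside the loop is inferred. A sketch under that assumption:

	static void idxd_set_free_rdbufs(struct idxd_device *idxd)
	{
		int i, rdbufs;

		for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
			struct idxd_group *g = idxd->groups[i];

			rdbufs += g->rdbufs_reserved;	/* assumed: sum per-group reservations */
		}

		/* free buffers = device total minus everything the groups reserved */
		idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
	}
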
137 struct idxd_device *idxd = group->idxd; in group_read_buffers_reserved_store() local
145 if (idxd->data->type == IDXD_TYPE_IAX) in group_read_buffers_reserved_store()
148 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_read_buffers_reserved_store()
151 if (idxd->state == IDXD_DEV_ENABLED) in group_read_buffers_reserved_store()
154 if (val > idxd->max_rdbufs) in group_read_buffers_reserved_store()
157 if (val > idxd->nr_rdbufs + group->rdbufs_reserved) in group_read_buffers_reserved_store()
161 idxd_set_free_rdbufs(idxd); in group_read_buffers_reserved_store()
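
The bound at line 157 is the interesting step: a group may grow its reservation only into buffers that are currently free plus the ones it already holds, after which idxd_set_free_rdbufs() re-derives idxd->nr_rdbufs. In context (commit step and error code assumed):

	/* the new value may consume free buffers plus this group's current share */
	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;			/* assumed error code */

	group->rdbufs_reserved = val;		/* assumed commit */
	idxd_set_free_rdbufs(idxd);		/* recompute the free count */
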
203 struct idxd_device *idxd = group->idxd; in group_read_buffers_allowed_store() local
211 if (idxd->data->type == IDXD_TYPE_IAX) in group_read_buffers_allowed_store()
214 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_read_buffers_allowed_store()
217 if (idxd->state == IDXD_DEV_ENABLED) in group_read_buffers_allowed_store()
221 val > group->rdbufs_reserved + idxd->nr_rdbufs) in group_read_buffers_allowed_store()
266 struct idxd_device *idxd = group->idxd; in group_use_read_buffer_limit_store() local
274 if (idxd->data->type == IDXD_TYPE_IAX) in group_use_read_buffer_limit_store()
277 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_use_read_buffer_limit_store()
280 if (idxd->state == IDXD_DEV_ENABLED) in group_use_read_buffer_limit_store()
283 if (idxd->rdbuf_limit == 0) in group_use_read_buffer_limit_store()
311 struct idxd_device *idxd = group->idxd; in group_engines_show() local
313 for (i = 0; i < idxd->max_engines; i++) { in group_engines_show()
314 struct idxd_engine *engine = idxd->engines[i]; in group_engines_show()
320 rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id); in group_engines_show()
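
group_engines_show() builds its output with sysfs_emit_at(), appending at the running offset rc. A sketch; the membership filter and the trailing-space-to-newline fixup are assumptions:

	static ssize_t group_engines_show(struct device *dev,
					  struct device_attribute *attr, char *buf)
	{
		struct idxd_group *group = confdev_to_group(dev);	/* assumed accessor */
		struct idxd_device *idxd = group->idxd;
		int i, rc = 0;

		for (i = 0; i < idxd->max_engines; i++) {
			struct idxd_engine *engine = idxd->engines[i];

			if (!engine->group || engine->group->id != group->id)
				continue;		/* assumed membership test */

			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
		}

		if (!rc)
			return 0;
		rc--;					/* assumed: overwrite trailing space */
		rc += sysfs_emit_at(buf, rc, "\n");
		return rc;
	}

group_work_queues_show() below follows the same shape with "wq%d.%d " entries.
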
339 struct idxd_device *idxd = group->idxd; in group_work_queues_show() local
341 for (i = 0; i < idxd->max_wqs; i++) { in group_work_queues_show()
342 struct idxd_wq *wq = idxd->wqs[i]; in group_work_queues_show()
348 rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id); in group_work_queues_show()
376 struct idxd_device *idxd = group->idxd; in group_traffic_class_a_store() local
384 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_traffic_class_a_store()
387 if (idxd->state == IDXD_DEV_ENABLED) in group_traffic_class_a_store()
390 if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) in group_traffic_class_a_store()
418 struct idxd_device *idxd = group->idxd; in group_traffic_class_b_store() local
426 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_traffic_class_b_store()
429 if (idxd->state == IDXD_DEV_ENABLED) in group_traffic_class_b_store()
432 if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) in group_traffic_class_b_store()
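
Both traffic-class handlers share the gate at lines 390 and 432: on hardware older than DEVICE_VERSION_2, the traffic class is writable only if the driver's tc_override module parameter was set at load time. In context (error code assumed):

	/* DSA 1.x devices: traffic classes are fixed unless tc_override was given */
	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;		/* assumed error code */
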
524 struct idxd_device *idxd) in idxd_group_attr_progress_limit_invisible() argument
528 !idxd->hw.group_cap.progress_limit; in idxd_group_attr_progress_limit_invisible()
532 struct idxd_device *idxd) in idxd_group_attr_read_buffers_invisible() argument
544 idxd->data->type == IDXD_TYPE_IAX; in idxd_group_attr_read_buffers_invisible()
552 struct idxd_device *idxd = group->idxd; in idxd_group_attr_visible() local
554 if (idxd_group_attr_progress_limit_invisible(attr, idxd)) in idxd_group_attr_visible()
557 if (idxd_group_attr_read_buffers_invisible(attr, idxd)) in idxd_group_attr_visible()
632 struct idxd_device *idxd = wq->idxd; in wq_group_id_store() local
641 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_group_id_store()
647 if (id > idxd->max_groups - 1 || id < -1) in wq_group_id_store()
658 group = idxd->groups[id]; in wq_group_id_store()
684 struct idxd_device *idxd = wq->idxd; in wq_mode_store() local
686 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_mode_store()
715 static int total_claimed_wq_size(struct idxd_device *idxd) in total_claimed_wq_size() argument
720 for (i = 0; i < idxd->max_wqs; i++) { in total_claimed_wq_size()
721 struct idxd_wq *wq = idxd->wqs[i]; in total_claimed_wq_size()
735 struct idxd_device *idxd = wq->idxd; in wq_size_store() local
742 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_size_store()
745 if (idxd->state == IDXD_DEV_ENABLED) in wq_size_store()
748 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) in wq_size_store()
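
total_claimed_wq_size() sums the sizes every queue currently claims; wq_size_store() then admits a new size only if the device-wide budget still covers it once this queue's old size is given back. A sketch; the per-queue accumulation and the error code are assumptions:

	static int total_claimed_wq_size(struct idxd_device *idxd)
	{
		int i, wq_size = 0;

		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			wq_size += wq->size;	/* assumed accumulation */
		}
		return wq_size;
	}

	/* in wq_size_store(): the requested size must fit the device budget
	 * after this queue's currently claimed size is released
	 */
	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;			/* assumed error code */
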
772 struct idxd_device *idxd = wq->idxd; in wq_priority_store() local
779 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_priority_store()
808 struct idxd_device *idxd = wq->idxd; in wq_block_on_fault_store() local
812 if (!idxd->hw.gen_cap.block_on_fault) in wq_block_on_fault_store()
815 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_block_on_fault_store()
850 struct idxd_device *idxd = wq->idxd; in wq_threshold_store() local
861 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_threshold_store()
951 if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd)) in wq_name_store()
1014 struct idxd_device *idxd = wq->idxd; in wq_max_transfer_size_store() local
1018 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_max_transfer_size_store()
1028 if (xfer_size > idxd->max_xfer_bytes) in wq_max_transfer_size_store()
1051 struct idxd_device *idxd = wq->idxd; in wq_max_batch_size_store() local
1055 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_max_batch_size_store()
1065 if (batch_size > idxd->max_batch_size) in wq_max_batch_size_store()
1068 idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size); in wq_max_batch_size_store()
1087 struct idxd_device *idxd = wq->idxd; in wq_ats_disable_store() local
1094 if (!idxd->hw.wq_cap.wq_ats_support) in wq_ats_disable_store()
1115 struct idxd_device *idxd = wq->idxd; in wq_occupancy_show() local
1118 if (!idxd->hw.wq_cap.occupancy) in wq_occupancy_show()
1121 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX); in wq_occupancy_show()
1122 occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK; in wq_occupancy_show()
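
wq_occupancy_show() is one of the few attributes that reads the hardware directly: gated on wq_cap.occupancy, it computes the per-queue WQCFG register offset and masks the occupancy field out of a 32-bit MMIO read. Reconstructed sketch (accessor, error code, and output format assumed):

	static ssize_t wq_occupancy_show(struct device *dev,
					 struct device_attribute *attr, char *buf)
	{
		struct idxd_wq *wq = confdev_to_wq(dev);	/* assumed accessor */
		struct idxd_device *idxd = wq->idxd;
		u32 occup, offset;

		if (!idxd->hw.wq_cap.occupancy)
			return -EOPNOTSUPP;		/* assumed error code */

		offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
		occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

		return sysfs_emit(buf, "%u\n", occup);	/* assumed output format */
	}
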
1173 static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask) in idxd_verify_supported_opcap() argument
1184 if (!test_bit(bit, idxd->opcap_bmap)) in idxd_verify_supported_opcap()
1195 struct idxd_device *idxd = wq->idxd; in wq_op_config_store() local
1210 rc = idxd_verify_supported_opcap(idxd, opmask); in wq_op_config_store()
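
wq_op_config_store() parses a user-supplied bitmap and validates it at line 1210 through idxd_verify_supported_opcap(), which rejects any requested operation bit the device's capability bitmap does not advertise. The check at line 1184 is shown in the matches; the for_each_set_bit() iteration around it is an assumption:

	static int idxd_verify_supported_opcap(struct idxd_device *idxd,
					       unsigned long *opmask)
	{
		int bit;

		/* every bit requested in opmask must also be set in opcap_bmap */
		for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {	/* assumed iterator */
			if (!test_bit(bit, idxd->opcap_bmap))
				return -EINVAL;
		}
		return 0;
	}
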
1249 struct idxd_device *idxd) in idxd_wq_attr_op_config_invisible() argument
1252 !idxd->hw.wq_cap.op_config; in idxd_wq_attr_op_config_invisible()
1256 struct idxd_device *idxd) in idxd_wq_attr_max_batch_size_invisible() argument
1260 idxd->data->type == IDXD_TYPE_IAX; in idxd_wq_attr_max_batch_size_invisible()
1268 struct idxd_device *idxd = wq->idxd; in idxd_wq_attr_visible() local
1270 if (idxd_wq_attr_op_config_invisible(attr, idxd)) in idxd_wq_attr_visible()
1273 if (idxd_wq_attr_max_batch_size_invisible(attr, idxd)) in idxd_wq_attr_visible()
1308 struct idxd_device *idxd = confdev_to_idxd(dev); in version_show() local
1310 return sysfs_emit(buf, "%#x\n", idxd->hw.version); in version_show()
1318 struct idxd_device *idxd = confdev_to_idxd(dev); in max_work_queues_size_show() local
1320 return sysfs_emit(buf, "%u\n", idxd->max_wq_size); in max_work_queues_size_show()
1327 struct idxd_device *idxd = confdev_to_idxd(dev); in max_groups_show() local
1329 return sysfs_emit(buf, "%u\n", idxd->max_groups); in max_groups_show()
1336 struct idxd_device *idxd = confdev_to_idxd(dev); in max_work_queues_show() local
1338 return sysfs_emit(buf, "%u\n", idxd->max_wqs); in max_work_queues_show()
1345 struct idxd_device *idxd = confdev_to_idxd(dev); in max_engines_show() local
1347 return sysfs_emit(buf, "%u\n", idxd->max_engines); in max_engines_show()
1354 struct idxd_device *idxd = confdev_to_idxd(dev); in numa_node_show() local
1356 return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); in numa_node_show()
1363 struct idxd_device *idxd = confdev_to_idxd(dev); in max_batch_size_show() local
1365 return sysfs_emit(buf, "%u\n", idxd->max_batch_size); in max_batch_size_show()
1373 struct idxd_device *idxd = confdev_to_idxd(dev); in max_transfer_size_show() local
1375 return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes); in max_transfer_size_show()
1382 struct idxd_device *idxd = confdev_to_idxd(dev); in op_cap_show() local
1384 return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap); in op_cap_show()
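
op_cap_show() leans on the kernel's %*pb vsprintf extension, which prints a bitmap of the given bit-width as zero-padded 32-bit hex words, most significant first, separated by commas. A hypothetical standalone illustration, not idxd code:

	DECLARE_BITMAP(example, 64);

	bitmap_zero(example, 64);
	set_bit(0, example);		/* word 0, bit 0 */
	set_bit(35, example);		/* word 1, bit 3 */

	/* prints "00000008,00000001" */
	pr_info("%*pb\n", 64, example);

For the device this emits all IDXD_MAX_OPCAP_BITS operation-capability bits in a single sysfs read.
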
1391 struct idxd_device *idxd = confdev_to_idxd(dev); in gen_cap_show() local
1393 return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits); in gen_cap_show()
1400 struct idxd_device *idxd = confdev_to_idxd(dev); in configurable_show() local
1402 return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); in configurable_show()
1409 struct idxd_device *idxd = confdev_to_idxd(dev); in clients_show() local
1412 spin_lock(&idxd->dev_lock); in clients_show()
1413 for (i = 0; i < idxd->max_wqs; i++) { in clients_show()
1414 struct idxd_wq *wq = idxd->wqs[i]; in clients_show()
1418 spin_unlock(&idxd->dev_lock); in clients_show()
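
clients_show() holds dev_lock while it walks the queues so the per-queue counts are sampled consistently. A sketch; the client_count field and the summed output are assumptions:

	static ssize_t clients_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int count = 0, i;

		spin_lock(&idxd->dev_lock);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			count += wq->client_count;	/* assumed field */
		}
		spin_unlock(&idxd->dev_lock);

		return sysfs_emit(buf, "%d\n", count);	/* assumed output */
	}

errors_show() below uses the same locked-iteration pattern over sw_err.bits.
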
1427 struct idxd_device *idxd = confdev_to_idxd(dev); in pasid_enabled_show() local
1429 return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd)); in pasid_enabled_show()
1436 struct idxd_device *idxd = confdev_to_idxd(dev); in state_show() local
1438 switch (idxd->state) { in state_show()
1454 struct idxd_device *idxd = confdev_to_idxd(dev); in errors_show() local
1457 spin_lock(&idxd->dev_lock); in errors_show()
1459 out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]); in errors_show()
1460 spin_unlock(&idxd->dev_lock); in errors_show()
1470 struct idxd_device *idxd = confdev_to_idxd(dev); in max_read_buffers_show() local
1472 return sysfs_emit(buf, "%u\n", idxd->max_rdbufs); in max_read_buffers_show()
1488 struct idxd_device *idxd = confdev_to_idxd(dev); in read_buffer_limit_show() local
1490 return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit); in read_buffer_limit_show()
1504 struct idxd_device *idxd = confdev_to_idxd(dev); in read_buffer_limit_store() local
1512 if (idxd->state == IDXD_DEV_ENABLED) in read_buffer_limit_store()
1515 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in read_buffer_limit_store()
1518 if (!idxd->hw.group_cap.rdbuf_limit) in read_buffer_limit_store()
1521 if (val > idxd->hw.group_cap.total_rdbufs) in read_buffer_limit_store()
1524 idxd->rdbuf_limit = val; in read_buffer_limit_store()
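
read_buffer_limit_store() stacks four gates before committing, in the order the matches show: device disabled, driver-configurable, hardware exposes a read-buffer limit, and the value fits the advertised total. Reconstructed sketch (parse step and error codes assumed):

	static ssize_t read_buffer_limit_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
	{
		struct idxd_device *idxd = confdev_to_idxd(dev);
		unsigned long val;

		if (kstrtoul(buf, 10, &val) < 0)	/* assumed parse step */
			return -EINVAL;

		if (idxd->state == IDXD_DEV_ENABLED)
			return -EPERM;

		if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			return -EPERM;

		if (!idxd->hw.group_cap.rdbuf_limit)
			return -EOPNOTSUPP;		/* assumed error code */

		if (val > idxd->hw.group_cap.total_rdbufs)
			return -EINVAL;

		idxd->rdbuf_limit = val;
		return count;
	}
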
1542 struct idxd_device *idxd = confdev_to_idxd(dev); in cdev_major_show() local
1544 return sysfs_emit(buf, "%u\n", idxd->major); in cdev_major_show()
1551 struct idxd_device *idxd = confdev_to_idxd(dev); in cmd_status_show() local
1553 return sysfs_emit(buf, "%#x\n", idxd->cmd_status); in cmd_status_show()
1559 struct idxd_device *idxd = confdev_to_idxd(dev); in cmd_status_store() local
1561 idxd->cmd_status = 0; in cmd_status_store()
1567 struct idxd_device *idxd) in idxd_device_attr_max_batch_size_invisible() argument
1571 idxd->data->type == IDXD_TYPE_IAX; in idxd_device_attr_max_batch_size_invisible()
1575 struct idxd_device *idxd) in idxd_device_attr_read_buffers_invisible() argument
1585 idxd->data->type == IDXD_TYPE_IAX; in idxd_device_attr_read_buffers_invisible()
1592 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_device_attr_visible() local
1594 if (idxd_device_attr_max_batch_size_invisible(attr, idxd)) in idxd_device_attr_visible()
1597 if (idxd_device_attr_read_buffers_invisible(attr, idxd)) in idxd_device_attr_visible()
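
The *_invisible() helpers feed a standard sysfs .is_visible callback: returning 0 hides the attribute, returning attr->mode keeps it as declared. A sketch of the device-level callback and its hookup; the attributes array name is hypothetical:

	static umode_t idxd_device_attr_visible(struct kobject *kobj,
						struct attribute *attr, int n)
	{
		struct device *dev = container_of(kobj, struct device, kobj);
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd_device_attr_max_batch_size_invisible(attr, idxd))
			return 0;			/* hide, e.g. on IAX */

		if (idxd_device_attr_read_buffers_invisible(attr, idxd))
			return 0;

		return attr->mode;			/* keep declared mode */
	}

	static const struct attribute_group idxd_device_attribute_group = {
		.attrs = idxd_device_attributes,	/* hypothetical array name */
		.is_visible = idxd_device_attr_visible,
	};

The group and wq visibility callbacks matched earlier follow exactly the same shape.
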
1640 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_conf_device_release() local
1642 kfree(idxd->groups); in idxd_conf_device_release()
1643 bitmap_free(idxd->wq_enable_map); in idxd_conf_device_release()
1644 kfree(idxd->wqs); in idxd_conf_device_release()
1645 kfree(idxd->engines); in idxd_conf_device_release()
1646 ida_free(&idxd_ida, idxd->id); in idxd_conf_device_release()
1647 bitmap_free(idxd->opcap_bmap); in idxd_conf_device_release()
1648 kfree(idxd); in idxd_conf_device_release()
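
The release callback is nearly complete in the matches at lines 1642-1648; only the signature boilerplate is filled in here. It frees the child arrays, returns the device id to idxd_ida, and frees the idxd struct last:

	static void idxd_conf_device_release(struct device *dev)
	{
		struct idxd_device *idxd = confdev_to_idxd(dev);

		kfree(idxd->groups);
		bitmap_free(idxd->wq_enable_map);
		kfree(idxd->wqs);
		kfree(idxd->engines);
		ida_free(&idxd_ida, idxd->id);
		bitmap_free(idxd->opcap_bmap);
		kfree(idxd);		/* the containing struct goes last */
	}
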
1663 static int idxd_register_engine_devices(struct idxd_device *idxd) in idxd_register_engine_devices() argument
1668 for (i = 0; i < idxd->max_engines; i++) { in idxd_register_engine_devices()
1669 engine = idxd->engines[i]; in idxd_register_engine_devices()
1679 for (; i < idxd->max_engines; i++) { in idxd_register_engine_devices()
1680 engine = idxd->engines[i]; in idxd_register_engine_devices()
1685 engine = idxd->engines[j]; in idxd_register_engine_devices()
1691 static int idxd_register_group_devices(struct idxd_device *idxd) in idxd_register_group_devices() argument
1696 for (i = 0; i < idxd->max_groups; i++) { in idxd_register_group_devices()
1697 group = idxd->groups[i]; in idxd_register_group_devices()
1707 for (; i < idxd->max_groups; i++) { in idxd_register_group_devices()
1708 group = idxd->groups[i]; in idxd_register_group_devices()
1713 group = idxd->groups[j]; in idxd_register_group_devices()
1719 static int idxd_register_wq_devices(struct idxd_device *idxd) in idxd_register_wq_devices() argument
1724 for (i = 0; i < idxd->max_wqs; i++) { in idxd_register_wq_devices()
1725 wq = idxd->wqs[i]; in idxd_register_wq_devices()
1735 for (; i < idxd->max_wqs; i++) { in idxd_register_wq_devices()
1736 wq = idxd->wqs[i]; in idxd_register_wq_devices()
1741 wq = idxd->wqs[j]; in idxd_register_wq_devices()
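
The three idxd_register_*_devices() helpers share a two-phase unwind visible in the match pattern: when a device_add() fails, conf devices never added only need their reference dropped with put_device(), while already-added ones need a full device_unregister(). Sketch for the WQ variant; the goto label, the put_device() calls, and the exact index bookkeeping are assumptions:

	static int idxd_register_wq_devices(struct idxd_device *idxd)
	{
		int i, rc, j;

		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			rc = device_add(wq_confdev(wq));
			if (rc < 0)
				goto cleanup;		/* assumed label */
		}
		return 0;

	cleanup:
		j = i;			/* wqs[0..i-1] were added successfully */

		/* wqs[i..] were never added (including the one that failed):
		 * drop the initialization reference only
		 */
		for (; i < idxd->max_wqs; i++)
			put_device(wq_confdev(idxd->wqs[i]));	/* assumed */

		/* fully unregister the ones that made it in */
		while (j--)
			device_unregister(wq_confdev(idxd->wqs[j]));
		return rc;
	}

idxd_register_devices() then chains the wq, engine, and group variants, unwinding each earlier stage when a later one fails, as the matches at lines 1777-1783 show.
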
1747 int idxd_register_devices(struct idxd_device *idxd) in idxd_register_devices() argument
1749 struct device *dev = &idxd->pdev->dev; in idxd_register_devices()
1752 rc = device_add(idxd_confdev(idxd)); in idxd_register_devices()
1756 rc = idxd_register_wq_devices(idxd); in idxd_register_devices()
1762 rc = idxd_register_engine_devices(idxd); in idxd_register_devices()
1768 rc = idxd_register_group_devices(idxd); in idxd_register_devices()
1777 for (i = 0; i < idxd->max_engines; i++) in idxd_register_devices()
1778 device_unregister(engine_confdev(idxd->engines[i])); in idxd_register_devices()
1780 for (i = 0; i < idxd->max_wqs; i++) in idxd_register_devices()
1781 device_unregister(wq_confdev(idxd->wqs[i])); in idxd_register_devices()
1783 device_del(idxd_confdev(idxd)); in idxd_register_devices()
1787 void idxd_unregister_devices(struct idxd_device *idxd) in idxd_unregister_devices() argument
1791 for (i = 0; i < idxd->max_wqs; i++) { in idxd_unregister_devices()
1792 struct idxd_wq *wq = idxd->wqs[i]; in idxd_unregister_devices()
1797 for (i = 0; i < idxd->max_engines; i++) { in idxd_unregister_devices()
1798 struct idxd_engine *engine = idxd->engines[i]; in idxd_unregister_devices()
1803 for (i = 0; i < idxd->max_groups; i++) { in idxd_unregister_devices()
1804 struct idxd_group *group = idxd->groups[i]; in idxd_unregister_devices()
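
Teardown is the mirror image, unregistering the conf devices in wq, engine, group order; group_confdev() is assumed by symmetry with the wq_confdev() and engine_confdev() accessors matched above:

	void idxd_unregister_devices(struct idxd_device *idxd)
	{
		int i;

		for (i = 0; i < idxd->max_wqs; i++)
			device_unregister(wq_confdev(idxd->wqs[i]));

		for (i = 0; i < idxd->max_engines; i++)
			device_unregister(engine_confdev(idxd->engines[i]));

		for (i = 0; i < idxd->max_groups; i++)
			device_unregister(group_confdev(idxd->groups[i]));	/* assumed accessor */
	}
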