
Searched refs:idxd (Results 1 – 15 of 15) sorted by relevance

/linux-6.1.9/drivers/dma/idxd/
init.c:71 static int idxd_setup_interrupts(struct idxd_device *idxd) in idxd_setup_interrupts() argument
73 struct pci_dev *pdev = idxd->pdev; in idxd_setup_interrupts()
84 idxd->irq_cnt = msixcnt; in idxd_setup_interrupts()
94 ie = idxd_get_ie(idxd, 0); in idxd_setup_interrupts()
103 for (i = 0; i < idxd->max_wqs; i++) { in idxd_setup_interrupts()
106 ie = idxd_get_ie(idxd, msix_idx); in idxd_setup_interrupts()
116 idxd_unmask_error_interrupts(idxd); in idxd_setup_interrupts()
120 idxd_mask_error_interrupts(idxd); in idxd_setup_interrupts()
126 static void idxd_cleanup_interrupts(struct idxd_device *idxd) in idxd_cleanup_interrupts() argument
128 struct pci_dev *pdev = idxd->pdev; in idxd_cleanup_interrupts()
[all …]
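
A hedged sketch of the setup pattern these hits outline: MSI-X vector 0 carries the device's misc/error interrupt, and vectors 1..max_wqs are handed to work queues through idxd_get_ie(). It builds against the driver's idxd.h; the ie->id and ie->vector fields and the abbreviated error handling are assumptions here, not the driver's exact code.

    #include <linux/pci.h>

    static int example_setup_interrupts(struct idxd_device *idxd)
    {
        struct pci_dev *pdev = idxd->pdev;
        struct idxd_irq_entry *ie;
        int i, msixcnt, rc;

        msixcnt = pci_msix_vec_count(pdev);
        if (msixcnt < 0)
            return -ENXIO;

        rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
        if (rc != msixcnt)
            return -ENOSPC;
        idxd->irq_cnt = msixcnt;

        ie = idxd_get_ie(idxd, 0);              /* vector 0: misc/error */
        ie->vector = pci_irq_vector(pdev, 0);

        for (i = 0; i < idxd->max_wqs; i++) {   /* vectors 1..max_wqs: WQs */
            int msix_idx = i + 1;

            ie = idxd_get_ie(idxd, msix_idx);
            ie->id = msix_idx;
            ie->vector = pci_irq_vector(pdev, msix_idx);
        }

        idxd_unmask_error_interrupts(idxd);
        return 0;
    }
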
device.c:16 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
18 static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
22 void idxd_unmask_error_interrupts(struct idxd_device *idxd) in idxd_unmask_error_interrupts() argument
26 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_unmask_error_interrupts()
29 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_unmask_error_interrupts()
32 void idxd_mask_error_interrupts(struct idxd_device *idxd) in idxd_mask_error_interrupts() argument
36 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_mask_error_interrupts()
39 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_mask_error_interrupts()
54 struct device *dev = &wq->idxd->pdev->dev; in alloc_hw_descs()
87 struct device *dev = &wq->idxd->pdev->dev; in alloc_descs()
[all …]
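
The mask/unmask helpers are a plain read-modify-write on the GENCTRL register. A minimal sketch; the bitfield names (softerr_int_en, halt_int_en) follow the driver's registers.h and should be treated as assumptions:

    #include <linux/io.h>

    static void example_unmask_error_interrupts(struct idxd_device *idxd)
    {
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 1;    /* report software errors */
        genctrl.halt_int_en = 1;       /* report halt-state transitions */
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
    }

Masking is the same sequence with the two bits cleared instead of set.
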
sysfs.c:36 struct idxd_device *idxd = engine->idxd; in engine_group_id_store() local
45 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in engine_group_id_store()
48 if (id > idxd->max_groups - 1 || id < -1) in engine_group_id_store()
63 engine->group = idxd->groups[id]; in engine_group_id_store()
102 static void idxd_set_free_rdbufs(struct idxd_device *idxd) in idxd_set_free_rdbufs() argument
106 for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) { in idxd_set_free_rdbufs()
107 struct idxd_group *g = idxd->groups[i]; in idxd_set_free_rdbufs()
112 idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs; in idxd_set_free_rdbufs()
137 struct idxd_device *idxd = group->idxd; in group_read_buffers_reserved_store() local
145 if (idxd->data->type == IDXD_TYPE_IAX) in group_read_buffers_reserved_store()
[all …]
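
idxd_set_free_rdbufs() does simple pool accounting: sum the read buffers each group has reserved, then record the remainder of the device-wide pool as free. A sketch, assuming the rdbufs_reserved group field used by the driver:

    static void example_set_free_rdbufs(struct idxd_device *idxd)
    {
        int i, rdbufs;

        for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
            struct idxd_group *g = idxd->groups[i];

            rdbufs += g->rdbufs_reserved;
        }

        idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
    }
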
irq.c:27 struct idxd_device *idxd; member
32 struct idxd_device *idxd = container_of(work, struct idxd_device, work); in idxd_device_reinit() local
33 struct device *dev = &idxd->pdev->dev; in idxd_device_reinit()
36 idxd_device_reset(idxd); in idxd_device_reinit()
37 rc = idxd_device_config(idxd); in idxd_device_reinit()
41 rc = idxd_device_enable(idxd); in idxd_device_reinit()
45 for (i = 0; i < idxd->max_wqs; i++) { in idxd_device_reinit()
46 if (test_bit(i, idxd->wq_enable_map)) { in idxd_device_reinit()
47 struct idxd_wq *wq = idxd->wqs[i]; in idxd_device_reinit()
51 clear_bit(i, idxd->wq_enable_map); in idxd_device_reinit()
[all …]
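
The reinit path recovers from a fatal error off a workqueue: reset, reprogram, and re-enable the device, then re-enable every work queue recorded in the wq_enable_map bitmap. A sketch with abbreviated error handling; idxd_wq_enable() as the per-WQ enable helper is an assumption:

    #include <linux/workqueue.h>
    #include <linux/bitops.h>

    static void example_device_reinit(struct work_struct *work)
    {
        struct idxd_device *idxd = container_of(work, struct idxd_device, work);
        int i, rc;

        idxd_device_reset(idxd);
        if (idxd_device_config(idxd) < 0)
            return;
        if (idxd_device_enable(idxd) < 0)
            return;

        for (i = 0; i < idxd->max_wqs; i++) {
            if (test_bit(i, idxd->wq_enable_map)) {
                struct idxd_wq *wq = idxd->wqs[i];

                rc = idxd_wq_enable(wq);
                if (rc < 0)
                    clear_bit(i, idxd->wq_enable_map);
            }
        }
    }
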
perfmon.c:126 struct idxd_device *idxd = idxd_pmu->idxd; in perfmon_assign_hw_event() local
130 hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx)); in perfmon_assign_hw_event()
131 hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx)); in perfmon_assign_hw_event()
200 struct idxd_device *idxd; in perfmon_pmu_event_init() local
203 idxd = event_to_idxd(event); in perfmon_pmu_event_init()
216 if (event->pmu != &idxd->idxd_pmu->pmu) in perfmon_pmu_event_init()
219 event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd)); in perfmon_pmu_event_init()
220 event->cpu = idxd->idxd_pmu->cpu; in perfmon_pmu_event_init()
225 ret = perfmon_validate_group(idxd->idxd_pmu, event); in perfmon_pmu_event_init()
233 struct idxd_device *idxd; in perfmon_pmu_read_counter() local
[all …]
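
Event init mostly validates placement: sampling is rejected, the event must belong to this device's PMU, and it is pinned to the CPU that owns the counters. A condensed sketch of those checks (the full function does more, including the event_base read shown above):

    #include <linux/perf_event.h>

    static int example_pmu_event_init(struct perf_event *event)
    {
        struct idxd_device *idxd = event_to_idxd(event);

        if (event->attr.sample_period)          /* counting only */
            return -EINVAL;
        if (event->pmu != &idxd->idxd_pmu->pmu)
            return -EINVAL;

        event->cpu = idxd->idxd_pmu->cpu;
        event->hw.config = event->attr.config;

        return perfmon_validate_group(idxd->idxd_pmu, event);
    }
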
perfmon.h:38 return idxd_pmu->idxd; in event_to_idxd()
47 return idxd_pmu->idxd; in pmu_to_idxd()
87 #define PERFMON_REG_OFFSET(idxd, offset) \ argument
88 (PERFMON_TABLE_OFFSET(idxd) + (offset))
90 #define PERFCAP_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET)) argument
91 #define PERFRST_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFRST_OFFSET)) argument
92 #define OVFSTATUS_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_OVFSTATUS_OFFSET)) argument
93 #define PERFFRZ_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFFRZ_OFFSET)) argument
95 #define FLTCFG_REG(idxd, cntr, flt) \ argument
96 (PERFMON_REG_OFFSET(idxd, IDXD_FLTCFG_OFFSET) + ((cntr) * 32) + ((flt) * 4))
[all …]
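
The macros compose MMIO addresses relative to the device's perfmon table, so FLTCFG_REG(idxd, 1, 2) lands at table base + IDXD_FLTCFG_OFFSET + 1*32 + 2*4. A usage sketch; the 32-bit width of the filter registers is an assumption:

    #include <linux/io.h>

    static void example_read_perfmon_regs(struct idxd_device *idxd)
    {
        u64 perfcap = ioread64(PERFCAP_REG(idxd));     /* capability bits */
        u32 flt = ioread32(FLTCFG_REG(idxd, 1, 2));    /* counter 1, filter 2 */

        (void)perfcap;
        (void)flt;
    }
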
cdev.c:48 cdev_ctx = &ictx[wq->idxd->data->type]; in idxd_cdev_dev_release()
75 struct idxd_device *idxd; in idxd_cdev_open() local
83 idxd = wq->idxd; in idxd_cdev_open()
84 dev = &idxd->pdev->dev; in idxd_cdev_open()
102 if (device_user_pasid_enabled(idxd)) { in idxd_cdev_open()
144 struct idxd_device *idxd = wq->idxd; in idxd_cdev_release() local
145 struct device *dev = &idxd->pdev->dev; in idxd_cdev_release()
153 idxd_device_drain_pasid(idxd, ctx->pasid); in idxd_cdev_release()
155 if (device_user_pasid_enabled(idxd)) { in idxd_cdev_release()
177 struct device *dev = &wq->idxd->pdev->dev; in check_vma()
[all …]
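
The release hit shows the key lifetime rule for user (cdev) access: before an open context goes away, descriptors still tagged with its PASID are drained so the device stops touching that address space. A simplified sketch; the context struct here is hypothetical:

    struct example_user_ctx {       /* hypothetical stand-in for the driver's ctx */
        struct idxd_wq *wq;
        u32 pasid;
    };

    static void example_cdev_release(struct example_user_ctx *ctx)
    {
        struct idxd_device *idxd = ctx->wq->idxd;

        if (device_user_pasid_enabled(idxd))
            idxd_device_drain_pasid(idxd, ctx->pasid);
    }
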
idxd.h:89 struct idxd_device *idxd; member
104 struct idxd_device *idxd; member
188 struct idxd_device *idxd; member
223 struct idxd_device *idxd; member
251 struct idxd_device *idxd; member
349 #define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev argument
404 static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx) in idxd_get_ie() argument
406 return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie; in idxd_get_ie()
476 static inline bool device_pasid_enabled(struct idxd_device *idxd) in device_pasid_enabled() argument
478 return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); in device_pasid_enabled()
[all …]
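
idxd_get_ie() (shown in full above) encodes the whole vector layout in one expression: index 0 yields the device-level entry for the misc/error interrupt, index n > 0 yields the entry embedded in work queue n - 1. Usage sketch:

    static void example_ie_layout(struct idxd_device *idxd)
    {
        struct idxd_irq_entry *misc_ie = idxd_get_ie(idxd, 0);  /* &idxd->ie */
        struct idxd_irq_entry *wq0_ie = idxd_get_ie(idxd, 1);   /* &idxd->wqs[0]->ie */

        (void)misc_ie;
        (void)wq0_ie;
    }
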
dma.c:27 struct idxd_device *idxd = desc->wq->idxd; in idxd_dma_complete_txd() local
35 if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT && in idxd_dma_complete_txd()
117 struct idxd_device *idxd = wq->idxd; in idxd_dma_submit_memcpy() local
123 if (len > idxd->max_xfer_bytes) in idxd_dma_submit_memcpy()
143 struct device *dev = &wq->idxd->pdev->dev; in idxd_dma_alloc_chan_resources()
154 struct device *dev = &wq->idxd->pdev->dev; in idxd_dma_free_chan_resources()
202 int idxd_register_dma_device(struct idxd_device *idxd) in idxd_register_dma_device() argument
206 struct device *dev = &idxd->pdev->dev; in idxd_register_dma_device()
223 if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { in idxd_register_dma_device()
239 idxd_dma->idxd = idxd; in idxd_register_dma_device()
[all …]
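
idxd_register_dma_device() exposes the device through the generic dmaengine framework (memcpy only when the MEMMOVE opcap bit is set, and transfers above max_xfer_bytes are rejected at prep time). A hedged client-side sketch using only the standard dmaengine API; the channel name is an assumption:

    #include <linux/dmaengine.h>
    #include <linux/err.h>

    static int example_memcpy(struct device *dev, dma_addr_t dst,
                              dma_addr_t src, size_t len)
    {
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cookie_t cookie;

        chan = dma_request_chan(dev, "memcpy");
        if (IS_ERR(chan))
            return PTR_ERR(chan);

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
        if (!tx) {
            dma_release_channel(chan);
            return -ENOMEM;
        }

        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        dma_sync_wait(chan, cookie);    /* polling wait, for brevity */

        dma_release_channel(chan);
        return 0;
    }
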
submit.c:14 struct idxd_device *idxd = wq->idxd; in __get_desc() local
18 memset(desc->completion, 0, idxd->data->compl_size); in __get_desc()
21 if (device_pasid_enabled(idxd)) in __get_desc()
22 desc->hw->pasid = idxd->pasid; in __get_desc()
30 struct idxd_device *idxd = wq->idxd; in idxd_alloc_desc() local
35 if (idxd->state != IDXD_DEV_ENABLED) in idxd_alloc_desc()
168 struct idxd_device *idxd = wq->idxd; in idxd_submit_desc() local
174 if (idxd->state != IDXD_DEV_ENABLED) in idxd_submit_desc()
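
In-kernel submission is a two-step flow: idxd_alloc_desc() hands out a descriptor with a zeroed completion record (and, when PASID is enabled, the device PASID stamped in), then idxd_submit_desc() pushes it to the work queue. A sketch, with IDXD_OP_BLOCK and the prototypes taken from the driver:

    static int example_submit(struct idxd_wq *wq)
    {
        struct idxd_desc *desc;

        desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);  /* may wait for a free slot */
        if (IS_ERR(desc))
            return PTR_ERR(desc);

        /* ... fill desc->hw with opcode, source/destination, flags ... */

        return idxd_submit_desc(wq, desc);
    }
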
Makefile:3 obj-$(CONFIG_INTEL_IDXD) += idxd.o
4 idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
6 idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
/linux-6.1.9/drivers/dma/
Makefile:46 obj-y += idxd/
Kconfig:308 bool "Legacy behavior for idxd driver"
325 # support shared virtual memory for the devices supported by idxd.
/linux-6.1.9/Documentation/admin-guide/
kernel-parameters.txt:1854 idxd.sva= [HW]
1857 support for the idxd driver. By default it is set to
1860 idxd.tc_override= [HW]
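
Both are boot-time module parameters; for example, appending the following to the kernel command line disables SVA and enables the traffic-class override (boolean semantics assumed per the full kernel-parameters.txt entries, which are truncated above):

    idxd.sva=0 idxd.tc_override=1
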
/linux-6.1.9/
MAINTAINERS:10304 F: drivers/dma/idxd/*
10305 F: include/uapi/linux/idxd.h