// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_pm.h"
#include "adf_cfg_strings.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_gen4_hw_data.h"
#include "adf_cfg.h"

enum qat_pm_host_msg {
	PM_NO_CHANGE = 0,
	PM_SET_MIN,
};

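/*
 * State passed from the top-half interrupt handler to the workqueue
 * bottom half. It is allocated with GFP_ATOMIC in interrupt context
 * and freed at the end of pm_bh_handler().
 */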
struct adf_gen4_pm_data {
	struct work_struct pm_irq_work;
	struct adf_accel_dev *accel_dev;
	u32 pm_int_sts;
};

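/*
 * Write the PM_SET_MIN payload to the HOST_MSG register and poll until
 * the firmware clears the pending bit. Returns -EBUSY if a previous
 * message is still pending, or -ETIMEDOUT if the poll times out.
 */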
static int send_host_msg(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
	u32 msg;

	msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
	if (msg & ADF_GEN4_PM_MSG_PENDING)
		return -EBUSY;

	/* Send HOST_MSG */
	msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
	msg |= ADF_GEN4_PM_MSG_PENDING;
	ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);

	/* Poll status register to make sure the HOST_MSG has been processed */
	return read_poll_timeout(ADF_CSR_RD, msg,
				 !(msg & ADF_GEN4_PM_MSG_PENDING),
				 ADF_GEN4_PM_MSG_POLL_DELAY_US,
				 ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
				 ADF_GEN4_PM_HOST_MSG);
}

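/*
 * Workqueue bottom half for the PM interrupt: sends the host message to
 * the firmware on an idle event, clears the interrupt status and then
 * re-enables the PM interrupt source that was masked by the top half.
 */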
static void pm_bh_handler(struct work_struct *work)
{
	struct adf_gen4_pm_data *pm_data =
		container_of(work, struct adf_gen4_pm_data, pm_irq_work);
	struct adf_accel_dev *accel_dev = pm_data->accel_dev;
	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
	u32 pm_int_sts = pm_data->pm_int_sts;
	u32 val;

	/* PM Idle interrupt */
	if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
		/* Issue host message to FW */
		if (send_host_msg(accel_dev))
			dev_warn_ratelimited(&GET_DEV(accel_dev),
					     "Failed to send host msg to FW\n");
	}

	/* Clear interrupt status */
	ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);

	/* Reenable PM interrupt */
	val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
	val &= ~ADF_GEN4_PM_SOU;
	ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);

	kfree(pm_data);
}

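/*
 * Top-half PM interrupt handler: checks whether the interrupt was raised
 * by the power management source and, if so, masks it, captures the
 * interrupt status and defers the handling to pm_bh_handler() on the
 * misc workqueue.
 */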
bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
	struct adf_gen4_pm_data *pm_data = NULL;
	u32 errsou2;
	u32 errmsk2;
	u32 val;

	/* Only handle the interrupt triggered by PM */
	errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
	if (errmsk2 & ADF_GEN4_PM_SOU)
		return false;

	errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
	if (!(errsou2 & ADF_GEN4_PM_SOU))
		return false;

	/* Disable interrupt */
	val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
	val |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);

	val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);

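	/* Runs in interrupt context, hence the atomic allocation; freed by pm_bh_handler() */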
	pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
	if (!pm_data)
		return false;

	pm_data->pm_int_sts = val;
	pm_data->accel_dev = accel_dev;

	INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
	adf_misc_wq_queue_work(&pm_data->pm_irq_work);

	return true;
}
EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);

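/*
 * Configure the idle filter through the admin firmware interface, then
 * enable the default PM interrupts (IDLE, THROTTLE), clear any stale
 * interrupt status and unmask the PM interrupt source in ERRMSK2.
 */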
int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
	int ret;
	u32 val;

	ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
	if (ret)
		return ret;

	/* Enable default PM interrupts: IDLE, THROTTLE */
	val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
	val |= ADF_GEN4_PM_INT_EN_DEFAULT;

	/* Clear interrupt status */
	val |= ADF_GEN4_PM_INT_STS_MASK;
	ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);

	/* Unmask PM Interrupt */
	val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
	val &= ~ADF_GEN4_PM_SOU;
	ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);

	return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);