1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2021 Intel Corporation */
3 #include <adf_accel_devices.h>
4 #include <adf_common_drv.h>
5 #include <adf_gen2_config.h>
6 #include <adf_gen2_dc.h>
7 #include <adf_gen2_hw_data.h>
8 #include <adf_gen2_pfvf.h>
9 #include "adf_dh895xcc_hw_data.h"
10 #include "adf_heartbeat.h"
11 #include "icp_qat_hw.h"
12
13 #define ADF_DH895XCC_VF_MSK 0xFFFFFFFF
14
/*
 * Worker thread to service arbiter mappings.
 * One 32-bit entry per accel engine (ADF_DH895XCC_MAX_ACCELENGINES total);
 * each nibble encodes the arbiter/thread routing for that engine's threads.
 */
static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
};
21
/*
 * Device class descriptor shared by all DH895xCC devices; the instances
 * counter tracks how many devices of this class are currently registered.
 */
static struct adf_hw_device_class dh895xcc_class = {
	.name = ADF_DH895XCC_DEVICE_NAME,
	.type = DEV_DH895XCC,
	.instances = 0
};
27
get_accel_mask(struct adf_hw_device_data * self)28 static u32 get_accel_mask(struct adf_hw_device_data *self)
29 {
30 u32 fuses = self->fuses;
31
32 return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
33 ADF_DH895XCC_ACCELERATORS_MASK;
34 }
35
get_ae_mask(struct adf_hw_device_data * self)36 static u32 get_ae_mask(struct adf_hw_device_data *self)
37 {
38 u32 fuses = self->fuses;
39
40 return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
41 }
42
/* Return the PCI BAR index of the miscellaneous (PMISC) register region. */
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_PMISC_BAR;
}
47
/*
 * Return the timestamp clock rate used by the heartbeat logic.
 * The timestamp counter advances once every 16 AE clock ticks on dh895xcc,
 * hence the division of the AE clock frequency by 16.
 */
static u32 get_ts_clock(struct adf_hw_device_data *self)
{
	/*
	 * Timestamp update interval is 16 AE clock ticks for dh895xcc.
	 */
	return self->clock_frequency / 16;
}
55
/* Return the PCI BAR index of the ETR (transport ring) register region. */
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_ETR_BAR;
}
60
/* Return the PCI BAR index of the on-chip SRAM region. */
static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_SRAM_BAR;
}
65
get_accel_cap(struct adf_accel_dev * accel_dev)66 static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
67 {
68 struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
69 u32 capabilities;
70 u32 legfuses;
71
72 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
73 ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
74 ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
75 ICP_ACCEL_CAPABILITIES_CIPHER |
76 ICP_ACCEL_CAPABILITIES_COMPRESSION;
77
78 /* Read accelerator capabilities mask */
79 pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
80
81 /* A set bit in legfuses means the feature is OFF in this SKU */
82 if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
83 capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
84 capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
85 }
86 if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
87 capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
88 if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
89 capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
90 capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
91 }
92 if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
93 capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
94
95 return capabilities;
96 }
97
get_sku(struct adf_hw_device_data * self)98 static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
99 {
100 int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
101 >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
102
103 switch (sku) {
104 case ADF_DH895XCC_FUSECTL_SKU_1:
105 return DEV_SKU_1;
106 case ADF_DH895XCC_FUSECTL_SKU_2:
107 return DEV_SKU_2;
108 case ADF_DH895XCC_FUSECTL_SKU_3:
109 return DEV_SKU_3;
110 case ADF_DH895XCC_FUSECTL_SKU_4:
111 return DEV_SKU_4;
112 default:
113 return DEV_SKU_UNKNOWN;
114 }
115 return DEV_SKU_UNKNOWN;
116 }
117
/* Return the static thread-to-arbiter mapping table for this device. */
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
	return thrd_to_arb_map;
}
122
/*
 * Unmask VF2PF doorbell interrupts for the VFs selected in @vf_mask.
 * VFs 0-15 are controlled through ERRMSK3, VFs 16-31 through ERRMSK5;
 * clearing a mask bit enables the corresponding interrupt.
 */
static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
	u32 reg;

	/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
		reg &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, reg);
	}

	/* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
		reg &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, reg);
	}
}
139
/*
 * Mask VF2PF doorbell interrupts for every VF (0-31) by setting all the
 * VF2PF bits in both ERRMSK3 (VFs 0-15) and ERRMSK5 (VFs 16-31).
 */
static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
{
	u32 reg;

	/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
	reg = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
	reg |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, reg);

	/* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
	reg = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
	reg |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, reg);
}
154
/*
 * Disable the VF2PF interrupts that are currently pending and not yet
 * masked, and return the bitmap of those sources (0 if none).
 *
 * NOTE(review): the sequence of CSR reads/writes below implements a
 * hardware workaround and is order-sensitive — do not reorder.
 */
static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
{
	u32 sources, pending, disabled;
	u32 errsou3, errmsk3;
	u32 errsou5, errmsk5;

	/* Get the interrupt sources triggered by VFs */
	errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
	errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
	sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
		  | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);

	if (!sources)
		return 0;

	/* Get the already disabled interrupts */
	errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
	errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
	disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
		   | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);

	/* Nothing to do if every triggered source is already masked */
	pending = sources & ~disabled;
	if (!pending)
		return 0;

	/* Due to HW limitations, when disabling the interrupts, we can't
	 * just disable the requested sources, as this would lead to missed
	 * interrupts if sources changes just before writing to ERRMSK3 and
	 * ERRMSK5.
	 * To work around it, disable all and re-enable only the sources that
	 * are not in vf_mask and were not already disabled. Re-enabling will
	 * trigger a new interrupt for the sources that have changed in the
	 * meantime, if any.
	 */
	errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
	errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);

	/* Keep masked only the sources that triggered or were already off */
	errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
	errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);

	/* Return the sources of the (new) interrupt(s) */
	return pending;
}
202
/*
 * Enable or disable the AE-to-function mapping for SR-IOV, forwarding the
 * dh895xcc-specific register counts to the common gen2 helper.
 */
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
	adf_gen2_cfg_iov_thds(accel_dev, enable,
			      ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
			      ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
}
209
/*
 * Populate @hw_data with all DH895xCC-specific constants and callbacks.
 * Called once per probed device; also bumps the per-class instance count.
 */
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	/* Device class and topology constants */
	hw_data->dev_class = &dh895xcc_class;
	hw_data->instance_id = dh895xcc_class.instances++;
	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
	/* IRQ and error-correction hooks */
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_gen2_enable_error_correction;
	/* Capability/mask query callbacks (local + common gen2 helpers) */
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_num_accels = adf_gen2_get_num_accels;
	hw_data->get_num_aes = adf_gen2_get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_admin_info = adf_gen2_get_admin_info;
	hw_data->get_arb_info = adf_gen2_get_arb_info;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_sku = get_sku;
	/* Firmware image names */
	hw_data->fw_name = ADF_DH895XCC_FW;
	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
	/* Admin, arbiter and SR-IOV management */
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->configure_iov_threads = configure_iov_threads;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_gen2_enable_ints;
	hw_data->reset_device = adf_reset_sbr;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->dev_config = adf_gen2_dev_config;
	/* Clock and heartbeat */
	hw_data->clock_frequency = ADF_DH895X_AE_FREQ;
	hw_data->get_hb_clock = get_ts_clock;
	hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
	hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;

	/* Common gen2 ops, then override the dh895xcc-specific PFVF hooks */
	adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
	hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
	hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
261
/* Undo adf_init_hw_data_dh895xcc(): drop this device's class reference. */
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}
266