// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
#include <linux/nospec.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

#define ADF_MAX_RING_THRESHOLD		80
#define ADF_PERCENT(tot, percent)	(((tot) * (percent)) / 100)

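/* Fast modulo for power-of-two ring sizes: returns data % (1 << shift). */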
static inline u32 adf_modulo(u32 data, u32 shift)
{
	u32 div = data >> shift;
	u32 mult = div << shift;

	return data - mult;
}

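/*
 * Ring buffers must be naturally aligned to their size (a power of two),
 * so none of the (size - 1) low bits of the DMA address may be set.
 */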
static inline int adf_check_ring_alignment(u64 addr, u64 size)
{
	if (((size - 1) & addr) != 0)
		return -EFAULT;
	return 0;
}

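/*
 * Return the ring size enum whose byte count matches msg_size * msg_num
 * exactly, or ADF_DEFAULT_RING_SIZE if no supported size matches.
 */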
static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
	int i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return ADF_DEFAULT_RING_SIZE;
}

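/*
 * Claim or release a ring slot in the bank's ring_mask under the bank lock.
 * Reservation fails with -EFAULT if the ring is already in use.
 */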
static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		spin_unlock(&bank->lock);
		return -EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	spin_unlock(&bank->lock);
	return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	spin_unlock(&bank->lock);
}

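/*
 * Add or remove the ring from the bank's interrupt mask and push the new
 * mask to the interrupt coalescing enable CSR; enabling also (re)programs
 * the bank's coalescing timer.
 */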
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	spin_lock_bh(&bank->lock);
	bank->irq_mask |= (1 << ring);
	spin_unlock_bh(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
				      bank->irq_mask);
	csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
				       bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	spin_lock_bh(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	spin_unlock_bh(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
				      bank->irq_mask);
}

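/*
 * True once in-flight requests exceed the ring's threshold
 * (ADF_MAX_RING_THRESHOLD percent of its capacity).
 */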
bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
{
	return atomic_read(ring->inflights) > ring->threshold;
}

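/*
 * Enqueue one request: reserve an in-flight slot (-EAGAIN if the ring is
 * full), copy the message at the current tail, advance the tail modulo the
 * ring size and publish it through the ring tail CSR.
 */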
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);

	if (atomic_add_return(1, ring->inflights) >
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
		atomic_dec(ring->inflights);
		return -EAGAIN;
	}
	spin_lock_bh(&ring->lock);
	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

	ring->tail = adf_modulo(ring->tail +
				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
				ADF_RING_SIZE_MODULO(ring->ring_size));
	csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number, ring->tail);
	spin_unlock_bh(&ring->lock);

	return 0;
}

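/*
 * Drain completed responses: walk from the ring head until the empty-message
 * signature is found, invoke the ring callback for each response, restore the
 * empty signature and advance the head, then write the new head CSR once.
 */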
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 msg_counter = 0;
	u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

	while (*msg != ADF_RING_EMPTY_SIG) {
		ring->callback((u32 *)msg);
		atomic_dec(ring->inflights);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head = adf_modulo(ring->head +
					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
					ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
	}
	if (msg_counter > 0) {
		csr_ops->write_csr_ring_head(ring->bank->csr_addr,
					     ring->bank->bank_number,
					     ring->ring_number, ring->head);
	}
	return 0;
}

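/*
 * Program the ring-config CSR. Request (TX) rings use the plain config;
 * response (RX) rings also get near-full and near-empty watermarks.
 */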
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number, ring_config);
}

static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config =
			BUILD_RESP_RING_CONFIG(ring->ring_size,
					       ADF_RING_NEAR_WATERMARK_512,
					       ADF_RING_NEAR_WATERMARK_0);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number, ring_config);
}

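/*
 * Allocate the DMA-coherent ring buffer (which must be naturally aligned to
 * its size), fill it with 0x7F bytes so unused slots read back as the
 * empty-message signature, program the ring-config and ring-base CSRs and
 * initialise the ring lock.
 */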
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u64 ring_base;
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
					     ring_size_bytes, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->base_addr)
		return -ENOMEM;

	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The base_addr has to be aligned to the size of the buffer */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
				  ring->base_addr, ring->dma_addr);
		ring->base_addr = NULL;
		return -EFAULT;
	}

	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);
	else
		adf_configure_rx_ring(ring);

	ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
						      ring->ring_size);

	csr_ops->write_csr_ring_base(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number, ring_base);
	spin_lock_init(&ring->lock);
	return 0;
}

static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		memset(ring->base_addr, 0x7F, ring_size_bytes);
		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
				  ring_size_bytes, ring->base_addr,
				  ring->dma_addr);
	}
}

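/*
 * Create and initialise a ring. The ring number is looked up by ring_name in
 * the given configuration section, the slot is reserved in the bank, the ring
 * is initialised and enabled for hardware arbitration and a debugfs entry is
 * added. The ring IRQ is enabled only when a callback is supplied and
 * poll_mode is not set.
 */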
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
		    u32 bank_num, u32 num_msgs,
		    u32 msg_size, const char *ring_name,
		    adf_callback_fn callback, int poll_mode,
		    struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	int max_inflights;
	u32 ring_num;
	int ret;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
		return -EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
		return -EFAULT;
	}
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		dev_err(&GET_DEV(accel_dev),
			"Invalid ring size for given msg size\n");
		return -EFAULT;
	}
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
			section, ring_name);
		return -EFAULT;
	}
	if (kstrtouint(val, 10, &ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
		return -EFAULT;
	}
	if (ring_num >= num_rings_per_bank) {
		dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
		return -EFAULT;
	}

	ring_num = array_index_nospec(ring_num, num_rings_per_bank);
	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
			ring_num, ring_name);
		return -EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->head = 0;
	ring->tail = 0;
	max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
	ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
	atomic_set(ring->inflights, 0);
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	adf_update_ring_arb(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		dev_err(&GET_DEV(accel_dev),
			"Couldn't add ring debugfs entry\n");
		ret = -EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && (!poll_mode))
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	adf_update_ring_arb(ring);
	return ret;
}

void adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear the ring config and base CSRs */
	csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
				       ring->ring_number, 0);
	csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
				     ring->ring_number, 0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	adf_update_ring_arb(ring);
	adf_cleanup_ring(ring);
}

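/*
 * Read the bank's empty-ring status CSR and handle responses on every ring
 * that is both non-empty and has its interrupt enabled.
 */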
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	unsigned long empty_rings;
	int i;

	empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
					       bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for_each_set_bit(i, &empty_rings, num_rings_per_bank)
		adf_handle_response(&bank->rings[i]);
}

void adf_response_handler(uintptr_t bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Handle all the responses and reenable IRQs */
	adf_ring_response_handler(bank);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
					    bank->irq_mask);
}

static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
				  const char *section, const char *format,
				  u32 key, u32 *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return -EFAULT;

	if (kstrtouint(val_buf, 10, value))
		return -EFAULT;
	return 0;
}

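/*
 * Read the interrupt coalescing timer for this bank from the configuration;
 * fall back to the default when the entry is missing or out of range.
 */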
static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
				  const char *section,
				  u32 bank_num_in_accel)
{
	if (adf_get_cfg_int(bank->accel_dev, section,
			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			    bank_num_in_accel, &bank->irq_coalesc_timer))
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}

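/*
 * Initialise one ring bank: reset every ring's config and base CSRs, allocate
 * the per-ring data, set up the in-flight counters (each TX ring owns a
 * counter that is shared with the RX ring tx_rx_gap slots above it), add the
 * bank debugfs entry and program the interrupt flag and source-select CSRs.
 */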
static int adf_init_bank(struct adf_accel_dev *accel_dev,
			 struct adf_etr_bank_data *bank,
			 u32 bank_num, void __iomem *csr_addr)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
	u32 irq_mask = BIT(num_rings_per_bank) - 1;
	struct adf_etr_ring_data *ring;
	struct adf_etr_ring_data *tx_ring;
	u32 i, coalesc_enabled = 0;
	unsigned long ring_mask;
	int size;

	memset(bank, 0, sizeof(*bank));
	bank->bank_number = bank_num;
	bank->csr_addr = csr_addr;
	bank->accel_dev = accel_dev;
	spin_lock_init(&bank->lock);

	/* Allocate the rings in the bank */
	size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
	bank->rings = kzalloc_node(size, GFP_KERNEL,
				   dev_to_node(&GET_DEV(accel_dev)));
	if (!bank->rings)
		return -ENOMEM;

	/*
	 * Always enable IRQ coalescing so the optimised flag and coalescing
	 * register can be used. If coalescing is disabled in the config file,
	 * just use the minimum timer value.
	 */
	if ((adf_get_cfg_int(accel_dev, "Accelerator0",
			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
			     &coalesc_enabled) == 0) && coalesc_enabled)
		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
	else
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

	for (i = 0; i < num_rings_per_bank; i++) {
		csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
		csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);

		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			ring->inflights =
				kzalloc_node(sizeof(atomic_t),
					     GFP_KERNEL,
					     dev_to_node(&GET_DEV(accel_dev)));
			if (!ring->inflights)
				goto err;
		} else {
			if (i < hw_data->tx_rx_gap) {
				dev_err(&GET_DEV(accel_dev),
					"Invalid tx rings mask config\n");
				goto err;
			}
			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
			ring->inflights = tx_ring->inflights;
		}
	}
	if (adf_bank_debugfs_add(bank)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to add bank debugfs entry\n");
		goto err;
	}

	csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
	csr_ops->write_csr_int_srcsel(csr_addr, bank_num);

	return 0;
err:
	ring_mask = hw_data->tx_rings_mask;
	for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
		ring = &bank->rings[i];
		kfree(ring->inflights);
		ring->inflights = NULL;
	}
	kfree(bank->rings);
	return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function initializes the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr_addr;
	u32 size;
	u32 num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
				dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data)
		return -ENOMEM;

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size, GFP_KERNEL,
				       dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data->banks) {
		ret = -ENOMEM;
		goto err_bank;
	}

	accel_dev->transport = etr_data;
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	/* accel_dev->debugfs_dir should always be non-NULL here */
	etr_data->debug = debugfs_create_dir("transport",
					     accel_dev->debugfs_dir);

	for (i = 0; i < num_banks; i++) {
		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
				    csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	debugfs_remove(etr_data->debug);
	kfree(etr_data->banks);
err_bank:
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);

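/*
 * Free all resources held by a bank: clean up every reserved ring, free the
 * TX rings' in-flight counters and the ring array, and remove the bank's
 * debugfs entry.
 */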
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 i;

	for (i = 0; i < num_rings_per_bank; i++) {
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		if (hw_data->tx_rings_mask & (1 << i))
			kfree(ring->inflights);
	}
	kfree(bank->rings);
	adf_bank_debugfs_rm(bank);
	memset(bank, 0, sizeof(*bank));
}

static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	u32 i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function clears the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		debugfs_remove(etr_data->debug);
		kfree(etr_data->banks->rings);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);