/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

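/*
 * Fast modulo for power-of-two divisors: returns data % (1 << shift)
 * without a division, since ring sizes are always powers of two.
 */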
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
        uint32_t div = data >> shift;
        uint32_t mult = div << shift;

        return data - mult;
}

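/*
 * Ring DMA base addresses must be naturally aligned to the ring size
 * (a power of two), i.e. the size-minus-one mask must not intersect
 * any address bits.
 */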
static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
{
        if (((size - 1) & addr) != 0)
                return -EFAULT;
        return 0;
}

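/*
 * Map the requested msg_size * msg_num byte count onto one of the
 * supported ring size encodings; fall back to the default ring size
 * if the request does not match any of them exactly.
 */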
static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
{
        int i = ADF_MIN_RING_SIZE;

        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
                        return i;

        return ADF_DEFAULT_RING_SIZE;
}

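/*
 * Ring ownership within a bank is tracked in a bitmask under the bank
 * lock; reserving an already-claimed ring fails with -EFAULT.
 */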
static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        if (bank->ring_mask & (1 << ring)) {
                spin_unlock(&bank->lock);
                return -EFAULT;
        }
        bank->ring_mask |= (1 << ring);
        spin_unlock(&bank->lock);
        return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        bank->ring_mask &= ~(1 << ring);
        spin_unlock(&bank->lock);
}

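/*
 * Per-ring interrupts are gated through the bank's coalescing-enable
 * mask; the coalescing timer bounds how long responses may sit in a
 * ring before an interrupt fires.
 */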
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask |= (1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
        WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
                              bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask &= ~(1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}

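/*
 * Post one request on a TX ring.  The inflight counter is bumped first
 * so that a full ring is detected without touching ring memory; on
 * success the message is copied at the tail and the tail CSR advanced
 * (modulo the ring size) to hand the slot to the device.  Returns
 * -EAGAIN when the ring is full so callers can retry.
 */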
int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
{
        if (atomic_add_return(1, ring->inflights) >
            ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
                atomic_dec(ring->inflights);
                return -EAGAIN;
        }
        spin_lock_bh(&ring->lock);
        memcpy(ring->base_addr + ring->tail, msg,
               ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

        ring->tail = adf_modulo(ring->tail +
                                ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                ADF_RING_SIZE_MODULO(ring->ring_size));
        WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring->tail);
        spin_unlock_bh(&ring->lock);
        return 0;
}

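/*
 * Drain one RX ring: invoke the callback for every message until the
 * empty signature is seen, restamping each consumed slot as empty,
 * then publish the new head to the device and release the inflight
 * credits.
 */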
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
        uint32_t msg_counter = 0;
        uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);

        while (*msg != ADF_RING_EMPTY_SIG) {
                ring->callback((uint32_t *)msg);
                *msg = ADF_RING_EMPTY_SIG;
                ring->head = adf_modulo(ring->head +
                                        ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                        ADF_RING_SIZE_MODULO(ring->ring_size));
                msg_counter++;
                msg = (uint32_t *)(ring->base_addr + ring->head);
        }
        if (msg_counter > 0) {
                WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
                                    ring->bank->bank_number,
                                    ring->ring_number, ring->head);
                atomic_sub(msg_counter, ring->inflights);
        }
        return 0;
}

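/*
 * TX rings are configured with the ring size only; RX (response)
 * rings additionally program near-watermark levels (near-full 512,
 * near-empty 0) via BUILD_RESP_RING_CONFIG.
 */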
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config =
                        BUILD_RESP_RING_CONFIG(ring->ring_size,
                                               ADF_RING_NEAR_WATERMARK_512,
                                               ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

static int adf_init_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;
        struct adf_accel_dev *accel_dev = bank->accel_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        uint64_t ring_base;
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
        ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
                                             ring_size_bytes, &ring->dma_addr,
                                             GFP_KERNEL);
        if (!ring->base_addr)
                return -ENOMEM;

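        /* 0x7F is the per-byte pattern of ADF_RING_EMPTY_SIG; pre-fill
         * so the response handler treats every slot as empty. */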
        memset(ring->base_addr, 0x7F, ring_size_bytes);
        /* The base_addr has to be aligned to the size of the buffer */
        if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
                dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
                dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
                                  ring->base_addr, ring->dma_addr);
                ring->base_addr = NULL;
                return -EFAULT;
        }

        if (hw_data->tx_rings_mask & (1 << ring->ring_number))
                adf_configure_tx_ring(ring);
        else
                adf_configure_rx_ring(ring);

        ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
        WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring_base);
        spin_lock_init(&ring->lock);
        return 0;
}

static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

        if (ring->base_addr) {
                memset(ring->base_addr, 0x7F, ring_size_bytes);
                dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
                                  ring_size_bytes, ring->base_addr,
                                  ring->dma_addr);
        }
}

int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
                    uint32_t bank_num, uint32_t num_msgs,
                    uint32_t msg_size, const char *ring_name,
                    adf_callback_fn callback, int poll_mode,
                    struct adf_etr_ring_data **ring_ptr)
{
        struct adf_etr_data *transport_data = accel_dev->transport;
        struct adf_etr_bank_data *bank;
        struct adf_etr_ring_data *ring;
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        uint32_t ring_num;
        int ret;

        if (bank_num >= GET_MAX_BANKS(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
                return -EFAULT;
        }
        if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
                return -EFAULT;
        }
        if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
                              ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
                dev_err(&GET_DEV(accel_dev),
                        "Invalid ring size for given msg size\n");
                return -EFAULT;
        }
        if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
                dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
                        section, ring_name);
                return -EFAULT;
        }
        if (kstrtouint(val, 10, &ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
                return -EFAULT;
        }
        if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
                dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
                return -EFAULT;
        }

        bank = &transport_data->banks[bank_num];
        if (adf_reserve_ring(bank, ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
                        ring_num, ring_name);
                return -EFAULT;
        }
        ring = &bank->rings[ring_num];
        ring->ring_number = ring_num;
        ring->bank = bank;
        ring->callback = callback;
        ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
        ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
        ring->head = 0;
        ring->tail = 0;
        atomic_set(ring->inflights, 0);
        ret = adf_init_ring(ring);
        if (ret)
                goto err;

        /* Enable HW arbitration for the given ring */
        adf_update_ring_arb(ring);

        if (adf_ring_debugfs_add(ring, ring_name)) {
                dev_err(&GET_DEV(accel_dev),
                        "Couldn't add ring debugfs entry\n");
                ret = -EFAULT;
                goto err;
        }

        /* Enable interrupts if needed */
        if (callback && (!poll_mode))
                adf_enable_ring_irq(bank, ring->ring_number);
        *ring_ptr = ring;
        return 0;
err:
        adf_cleanup_ring(ring);
        adf_unreserve_ring(bank, ring_num);
        adf_update_ring_arb(ring);
        return ret;
}
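
/*
 * Illustrative only (not taken from this driver): a service driver
 * would typically create a TX/RX ring pair and then post requests.
 * The section and ring-name strings below are made-up placeholders;
 * real callers use keys present in the device configuration table.
 *
 *	struct adf_etr_ring_data *tx_ring, *rx_ring;
 *	int ret;
 *
 *	ret = adf_create_ring(accel_dev, "SAMPLE", 0, 64, 64,
 *			      "SampleRingTx", NULL, 0, &tx_ring);
 *	if (!ret)
 *		ret = adf_create_ring(accel_dev, "SAMPLE", 0, 64, 64,
 *				      "SampleRingRx", resp_cb, 0, &rx_ring);
 *	...
 *	do {
 *		ret = adf_send_message(tx_ring, (uint32_t *)&req);
 *	} while (ret == -EAGAIN);
 */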

void adf_remove_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;

        /* Disable interrupts for the given ring */
        adf_disable_ring_irq(bank, ring->ring_number);

        /* Clear the ring's base and config CSRs */
        WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
                              ring->ring_number, 0);
        WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
                            ring->ring_number, 0);
        adf_ring_debugfs_rm(ring);
        adf_unreserve_ring(bank, ring->ring_number);
        /* Disable HW arbitration for the given ring */
        adf_update_ring_arb(ring);
        adf_cleanup_ring(ring);
}

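/*
 * Scan the bank's empty-ring status: any ring that is enabled in
 * irq_mask and not reported empty has responses waiting and gets
 * drained.
 */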
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
        uint32_t empty_rings, i;

        empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
        empty_rings = ~empty_rings & bank->irq_mask;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
                if (empty_rings & (1 << i))
                        adf_handle_response(&bank->rings[i]);
        }
}

/**
 * adf_response_handler() - Bottom half handler for response rings
 * @bank_addr:  Address of the ring bank for which the BH was scheduled.
 *
 * Function is the bottom half handler for responses from the
 * acceleration device. There is one handler for every ring bank.
 * Function checks all communication rings in the bank.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_response_handler(unsigned long bank_addr)
{
        struct adf_etr_bank_data *bank = (void *)bank_addr;

        /* Handle all the responses and re-enable IRQs */
        adf_ring_response_handler(bank);
        WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
                                   bank->irq_mask);
}
EXPORT_SYMBOL_GPL(adf_response_handler);

static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
                                  const char *section, const char *format,
                                  uint32_t key, uint32_t *value)
{
        char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

        snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

        if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
                return -EFAULT;

        if (kstrtouint(val_buf, 10, value))
                return -EFAULT;
        return 0;
}

static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
                                  const char *section,
                                  uint32_t bank_num_in_accel)
{
        if (adf_get_cfg_int(bank->accel_dev, section,
                            ADF_ETRMGR_COALESCE_TIMER_FORMAT,
                            bank_num_in_accel, &bank->irq_coalesc_timer))
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

        if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
            ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}

static int adf_init_bank(struct adf_accel_dev *accel_dev,
                         struct adf_etr_bank_data *bank,
                         uint32_t bank_num, void __iomem *csr_addr)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_etr_ring_data *ring;
        struct adf_etr_ring_data *tx_ring;
        uint32_t i, coalesc_enabled = 0;

        memset(bank, 0, sizeof(*bank));
        bank->bank_number = bank_num;
        bank->csr_addr = csr_addr;
        bank->accel_dev = accel_dev;
        spin_lock_init(&bank->lock);

        /* Always enable IRQ coalescing so that the optimised flag and
         * coalescing register can be used.  If coalescing is disabled
         * in the config file, just use the minimum time value. */
        if ((adf_get_cfg_int(accel_dev, "Accelerator0",
                             ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
                             &coalesc_enabled) == 0) && coalesc_enabled)
                adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
        else
                bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

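        /* Each TX ring owns an inflight counter; its paired RX ring
         * (tx_rx_gap ring numbers later) shares that counter, since a
         * request is only retired when its response is drained. */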
        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
                        ring->inflights =
                                kzalloc_node(sizeof(atomic_t),
                                             GFP_KERNEL,
                                             dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
                        if (i < hw_data->tx_rx_gap) {
                                dev_err(&GET_DEV(accel_dev),
                                        "Invalid tx rings mask config\n");
                                goto err;
                        }
                        tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
                        ring->inflights = tx_ring->inflights;
                }
        }
        if (adf_bank_debugfs_add(bank)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to add bank debugfs entry\n");
                goto err;
        }

        WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
        return 0;
err:
        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *csr_addr;
        uint32_t size;
        uint32_t num_banks = 0;
        int i, ret;

        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
                                dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;

        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
        etr_data->banks = kzalloc_node(size, GFP_KERNEL,
                                       dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
        }

        accel_dev->transport = etr_data;
        i = hw_data->get_etr_bar_id(hw_data);
        csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

        /* accel_dev->debugfs_dir should always be non-NULL here */
        etr_data->debug = debugfs_create_dir("transport",
                                             accel_dev->debugfs_dir);
        if (!etr_data->debug) {
                dev_err(&GET_DEV(accel_dev),
                        "Unable to create transport debugfs entry\n");
                ret = -ENOENT;
                goto err_bank_debug;
        }

        for (i = 0; i < num_banks; i++) {
                ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
                                    csr_addr);
                if (ret)
                        goto err_bank_all;
        }

        return 0;

err_bank_all:
        debugfs_remove(etr_data->debug);
err_bank_debug:
        kfree(etr_data->banks);
err_bank:
        kfree(etr_data);
        accel_dev->transport = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);

static void cleanup_bank(struct adf_etr_bank_data *bank)
{
        uint32_t i;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                struct adf_accel_dev *accel_dev = bank->accel_dev;
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_etr_ring_data *ring = &bank->rings[i];

                if (bank->ring_mask & (1 << i))
                        adf_cleanup_ring(ring);

                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        adf_bank_debugfs_rm(bank);
        memset(bank, 0, sizeof(*bank));
}

static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;
        uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);

        for (i = 0; i < num_banks; i++)
                cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;

        if (etr_data) {
                adf_cleanup_etr_handles(accel_dev);
                debugfs_remove(etr_data->debug);
                kfree(etr_data->banks);
                kfree(etr_data);
                accel_dev->transport = NULL;
        }
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);