GNU Linux-libre 4.9.309-gnu1
drivers/crypto/qat/qat_common/adf_transport.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

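/*
 * adf_modulo() reduces @data modulo a power of two without a divide:
 * shifting right by @shift and back left clears the low bits, leaving the
 * largest multiple of (1 << shift) not exceeding @data, and the difference
 * is the remainder.  Illustrative example: adf_modulo(10, 3) is
 * 10 - ((10 >> 3) << 3) = 10 - 8 = 2, i.e. 10 % 8.  The ring head and
 * tail offsets below are advanced with this helper.
 */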
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
        uint32_t div = data >> shift;
        uint32_t mult = div << shift;

        return data - mult;
}

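/*
 * The hardware wants a ring's base address aligned to the ring size.  For
 * a power-of-two @size, "@addr is a multiple of @size" is equivalent to
 * none of the address bits below the size being set, hence the (size - 1)
 * mask.  Illustrative example: addr 0x3000 with size 0x1000 passes
 * ((0xFFF & 0x3000) == 0), while addr 0x3800 would be rejected.
 */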
static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
{
        if (((size - 1) & addr) != 0)
                return -EFAULT;
        return 0;
}

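/*
 * Translate a requested (msg_size * msg_num) byte total into one of the
 * discrete ring-size encodings between ADF_MIN_RING_SIZE and
 * ADF_MAX_RING_SIZE.  Only an exact match is accepted; any other request
 * silently falls back to ADF_DEFAULT_RING_SIZE.
 */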
static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
{
        int i = ADF_MIN_RING_SIZE;

        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
                        return i;

        return ADF_DEFAULT_RING_SIZE;
}

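/*
 * Ring ownership within a bank is tracked in the ring_mask bitmap.
 * Reservation is a test-and-set of the ring's bit under the bank lock, so
 * two callers racing for the same ring number cannot both succeed.
 */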
static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        if (bank->ring_mask & (1 << ring)) {
                spin_unlock(&bank->lock);
                return -EFAULT;
        }
        bank->ring_mask |= (1 << ring);
        spin_unlock(&bank->lock);
        return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        bank->ring_mask &= ~(1 << ring);
        spin_unlock(&bank->lock);
}

static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask |= (1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
        WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
                              bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask &= ~(1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}

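/*
 * Producer side of a request ring.  A slot is claimed by bumping the
 * shared inflight counter before anything else, making the capacity check
 * and the reservation one atomic step; the message is then copied at the
 * current tail, the tail advanced modulo the ring size, and the new tail
 * written to the tail CSR as the doorbell.  A full ring rolls the counter
 * back and returns -EAGAIN, leaving retry policy to the caller.
 */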
int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
{
        if (atomic_add_return(1, ring->inflights) >
            ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
                atomic_dec(ring->inflights);
                return -EAGAIN;
        }
        spin_lock_bh(&ring->lock);
        memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
               ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

        ring->tail = adf_modulo(ring->tail +
                                ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                ADF_RING_SIZE_MODULO(ring->ring_size));
        WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring->tail);
        spin_unlock_bh(&ring->lock);
        return 0;
}

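/*
 * Consumer side of a response ring.  Messages are drained from the head
 * until the empty signature is seen again; each consumed slot is restamped
 * with ADF_RING_EMPTY_SIG so the next pass can detect end-of-data, and the
 * head CSR is written back once per batch rather than once per message.
 */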
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
        uint32_t msg_counter = 0;
        uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);

        while (*msg != ADF_RING_EMPTY_SIG) {
                ring->callback((uint32_t *)msg);
                atomic_dec(ring->inflights);
                *msg = ADF_RING_EMPTY_SIG;
                ring->head = adf_modulo(ring->head +
                                        ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                        ADF_RING_SIZE_MODULO(ring->ring_size));
                msg_counter++;
                msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
        }
        if (msg_counter > 0)
                WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
                                    ring->bank->bank_number,
                                    ring->ring_number, ring->head);
        return 0;
}

static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config =
                        BUILD_RESP_RING_CONFIG(ring->ring_size,
                                               ADF_RING_NEAR_WATERMARK_512,
                                               ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

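/*
 * Allocate the DMA-coherent ring buffer and program it into the bank CSRs.
 * The 0x7F memset below stamps the buffer with the byte pattern of the
 * empty signature, so every 32-bit slot initially reads back as
 * ADF_RING_EMPTY_SIG for the drain loop in adf_handle_response().
 */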
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;
        struct adf_accel_dev *accel_dev = bank->accel_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        uint64_t ring_base;
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
        ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
                                             ring_size_bytes, &ring->dma_addr,
                                             GFP_KERNEL);
        if (!ring->base_addr)
                return -ENOMEM;

        memset(ring->base_addr, 0x7F, ring_size_bytes);
        /* The base_addr has to be aligned to the size of the buffer */
        if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
                dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
                dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
                                  ring->base_addr, ring->dma_addr);
                ring->base_addr = NULL;
                return -EFAULT;
        }

        if (hw_data->tx_rings_mask & (1 << ring->ring_number))
                adf_configure_tx_ring(ring);
        else
                adf_configure_rx_ring(ring);

        ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
        WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring_base);
        spin_lock_init(&ring->lock);
        return 0;
}

static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

        if (ring->base_addr) {
                memset(ring->base_addr, 0x7F, ring_size_bytes);
                dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
                                  ring_size_bytes, ring->base_addr,
                                  ring->dma_addr);
        }
}

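/*
 * Create one ring in @bank_num: validate the requested geometry, look up
 * the ring number for @ring_name in the device configuration, reserve the
 * slot, then bring the ring up (DMA buffer, CSRs, arbitration, debugfs).
 * With a @callback and poll_mode == 0 the ring is interrupt driven;
 * polling callers pass a non-zero poll_mode and drain responses themselves.
 */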
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
                    uint32_t bank_num, uint32_t num_msgs,
                    uint32_t msg_size, const char *ring_name,
                    adf_callback_fn callback, int poll_mode,
                    struct adf_etr_ring_data **ring_ptr)
{
        struct adf_etr_data *transport_data = accel_dev->transport;
        struct adf_etr_bank_data *bank;
        struct adf_etr_ring_data *ring;
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        uint32_t ring_num;
        int ret;

        if (bank_num >= GET_MAX_BANKS(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
                return -EFAULT;
        }
        if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
                return -EFAULT;
        }
        if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
                              ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
                dev_err(&GET_DEV(accel_dev),
                        "Invalid ring size for given msg size\n");
                return -EFAULT;
        }
        if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
                dev_err(&GET_DEV(accel_dev), "Section %s, no such entry: %s\n",
                        section, ring_name);
                return -EFAULT;
        }
        if (kstrtouint(val, 10, &ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
                return -EFAULT;
        }
        if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
                dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
                return -EFAULT;
        }

        bank = &transport_data->banks[bank_num];
        if (adf_reserve_ring(bank, ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
                        ring_num, ring_name);
                return -EFAULT;
        }
        ring = &bank->rings[ring_num];
        ring->ring_number = ring_num;
        ring->bank = bank;
        ring->callback = callback;
        ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
        ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
        ring->head = 0;
        ring->tail = 0;
        atomic_set(ring->inflights, 0);
        ret = adf_init_ring(ring);
        if (ret)
                goto err;

        /* Enable HW arbitration for the given ring */
        adf_update_ring_arb(ring);

        if (adf_ring_debugfs_add(ring, ring_name)) {
                dev_err(&GET_DEV(accel_dev),
                        "Couldn't add ring debugfs entry\n");
                ret = -EFAULT;
                goto err;
        }

        /* Enable interrupts if needed */
        if (callback && (!poll_mode))
                adf_enable_ring_irq(bank, ring->ring_number);
        *ring_ptr = ring;
        return 0;
err:
        adf_cleanup_ring(ring);
        adf_unreserve_ring(bank, ring_num);
        adf_update_ring_arb(ring);
        return ret;
}

void adf_remove_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;

        /* Disable interrupts for the given ring */
        adf_disable_ring_irq(bank, ring->ring_number);

        /* Clear the ring's config and base-address CSRs */
        WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
                              ring->ring_number, 0);
        WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
                            ring->ring_number, 0);
        adf_ring_debugfs_rm(ring);
        adf_unreserve_ring(bank, ring->ring_number);
        /* Disable HW arbitration for the given ring */
        adf_update_ring_arb(ring);
        adf_cleanup_ring(ring);
}

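/*
 * The empty-ring status CSR carries one bit per ring in the bank.
 * Inverting it and masking with irq_mask selects exactly the
 * interrupt-enabled rings that have responses pending, and only those
 * are drained.
 */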
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
        uint32_t empty_rings, i;

        empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
        empty_rings = ~empty_rings & bank->irq_mask;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
                if (empty_rings & (1 << i))
                        adf_handle_response(&bank->rings[i]);
        }
}

void adf_response_handler(uintptr_t bank_addr)
{
        struct adf_etr_bank_data *bank = (void *)bank_addr;

        /* Handle all the responses and reenable IRQs */
        adf_ring_response_handler(bank);
        WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
                                   bank->irq_mask);
}

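/*
 * Read one numeric value from the device configuration, with the key name
 * built from a printf-style @format and a numeric @key (for example a
 * per-bank key such as "Bank%dCoalescingTimerNs"; the exact format strings
 * are defined in the adf_cfg headers, so treat this one as illustrative).
 */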
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
                                  const char *section, const char *format,
                                  uint32_t key, uint32_t *value)
{
        char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

        snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

        if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
                return -EFAULT;

        if (kstrtouint(val_buf, 10, value))
                return -EFAULT;
        return 0;
}

static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
                                  const char *section,
                                  uint32_t bank_num_in_accel)
{
        if (adf_get_cfg_int(bank->accel_dev, section,
                            ADF_ETRMGR_COALESCE_TIMER_FORMAT,
                            bank_num_in_accel, &bank->irq_coalesc_timer))
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

        if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
            ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}

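/*
 * Bring up one bank of rings.  Rings flagged in tx_rings_mask get their
 * own inflight counter; each rx ring sits tx_rx_gap slots above its tx
 * partner and shares that partner's counter, so a request submitted on tx
 * and its response consumed on rx move the same count up and down.
 */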
static int adf_init_bank(struct adf_accel_dev *accel_dev,
                         struct adf_etr_bank_data *bank,
                         uint32_t bank_num, void __iomem *csr_addr)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_etr_ring_data *ring;
        struct adf_etr_ring_data *tx_ring;
        uint32_t i, coalesc_enabled = 0;

        memset(bank, 0, sizeof(*bank));
        bank->bank_number = bank_num;
        bank->csr_addr = csr_addr;
        bank->accel_dev = accel_dev;
        spin_lock_init(&bank->lock);

        /* Always enable IRQ coalescing. This allows use of the optimised
         * flag and coalesce register.
         * If coalescing is disabled in the config file, just use the
         * minimum time value. */
        if ((adf_get_cfg_int(accel_dev, "Accelerator0",
                             ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
                             &coalesc_enabled) == 0) && coalesc_enabled)
                adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
        else
                bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
                        ring->inflights =
                                kzalloc_node(sizeof(atomic_t),
                                             GFP_KERNEL,
                                             dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
                        if (i < hw_data->tx_rx_gap) {
                                dev_err(&GET_DEV(accel_dev),
                                        "Invalid tx rings mask config\n");
                                goto err;
                        }
                        tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
                        ring->inflights = tx_ring->inflights;
                }
        }
        if (adf_bank_debugfs_add(bank)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to add bank debugfs entry\n");
                goto err;
        }

        WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
        WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
        return 0;
err:
        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communication channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *csr_addr;
        uint32_t size;
        uint32_t num_banks = 0;
        int i, ret;

        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
                                dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;

        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
        etr_data->banks = kzalloc_node(size, GFP_KERNEL,
                                       dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
        }

        accel_dev->transport = etr_data;
        i = hw_data->get_etr_bar_id(hw_data);
        csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

        /* accel_dev->debugfs_dir should always be non-NULL here */
        etr_data->debug = debugfs_create_dir("transport",
                                             accel_dev->debugfs_dir);
        if (!etr_data->debug) {
                dev_err(&GET_DEV(accel_dev),
                        "Unable to create transport debugfs entry\n");
                ret = -ENOENT;
                goto err_bank_debug;
        }

        for (i = 0; i < num_banks; i++) {
                ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
                                    csr_addr);
                if (ret)
                        goto err_bank_all;
        }

        return 0;

err_bank_all:
        debugfs_remove(etr_data->debug);
err_bank_debug:
        kfree(etr_data->banks);
err_bank:
        kfree(etr_data);
        accel_dev->transport = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);

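/*
 * Illustrative usage sketch (hypothetical device-specific probe/remove
 * paths, not code from this file): a QAT device driver is expected to
 * pair the two exported entry points around the device lifetime:
 *
 *        ret = adf_init_etr_data(accel_dev);
 *        if (ret)
 *                return ret;
 *        ...
 *        adf_cleanup_etr_data(accel_dev);
 */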
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
        uint32_t i;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                struct adf_accel_dev *accel_dev = bank->accel_dev;
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_etr_ring_data *ring = &bank->rings[i];

                if (bank->ring_mask & (1 << i))
                        adf_cleanup_ring(ring);

                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        adf_bank_debugfs_rm(bank);
        memset(bank, 0, sizeof(*bank));
}

static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;
        uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);

        for (i = 0; i < num_banks; i++)
                cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;

        if (etr_data) {
                adf_cleanup_etr_handles(accel_dev);
                debugfs_remove(etr_data->debug);
                kfree(etr_data->banks);
                kfree(etr_data);
                accel_dev->transport = NULL;
        }
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);