GNU Linux-libre 4.9.337-gnu1
[releases.git] / drivers / crypto / amcc / crypto4xx_core.c
1 /**
2  * AMCC SoC PPC4xx Crypto Driver
3  *
4  * Copyright (c) 2008 Applied Micro Circuits Corporation.
5  * All rights reserved. James Hsiao <jhsiao@amcc.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * This file implements the AMCC crypto offload Linux device driver for use
18  * with the Linux CryptoAPI.
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock_types.h>
24 #include <linux/random.h>
25 #include <linux/scatterlist.h>
26 #include <linux/crypto.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/platform_device.h>
29 #include <linux/init.h>
30 #include <linux/module.h>
31 #include <linux/of_address.h>
32 #include <linux/of_irq.h>
33 #include <linux/of_platform.h>
34 #include <linux/slab.h>
35 #include <asm/dcr.h>
36 #include <asm/dcr-regs.h>
37 #include <asm/cacheflush.h>
38 #include <crypto/aes.h>
39 #include <crypto/sha.h>
40 #include "crypto4xx_reg_def.h"
41 #include "crypto4xx_core.h"
42 #include "crypto4xx_sa.h"
43 #include "crypto4xx_trng.h"
44
45 #define PPC4XX_SEC_VERSION_STR                  "0.5"
46
47 /**
48  * PPC4xx Crypto Engine Initialization Routine
49  */
50 static void crypto4xx_hw_init(struct crypto4xx_device *dev)
51 {
52         union ce_ring_size ring_size;
53         union ce_ring_contol ring_ctrl;
54         union ce_part_ring_size part_ring_size;
55         union ce_io_threshold io_threshold;
56         u32 rand_num;
57         union ce_pe_dma_cfg pe_dma_cfg;
58         u32 device_ctrl;
59
60         writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
61         /* set up PE DMA, including resetting sg, pdr and pe, then release the reset */
62         pe_dma_cfg.w = 0;
63         pe_dma_cfg.bf.bo_sgpd_en = 1;
64         pe_dma_cfg.bf.bo_data_en = 0;
65         pe_dma_cfg.bf.bo_sa_en = 1;
66         pe_dma_cfg.bf.bo_pd_en = 1;
67         pe_dma_cfg.bf.dynamic_sa_en = 1;
68         pe_dma_cfg.bf.reset_sg = 1;
69         pe_dma_cfg.bf.reset_pdr = 1;
70         pe_dma_cfg.bf.reset_pe = 1;
71         writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
72         /* take pe, sg and pdr out of reset */
73         pe_dma_cfg.bf.pe_mode = 0;
74         pe_dma_cfg.bf.reset_sg = 0;
75         pe_dma_cfg.bf.reset_pdr = 0;
76         pe_dma_cfg.bf.reset_pe = 0;
77         pe_dma_cfg.bf.bo_td_en = 0;
78         writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
79         writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
80         writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
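        /* enable the engine's PRNG and seed it from the kernel entropy pool */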
81         writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
82         get_random_bytes(&rand_num, sizeof(rand_num));
83         writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
84         get_random_bytes(&rand_num, sizeof(rand_num));
85         writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
86         ring_size.w = 0;
87         ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
88         ring_size.bf.ring_size   = PPC4XX_NUM_PD;
89         writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
90         ring_ctrl.w = 0;
91         writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
92         device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
93         device_ctrl |= PPC4XX_DC_3DES_EN;
94         writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
95         writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
96         writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
97         part_ring_size.w = 0;
98         part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
99         part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
100         writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
101         writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
102         io_threshold.w = 0;
103         io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
104         io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
105         writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
106         writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
107         writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
108         writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
109         writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
110         writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
111         writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
112         writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
113         /* enable the packet engine; keep sg and pdr out of reset */
114         pe_dma_cfg.bf.pe_mode = 1;
115         pe_dma_cfg.bf.reset_sg = 0;
116         pe_dma_cfg.bf.reset_pdr = 0;
117         pe_dma_cfg.bf.reset_pe = 0;
118         pe_dma_cfg.bf.bo_td_en = 0;
119         writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
120         /* clear all pending interrupts */
121         writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
122         writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
123         writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
124         writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
125         writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
126 }
127
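/*
 * Allocate the inbound and outbound security association (SA) buffers for a
 * tfm context.  @size is given in 32-bit words, so each direction gets
 * size * 4 bytes of zeroed DMA-coherent memory.
 */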
128 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
129 {
130         ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
131                                         &ctx->sa_in_dma_addr, GFP_ATOMIC);
132         if (ctx->sa_in == NULL)
133                 return -ENOMEM;
134
135         ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
136                                          &ctx->sa_out_dma_addr, GFP_ATOMIC);
137         if (ctx->sa_out == NULL) {
138                 dma_free_coherent(ctx->dev->core_dev->device,
139                                   ctx->sa_len * 4,
140                                   ctx->sa_in, ctx->sa_in_dma_addr);
141                 return -ENOMEM;
142         }
143
144         memset(ctx->sa_in, 0, size * 4);
145         memset(ctx->sa_out, 0, size * 4);
146         ctx->sa_len = size;
147
148         return 0;
149 }
150
151 void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
152 {
153         if (ctx->sa_in != NULL)
154                 dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
155                                   ctx->sa_in, ctx->sa_in_dma_addr);
156         if (ctx->sa_out != NULL)
157                 dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
158                                   ctx->sa_out, ctx->sa_out_dma_addr);
159
160         ctx->sa_in_dma_addr = 0;
161         ctx->sa_out_dma_addr = 0;
162         ctx->sa_len = 0;
163 }
164
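/*
 * Allocate the per-context state record.  The engine saves per-operation
 * state here, e.g. the running hash digest (save_digest) and the last IV
 * (save_iv) that the completion handlers below copy back to the caller.
 */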
165 u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
166 {
167         ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
168                                 sizeof(struct sa_state_record),
169                                 &ctx->state_record_dma_addr, GFP_ATOMIC);
170         if (!ctx->state_record_dma_addr)
171                 return -ENOMEM;
172         memset(ctx->state_record, 0, sizeof(struct sa_state_record));
173
174         return 0;
175 }
176
177 void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
178 {
179         if (ctx->state_record != NULL)
180                 dma_free_coherent(ctx->dev->core_dev->device,
181                                   sizeof(struct sa_state_record),
182                                   ctx->state_record,
183                                   ctx->state_record_dma_addr);
184         ctx->state_record_dma_addr = 0;
185 }
186
187 /**
188  * alloc memory for the packet descriptor ring
189  * also alloc the shadow sa and state record pools used by each pd
190  * no need to alloc extra data buffers for this ring
191  */
192 static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
193 {
194         int i;
195         struct pd_uinfo *pd_uinfo;
196         dev->pdr = dma_alloc_coherent(dev->core_dev->device,
197                                       sizeof(struct ce_pd) * PPC4XX_NUM_PD,
198                                       &dev->pdr_pa, GFP_ATOMIC);
199         if (!dev->pdr)
200                 return -ENOMEM;
201
202         dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
203                                 GFP_KERNEL);
204         if (!dev->pdr_uinfo) {
205                 dma_free_coherent(dev->core_dev->device,
206                                   sizeof(struct ce_pd) * PPC4XX_NUM_PD,
207                                   dev->pdr,
208                                   dev->pdr_pa);
209                 return -ENOMEM;
210         }
211         memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
212         dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
213                                    256 * PPC4XX_NUM_PD,
214                                    &dev->shadow_sa_pool_pa,
215                                    GFP_ATOMIC);
216         if (!dev->shadow_sa_pool)
217                 return -ENOMEM;
218
219         dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
220                          sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
221                          &dev->shadow_sr_pool_pa, GFP_ATOMIC);
222         if (!dev->shadow_sr_pool)
223                 return -ENOMEM;
224         for (i = 0; i < PPC4XX_NUM_PD; i++) {
225                 pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
226                                                 sizeof(struct pd_uinfo) * i);
227
228                 /* alloc 256 bytes which is enough for any kind of dynamic sa */
229                 pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
230                 pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
231
232                 /* alloc state record */
233                 pd_uinfo->sr_va = dev->shadow_sr_pool +
234                     sizeof(struct sa_state_record) * i;
235                 pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
236                     sizeof(struct sa_state_record) * i;
237         }
238
239         return 0;
240 }
241
242 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
243 {
244         if (dev->pdr)
245                 dma_free_coherent(dev->core_dev->device,
246                                   sizeof(struct ce_pd) * PPC4XX_NUM_PD,
247                                   dev->pdr, dev->pdr_pa);
248
249         if (dev->shadow_sa_pool)
250                 dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
251                                   dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
252
253         if (dev->shadow_sr_pool)
254                 dma_free_coherent(dev->core_dev->device,
255                         sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
256                         dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
257
258         kfree(dev->pdr_uinfo);
259 }
260
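/*
 * Reserve the next free packet descriptor slot.  The PD ring is a circular
 * buffer: the submission path advances pdr_head and the completion tasklet
 * advances pdr_tail.  "_nolock" means the caller must already hold
 * core_dev->lock, as crypto4xx_build_pd() does.
 */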
261 static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
262 {
263         u32 retval;
264         u32 tmp;
265
266         retval = dev->pdr_head;
267         tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
268
269         if (tmp == dev->pdr_tail)
270                 return ERING_WAS_FULL;
271
272         dev->pdr_head = tmp;
273
274         return retval;
275 }
276
277 static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
278 {
279         struct pd_uinfo *pd_uinfo;
280         unsigned long flags;
281
282         pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
283                                        sizeof(struct pd_uinfo) * idx);
284         spin_lock_irqsave(&dev->core_dev->lock, flags);
285         if (dev->pdr_tail != PPC4XX_LAST_PD)
286                 dev->pdr_tail++;
287         else
288                 dev->pdr_tail = 0;
289         pd_uinfo->state = PD_ENTRY_FREE;
290         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
291
292         return 0;
293 }
294
295 static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
296                                        dma_addr_t *pd_dma, u32 idx)
297 {
298         *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
299
300         return dev->pdr + sizeof(struct ce_pd) * idx;
301 }
302
303 /**
304  * alloc memory for the gather ring
305  * no need to alloc buf for the ring
306  * gdr_tail, gdr_head and gdr_count are initialized by this function
307  */
308 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
309 {
310         dev->gdr = dma_alloc_coherent(dev->core_dev->device,
311                                       sizeof(struct ce_gd) * PPC4XX_NUM_GD,
312                                       &dev->gdr_pa, GFP_ATOMIC);
313         if (!dev->gdr)
314                 return -ENOMEM;
315
316         memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
317
318         return 0;
319 }
320
321 static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
322 {
323         dma_free_coherent(dev->core_dev->device,
324                           sizeof(struct ce_gd) * PPC4XX_NUM_GD,
325                           dev->gdr, dev->gdr_pa);
326 }
327
328 /*
329  * when this function is called,
330  * preemption or interrupts must be disabled
331  */
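/*
 * An illustrative walk-through (numbers are an example only): with
 * gdr_head == 5 and gdr_tail == 2, entries 2..4 are still in flight, so a
 * request whose new head (head + n modulo PPC4XX_NUM_GD) would wrap around
 * into [tail, head) is refused with ERING_WAS_FULL; when head == tail the
 * ring is empty and any n < PPC4XX_NUM_GD fits.
 */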
332 u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
333 {
334         u32 retval;
335         u32 tmp;
336         if (n >= PPC4XX_NUM_GD)
337                 return ERING_WAS_FULL;
338
339         retval = dev->gdr_head;
340         tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
341         if (dev->gdr_head > dev->gdr_tail) {
342                 if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
343                         return ERING_WAS_FULL;
344         } else if (dev->gdr_head < dev->gdr_tail) {
345                 if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
346                         return ERING_WAS_FULL;
347         }
348         dev->gdr_head = tmp;
349
350         return retval;
351 }
352
353 static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
354 {
355         unsigned long flags;
356
357         spin_lock_irqsave(&dev->core_dev->lock, flags);
358         if (dev->gdr_tail == dev->gdr_head) {
359                 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
360                 return 0;
361         }
362
363         if (dev->gdr_tail != PPC4XX_LAST_GD)
364                 dev->gdr_tail++;
365         else
366                 dev->gdr_tail = 0;
367
368         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
369
370         return 0;
371 }
372
373 static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
374                                               dma_addr_t *gd_dma, u32 idx)
375 {
376         *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
377
378         return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
379 }
380
381 /**
382  * alloc memory for the scatter ring
383  * need to alloc buf for the ring
384  * sdr_tail, sdr_head and sdr_count are initialized by this function
385  */
386 static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
387 {
388         int i;
389         struct ce_sd *sd_array;
390
391         /* alloc memory for scatter descriptor ring */
392         dev->sdr = dma_alloc_coherent(dev->core_dev->device,
393                                       sizeof(struct ce_sd) * PPC4XX_NUM_SD,
394                                       &dev->sdr_pa, GFP_ATOMIC);
395         if (!dev->sdr)
396                 return -ENOMEM;
397
398         dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
399         dev->scatter_buffer_va =
400                 dma_alloc_coherent(dev->core_dev->device,
401                         dev->scatter_buffer_size * PPC4XX_NUM_SD,
402                         &dev->scatter_buffer_pa, GFP_ATOMIC);
403         if (!dev->scatter_buffer_va)
404                 return -ENOMEM;
405
406         sd_array = dev->sdr;
407
408         for (i = 0; i < PPC4XX_NUM_SD; i++) {
409                 sd_array[i].ptr = dev->scatter_buffer_pa +
410                                   dev->scatter_buffer_size * i;
411         }
412
413         return 0;
414 }
415
416 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
417 {
418         if (dev->sdr)
419                 dma_free_coherent(dev->core_dev->device,
420                                   sizeof(struct ce_sd) * PPC4XX_NUM_SD,
421                                   dev->sdr, dev->sdr_pa);
422
423         if (dev->scatter_buffer_va)
424                 dma_free_coherent(dev->core_dev->device,
425                                   dev->scatter_buffer_size * PPC4XX_NUM_SD,
426                                   dev->scatter_buffer_va,
427                                   dev->scatter_buffer_pa);
428 }
429
430 /*
431  * when this function is called,
432  * preemption or interrupts must be disabled
433  */
434 static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
435 {
436         u32 retval;
437         u32 tmp;
438
439         if (n >= PPC4XX_NUM_SD)
440                 return ERING_WAS_FULL;
441
442         retval = dev->sdr_head;
443         tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
444         if (dev->sdr_head > dev->sdr_tail) {
445                 if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
446                         return ERING_WAS_FULL;
447         } else if (dev->sdr_head < dev->sdr_tail) {
448                 if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
449                         return ERING_WAS_FULL;
450         } /* the head == tail (empty ring) case is already taken care of */
451         dev->sdr_head = tmp;
452
453         return retval;
454 }
455
456 static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
457 {
458         unsigned long flags;
459
460         spin_lock_irqsave(&dev->core_dev->lock, flags);
461         if (dev->sdr_tail == dev->sdr_head) {
462                 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
463                 return 0;
464         }
465         if (dev->sdr_tail != PPC4XX_LAST_SD)
466                 dev->sdr_tail++;
467         else
468                 dev->sdr_tail = 0;
469         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
470
471         return 0;
472 }
473
474 static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
475                                               dma_addr_t *sd_dma, u32 idx)
476 {
477         *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
478
479         return  (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
480 }
481
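/*
 * Copy one chunk of processed output from the engine's scatter buffers back
 * to the destination at *addr.  *idx walks the scatter descriptor ring,
 * *offset is the position inside the current scatter buffer and *nbytes is
 * what is still outstanding overall.  Returns 1 while the current destination
 * segment still needs more data, 0 once it has been filled.
 */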
482 static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
483                                    dma_addr_t *addr, u32 *length,
484                                    u32 *idx, u32 *offset, u32 *nbytes)
485 {
486         u32 len;
487
488         if (*length > dev->scatter_buffer_size) {
489                 memcpy(phys_to_virt(*addr),
490                         dev->scatter_buffer_va +
491                         *idx * dev->scatter_buffer_size + *offset,
492                         dev->scatter_buffer_size);
493                 *offset = 0;
494                 *length -= dev->scatter_buffer_size;
495                 *nbytes -= dev->scatter_buffer_size;
496                 if (*idx == PPC4XX_LAST_SD)
497                         *idx = 0;
498                 else
499                         (*idx)++;
500                 *addr = *addr +  dev->scatter_buffer_size;
501                 return 1;
502         } else if (*length < dev->scatter_buffer_size) {
503                 memcpy(phys_to_virt(*addr),
504                         dev->scatter_buffer_va +
505                         *idx * dev->scatter_buffer_size + *offset, *length);
506                 if ((*offset + *length) == dev->scatter_buffer_size) {
507                         if (*idx == PPC4XX_LAST_SD)
508                                 *idx = 0;
509                         else
510                                 (*idx)++;
511                         *nbytes -= *length;
512                         *offset = 0;
513                 } else {
514                         *nbytes -= *length;
515                         *offset += *length;
516                 }
517
518                 return 0;
519         } else {
520                 len = (*nbytes <= dev->scatter_buffer_size) ?
521                                 (*nbytes) : dev->scatter_buffer_size;
522                 memcpy(phys_to_virt(*addr),
523                         dev->scatter_buffer_va +
524                         *idx * dev->scatter_buffer_size + *offset,
525                         len);
526                 *offset = 0;
527                 *nbytes -= len;
528
529                 if (*idx == PPC4XX_LAST_SD)
530                         *idx = 0;
531                 else
532                         (*idx)++;
533
534                 return 0;
535         }
536 }
537
538 static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
539                                       struct ce_pd *pd,
540                                       struct pd_uinfo *pd_uinfo,
541                                       u32 nbytes,
542                                       struct scatterlist *dst)
543 {
544         dma_addr_t addr;
545         u32 this_sd;
546         u32 offset;
547         u32 len;
548         u32 i;
549         u32 sg_len;
550         struct scatterlist *sg;
551
552         this_sd = pd_uinfo->first_sd;
553         offset = 0;
554         i = 0;
555
556         while (nbytes) {
557                 sg = &dst[i];
558                 sg_len = sg->length;
559                 addr = dma_map_page(dev->core_dev->device, sg_page(sg),
560                                 sg->offset, sg->length, DMA_TO_DEVICE);
561
562                 if (offset == 0) {
563                         len = (nbytes <= sg->length) ? nbytes : sg->length;
564                         while (crypto4xx_fill_one_page(dev, &addr, &len,
565                                 &this_sd, &offset, &nbytes))
566                                 ;
567                         if (!nbytes)
568                                 return;
569                         i++;
570                 } else {
571                         len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
572                                 nbytes : (dev->scatter_buffer_size - offset);
573                         len = (sg->length < len) ? sg->length : len;
574                         while (crypto4xx_fill_one_page(dev, &addr, &len,
575                                                &this_sd, &offset, &nbytes))
576                                 ;
577                         if (!nbytes)
578                                 return;
579                         sg_len -= len;
580                         if (sg_len) {
581                                 addr += len;
582                                 while (crypto4xx_fill_one_page(dev, &addr,
583                                         &sg_len, &this_sd, &offset, &nbytes))
584                                         ;
585                         }
586                         i++;
587                 }
588         }
589 }
590
591 static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
592                                         struct crypto4xx_ctx *ctx)
593 {
594         struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
595         struct sa_state_record *state_record =
596                                 (struct sa_state_record *) pd_uinfo->sr_va;
597
598         if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
599                 memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
600                        SA_HASH_ALG_SHA1_DIGEST_SIZE);
601         }
602
603         return 0;
604 }
605
606 static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
607                                   struct pd_uinfo *pd_uinfo)
608 {
609         int i;
610         if (pd_uinfo->num_gd) {
611                 for (i = 0; i < pd_uinfo->num_gd; i++)
612                         crypto4xx_put_gd_to_gdr(dev);
613                 pd_uinfo->first_gd = 0xffffffff;
614                 pd_uinfo->num_gd = 0;
615         }
616         if (pd_uinfo->num_sd) {
617                 for (i = 0; i < pd_uinfo->num_sd; i++)
618                         crypto4xx_put_sd_to_sdr(dev);
619
620                 pd_uinfo->first_sd = 0xffffffff;
621                 pd_uinfo->num_sd = 0;
622         }
623 }
624
625 static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
626                                      struct pd_uinfo *pd_uinfo,
627                                      struct ce_pd *pd)
628 {
629         struct crypto4xx_ctx *ctx;
630         struct ablkcipher_request *ablk_req;
631         struct scatterlist *dst;
632         dma_addr_t addr;
633
634         ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
635         ctx  = crypto_tfm_ctx(ablk_req->base.tfm);
636
637         if (pd_uinfo->using_sd) {
638                 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
639                                           ablk_req->dst);
640         } else {
641                 dst = pd_uinfo->dest_va;
642                 addr = dma_map_page(dev->core_dev->device, sg_page(dst),
643                                     dst->offset, dst->length, DMA_FROM_DEVICE);
644         }
645
646         if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
647                 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(ablk_req);
648
649                 crypto4xx_memcpy_from_le32((u32 *)ablk_req->info,
650                         pd_uinfo->sr_va->save_iv,
651                         crypto_ablkcipher_ivsize(cipher));
652         }
653
654         crypto4xx_ret_sg_desc(dev, pd_uinfo);
655         if (ablk_req->base.complete != NULL)
656                 ablk_req->base.complete(&ablk_req->base, 0);
657
658         return 0;
659 }
660
661 static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
662                                 struct pd_uinfo *pd_uinfo)
663 {
664         struct crypto4xx_ctx *ctx;
665         struct ahash_request *ahash_req;
666
667         ahash_req = ahash_request_cast(pd_uinfo->async_req);
668         ctx  = crypto_tfm_ctx(ahash_req->base.tfm);
669
670         crypto4xx_copy_digest_to_dst(pd_uinfo,
671                                      crypto_tfm_ctx(ahash_req->base.tfm));
672         crypto4xx_ret_sg_desc(dev, pd_uinfo);
673         /* call the user-provided callback function */
674         if (ahash_req->base.complete != NULL)
675                 ahash_req->base.complete(&ahash_req->base, 0);
676
677         return 0;
678 }
679
680 static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
681 {
682         struct ce_pd *pd;
683         struct pd_uinfo *pd_uinfo;
684
685         pd =  dev->pdr + sizeof(struct ce_pd)*idx;
686         pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
687         if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
688                         CRYPTO_ALG_TYPE_ABLKCIPHER)
689                 return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
690         else
691                 return crypto4xx_ahash_done(dev, pd_uinfo);
692 }
693
694 /**
695  * Note: Only use this function to copy items that are word aligned.
696  */
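/*
 * Example (illustration only): copying a 7-byte IV stores the first four
 * bytes as one little-endian word; the remaining three bytes land in the
 * next word byte-reversed behind a single zero pad byte, as handled by the
 * switch statement below.
 */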
697 void crypto4xx_memcpy_le(unsigned int *dst,
698                          const unsigned char *buf,
699                          int len)
700 {
701         u8 *tmp;
702         for (; len >= 4; buf += 4, len -= 4)
703                 *dst++ = cpu_to_le32(*(unsigned int *) buf);
704
705         tmp = (u8 *)dst;
706         switch (len) {
707         case 3:
708                 *tmp++ = 0;
709                 *tmp++ = *(buf+2);
710                 *tmp++ = *(buf+1);
711                 *tmp++ = *buf;
712                 break;
713         case 2:
714                 *tmp++ = 0;
715                 *tmp++ = 0;
716                 *tmp++ = *(buf+1);
717                 *tmp++ = *buf;
718                 break;
719         case 1:
720                 *tmp++ = 0;
721                 *tmp++ = 0;
722                 *tmp++ = 0;
723                 *tmp++ = *buf;
724                 break;
725         default:
726                 break;
727         }
728 }
729
730 static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
731 {
732         crypto4xx_destroy_pdr(core_dev->dev);
733         crypto4xx_destroy_gdr(core_dev->dev);
734         crypto4xx_destroy_sdr(core_dev->dev);
735         iounmap(core_dev->dev->ce_base);
736         kfree(core_dev->dev);
737         kfree(core_dev);
738 }
739
740 void crypto4xx_return_pd(struct crypto4xx_device *dev,
741                          u32 pd_entry, struct ce_pd *pd,
742                          struct pd_uinfo *pd_uinfo)
743 {
744         /* irq should be already disabled */
745         dev->pdr_head = pd_entry;
746         pd->pd_ctl.w = 0;
747         pd->pd_ctl_len.w = 0;
748         pd_uinfo->state = PD_ENTRY_FREE;
749 }
750
751 static u32 get_next_gd(u32 current)
752 {
753         if (current != PPC4XX_LAST_GD)
754                 return current + 1;
755         else
756                 return 0;
757 }
758
759 static u32 get_next_sd(u32 current)
760 {
761         if (current != PPC4XX_LAST_SD)
762                 return current + 1;
763         else
764                 return 0;
765 }
766
767 u32 crypto4xx_build_pd(struct crypto_async_request *req,
768                        struct crypto4xx_ctx *ctx,
769                        struct scatterlist *src,
770                        struct scatterlist *dst,
771                        unsigned int datalen,
772                        void *iv, u32 iv_len)
773 {
774         struct crypto4xx_device *dev = ctx->dev;
775         dma_addr_t addr, pd_dma, sd_dma, gd_dma;
776         struct dynamic_sa_ctl *sa;
777         struct scatterlist *sg;
778         struct ce_gd *gd;
779         struct ce_pd *pd;
780         u32 num_gd, num_sd;
781         u32 fst_gd = 0xffffffff;
782         u32 fst_sd = 0xffffffff;
783         u32 pd_entry;
784         unsigned long flags;
785         struct pd_uinfo *pd_uinfo = NULL;
786         unsigned int nbytes = datalen, idx;
787         unsigned int ivlen = 0;
788         u32 gd_idx = 0;
789
790         /* figure how many gd is needed */
791         num_gd = sg_nents_for_len(src, datalen);
792         if ((int)num_gd < 0) {
793                 dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
794                 return -EINVAL;
795         }
796         if (num_gd == 1)
797                 num_gd = 0;
798
799         /* figure how many sd is needed */
800         if (sg_is_last(dst) || ctx->is_hash) {
801                 num_sd = 0;
802         } else {
803                 if (datalen > PPC4XX_SD_BUFFER_SIZE) {
804                         num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
805                         if (datalen % PPC4XX_SD_BUFFER_SIZE)
806                                 num_sd++;
807                 } else {
808                         num_sd = 1;
809                 }
810         }
811
812         /*
813          * The following section of code needs to be protected.
814          * The gather ring and scatter ring entries must be consecutive;
815          * if we run out of any kind of descriptor, the descriptors
816          * already taken must be returned to their original place.
817          */
818         spin_lock_irqsave(&dev->core_dev->lock, flags);
819         if (num_gd) {
820                 fst_gd = crypto4xx_get_n_gd(dev, num_gd);
821                 if (fst_gd == ERING_WAS_FULL) {
822                         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
823                         return -EAGAIN;
824                 }
825         }
826         if (num_sd) {
827                 fst_sd = crypto4xx_get_n_sd(dev, num_sd);
828                 if (fst_sd == ERING_WAS_FULL) {
829                         if (num_gd)
830                                 dev->gdr_head = fst_gd;
831                         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
832                         return -EAGAIN;
833                 }
834         }
835         pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
836         if (pd_entry == ERING_WAS_FULL) {
837                 if (num_gd)
838                         dev->gdr_head = fst_gd;
839                 if (num_sd)
840                         dev->sdr_head = fst_sd;
841                 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
842                 return -EAGAIN;
843         }
844         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
845
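        /*
         * From here on the reserved pd, gd and sd slots belong exclusively to
         * this request, so the ring lock is not needed while they are being
         * filled in.
         */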
846         pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
847                                        sizeof(struct pd_uinfo) * pd_entry);
848         pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
849         pd_uinfo->async_req = req;
850         pd_uinfo->num_gd = num_gd;
851         pd_uinfo->num_sd = num_sd;
852
853         if (iv_len || ctx->is_hash) {
854                 ivlen = iv_len;
855                 pd->sa = pd_uinfo->sa_pa;
856                 sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
857                 if (ctx->direction == DIR_INBOUND)
858                         memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
859                 else
860                         memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
861
862                 memcpy((void *) sa + ctx->offset_to_sr_ptr,
863                         &pd_uinfo->sr_pa, 4);
864
865                 if (iv_len)
866                         crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
867         } else {
868                 if (ctx->direction == DIR_INBOUND) {
869                         pd->sa = ctx->sa_in_dma_addr;
870                         sa = (struct dynamic_sa_ctl *) ctx->sa_in;
871                 } else {
872                         pd->sa = ctx->sa_out_dma_addr;
873                         sa = (struct dynamic_sa_ctl *) ctx->sa_out;
874                 }
875         }
876         pd->sa_len = ctx->sa_len;
877         if (num_gd) {
878                 /* get first gd we are going to use */
879                 gd_idx = fst_gd;
880                 pd_uinfo->first_gd = fst_gd;
881                 pd_uinfo->num_gd = num_gd;
882                 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
883                 pd->src = gd_dma;
884                 /* enable gather */
885                 sa->sa_command_0.bf.gather = 1;
886                 idx = 0;
887                 src = &src[0];
888                 /* walk the sg, and setup gather array */
889                 while (nbytes) {
890                         sg = &src[idx];
891                         addr = dma_map_page(dev->core_dev->device, sg_page(sg),
892                                     sg->offset, sg->length, DMA_TO_DEVICE);
893                         gd->ptr = addr;
894                         gd->ctl_len.len = sg->length;
895                         gd->ctl_len.done = 0;
896                         gd->ctl_len.ready = 1;
897                         if (sg->length >= nbytes)
898                                 break;
899                         nbytes -= sg->length;
900                         gd_idx = get_next_gd(gd_idx);
901                         gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
902                         idx++;
903                 }
904         } else {
905                 pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
906                                 src->offset, src->length, DMA_TO_DEVICE);
907                 /*
908                  * Disable gather in sa command
909                  */
910                 sa->sa_command_0.bf.gather = 0;
911                 /*
912                  * Indicate gather array is not used
913                  */
914                 pd_uinfo->first_gd = 0xffffffff;
915                 pd_uinfo->num_gd = 0;
916         }
917         if (ctx->is_hash || sg_is_last(dst)) {
918                 /*
919                  * we know the application gave us dst as one whole piece of
920                  * memory, so there is no need to use the scatter ring.
921                  * In the is_hash case, the icv is always at the end of the src data.
922                  */
923                 pd_uinfo->using_sd = 0;
924                 pd_uinfo->first_sd = 0xffffffff;
925                 pd_uinfo->num_sd = 0;
926                 pd_uinfo->dest_va = dst;
927                 sa->sa_command_0.bf.scatter = 0;
928                 if (ctx->is_hash)
929                         pd->dest = virt_to_phys((void *)dst);
930                 else
931                         pd->dest = (u32)dma_map_page(dev->core_dev->device,
932                                         sg_page(dst), dst->offset,
933                                         dst->length, DMA_TO_DEVICE);
934         } else {
935                 struct ce_sd *sd = NULL;
936                 u32 sd_idx = fst_sd;
937                 nbytes = datalen;
938                 sa->sa_command_0.bf.scatter = 1;
939                 pd_uinfo->using_sd = 1;
940                 pd_uinfo->dest_va = dst;
941                 pd_uinfo->first_sd = fst_sd;
942                 pd_uinfo->num_sd = num_sd;
943                 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
944                 pd->dest = sd_dma;
945                 /* setup scatter descriptor */
946                 sd->ctl.done = 0;
947                 sd->ctl.rdy = 1;
948                 /* sd->ptr should be set up by the sd_init routine */
949                 idx = 0;
950                 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
951                         nbytes -= PPC4XX_SD_BUFFER_SIZE;
952                 else
953                         nbytes = 0;
954                 while (nbytes) {
955                         sd_idx = get_next_sd(sd_idx);
956                         sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
957                         /* setup scatter descriptor */
958                         sd->ctl.done = 0;
959                         sd->ctl.rdy = 1;
960                         if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
961                                 nbytes -= PPC4XX_SD_BUFFER_SIZE;
962                         else
963                                 /*
964                                  * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
965                                  * which is more than nbytes, so done.
966                                  */
967                                 nbytes = 0;
968                 }
969         }
970
971         sa->sa_command_1.bf.hash_crypto_offset = 0;
972         pd->pd_ctl.w = ctx->pd_ctl;
973         pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
974         pd_uinfo->state = PD_ENTRY_INUSE;
975         wmb();
976         /* write any value to push engine to read a pd */
977         writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
978         return -EINPROGRESS;
979 }
980
981 /**
982  * Algorithm Registration Functions
983  */
984 static int crypto4xx_alg_init(struct crypto_tfm *tfm)
985 {
986         struct crypto_alg *alg = tfm->__crt_alg;
987         struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
988         struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
989
990         ctx->dev = amcc_alg->dev;
991         ctx->sa_in = NULL;
992         ctx->sa_out = NULL;
993         ctx->sa_in_dma_addr = 0;
994         ctx->sa_out_dma_addr = 0;
995         ctx->sa_len = 0;
996
997         switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
998         default:
999                 tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
1000                 break;
1001         case CRYPTO_ALG_TYPE_AHASH:
1002                 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1003                                          sizeof(struct crypto4xx_ctx));
1004                 break;
1005         }
1006
1007         return 0;
1008 }
1009
1010 static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
1011 {
1012         struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
1013
1014         crypto4xx_free_sa(ctx);
1015         crypto4xx_free_state_record(ctx);
1016 }
1017
1018 int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1019                            struct crypto4xx_alg_common *crypto_alg,
1020                            int array_size)
1021 {
1022         struct crypto4xx_alg *alg;
1023         int i;
1024         int rc = 0;
1025
1026         for (i = 0; i < array_size; i++) {
1027                 alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
1028                 if (!alg)
1029                         return -ENOMEM;
1030
1031                 alg->alg = crypto_alg[i];
1032                 alg->dev = sec_dev;
1033
1034                 switch (alg->alg.type) {
1035                 case CRYPTO_ALG_TYPE_AHASH:
1036                         rc = crypto_register_ahash(&alg->alg.u.hash);
1037                         break;
1038
1039                 default:
1040                         rc = crypto_register_alg(&alg->alg.u.cipher);
1041                         break;
1042                 }
1043
1044                 if (rc)
1045                         kfree(alg);
1046                 else
1047                         list_add_tail(&alg->entry, &sec_dev->alg_list);
1048         }
1049
1050         return 0;
1051 }
1052
1053 static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1054 {
1055         struct crypto4xx_alg *alg, *tmp;
1056
1057         list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1058                 list_del(&alg->entry);
1059                 switch (alg->alg.type) {
1060                 case CRYPTO_ALG_TYPE_AHASH:
1061                         crypto_unregister_ahash(&alg->alg.u.hash);
1062                         break;
1063
1064                 default:
1065                         crypto_unregister_alg(&alg->alg.u.cipher);
1066                 }
1067                 kfree(alg);
1068         }
1069 }
1070
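/*
 * Bottom half: walk the packet descriptor ring from the tail and complete
 * every descriptor the engine has finished (pe_done set, host_ready clear),
 * returning its gather/scatter descriptors and invoking the request's
 * completion callback via crypto4xx_pd_done().
 */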
1071 static void crypto4xx_bh_tasklet_cb(unsigned long data)
1072 {
1073         struct device *dev = (struct device *)data;
1074         struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1075         struct pd_uinfo *pd_uinfo;
1076         struct ce_pd *pd;
1077         u32 tail;
1078
1079         while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
1080                 tail = core_dev->dev->pdr_tail;
1081                 pd_uinfo = core_dev->dev->pdr_uinfo +
1082                         sizeof(struct pd_uinfo)*tail;
1083                 pd =  core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
1084                 if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
1085                                    pd->pd_ctl.bf.pe_done &&
1086                                    !pd->pd_ctl.bf.host_ready) {
1087                         pd->pd_ctl.bf.pe_done = 0;
1088                         crypto4xx_pd_done(core_dev->dev, tail);
1089                         crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
1090                         pd_uinfo->state = PD_ENTRY_FREE;
1091                 } else {
1092                         /* if tail not done, break */
1093                         break;
1094                 }
1095         }
1096 }
1097
1098 /**
1099  * Top half of the ISR: acknowledge the interrupt and schedule the bottom-half tasklet.
1100  */
1101 static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1102 {
1103         struct device *dev = (struct device *)data;
1104         struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1105
1106         if (!core_dev->dev->ce_base)
1107                 return 0;
1108
1109         writel(PPC4XX_INTERRUPT_CLR,
1110                core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1111         tasklet_schedule(&core_dev->tasklet);
1112
1113         return IRQ_HANDLED;
1114 }
1115
1116 /**
1117  * Supported Crypto Algorithms
1118  */
1119 struct crypto4xx_alg_common crypto4xx_alg[] = {
1120         /* Crypto AES modes */
1121         { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
1122                 .cra_name       = "cbc(aes)",
1123                 .cra_driver_name = "cbc-aes-ppc4xx",
1124                 .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
1125                 .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1126                 .cra_blocksize  = AES_BLOCK_SIZE,
1127                 .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
1128                 .cra_type       = &crypto_ablkcipher_type,
1129                 .cra_init       = crypto4xx_alg_init,
1130                 .cra_exit       = crypto4xx_alg_exit,
1131                 .cra_module     = THIS_MODULE,
1132                 .cra_u          = {
1133                         .ablkcipher = {
1134                                 .min_keysize    = AES_MIN_KEY_SIZE,
1135                                 .max_keysize    = AES_MAX_KEY_SIZE,
1136                                 .ivsize         = AES_IV_SIZE,
1137                                 .setkey         = crypto4xx_setkey_aes_cbc,
1138                                 .encrypt        = crypto4xx_encrypt,
1139                                 .decrypt        = crypto4xx_decrypt,
1140                         }
1141                 }
1142         }},
1143 };
1144
1145 /**
1146  * Module Initialization Routine
1147  */
1148 static int crypto4xx_probe(struct platform_device *ofdev)
1149 {
1150         int rc;
1151         struct resource res;
1152         struct device *dev = &ofdev->dev;
1153         struct crypto4xx_core_device *core_dev;
1154
1155         rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
1156         if (rc)
1157                 return -ENODEV;
1158
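        /* pulse the crypto core's soft reset through the SoC's SDR0 DCRs */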
1159         if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
1160                 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1161                        mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1162                 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1163                        mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1164         } else if (of_find_compatible_node(NULL, NULL,
1165                         "amcc,ppc405ex-crypto")) {
1166                 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1167                        mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1168                 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1169                        mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1170         } else if (of_find_compatible_node(NULL, NULL,
1171                         "amcc,ppc460sx-crypto")) {
1172                 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1173                        mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1174                 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1175                        mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1176         } else {
1177                 printk(KERN_ERR "Crypto Function Not supported!\n");
1178                 return -EINVAL;
1179         }
1180
1181         core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1182         if (!core_dev)
1183                 return -ENOMEM;
1184
1185         dev_set_drvdata(dev, core_dev);
1186         core_dev->ofdev = ofdev;
1187         core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
1188         if (!core_dev->dev)
1189                 goto err_alloc_dev;
1190
1191         core_dev->dev->core_dev = core_dev;
1192         core_dev->device = dev;
1193         spin_lock_init(&core_dev->lock);
1194         INIT_LIST_HEAD(&core_dev->dev->alg_list);
1195         rc = crypto4xx_build_pdr(core_dev->dev);
1196         if (rc)
1197                 goto err_build_pdr;
1198
1199         rc = crypto4xx_build_gdr(core_dev->dev);
1200         if (rc)
1201                 goto err_build_pdr;
1202
1203         rc = crypto4xx_build_sdr(core_dev->dev);
1204         if (rc)
1205                 goto err_build_sdr;
1206
1207         /* Init tasklet for bottom half processing */
1208         tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1209                      (unsigned long) dev);
1210
1211         /* Register for Crypto isr, Crypto Engine IRQ */
1212         core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1213         rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
1214                          core_dev->dev->name, dev);
1215         if (rc)
1216                 goto err_request_irq;
1217
1218         core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
1219         if (!core_dev->dev->ce_base) {
1220                 dev_err(dev, "failed to of_iomap\n");
1221                 rc = -ENOMEM;
1222                 goto err_iomap;
1223         }
1224
1225         /* need to setup pdr, rdr, gdr and sdr before this */
1226         crypto4xx_hw_init(core_dev->dev);
1227
1228         /* Register security algorithms with Linux CryptoAPI */
1229         rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1230                                ARRAY_SIZE(crypto4xx_alg));
1231         if (rc)
1232                 goto err_start_dev;
1233
1234         ppc4xx_trng_probe(core_dev);
1235         return 0;
1236
1237 err_start_dev:
1238         iounmap(core_dev->dev->ce_base);
1239 err_iomap:
1240         free_irq(core_dev->irq, dev);
1241 err_request_irq:
1242         irq_dispose_mapping(core_dev->irq);
1243         tasklet_kill(&core_dev->tasklet);
1244 err_build_sdr:
1245         crypto4xx_destroy_sdr(core_dev->dev);
1246         crypto4xx_destroy_gdr(core_dev->dev);
1247 err_build_pdr:
1248         crypto4xx_destroy_pdr(core_dev->dev);
1249         kfree(core_dev->dev);
1250 err_alloc_dev:
1251         kfree(core_dev);
1252
1253         return rc;
1254 }
1255
1256 static int crypto4xx_remove(struct platform_device *ofdev)
1257 {
1258         struct device *dev = &ofdev->dev;
1259         struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1260
1261         ppc4xx_trng_remove(core_dev);
1262
1263         free_irq(core_dev->irq, dev);
1264         irq_dispose_mapping(core_dev->irq);
1265
1266         tasklet_kill(&core_dev->tasklet);
1267         /* Un-register with Linux CryptoAPI */
1268         crypto4xx_unregister_alg(core_dev->dev);
1269         /* Free all allocated memory */
1270         crypto4xx_stop_all(core_dev);
1271
1272         return 0;
1273 }
1274
1275 static const struct of_device_id crypto4xx_match[] = {
1276         { .compatible      = "amcc,ppc4xx-crypto",},
1277         { },
1278 };
1279 MODULE_DEVICE_TABLE(of, crypto4xx_match);
1280
1281 static struct platform_driver crypto4xx_driver = {
1282         .driver = {
1283                 .name = MODULE_NAME,
1284                 .of_match_table = crypto4xx_match,
1285         },
1286         .probe          = crypto4xx_probe,
1287         .remove         = crypto4xx_remove,
1288 };
1289
1290 module_platform_driver(crypto4xx_driver);
1291
1292 MODULE_LICENSE("GPL");
1293 MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
1294 MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");