/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

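/*
 * The talitos_ptr helpers below hide the layout differences between the
 * two hardware generations this driver handles: SEC1 pointers are plain
 * 32-bit addresses with a separate res/len1 pair, while SEC2+ pointers
 * carry a 36-bit address (eptr holds the upper bits), a 16-bit len and a
 * j_extent field.
 */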
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);

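/*
 * Illustrative caller sketch (not taken from an in-tree user; my_done and
 * my_req are hypothetical names).  A client maps its descriptor pointers,
 * then submits on a channel:
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_req);
 *	if (err != -EINPROGRESS)
 *		handle failure (-EAGAIN means the channel fifo was full);
 *
 * my_done() is later invoked from flush_channel() and must check err and
 * the descriptor header feedback for the device processing status.
 */
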
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
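/*
 * The DEF_TALITOS*_DONE macros below expand into the per-interrupt done
 * tasklets.  Note the interrupt mask polarity differs between families:
 * on SEC1 a set IMR bit masks the interrupt (done interrupts are unmasked
 * with clrbits32), while on SEC2+ a set bit enables it (setbits32).
 */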
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY	3000
#define TALITOS_MAX_KEY_SIZE	96
#define TALITOS_MAX_IV_LENGTH	16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
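/*
 * For example (illustrative numbers only): with two mapped segments of
 * 1500 bytes each and cryptlen == 2000, the loop below leaves cryptlen at
 * -1000, so the last entry is trimmed from 1500 to 500 bytes and tagged
 * with DESC_PTR_LNKTBL_RETURN to close the table.
 */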
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (sg && n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
					+ cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}

/*
 * fill in and submit ipsec_esp descriptor
 */
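/*
 * Descriptor pointer usage, as set up below: ptr[0] HMAC key, ptr[1] HMAC
 * (assoc) data, ptr[2] cipher IV, ptr[3] cipher key, ptr[4] cipher in,
 * ptr[5] cipher out, ptr[6] IV out.
 */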
Lee Nipper56af8cd2009-03-29 15:50:50 +08001097static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001098 u64 seq, void (*callback) (struct device *dev,
1099 struct talitos_desc *desc,
1100 void *context, int error))
Kim Phillips9c4a7962008-06-23 19:50:15 +08001101{
1102 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1103 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1104 struct device *dev = ctx->dev;
1105 struct talitos_desc *desc = &edesc->desc;
1106 unsigned int cryptlen = areq->cryptlen;
1107 unsigned int authsize = ctx->authsize;
Kim Phillipse41256f2009-08-13 11:49:06 +10001108 unsigned int ivsize = crypto_aead_ivsize(aead);
Kim Phillipsfa86a262008-07-17 20:20:06 +08001109 int sg_count, ret;
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001110 int sg_link_tbl_len;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001111
1112 /* hmac key */
1113 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001114 DMA_TO_DEVICE);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001115
Kim Phillips9c4a7962008-06-23 19:50:15 +08001116 /* hmac data */
Horia Geanta79fd31d2012-08-02 17:16:40 +03001117 desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
1118 if (edesc->assoc_nents) {
1119 int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
1120 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1121
1122 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001123 sizeof(struct talitos_ptr), 0);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001124 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1125
1126 /* assoc_nents - 1 entries for assoc, 1 for IV */
1127 sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
1128 areq->assoclen, tbl_ptr);
1129
1130 /* add IV to link table */
1131 tbl_ptr += sg_count - 1;
1132 tbl_ptr->j_extent = 0;
1133 tbl_ptr++;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001134 to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001135 tbl_ptr->len = cpu_to_be16(ivsize);
1136 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1137
1138 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1139 edesc->dma_len, DMA_BIDIRECTIONAL);
1140 } else {
Horia Geanta935e99a2013-11-19 14:57:49 +02001141 if (areq->assoclen)
1142 to_talitos_ptr(&desc->ptr[1],
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001143 sg_dma_address(areq->assoc), 0);
Horia Geanta935e99a2013-11-19 14:57:49 +02001144 else
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001145 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001146 desc->ptr[1].j_extent = 0;
1147 }
1148
Kim Phillips9c4a7962008-06-23 19:50:15 +08001149 /* cipher iv */
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001150 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001151 desc->ptr[2].len = cpu_to_be16(ivsize);
1152 desc->ptr[2].j_extent = 0;
1153 /* Sync needed for the aead_givencrypt case */
1154 dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001155
1156 /* cipher key */
1157 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001158 (char *)&ctx->key + ctx->authkeylen,
Kim Phillips9c4a7962008-06-23 19:50:15 +08001159 DMA_TO_DEVICE);
1160
1161 /*
1162 * cipher in
1163 * map and adjust cipher len to aead request cryptlen.
1164 * extent is bytes of HMAC postpended to ciphertext,
1165 * typically 12 for ipsec
1166 */
1167 desc->ptr[4].len = cpu_to_be16(cryptlen);
1168 desc->ptr[4].j_extent = authsize;
1169
Kim Phillipse938e462009-03-29 15:53:23 +08001170 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1171 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1172 : DMA_TO_DEVICE,
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001173 edesc->src_chained);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001174
1175 if (sg_count == 1) {
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001176 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001177 } else {
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001178 sg_link_tbl_len = cryptlen;
1179
Kim Phillips962a9c92009-03-29 15:54:30 +08001180 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001181 sg_link_tbl_len = cryptlen + authsize;
Kim Phillipse938e462009-03-29 15:53:23 +08001182
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001183 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
Lee Nipper70bcaca2008-07-03 19:08:46 +08001184 &edesc->link_tbl[0]);
1185 if (sg_count > 1) {
1186 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001187 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
Kim Phillipse938e462009-03-29 15:53:23 +08001188 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1189 edesc->dma_len,
1190 DMA_BIDIRECTIONAL);
Lee Nipper70bcaca2008-07-03 19:08:46 +08001191 } else {
1192 /* Only one segment now, so no link tbl needed */
Kim Phillips81eb0242009-08-13 11:51:51 +10001193 to_talitos_ptr(&desc->ptr[4],
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001194 sg_dma_address(areq->src), 0);
Lee Nipper70bcaca2008-07-03 19:08:46 +08001195 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08001196 }
1197
1198 /* cipher out */
1199 desc->ptr[5].len = cpu_to_be16(cryptlen);
1200 desc->ptr[5].j_extent = authsize;
1201
Kim Phillipse938e462009-03-29 15:53:23 +08001202 if (areq->src != areq->dst)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001203 sg_count = talitos_map_sg(dev, areq->dst,
1204 edesc->dst_nents ? : 1,
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001205 DMA_FROM_DEVICE, edesc->dst_chained);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001206
1207 if (sg_count == 1) {
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001208 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001209 } else {
Horia Geanta79fd31d2012-08-02 17:16:40 +03001210 int tbl_off = edesc->src_nents + 1;
1211 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
Kim Phillips9c4a7962008-06-23 19:50:15 +08001212
Kim Phillips81eb0242009-08-13 11:51:51 +10001213 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001214 tbl_off * sizeof(struct talitos_ptr), 0);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001215 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001216 tbl_ptr);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001217
Lee Nipperf3c85bc2008-07-30 16:26:57 +08001218 /* Add an entry to the link table for ICV data */
Horia Geanta79fd31d2012-08-02 17:16:40 +03001219 tbl_ptr += sg_count - 1;
1220 tbl_ptr->j_extent = 0;
1221 tbl_ptr++;
1222 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1223 tbl_ptr->len = cpu_to_be16(authsize);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001224
1225 /* icv data follows link tables */
Horia Geanta79fd31d2012-08-02 17:16:40 +03001226 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1227 (tbl_off + edesc->dst_nents + 1 +
1228 edesc->assoc_nents) *
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001229 sizeof(struct talitos_ptr), 0);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001230 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1231 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1232 edesc->dma_len, DMA_BIDIRECTIONAL);
1233 }
1234
1235 /* iv out */
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001236 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
Kim Phillips9c4a7962008-06-23 19:50:15 +08001237 DMA_FROM_DEVICE);
1238
Kim Phillips5228f0f2011-07-15 11:21:38 +08001239 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Kim Phillipsfa86a262008-07-17 20:20:06 +08001240 if (ret != -EINPROGRESS) {
1241 ipsec_esp_unmap(dev, edesc, areq);
1242 kfree(edesc);
1243 }
1244 return ret;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001245}
1246
Kim Phillips9c4a7962008-06-23 19:50:15 +08001247/*
 1248 * derive number of elements in scatterlist and whether it is chained
1249 */
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001250static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001251{
1252 struct scatterlist *sg = sg_list;
1253 int sg_nents = 0;
1254
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001255 *chained = false;
Horia Geantăbde90792015-05-12 11:28:05 +03001256	while (nbytes > 0 && sg) {
Kim Phillips9c4a7962008-06-23 19:50:15 +08001257 sg_nents++;
1258 nbytes -= sg->length;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001259 if (!sg_is_last(sg) && (sg + 1)->length == 0)
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001260 *chained = true;
Cristian Stoica5be4d4c2015-01-20 10:06:16 +02001261 sg = sg_next(sg);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001262 }
1263
1264 return sg_nents;
1265}
1266
1267/*
Lee Nipper56af8cd2009-03-29 15:50:50 +08001268 * allocate and map the extended descriptor
Kim Phillips9c4a7962008-06-23 19:50:15 +08001269 */
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001270static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001271 struct scatterlist *assoc,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001272 struct scatterlist *src,
1273 struct scatterlist *dst,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001274 u8 *iv,
1275 unsigned int assoclen,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001276 unsigned int cryptlen,
1277 unsigned int authsize,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001278 unsigned int ivsize,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001279 int icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001280 u32 cryptoflags,
1281 bool encrypt)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001282{
Lee Nipper56af8cd2009-03-29 15:50:50 +08001283 struct talitos_edesc *edesc;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001284 int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
1285 bool assoc_chained = false, src_chained = false, dst_chained = false;
1286 dma_addr_t iv_dma = 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001287 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
Kim Phillips586725f2008-07-17 20:19:18 +08001288 GFP_ATOMIC;
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001289 struct talitos_private *priv = dev_get_drvdata(dev);
1290 bool is_sec1 = has_ftr_sec1(priv);
1291 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001292
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001293 if (cryptlen + authsize > max_len) {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001294 dev_err(dev, "length exceeds h/w max limit\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08001295 return ERR_PTR(-EINVAL);
1296 }
1297
Horia Geanta935e99a2013-11-19 14:57:49 +02001298 if (ivsize)
Horia Geanta79fd31d2012-08-02 17:16:40 +03001299 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1300
Horia Geanta935e99a2013-11-19 14:57:49 +02001301 if (assoclen) {
Horia Geanta79fd31d2012-08-02 17:16:40 +03001302 /*
1303 * Currently it is assumed that iv is provided whenever assoc
1304 * is.
1305 */
1306 BUG_ON(!iv);
1307
1308 assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
1309 talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
1310 assoc_chained);
1311 assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
1312
1313 if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
1314 assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1315 }
1316
Horia Geanta62293a32013-11-28 15:11:17 +02001317 if (!dst || dst == src) {
1318 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1319 src_nents = (src_nents == 1) ? 0 : src_nents;
1320 dst_nents = dst ? src_nents : 0;
 1321	} else { /* dst && dst != src */
1322 src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
1323 &src_chained);
1324 src_nents = (src_nents == 1) ? 0 : src_nents;
1325 dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
1326 &dst_chained);
1327 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001328 }
1329
1330 /*
1331 * allocate space for base edesc plus the link tables,
Lee Nipperf3c85bc2008-07-30 16:26:57 +08001332 * allowing for two separate entries for ICV and generated ICV (+ 2),
Kim Phillips9c4a7962008-06-23 19:50:15 +08001333 * and the ICV data itself
1334 */
Lee Nipper56af8cd2009-03-29 15:50:50 +08001335 alloc_len = sizeof(struct talitos_edesc);
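	/*
	 * On SEC1 the driver does not use link tables; the extra area is
	 * instead a bounce buffer with room to linearize the source and
	 * destination data (cryptlen bytes each).
	 */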
Horia Geanta79fd31d2012-08-02 17:16:40 +03001336 if (assoc_nents || src_nents || dst_nents) {
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001337 if (is_sec1)
Dan Carpenter608f37d2015-05-11 13:10:09 +03001338 dma_len = (src_nents ? cryptlen : 0) +
1339 (dst_nents ? cryptlen : 0);
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001340 else
1341 dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
1342 sizeof(struct talitos_ptr) + authsize;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001343 alloc_len += dma_len;
1344 } else {
1345 dma_len = 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001346 alloc_len += icv_stashing ? authsize : 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001347 }
1348
Kim Phillips586725f2008-07-17 20:19:18 +08001349 edesc = kmalloc(alloc_len, GFP_DMA | flags);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001350 if (!edesc) {
Horia Geanta935e99a2013-11-19 14:57:49 +02001351 if (assoc_chained)
1352 talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
1353 else if (assoclen)
1354 dma_unmap_sg(dev, assoc,
1355 assoc_nents ? assoc_nents - 1 : 1,
1356 DMA_TO_DEVICE);
1357
Horia Geanta79fd31d2012-08-02 17:16:40 +03001358 if (iv_dma)
1359 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Horia Geanta935e99a2013-11-19 14:57:49 +02001360
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001361 dev_err(dev, "could not allocate edescriptor\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08001362 return ERR_PTR(-ENOMEM);
1363 }
1364
Horia Geanta79fd31d2012-08-02 17:16:40 +03001365 edesc->assoc_nents = assoc_nents;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001366 edesc->src_nents = src_nents;
1367 edesc->dst_nents = dst_nents;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001368 edesc->assoc_chained = assoc_chained;
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001369 edesc->src_chained = src_chained;
1370 edesc->dst_chained = dst_chained;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001371 edesc->iv_dma = iv_dma;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001372 edesc->dma_len = dma_len;
Lee Nipper497f2e62010-05-19 19:20:36 +10001373 if (dma_len)
1374 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1375 edesc->dma_len,
1376 DMA_BIDIRECTIONAL);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001377
1378 return edesc;
1379}
1380
Horia Geanta79fd31d2012-08-02 17:16:40 +03001381static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
Horia Geanta62293a32013-11-28 15:11:17 +02001382 int icv_stashing, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001383{
1384 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1385 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001386 unsigned int ivsize = crypto_aead_ivsize(authenc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001387
Horia Geanta79fd31d2012-08-02 17:16:40 +03001388 return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
1389 iv, areq->assoclen, areq->cryptlen,
1390 ctx->authsize, ivsize, icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001391 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001392}
1393
Lee Nipper56af8cd2009-03-29 15:50:50 +08001394static int aead_encrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001395{
1396 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1397 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001398 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001399
1400 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001401 edesc = aead_edesc_alloc(req, req->iv, 0, true);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001402 if (IS_ERR(edesc))
1403 return PTR_ERR(edesc);
1404
1405 /* set encrypt */
Lee Nipper70bcaca2008-07-03 19:08:46 +08001406 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001407
Horia Geanta79fd31d2012-08-02 17:16:40 +03001408 return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001409}
1410
Lee Nipper56af8cd2009-03-29 15:50:50 +08001411static int aead_decrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001412{
1413 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1414 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1415 unsigned int authsize = ctx->authsize;
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001416 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001417 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001418 struct scatterlist *sg;
1419 void *icvdata;
1420
1421 req->cryptlen -= authsize;
1422
1423 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001424 edesc = aead_edesc_alloc(req, req->iv, 1, false);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001425 if (IS_ERR(edesc))
1426 return PTR_ERR(edesc);
1427
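	/*
	 * Use the SEC's hardware ICV check only when the h/w supports it and
	 * either no link tables are needed or the h/w accounts for the ICV
	 * extent in the source link table length; otherwise stash the
	 * incoming ICV and compare it in software in the done callback.
	 */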
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001428 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
Kim Phillipse938e462009-03-29 15:53:23 +08001429 ((!edesc->src_nents && !edesc->dst_nents) ||
1430 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
Kim Phillips9c4a7962008-06-23 19:50:15 +08001431
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001432 /* decrypt and check the ICV */
Kim Phillipse938e462009-03-29 15:53:23 +08001433 edesc->desc.hdr = ctx->desc_hdr_template |
1434 DESC_HDR_DIR_INBOUND |
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001435 DESC_HDR_MODE1_MDEU_CICV;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001436
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001437 /* reset integrity check result bits */
1438 edesc->desc.hdr_lo = 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001439
Horia Geanta79fd31d2012-08-02 17:16:40 +03001440 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001441 }
Kim Phillipse938e462009-03-29 15:53:23 +08001442
1443 /* Have to check the ICV with software */
1444 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1445
1446 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1447 if (edesc->dma_len)
1448 icvdata = &edesc->link_tbl[edesc->src_nents +
Horia Geanta79fd31d2012-08-02 17:16:40 +03001449 edesc->dst_nents + 2 +
1450 edesc->assoc_nents];
Kim Phillipse938e462009-03-29 15:53:23 +08001451 else
1452 icvdata = &edesc->link_tbl[0];
1453
1454 sg = sg_last(req->src, edesc->src_nents ? : 1);
1455
1456 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1457 ctx->authsize);
1458
Horia Geanta79fd31d2012-08-02 17:16:40 +03001459 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001460}
1461
Lee Nipper56af8cd2009-03-29 15:50:50 +08001462static int aead_givencrypt(struct aead_givcrypt_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001463{
1464 struct aead_request *areq = &req->areq;
1465 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1466 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001467 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001468
1469 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001470 edesc = aead_edesc_alloc(areq, req->giv, 0, true);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001471 if (IS_ERR(edesc))
1472 return PTR_ERR(edesc);
1473
1474 /* set encrypt */
Lee Nipper70bcaca2008-07-03 19:08:46 +08001475 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001476
1477 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
Kim Phillipsba954872008-09-14 13:41:19 -07001478 /* avoid consecutive packets going out with same IV */
1479 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001480
Horia Geanta79fd31d2012-08-02 17:16:40 +03001481 return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001482}
1483
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001484static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1485 const u8 *key, unsigned int keylen)
1486{
1487 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001488
1489 memcpy(&ctx->key, key, keylen);
1490 ctx->keylen = keylen;
1491
1492 return 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001493}
1494
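/*
 * Unmap the data mapped by map_sg_in/out_talitos_ptr().  On SEC1, where a
 * bounce buffer is used instead of link tables, results are copied back
 * from the bounce buffer into the destination scatterlist here.
 */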
LEROY Christophe032d1972015-04-17 16:31:51 +02001495static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1496 struct scatterlist *dst, unsigned int len,
1497 struct talitos_edesc *edesc)
1498{
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001499 struct talitos_private *priv = dev_get_drvdata(dev);
1500 bool is_sec1 = has_ftr_sec1(priv);
1501
1502 if (is_sec1) {
1503 if (!edesc->src_nents) {
1504 dma_unmap_sg(dev, src, 1,
1505 dst != src ? DMA_TO_DEVICE
1506 : DMA_BIDIRECTIONAL);
1507 }
1508 if (dst && edesc->dst_nents) {
1509 dma_sync_single_for_device(dev,
1510 edesc->dma_link_tbl + len,
1511 len, DMA_FROM_DEVICE);
1512 sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1513 edesc->buf + len, len);
1514 } else if (dst && dst != src) {
1515 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1516 }
1517 } else {
1518 talitos_sg_unmap(dev, edesc, src, dst);
1519 }
LEROY Christophe032d1972015-04-17 16:31:51 +02001520}
1521
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001522static void common_nonsnoop_unmap(struct device *dev,
1523 struct talitos_edesc *edesc,
1524 struct ablkcipher_request *areq)
1525{
1526 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
LEROY Christophe032d1972015-04-17 16:31:51 +02001527
1528 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001529 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1530 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1531
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001532 if (edesc->dma_len)
1533 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1534 DMA_BIDIRECTIONAL);
1535}
1536
1537static void ablkcipher_done(struct device *dev,
1538 struct talitos_desc *desc, void *context,
1539 int err)
1540{
1541 struct ablkcipher_request *areq = context;
Kim Phillips19bbbc62009-03-29 15:53:59 +08001542 struct talitos_edesc *edesc;
1543
1544 edesc = container_of(desc, struct talitos_edesc, desc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001545
1546 common_nonsnoop_unmap(dev, edesc, areq);
1547
1548 kfree(edesc);
1549
1550 areq->base.complete(&areq->base, err);
1551}
1552
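/*
 * Map request source data into descriptor pointer ptr.  SEC1 cannot use
 * link tables, so a multi-segment source is linearized into the bounce
 * buffer (edesc->buf); SEC2/3 build a link table when the source has more
 * than one segment.  Returns the segment count, which the caller passes on
 * to map_sg_out_talitos_ptr().
 */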
LEROY Christophe032d1972015-04-17 16:31:51 +02001553int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1554 unsigned int len, struct talitos_edesc *edesc,
1555 enum dma_data_direction dir, struct talitos_ptr *ptr)
1556{
1557 int sg_count;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001558 struct talitos_private *priv = dev_get_drvdata(dev);
1559 bool is_sec1 = has_ftr_sec1(priv);
LEROY Christophe032d1972015-04-17 16:31:51 +02001560
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001561 to_talitos_ptr_len(ptr, len, is_sec1);
LEROY Christophe032d1972015-04-17 16:31:51 +02001562
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001563 if (is_sec1) {
1564 sg_count = edesc->src_nents ? : 1;
LEROY Christophe032d1972015-04-17 16:31:51 +02001565
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001566 if (sg_count == 1) {
1567 dma_map_sg(dev, src, 1, dir);
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001568 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001569 } else {
1570 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1571 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1572 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1573 len, DMA_TO_DEVICE);
1574 }
1575 } else {
1576 to_talitos_ptr_extent_clear(ptr, is_sec1);
1577
1578 sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
1579 edesc->src_chained);
1580
1581 if (sg_count == 1) {
1582 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1583 } else {
1584 sg_count = sg_to_link_tbl(src, sg_count, len,
1585 &edesc->link_tbl[0]);
1586 if (sg_count > 1) {
1587 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1588 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1589 dma_sync_single_for_device(dev,
1590 edesc->dma_link_tbl,
1591 edesc->dma_len,
1592 DMA_BIDIRECTIONAL);
1593 } else {
 1594				/* Only one segment now, so no link tbl needed */
1595 to_talitos_ptr(ptr, sg_dma_address(src),
1596 is_sec1);
1597 }
LEROY Christophe032d1972015-04-17 16:31:51 +02001598 }
1599 }
1600 return sg_count;
1601}
1602
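/*
 * Map request destination data into descriptor pointer ptr.  dir is
 * DMA_NONE when the destination was already mapped along with the source
 * (in-place operation).  On SEC1 a multi-segment destination points at the
 * second half of the bounce buffer (dma_link_tbl + len); SEC2/3 build a
 * link table placed after the source entries.
 */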
1603void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1604 unsigned int len, struct talitos_edesc *edesc,
1605 enum dma_data_direction dir,
1606 struct talitos_ptr *ptr, int sg_count)
1607{
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001608 struct talitos_private *priv = dev_get_drvdata(dev);
1609 bool is_sec1 = has_ftr_sec1(priv);
1610
LEROY Christophe032d1972015-04-17 16:31:51 +02001611 if (dir != DMA_NONE)
1612 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
1613 dir, edesc->dst_chained);
1614
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001615 to_talitos_ptr_len(ptr, len, is_sec1);
LEROY Christophe032d1972015-04-17 16:31:51 +02001616
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001617 if (is_sec1) {
1618 if (sg_count == 1) {
1619 if (dir != DMA_NONE)
1620 dma_map_sg(dev, dst, 1, dir);
1621 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1622 } else {
1623 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1624 dma_sync_single_for_device(dev,
1625 edesc->dma_link_tbl + len,
1626 len, DMA_FROM_DEVICE);
1627 }
1628 } else {
1629 to_talitos_ptr_extent_clear(ptr, is_sec1);
1630
1631 if (sg_count == 1) {
1632 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1633 } else {
1634 struct talitos_ptr *link_tbl_ptr =
1635 &edesc->link_tbl[edesc->src_nents + 1];
1636
1637 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1638 (edesc->src_nents + 1) *
1639 sizeof(struct talitos_ptr), 0);
1640 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
Horia Geantă42e8b0d2015-05-11 20:04:56 +03001641			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001642 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1643 edesc->dma_len,
1644 DMA_BIDIRECTIONAL);
1645 }
LEROY Christophe032d1972015-04-17 16:31:51 +02001646 }
1647}
1648
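/*
 * Fill in and submit a descriptor for an ablkcipher request:
 * ptr[1] = IV in, ptr[2] = key, ptr[3] = data in, ptr[4] = data out,
 * ptr[5] = IV out; ptr[0] and ptr[6] are left empty.
 */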
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001649static int common_nonsnoop(struct talitos_edesc *edesc,
1650 struct ablkcipher_request *areq,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001651 void (*callback) (struct device *dev,
1652 struct talitos_desc *desc,
1653 void *context, int error))
1654{
1655 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1656 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1657 struct device *dev = ctx->dev;
1658 struct talitos_desc *desc = &edesc->desc;
1659 unsigned int cryptlen = areq->nbytes;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001660 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001661 int sg_count, ret;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001662 struct talitos_private *priv = dev_get_drvdata(dev);
1663 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001664
1665 /* first DWORD empty */
LEROY Christophe2529bc32015-04-17 16:31:49 +02001666 desc->ptr[0] = zero_entry;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001667
1668 /* cipher iv */
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001669 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1670 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1671 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001672
1673 /* cipher key */
1674 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001675 (char *)&ctx->key, DMA_TO_DEVICE);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001676
1677 /*
1678 * cipher in
1679 */
LEROY Christophe032d1972015-04-17 16:31:51 +02001680 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1681 (areq->src == areq->dst) ?
1682 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1683 &desc->ptr[3]);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001684
1685 /* cipher out */
LEROY Christophe032d1972015-04-17 16:31:51 +02001686 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1687 (areq->src == areq->dst) ? DMA_NONE
1688 : DMA_FROM_DEVICE,
1689 &desc->ptr[4], sg_count);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001690
1691 /* iv out */
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001692 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001693 DMA_FROM_DEVICE);
1694
1695 /* last DWORD empty */
LEROY Christophe2529bc32015-04-17 16:31:49 +02001696 desc->ptr[6] = zero_entry;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001697
Kim Phillips5228f0f2011-07-15 11:21:38 +08001698 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001699 if (ret != -EINPROGRESS) {
1700 common_nonsnoop_unmap(dev, edesc, areq);
1701 kfree(edesc);
1702 }
1703 return ret;
1704}
1705
Kim Phillipse938e462009-03-29 15:53:23 +08001706static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
Horia Geanta62293a32013-11-28 15:11:17 +02001707 areq, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001708{
1709 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1710 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001711 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001712
Horia Geanta79fd31d2012-08-02 17:16:40 +03001713 return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1714 areq->info, 0, areq->nbytes, 0, ivsize, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001715 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001716}
1717
1718static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1719{
1720 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1721 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1722 struct talitos_edesc *edesc;
1723
1724 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001725 edesc = ablkcipher_edesc_alloc(areq, true);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001726 if (IS_ERR(edesc))
1727 return PTR_ERR(edesc);
1728
1729 /* set encrypt */
1730 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1731
Kim Phillipsfebec542011-07-15 11:21:39 +08001732 return common_nonsnoop(edesc, areq, ablkcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001733}
1734
1735static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1736{
1737 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1738 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1739 struct talitos_edesc *edesc;
1740
1741 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001742 edesc = ablkcipher_edesc_alloc(areq, false);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001743 if (IS_ERR(edesc))
1744 return PTR_ERR(edesc);
1745
1746 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1747
Kim Phillipsfebec542011-07-15 11:21:39 +08001748 return common_nonsnoop(edesc, areq, ablkcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001749}
1750
Lee Nipper497f2e62010-05-19 19:20:36 +10001751static void common_nonsnoop_hash_unmap(struct device *dev,
1752 struct talitos_edesc *edesc,
1753 struct ahash_request *areq)
1754{
1755 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001756 struct talitos_private *priv = dev_get_drvdata(dev);
1757 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper497f2e62010-05-19 19:20:36 +10001758
1759 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1760
LEROY Christophe032d1972015-04-17 16:31:51 +02001761 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1762
Lee Nipper497f2e62010-05-19 19:20:36 +10001763 /* When using hashctx-in, must unmap it. */
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001764 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
Lee Nipper497f2e62010-05-19 19:20:36 +10001765 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1766 DMA_TO_DEVICE);
1767
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001768 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
Lee Nipper497f2e62010-05-19 19:20:36 +10001769 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1770 DMA_TO_DEVICE);
1771
Lee Nipper497f2e62010-05-19 19:20:36 +10001772 if (edesc->dma_len)
1773 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1774 DMA_BIDIRECTIONAL);
1775
1776}
1777
1778static void ahash_done(struct device *dev,
1779 struct talitos_desc *desc, void *context,
1780 int err)
1781{
1782 struct ahash_request *areq = context;
1783 struct talitos_edesc *edesc =
1784 container_of(desc, struct talitos_edesc, desc);
1785 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1786
1787 if (!req_ctx->last && req_ctx->to_hash_later) {
1788 /* Position any partial block for next update/final/finup */
1789 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
Lee Nipper5e833bc2010-06-16 15:29:15 +10001790 req_ctx->nbuf = req_ctx->to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10001791 }
1792 common_nonsnoop_hash_unmap(dev, edesc, areq);
1793
1794 kfree(edesc);
1795
1796 areq->base.complete(&areq->base, err);
1797}
1798
LEROY Christophe2d029052015-04-17 16:32:18 +02001799/*
 1800 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
 1801 * ourselves and submit a padded block
1802 */
1803void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1804 struct talitos_edesc *edesc,
1805 struct talitos_ptr *ptr)
1806{
1807 static u8 padded_hash[64] = {
1808 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1809 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1810 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1811 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1812 };
1813
1814 pr_err_once("Bug in SEC1, padding ourself\n");
1815 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1816 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1817 (char *)padded_hash, DMA_TO_DEVICE);
1818}
1819
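/*
 * Fill in and submit a hash descriptor: ptr[1] = hash context in (zero on
 * the first pass when the h/w initializes the context itself), ptr[2] =
 * HMAC key if any, ptr[3] = data in, ptr[5] = digest out on the last pass
 * or intermediate context out otherwise.
 */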
Lee Nipper497f2e62010-05-19 19:20:36 +10001820static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1821 struct ahash_request *areq, unsigned int length,
1822 void (*callback) (struct device *dev,
1823 struct talitos_desc *desc,
1824 void *context, int error))
1825{
1826 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1827 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1828 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1829 struct device *dev = ctx->dev;
1830 struct talitos_desc *desc = &edesc->desc;
LEROY Christophe032d1972015-04-17 16:31:51 +02001831 int ret;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001832 struct talitos_private *priv = dev_get_drvdata(dev);
1833 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper497f2e62010-05-19 19:20:36 +10001834
1835 /* first DWORD empty */
1836 desc->ptr[0] = zero_entry;
1837
Kim Phillips60f208d2010-05-19 19:21:53 +10001838 /* hash context in */
1839 if (!req_ctx->first || req_ctx->swinit) {
Lee Nipper497f2e62010-05-19 19:20:36 +10001840 map_single_talitos_ptr(dev, &desc->ptr[1],
1841 req_ctx->hw_context_size,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001842 (char *)req_ctx->hw_context,
Lee Nipper497f2e62010-05-19 19:20:36 +10001843 DMA_TO_DEVICE);
Kim Phillips60f208d2010-05-19 19:21:53 +10001844 req_ctx->swinit = 0;
Lee Nipper497f2e62010-05-19 19:20:36 +10001845 } else {
1846 desc->ptr[1] = zero_entry;
1847 /* Indicate next op is not the first. */
1848 req_ctx->first = 0;
1849 }
1850
1851 /* HMAC key */
1852 if (ctx->keylen)
1853 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001854 (char *)&ctx->key, DMA_TO_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001855 else
1856 desc->ptr[2] = zero_entry;
1857
1858 /*
1859 * data in
1860 */
LEROY Christophe032d1972015-04-17 16:31:51 +02001861 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1862 DMA_TO_DEVICE, &desc->ptr[3]);
Lee Nipper497f2e62010-05-19 19:20:36 +10001863
1864 /* fifth DWORD empty */
1865 desc->ptr[4] = zero_entry;
1866
1867 /* hash/HMAC out -or- hash context out */
1868 if (req_ctx->last)
1869 map_single_talitos_ptr(dev, &desc->ptr[5],
1870 crypto_ahash_digestsize(tfm),
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001871 areq->result, DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001872 else
1873 map_single_talitos_ptr(dev, &desc->ptr[5],
1874 req_ctx->hw_context_size,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001875 req_ctx->hw_context, DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001876
1877 /* last DWORD empty */
1878 desc->ptr[6] = zero_entry;
1879
LEROY Christophe2d029052015-04-17 16:32:18 +02001880 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1881 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1882
Kim Phillips5228f0f2011-07-15 11:21:38 +08001883 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10001884 if (ret != -EINPROGRESS) {
1885 common_nonsnoop_hash_unmap(dev, edesc, areq);
1886 kfree(edesc);
1887 }
1888 return ret;
1889}
1890
1891static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1892 unsigned int nbytes)
1893{
1894 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1895 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1896 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1897
Horia Geanta79fd31d2012-08-02 17:16:40 +03001898 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001899 nbytes, 0, 0, 0, areq->base.flags, false);
Lee Nipper497f2e62010-05-19 19:20:36 +10001900}
1901
1902static int ahash_init(struct ahash_request *areq)
1903{
1904 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1905 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1906
1907 /* Initialize the context */
Lee Nipper5e833bc2010-06-16 15:29:15 +10001908 req_ctx->nbuf = 0;
Kim Phillips60f208d2010-05-19 19:21:53 +10001909 req_ctx->first = 1; /* first indicates h/w must init its context */
1910 req_ctx->swinit = 0; /* assume h/w init of context */
Lee Nipper497f2e62010-05-19 19:20:36 +10001911 req_ctx->hw_context_size =
1912 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1913 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1914 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1915
1916 return 0;
1917}
1918
Kim Phillips60f208d2010-05-19 19:21:53 +10001919/*
1920 * on h/w without explicit sha224 support, we initialize h/w context
1921 * manually with sha224 constants, and tell it to run sha256.
1922 */
1923static int ahash_init_sha224_swinit(struct ahash_request *areq)
1924{
1925 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1926
1927 ahash_init(areq);
 1928	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1929
Kim Phillipsa7524472010-09-23 15:56:38 +08001930 req_ctx->hw_context[0] = SHA224_H0;
1931 req_ctx->hw_context[1] = SHA224_H1;
1932 req_ctx->hw_context[2] = SHA224_H2;
1933 req_ctx->hw_context[3] = SHA224_H3;
1934 req_ctx->hw_context[4] = SHA224_H4;
1935 req_ctx->hw_context[5] = SHA224_H5;
1936 req_ctx->hw_context[6] = SHA224_H6;
1937 req_ctx->hw_context[7] = SHA224_H7;
Kim Phillips60f208d2010-05-19 19:21:53 +10001938
1939 /* init 64-bit count */
1940 req_ctx->hw_context[8] = 0;
1941 req_ctx->hw_context[9] = 0;
1942
1943 return 0;
1944}
1945
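/*
 * Core of the update/final/finup/digest paths: unless this is the final
 * request, buffer input until more than one block's worth is available;
 * hash only whole blocks, keeping one block back when the total is
 * block-aligned; stash any trailing partial block in bufnext for the next
 * call.
 */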
Lee Nipper497f2e62010-05-19 19:20:36 +10001946static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1947{
1948 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1949 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1950 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1951 struct talitos_edesc *edesc;
1952 unsigned int blocksize =
1953 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1954 unsigned int nbytes_to_hash;
1955 unsigned int to_hash_later;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001956 unsigned int nsg;
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001957 bool chained;
Lee Nipper497f2e62010-05-19 19:20:36 +10001958
Lee Nipper5e833bc2010-06-16 15:29:15 +10001959 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1960 /* Buffer up to one whole block */
Lee Nipper497f2e62010-05-19 19:20:36 +10001961 sg_copy_to_buffer(areq->src,
1962 sg_count(areq->src, nbytes, &chained),
Lee Nipper5e833bc2010-06-16 15:29:15 +10001963 req_ctx->buf + req_ctx->nbuf, nbytes);
1964 req_ctx->nbuf += nbytes;
Lee Nipper497f2e62010-05-19 19:20:36 +10001965 return 0;
1966 }
1967
Lee Nipper5e833bc2010-06-16 15:29:15 +10001968 /* At least (blocksize + 1) bytes are available to hash */
1969 nbytes_to_hash = nbytes + req_ctx->nbuf;
1970 to_hash_later = nbytes_to_hash & (blocksize - 1);
1971
1972 if (req_ctx->last)
1973 to_hash_later = 0;
1974 else if (to_hash_later)
1975 /* There is a partial block. Hash the full block(s) now */
1976 nbytes_to_hash -= to_hash_later;
1977 else {
1978 /* Keep one block buffered */
1979 nbytes_to_hash -= blocksize;
1980 to_hash_later = blocksize;
1981 }
1982
1983 /* Chain in any previously buffered data */
1984 if (req_ctx->nbuf) {
1985 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1986 sg_init_table(req_ctx->bufsl, nsg);
1987 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1988 if (nsg > 1)
1989 scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
Lee Nipper497f2e62010-05-19 19:20:36 +10001990 req_ctx->psrc = req_ctx->bufsl;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001991 } else
Lee Nipper497f2e62010-05-19 19:20:36 +10001992 req_ctx->psrc = areq->src;
Lee Nipper497f2e62010-05-19 19:20:36 +10001993
Lee Nipper5e833bc2010-06-16 15:29:15 +10001994 if (to_hash_later) {
1995 int nents = sg_count(areq->src, nbytes, &chained);
Akinobu Mitad0525722013-07-08 16:01:55 -07001996 sg_pcopy_to_buffer(areq->src, nents,
Lee Nipper5e833bc2010-06-16 15:29:15 +10001997 req_ctx->bufnext,
1998 to_hash_later,
1999 nbytes - to_hash_later);
Lee Nipper497f2e62010-05-19 19:20:36 +10002000 }
Lee Nipper5e833bc2010-06-16 15:29:15 +10002001 req_ctx->to_hash_later = to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10002002
Lee Nipper5e833bc2010-06-16 15:29:15 +10002003 /* Allocate extended descriptor */
Lee Nipper497f2e62010-05-19 19:20:36 +10002004 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2005 if (IS_ERR(edesc))
2006 return PTR_ERR(edesc);
2007
2008 edesc->desc.hdr = ctx->desc_hdr_template;
2009
2010 /* On last one, request SEC to pad; otherwise continue */
2011 if (req_ctx->last)
2012 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2013 else
2014 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2015
Kim Phillips60f208d2010-05-19 19:21:53 +10002016 /* request SEC to INIT hash. */
2017 if (req_ctx->first && !req_ctx->swinit)
Lee Nipper497f2e62010-05-19 19:20:36 +10002018 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2019
2020 /* When the tfm context has a keylen, it's an HMAC.
 2021 * A first or last (i.e. not middle) descriptor must request HMAC.
2022 */
2023 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2024 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2025
2026 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
2027 ahash_done);
2028}
2029
2030static int ahash_update(struct ahash_request *areq)
2031{
2032 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2033
2034 req_ctx->last = 0;
2035
2036 return ahash_process_req(areq, areq->nbytes);
2037}
2038
2039static int ahash_final(struct ahash_request *areq)
2040{
2041 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2042
2043 req_ctx->last = 1;
2044
2045 return ahash_process_req(areq, 0);
2046}
2047
2048static int ahash_finup(struct ahash_request *areq)
2049{
2050 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2051
2052 req_ctx->last = 1;
2053
2054 return ahash_process_req(areq, areq->nbytes);
2055}
2056
2057static int ahash_digest(struct ahash_request *areq)
2058{
2059 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
Kim Phillips60f208d2010-05-19 19:21:53 +10002060 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10002061
Kim Phillips60f208d2010-05-19 19:21:53 +10002062 ahash->init(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10002063 req_ctx->last = 1;
2064
2065 return ahash_process_req(areq, areq->nbytes);
2066}
2067
Lee Nipper79b3a412011-11-21 16:13:25 +08002068struct keyhash_result {
2069 struct completion completion;
2070 int err;
2071};
2072
2073static void keyhash_complete(struct crypto_async_request *req, int err)
2074{
2075 struct keyhash_result *res = req->data;
2076
2077 if (err == -EINPROGRESS)
2078 return;
2079
2080 res->err = err;
2081 complete(&res->completion);
2082}
2083
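/*
 * Digest an over-long HMAC key synchronously: submit an ahash request on
 * this same tfm and sleep on a completion until the asynchronous digest
 * finishes.
 */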
2084static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2085 u8 *hash)
2086{
2087 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2088
2089 struct scatterlist sg[1];
2090 struct ahash_request *req;
2091 struct keyhash_result hresult;
2092 int ret;
2093
2094 init_completion(&hresult.completion);
2095
2096 req = ahash_request_alloc(tfm, GFP_KERNEL);
2097 if (!req)
2098 return -ENOMEM;
2099
2100 /* Keep tfm keylen == 0 during hash of the long key */
2101 ctx->keylen = 0;
2102 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2103 keyhash_complete, &hresult);
2104
2105 sg_init_one(&sg[0], key, keylen);
2106
2107 ahash_request_set_crypt(req, sg, hash, keylen);
2108 ret = crypto_ahash_digest(req);
2109 switch (ret) {
2110 case 0:
2111 break;
2112 case -EINPROGRESS:
2113 case -EBUSY:
2114 ret = wait_for_completion_interruptible(
2115 &hresult.completion);
2116 if (!ret)
2117 ret = hresult.err;
2118 break;
2119 default:
2120 break;
2121 }
2122 ahash_request_free(req);
2123
2124 return ret;
2125}
2126
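/*
 * HMAC setkey: keys longer than the block size are first hashed down to
 * the digest size, as the HMAC construction requires; the resulting key is
 * kept in the tfm context and loaded through descriptor ptr[2].
 */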
2127static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2128 unsigned int keylen)
2129{
2130 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2131 unsigned int blocksize =
2132 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2133 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2134 unsigned int keysize = keylen;
2135 u8 hash[SHA512_DIGEST_SIZE];
2136 int ret;
2137
2138 if (keylen <= blocksize)
2139 memcpy(ctx->key, key, keysize);
2140 else {
2141 /* Must get the hash of the long key */
2142 ret = keyhash(tfm, key, keylen, hash);
2143
2144 if (ret) {
2145 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2146 return -EINVAL;
2147 }
2148
2149 keysize = digestsize;
2150 memcpy(ctx->key, hash, digestsize);
2151 }
2152
2153 ctx->keylen = keysize;
2154
2155 return 0;
2156}
2157
2158
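/*
 * Registration template: the crypto_alg/ahash_alg to register plus the
 * descriptor header value selecting the execution units and modes used to
 * implement it.
 */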
Kim Phillips9c4a7962008-06-23 19:50:15 +08002159struct talitos_alg_template {
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002160 u32 type;
2161 union {
2162 struct crypto_alg crypto;
Lee Nipperacbf7c622010-05-19 19:19:33 +10002163 struct ahash_alg hash;
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002164 } alg;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002165 __be32 desc_hdr_template;
2166};
2167
2168static struct talitos_alg_template driver_algs[] = {
Horia Geanta991155b2013-03-20 16:31:38 +02002169 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002170 { .type = CRYPTO_ALG_TYPE_AEAD,
2171 .alg.crypto = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002172 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2173 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
2174 .cra_blocksize = AES_BLOCK_SIZE,
2175 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002176 .cra_aead = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002177 .ivsize = AES_BLOCK_SIZE,
2178 .maxauthsize = SHA1_DIGEST_SIZE,
2179 }
2180 },
Kim Phillips9c4a7962008-06-23 19:50:15 +08002181 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2182 DESC_HDR_SEL0_AESU |
2183 DESC_HDR_MODE0_AESU_CBC |
2184 DESC_HDR_SEL1_MDEUA |
2185 DESC_HDR_MODE1_MDEU_INIT |
2186 DESC_HDR_MODE1_MDEU_PAD |
2187 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper70bcaca2008-07-03 19:08:46 +08002188 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002189 { .type = CRYPTO_ALG_TYPE_AEAD,
2190 .alg.crypto = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002191 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
2192 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
2193 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2194 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002195 .cra_aead = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002196 .ivsize = DES3_EDE_BLOCK_SIZE,
2197 .maxauthsize = SHA1_DIGEST_SIZE,
2198 }
2199 },
Lee Nipper70bcaca2008-07-03 19:08:46 +08002200 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2201 DESC_HDR_SEL0_DEU |
2202 DESC_HDR_MODE0_DEU_CBC |
2203 DESC_HDR_MODE0_DEU_3DES |
2204 DESC_HDR_SEL1_MDEUA |
2205 DESC_HDR_MODE1_MDEU_INIT |
2206 DESC_HDR_MODE1_MDEU_PAD |
2207 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper3952f172008-07-10 18:29:18 +08002208 },
Horia Geanta357fb602012-07-03 19:16:53 +03002209 { .type = CRYPTO_ALG_TYPE_AEAD,
2210 .alg.crypto = {
2211 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2212 .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
2213 .cra_blocksize = AES_BLOCK_SIZE,
2214 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Horia Geanta357fb602012-07-03 19:16:53 +03002215 .cra_aead = {
Horia Geanta357fb602012-07-03 19:16:53 +03002216 .ivsize = AES_BLOCK_SIZE,
2217 .maxauthsize = SHA224_DIGEST_SIZE,
2218 }
2219 },
2220 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2221 DESC_HDR_SEL0_AESU |
2222 DESC_HDR_MODE0_AESU_CBC |
2223 DESC_HDR_SEL1_MDEUA |
2224 DESC_HDR_MODE1_MDEU_INIT |
2225 DESC_HDR_MODE1_MDEU_PAD |
2226 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2227 },
2228 { .type = CRYPTO_ALG_TYPE_AEAD,
2229 .alg.crypto = {
2230 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
2231 .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
2232 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2233 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Horia Geanta357fb602012-07-03 19:16:53 +03002234 .cra_aead = {
Horia Geanta357fb602012-07-03 19:16:53 +03002235 .ivsize = DES3_EDE_BLOCK_SIZE,
2236 .maxauthsize = SHA224_DIGEST_SIZE,
2237 }
2238 },
2239 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2240 DESC_HDR_SEL0_DEU |
2241 DESC_HDR_MODE0_DEU_CBC |
2242 DESC_HDR_MODE0_DEU_3DES |
2243 DESC_HDR_SEL1_MDEUA |
2244 DESC_HDR_MODE1_MDEU_INIT |
2245 DESC_HDR_MODE1_MDEU_PAD |
2246 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2247 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002248 { .type = CRYPTO_ALG_TYPE_AEAD,
2249 .alg.crypto = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002250 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2251 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2252 .cra_blocksize = AES_BLOCK_SIZE,
2253 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002254 .cra_aead = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002255 .ivsize = AES_BLOCK_SIZE,
2256 .maxauthsize = SHA256_DIGEST_SIZE,
2257 }
2258 },
Lee Nipper3952f172008-07-10 18:29:18 +08002259 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2260 DESC_HDR_SEL0_AESU |
2261 DESC_HDR_MODE0_AESU_CBC |
2262 DESC_HDR_SEL1_MDEUA |
2263 DESC_HDR_MODE1_MDEU_INIT |
2264 DESC_HDR_MODE1_MDEU_PAD |
2265 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2266 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002267 { .type = CRYPTO_ALG_TYPE_AEAD,
2268 .alg.crypto = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002269 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2270 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2271 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2272 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002273 .cra_aead = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002274 .ivsize = DES3_EDE_BLOCK_SIZE,
2275 .maxauthsize = SHA256_DIGEST_SIZE,
2276 }
2277 },
Lee Nipper3952f172008-07-10 18:29:18 +08002278 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2279 DESC_HDR_SEL0_DEU |
2280 DESC_HDR_MODE0_DEU_CBC |
2281 DESC_HDR_MODE0_DEU_3DES |
2282 DESC_HDR_SEL1_MDEUA |
2283 DESC_HDR_MODE1_MDEU_INIT |
2284 DESC_HDR_MODE1_MDEU_PAD |
2285 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2286 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002287 { .type = CRYPTO_ALG_TYPE_AEAD,
2288 .alg.crypto = {
Horia Geanta357fb602012-07-03 19:16:53 +03002289 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2290 .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
2291 .cra_blocksize = AES_BLOCK_SIZE,
2292 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Horia Geanta357fb602012-07-03 19:16:53 +03002293 .cra_aead = {
Horia Geanta357fb602012-07-03 19:16:53 +03002294 .ivsize = AES_BLOCK_SIZE,
2295 .maxauthsize = SHA384_DIGEST_SIZE,
2296 }
2297 },
2298 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2299 DESC_HDR_SEL0_AESU |
2300 DESC_HDR_MODE0_AESU_CBC |
2301 DESC_HDR_SEL1_MDEUB |
2302 DESC_HDR_MODE1_MDEU_INIT |
2303 DESC_HDR_MODE1_MDEU_PAD |
2304 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2305 },
2306 { .type = CRYPTO_ALG_TYPE_AEAD,
2307 .alg.crypto = {
2308 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
2309 .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
2310 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2311 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Horia Geanta357fb602012-07-03 19:16:53 +03002312 .cra_aead = {
Horia Geanta357fb602012-07-03 19:16:53 +03002313 .ivsize = DES3_EDE_BLOCK_SIZE,
2314 .maxauthsize = SHA384_DIGEST_SIZE,
2315 }
2316 },
2317 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2318 DESC_HDR_SEL0_DEU |
2319 DESC_HDR_MODE0_DEU_CBC |
2320 DESC_HDR_MODE0_DEU_3DES |
2321 DESC_HDR_SEL1_MDEUB |
2322 DESC_HDR_MODE1_MDEU_INIT |
2323 DESC_HDR_MODE1_MDEU_PAD |
2324 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2325 },
2326 { .type = CRYPTO_ALG_TYPE_AEAD,
2327 .alg.crypto = {
2328 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2329 .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
2330 .cra_blocksize = AES_BLOCK_SIZE,
2331 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Horia Geanta357fb602012-07-03 19:16:53 +03002332 .cra_aead = {
Horia Geanta357fb602012-07-03 19:16:53 +03002333 .ivsize = AES_BLOCK_SIZE,
2334 .maxauthsize = SHA512_DIGEST_SIZE,
2335 }
2336 },
2337 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2338 DESC_HDR_SEL0_AESU |
2339 DESC_HDR_MODE0_AESU_CBC |
2340 DESC_HDR_SEL1_MDEUB |
2341 DESC_HDR_MODE1_MDEU_INIT |
2342 DESC_HDR_MODE1_MDEU_PAD |
2343 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2344 },
2345 { .type = CRYPTO_ALG_TYPE_AEAD,
2346 .alg.crypto = {
2347 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
2348 .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
2349 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2350 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Horia Geanta357fb602012-07-03 19:16:53 +03002351 .cra_aead = {
Horia Geanta357fb602012-07-03 19:16:53 +03002352 .ivsize = DES3_EDE_BLOCK_SIZE,
2353 .maxauthsize = SHA512_DIGEST_SIZE,
2354 }
2355 },
2356 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2357 DESC_HDR_SEL0_DEU |
2358 DESC_HDR_MODE0_DEU_CBC |
2359 DESC_HDR_MODE0_DEU_3DES |
2360 DESC_HDR_SEL1_MDEUB |
2361 DESC_HDR_MODE1_MDEU_INIT |
2362 DESC_HDR_MODE1_MDEU_PAD |
2363 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2364 },
2365 { .type = CRYPTO_ALG_TYPE_AEAD,
2366 .alg.crypto = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002367 .cra_name = "authenc(hmac(md5),cbc(aes))",
2368 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2369 .cra_blocksize = AES_BLOCK_SIZE,
2370 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002371 .cra_aead = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002372 .ivsize = AES_BLOCK_SIZE,
2373 .maxauthsize = MD5_DIGEST_SIZE,
2374 }
2375 },
Lee Nipper3952f172008-07-10 18:29:18 +08002376 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2377 DESC_HDR_SEL0_AESU |
2378 DESC_HDR_MODE0_AESU_CBC |
2379 DESC_HDR_SEL1_MDEUA |
2380 DESC_HDR_MODE1_MDEU_INIT |
2381 DESC_HDR_MODE1_MDEU_PAD |
2382 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2383 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002384 { .type = CRYPTO_ALG_TYPE_AEAD,
2385 .alg.crypto = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002386 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2387 .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2388 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2389 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002390 .cra_aead = {
Lee Nipper56af8cd2009-03-29 15:50:50 +08002391 .ivsize = DES3_EDE_BLOCK_SIZE,
2392 .maxauthsize = MD5_DIGEST_SIZE,
2393 }
2394 },
Lee Nipper3952f172008-07-10 18:29:18 +08002395 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2396 DESC_HDR_SEL0_DEU |
2397 DESC_HDR_MODE0_DEU_CBC |
2398 DESC_HDR_MODE0_DEU_3DES |
2399 DESC_HDR_SEL1_MDEUA |
2400 DESC_HDR_MODE1_MDEU_INIT |
2401 DESC_HDR_MODE1_MDEU_PAD |
2402 DESC_HDR_MODE1_MDEU_MD5_HMAC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002403 },
2404 /* ABLKCIPHER algorithms. */
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002405 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2406 .alg.crypto = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002407 .cra_name = "cbc(aes)",
2408 .cra_driver_name = "cbc-aes-talitos",
2409 .cra_blocksize = AES_BLOCK_SIZE,
2410 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2411 CRYPTO_ALG_ASYNC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002412 .cra_ablkcipher = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002413 .min_keysize = AES_MIN_KEY_SIZE,
2414 .max_keysize = AES_MAX_KEY_SIZE,
2415 .ivsize = AES_BLOCK_SIZE,
2416 }
2417 },
2418 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2419 DESC_HDR_SEL0_AESU |
2420 DESC_HDR_MODE0_AESU_CBC,
2421 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002422 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2423 .alg.crypto = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002424 .cra_name = "cbc(des3_ede)",
2425 .cra_driver_name = "cbc-3des-talitos",
2426 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2427 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2428 CRYPTO_ALG_ASYNC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002429 .cra_ablkcipher = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002430 .min_keysize = DES3_EDE_KEY_SIZE,
2431 .max_keysize = DES3_EDE_KEY_SIZE,
2432 .ivsize = DES3_EDE_BLOCK_SIZE,
2433 }
2434 },
2435 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2436 DESC_HDR_SEL0_DEU |
2437 DESC_HDR_MODE0_DEU_CBC |
2438 DESC_HDR_MODE0_DEU_3DES,
Lee Nipper497f2e62010-05-19 19:20:36 +10002439 },
2440 /* AHASH algorithms. */
2441 { .type = CRYPTO_ALG_TYPE_AHASH,
2442 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002443 .halg.digestsize = MD5_DIGEST_SIZE,
2444 .halg.base = {
2445 .cra_name = "md5",
2446 .cra_driver_name = "md5-talitos",
Martin Hicksb3988612015-03-03 08:21:34 -05002447 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
Lee Nipper497f2e62010-05-19 19:20:36 +10002448 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2449 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002450 }
2451 },
2452 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2453 DESC_HDR_SEL0_MDEUA |
2454 DESC_HDR_MODE0_MDEU_MD5,
2455 },
2456 { .type = CRYPTO_ALG_TYPE_AHASH,
2457 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002458 .halg.digestsize = SHA1_DIGEST_SIZE,
2459 .halg.base = {
2460 .cra_name = "sha1",
2461 .cra_driver_name = "sha1-talitos",
2462 .cra_blocksize = SHA1_BLOCK_SIZE,
2463 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2464 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002465 }
2466 },
2467 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2468 DESC_HDR_SEL0_MDEUA |
2469 DESC_HDR_MODE0_MDEU_SHA1,
2470 },
2471 { .type = CRYPTO_ALG_TYPE_AHASH,
2472 .alg.hash = {
Kim Phillips60f208d2010-05-19 19:21:53 +10002473 .halg.digestsize = SHA224_DIGEST_SIZE,
2474 .halg.base = {
2475 .cra_name = "sha224",
2476 .cra_driver_name = "sha224-talitos",
2477 .cra_blocksize = SHA224_BLOCK_SIZE,
2478 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2479 CRYPTO_ALG_ASYNC,
Kim Phillips60f208d2010-05-19 19:21:53 +10002480 }
2481 },
2482 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2483 DESC_HDR_SEL0_MDEUA |
2484 DESC_HDR_MODE0_MDEU_SHA224,
2485 },
2486 { .type = CRYPTO_ALG_TYPE_AHASH,
2487 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002488 .halg.digestsize = SHA256_DIGEST_SIZE,
2489 .halg.base = {
2490 .cra_name = "sha256",
2491 .cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

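/*
 * Each registered algorithm is carried by one of these wrappers: a private
 * copy of its template (including the descriptor header used to match
 * hardware capabilities) plus the talitos device that owns the registration,
 * linked into the per-device alg_list.
 */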
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

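/*
 * Common transform init: recover the talitos_crypto_alg that owns this
 * crypto_alg (via the ahash wrapper for hash algorithms), record the owning
 * device in the context, assign a SEC channel round-robin, and seed the
 * per-tfm descriptor header template with done notification enabled.
 */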
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

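/* AEAD transforms additionally seed a random first IV for IV generation. */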
static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}

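/*
 * ahash transforms start out with no key loaded and reserve room for the
 * driver's per-request hash state behind each ahash_request.
 */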
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

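/*
 * Teardown: unregister every algorithm left on alg_list and the hwrng (when
 * the RNG execution unit was advertised), then release the channel fifos,
 * IRQs, tasklets, the register mapping and the private state.
 */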
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

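/*
 * Instantiate one algorithm from its template: copy the template, wire up
 * the type-specific crypto API entry points, and apply feature fixups such
 * as rejecting HMAC on parts without TALITOS_FTR_HMAC_OK or falling back to
 * a software-initialized SHA-224 where the MDEU cannot seed it in hardware.
 */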
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

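/*
 * IRQ setup: SEC1 devices use a single combined interrupt for all channels;
 * SEC2+ devices may provide either one combined line or a pair of lines
 * split between channels 0/2 and 1/3, falling back to the combined handler
 * when the second line is absent.
 */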
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

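/*
 * Probe: map the SEC register block, read the capability properties from the
 * device tree, locate the per-unit register offsets for the detected SEC
 * generation, set up IRQs, done tasklets and channel fifos, reset the
 * hardware, then register the RNG (when advertised) and every supported
 * algorithm.
 *
 * For reference, a node of the shape below is what the property lookups
 * expect (illustrative values only; the real masks, channel count and
 * interrupt specifier depend on the SoC, see the fsl,sec2.0 binding):
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <29 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */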
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

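	/*
	 * Per-channel setup: point at each channel's register window, init
	 * the head/tail locks, and allocate a request fifo sized to the next
	 * power of two above the advertised fifo length so ring indices can
	 * wrap by masking; submit_count bounds the outstanding requests.
	 */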
	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

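/*
 * Match table: each SEC generation's compatible string is only compiled in
 * when the corresponding Kconfig option (CRYPTO_DEV_TALITOS1/2) is enabled,
 * so a single-generation build binds only to its own hardware.
 */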
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

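/*
 * Platform driver glue: probe/remove run when a device node from
 * talitos_match is found; module_platform_driver() below generates the
 * module init/exit that registers this driver with the platform bus.
 */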
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");