/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

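/*
 * Descriptor-pointer helpers.  SEC1-style parts carry a 32-bit address plus
 * a separate 16-bit length field (len1), while SEC2/3 parts add an extended
 * address byte (eptr) for 36-bit addressing and a j_extent field; the
 * is_sec1 flag selects which layout gets filled in.
 */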
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);

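/*
 * Typical talitos_submit() usage, mirroring the ipsec_esp() path later in
 * this file: build the descriptor inside an edesc, then
 *
 *	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
 *	if (ret != -EINPROGRESS)
 *		unmap the descriptor pointers and kfree() the edesc,
 *		since the request was not queued;
 *
 * -EINPROGRESS means the descriptor is on the channel fifo and the callback
 * will run later from the done tasklet (or from flush_channel() on error).
 */
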
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
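/*
 * Note the differing ISR layouts above: SEC1 reports channel-done status in
 * bits 28, 30, 16 and 18 (the 0x10000000/0x40000000/0x00010000/0x00040000
 * tests), whereas SEC2/3 uses bits 0, 2, 4 and 6.  SEC1 also unmasks done
 * interrupts by clearing IMR bits while SEC2/3 unmasks by setting them.
 */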

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
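/*
 * aead_setkey() above lays the two keys out back to back in ctx->key: the
 * authentication (HMAC) key first, then the cipher key.  ipsec_esp() later
 * points desc->ptr[0] at the first ctx->authkeylen bytes and desc->ptr[3]
 * at the ctx->enckeylen bytes that follow.
 */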

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool src_chained;
	bool dst_chained;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}
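/*
 * sg_to_link_tbl_offset() skips 'offset' bytes into the DMA-mapped
 * scatterlist and emits one link-table entry per covered segment until
 * 'cryptlen' bytes are described, tagging the final entry with
 * DESC_PTR_LNKTBL_RETURN so the hardware knows where the table ends;
 * sg_to_link_tbl() is simply the zero-offset wrapper.
 */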
1097
1098/*
1099 * fill in and submit ipsec_esp descriptor
1100 */
Lee Nipper56af8cd2009-03-29 15:50:50 +08001101static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001102 void (*callback)(struct device *dev,
1103 struct talitos_desc *desc,
1104 void *context, int error))
Kim Phillips9c4a7962008-06-23 19:50:15 +08001105{
1106 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001107 unsigned int authsize = crypto_aead_authsize(aead);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001108 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1109 struct device *dev = ctx->dev;
1110 struct talitos_desc *desc = &edesc->desc;
1111 unsigned int cryptlen = areq->cryptlen;
Kim Phillipse41256f2009-08-13 11:49:06 +10001112 unsigned int ivsize = crypto_aead_ivsize(aead);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001113 int tbl_off = 0;
Kim Phillipsfa86a262008-07-17 20:20:06 +08001114 int sg_count, ret;
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001115 int sg_link_tbl_len;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001116
1117 /* hmac key */
1118 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001119 DMA_TO_DEVICE);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001120
Herbert Xuaeb4c132015-07-30 17:53:22 +08001121 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1122 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1123 : DMA_TO_DEVICE,
1124 edesc->src_chained);
1125
Kim Phillips9c4a7962008-06-23 19:50:15 +08001126 /* hmac data */
Herbert Xuaeb4c132015-07-30 17:53:22 +08001127 desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1128 if (sg_count > 1 &&
1129 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1130 areq->assoclen,
1131 &edesc->link_tbl[tbl_off])) > 1) {
1132 tbl_off += ret;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001133
1134 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001135 sizeof(struct talitos_ptr), 0);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001136 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1137
Horia Geanta79fd31d2012-08-02 17:16:40 +03001138 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1139 edesc->dma_len, DMA_BIDIRECTIONAL);
1140 } else {
Herbert Xuaeb4c132015-07-30 17:53:22 +08001141 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001142 desc->ptr[1].j_extent = 0;
1143 }
1144
Kim Phillips9c4a7962008-06-23 19:50:15 +08001145 /* cipher iv */
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001146 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001147 desc->ptr[2].len = cpu_to_be16(ivsize);
1148 desc->ptr[2].j_extent = 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001149
1150 /* cipher key */
1151 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001152 (char *)&ctx->key + ctx->authkeylen,
Kim Phillips9c4a7962008-06-23 19:50:15 +08001153 DMA_TO_DEVICE);
1154
1155 /*
1156 * cipher in
1157 * map and adjust cipher len to aead request cryptlen.
1158 * extent is bytes of HMAC postpended to ciphertext,
1159 * typically 12 for ipsec
1160 */
1161 desc->ptr[4].len = cpu_to_be16(cryptlen);
1162 desc->ptr[4].j_extent = authsize;
1163
Herbert Xuaeb4c132015-07-30 17:53:22 +08001164 sg_link_tbl_len = cryptlen;
1165 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1166 sg_link_tbl_len += authsize;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001167
Herbert Xuaeb4c132015-07-30 17:53:22 +08001168 if (sg_count > 1 &&
1169 (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
1170 sg_link_tbl_len,
1171 &edesc->link_tbl[tbl_off])) > 1) {
1172 tbl_off += ret;
1173 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1174 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1175 tbl_off *
1176 sizeof(struct talitos_ptr), 0);
1177 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1178 edesc->dma_len,
1179 DMA_BIDIRECTIONAL);
1180 } else
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001181 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001182
1183 /* cipher out */
1184 desc->ptr[5].len = cpu_to_be16(cryptlen);
1185 desc->ptr[5].j_extent = authsize;
1186
Kim Phillipse938e462009-03-29 15:53:23 +08001187 if (areq->src != areq->dst)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001188 sg_count = talitos_map_sg(dev, areq->dst,
1189 edesc->dst_nents ? : 1,
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001190 DMA_FROM_DEVICE, edesc->dst_chained);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001191
Herbert Xuaeb4c132015-07-30 17:53:22 +08001192 edesc->icv_ool = false;
1193
1194 if (sg_count > 1 &&
1195 (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
1196 areq->assoclen, cryptlen,
1197 &edesc->link_tbl[tbl_off])) >
1198 1) {
Horia Geanta79fd31d2012-08-02 17:16:40 +03001199 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
Kim Phillips9c4a7962008-06-23 19:50:15 +08001200
Kim Phillips81eb0242009-08-13 11:51:51 +10001201 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001202 tbl_off * sizeof(struct talitos_ptr), 0);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001203
Lee Nipperf3c85bc2008-07-30 16:26:57 +08001204 /* Add an entry to the link table for ICV data */
Horia Geanta79fd31d2012-08-02 17:16:40 +03001205 tbl_ptr += sg_count - 1;
1206 tbl_ptr->j_extent = 0;
1207 tbl_ptr++;
1208 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1209 tbl_ptr->len = cpu_to_be16(authsize);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001210
1211 /* icv data follows link tables */
Horia Geanta79fd31d2012-08-02 17:16:40 +03001212 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
Herbert Xuaeb4c132015-07-30 17:53:22 +08001213 (edesc->src_nents + edesc->dst_nents +
1214 2) * sizeof(struct talitos_ptr) +
1215 authsize, 0);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001216 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1217 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1218 edesc->dma_len, DMA_BIDIRECTIONAL);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001219
1220 edesc->icv_ool = true;
1221 } else
1222 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001223
1224 /* iv out */
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001225 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
Kim Phillips9c4a7962008-06-23 19:50:15 +08001226 DMA_FROM_DEVICE);
1227
Kim Phillips5228f0f2011-07-15 11:21:38 +08001228 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Kim Phillipsfa86a262008-07-17 20:20:06 +08001229 if (ret != -EINPROGRESS) {
1230 ipsec_esp_unmap(dev, edesc, areq);
1231 kfree(edesc);
1232 }
1233 return ret;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001234}
1235
Kim Phillips9c4a7962008-06-23 19:50:15 +08001236/*
1237 * derive number of elements in scatterlist
1238 */
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001239static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001240{
1241 struct scatterlist *sg = sg_list;
1242 int sg_nents = 0;
1243
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001244 *chained = false;
Horia Geant?bde90792015-05-12 11:28:05 +03001245 while (nbytes > 0 && sg) {
Kim Phillips9c4a7962008-06-23 19:50:15 +08001246 sg_nents++;
1247 nbytes -= sg->length;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001248 if (!sg_is_last(sg) && (sg + 1)->length == 0)
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001249 *chained = true;
Cristian Stoica5be4d4c2015-01-20 10:06:16 +02001250 sg = sg_next(sg);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001251 }
1252
1253 return sg_nents;
1254}
1255
1256/*
Lee Nipper56af8cd2009-03-29 15:50:50 +08001257 * allocate and map the extended descriptor
Kim Phillips9c4a7962008-06-23 19:50:15 +08001258 */
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001259static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1260 struct scatterlist *src,
1261 struct scatterlist *dst,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001262 u8 *iv,
1263 unsigned int assoclen,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001264 unsigned int cryptlen,
1265 unsigned int authsize,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001266 unsigned int ivsize,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001267 int icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001268 u32 cryptoflags,
1269 bool encrypt)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001270{
Lee Nipper56af8cd2009-03-29 15:50:50 +08001271 struct talitos_edesc *edesc;
Herbert Xuaeb4c132015-07-30 17:53:22 +08001272 int src_nents, dst_nents, alloc_len, dma_len;
1273 bool src_chained = false, dst_chained = false;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001274 dma_addr_t iv_dma = 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001275 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
Kim Phillips586725f2008-07-17 20:19:18 +08001276 GFP_ATOMIC;
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001277 struct talitos_private *priv = dev_get_drvdata(dev);
1278 bool is_sec1 = has_ftr_sec1(priv);
1279 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001280
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001281 if (cryptlen + authsize > max_len) {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001282 dev_err(dev, "length exceeds h/w max limit\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08001283 return ERR_PTR(-EINVAL);
1284 }
1285
Horia Geanta935e99a2013-11-19 14:57:49 +02001286 if (ivsize)
Horia Geanta79fd31d2012-08-02 17:16:40 +03001287 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1288
Horia Geanta62293a32013-11-28 15:11:17 +02001289 if (!dst || dst == src) {
Herbert Xuaeb4c132015-07-30 17:53:22 +08001290 src_nents = sg_count(src, assoclen + cryptlen + authsize,
1291 &src_chained);
Horia Geanta62293a32013-11-28 15:11:17 +02001292 src_nents = (src_nents == 1) ? 0 : src_nents;
1293 dst_nents = dst ? src_nents : 0;
1294 } else { /* dst && dst != src */
Herbert Xuaeb4c132015-07-30 17:53:22 +08001295 src_nents = sg_count(src, assoclen + cryptlen +
1296 (encrypt ? 0 : authsize),
Horia Geanta62293a32013-11-28 15:11:17 +02001297 &src_chained);
1298 src_nents = (src_nents == 1) ? 0 : src_nents;
Herbert Xuaeb4c132015-07-30 17:53:22 +08001299 dst_nents = sg_count(dst, assoclen + cryptlen +
1300 (encrypt ? authsize : 0),
Horia Geanta62293a32013-11-28 15:11:17 +02001301 &dst_chained);
1302 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001303 }
1304
1305 /*
1306 * allocate space for base edesc plus the link tables,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001307 * allowing for two separate entries for AD and generated ICV (+ 2),
1308 * and space for two sets of ICVs (stashed and generated)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001309 */
Lee Nipper56af8cd2009-03-29 15:50:50 +08001310 alloc_len = sizeof(struct talitos_edesc);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001311 if (src_nents || dst_nents) {
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001312 if (is_sec1)
Dan Carpenter608f37d2015-05-11 13:10:09 +03001313 dma_len = (src_nents ? cryptlen : 0) +
1314 (dst_nents ? cryptlen : 0);
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001315 else
Herbert Xuaeb4c132015-07-30 17:53:22 +08001316 dma_len = (src_nents + dst_nents + 2) *
1317 sizeof(struct talitos_ptr) + authsize * 2;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001318 alloc_len += dma_len;
1319 } else {
1320 dma_len = 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001321 alloc_len += icv_stashing ? authsize : 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001322 }
1323
Kim Phillips586725f2008-07-17 20:19:18 +08001324 edesc = kmalloc(alloc_len, GFP_DMA | flags);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001325 if (!edesc) {
Horia Geanta79fd31d2012-08-02 17:16:40 +03001326 if (iv_dma)
1327 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Horia Geanta935e99a2013-11-19 14:57:49 +02001328
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001329 dev_err(dev, "could not allocate edescriptor\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08001330 return ERR_PTR(-ENOMEM);
1331 }
1332
1333 edesc->src_nents = src_nents;
1334 edesc->dst_nents = dst_nents;
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001335 edesc->src_chained = src_chained;
1336 edesc->dst_chained = dst_chained;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001337 edesc->iv_dma = iv_dma;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001338 edesc->dma_len = dma_len;
Lee Nipper497f2e62010-05-19 19:20:36 +10001339 if (dma_len)
1340 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1341 edesc->dma_len,
1342 DMA_BIDIRECTIONAL);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001343
1344 return edesc;
1345}
1346
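/* allocate and map an extended descriptor for an AEAD request */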
Horia Geanta79fd31d2012-08-02 17:16:40 +03001347static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
Horia Geanta62293a32013-11-28 15:11:17 +02001348 int icv_stashing, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001349{
1350 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001351 unsigned int authsize = crypto_aead_authsize(authenc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001352 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001353 unsigned int ivsize = crypto_aead_ivsize(authenc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001354
Herbert Xuaeb4c132015-07-30 17:53:22 +08001355 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001356 iv, areq->assoclen, areq->cryptlen,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001357 authsize, ivsize, icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001358 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001359}
1360
Lee Nipper56af8cd2009-03-29 15:50:50 +08001361static int aead_encrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001362{
1363 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1364 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001365 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001366
1367 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001368 edesc = aead_edesc_alloc(req, req->iv, 0, true);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001369 if (IS_ERR(edesc))
1370 return PTR_ERR(edesc);
1371
1372 /* set encrypt */
Lee Nipper70bcaca2008-07-03 19:08:46 +08001373 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001374
Herbert Xuaeb4c132015-07-30 17:53:22 +08001375 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001376}
1377
Lee Nipper56af8cd2009-03-29 15:50:50 +08001378static int aead_decrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001379{
1380 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001381 unsigned int authsize = crypto_aead_authsize(authenc);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001382 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001383 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001384 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001385 struct scatterlist *sg;
1386 void *icvdata;
1387
1388 req->cryptlen -= authsize;
1389
1390 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001391 edesc = aead_edesc_alloc(req, req->iv, 1, false);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001392 if (IS_ERR(edesc))
1393 return PTR_ERR(edesc);
1394
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001395 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
Kim Phillipse938e462009-03-29 15:53:23 +08001396 ((!edesc->src_nents && !edesc->dst_nents) ||
1397 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
Kim Phillips9c4a7962008-06-23 19:50:15 +08001398
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001399 /* decrypt and check the ICV */
Kim Phillipse938e462009-03-29 15:53:23 +08001400 edesc->desc.hdr = ctx->desc_hdr_template |
1401 DESC_HDR_DIR_INBOUND |
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001402 DESC_HDR_MODE1_MDEU_CICV;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001403
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001404 /* reset integrity check result bits */
1405 edesc->desc.hdr_lo = 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001406
Herbert Xuaeb4c132015-07-30 17:53:22 +08001407 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001408 }
Kim Phillipse938e462009-03-29 15:53:23 +08001409
1410 /* Have to check the ICV with software */
1411 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1412
1413 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1414 if (edesc->dma_len)
Herbert Xuaeb4c132015-07-30 17:53:22 +08001415 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1416 edesc->dst_nents + 2];
Kim Phillipse938e462009-03-29 15:53:23 +08001417 else
1418 icvdata = &edesc->link_tbl[0];
1419
1420 sg = sg_last(req->src, edesc->src_nents ? : 1);
1421
Herbert Xuaeb4c132015-07-30 17:53:22 +08001422 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
Kim Phillipse938e462009-03-29 15:53:23 +08001423
Herbert Xuaeb4c132015-07-30 17:53:22 +08001424 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001425}
1426
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001427static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1428 const u8 *key, unsigned int keylen)
1429{
1430 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001431
1432 memcpy(&ctx->key, key, keylen);
1433 ctx->keylen = keylen;
1434
1435 return 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001436}
1437
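/*
 * unmap the data scatterlists of a completed request; on SEC1, multi-segment
 * destination data is copied back out of the driver's bounce buffer rather
 * than unmapped, while on SEC2+ the scatterlists are simply unmapped
 */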
LEROY Christophe032d1972015-04-17 16:31:51 +02001438static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1439 struct scatterlist *dst, unsigned int len,
1440 struct talitos_edesc *edesc)
1441{
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001442 struct talitos_private *priv = dev_get_drvdata(dev);
1443 bool is_sec1 = has_ftr_sec1(priv);
1444
1445 if (is_sec1) {
1446 if (!edesc->src_nents) {
1447 dma_unmap_sg(dev, src, 1,
1448 dst != src ? DMA_TO_DEVICE
1449 : DMA_BIDIRECTIONAL);
1450 }
1451 if (dst && edesc->dst_nents) {
1452 dma_sync_single_for_device(dev,
1453 edesc->dma_link_tbl + len,
1454 len, DMA_FROM_DEVICE);
1455 sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1456 edesc->buf + len, len);
1457 } else if (dst && dst != src) {
1458 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1459 }
1460 } else {
1461 talitos_sg_unmap(dev, edesc, src, dst);
1462 }
LEROY Christophe032d1972015-04-17 16:31:51 +02001463}
1464
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001465static void common_nonsnoop_unmap(struct device *dev,
1466 struct talitos_edesc *edesc,
1467 struct ablkcipher_request *areq)
1468{
1469 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
LEROY Christophe032d1972015-04-17 16:31:51 +02001470
1471 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001472 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1473 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1474
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001475 if (edesc->dma_len)
1476 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1477 DMA_BIDIRECTIONAL);
1478}
1479
1480static void ablkcipher_done(struct device *dev,
1481 struct talitos_desc *desc, void *context,
1482 int err)
1483{
1484 struct ablkcipher_request *areq = context;
Kim Phillips19bbbc62009-03-29 15:53:59 +08001485 struct talitos_edesc *edesc;
1486
1487 edesc = container_of(desc, struct talitos_edesc, desc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001488
1489 common_nonsnoop_unmap(dev, edesc, areq);
1490
1491 kfree(edesc);
1492
1493 areq->base.complete(&areq->base, err);
1494}
1495
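/*
 * point a descriptor pointer at the request source data: a single segment is
 * mapped directly; on SEC1 multiple segments are linearized into the bounce
 * buffer, on SEC2+ they are described by a link table.  Returns the number of
 * segments mapped.
 */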
LEROY Christophe032d1972015-04-17 16:31:51 +02001496static int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1497 unsigned int len, struct talitos_edesc *edesc,
1498 enum dma_data_direction dir, struct talitos_ptr *ptr)
1499{
1500 int sg_count;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001501 struct talitos_private *priv = dev_get_drvdata(dev);
1502 bool is_sec1 = has_ftr_sec1(priv);
LEROY Christophe032d1972015-04-17 16:31:51 +02001503
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001504 to_talitos_ptr_len(ptr, len, is_sec1);
LEROY Christophe032d1972015-04-17 16:31:51 +02001505
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001506 if (is_sec1) {
1507 sg_count = edesc->src_nents ? : 1;
LEROY Christophe032d1972015-04-17 16:31:51 +02001508
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001509 if (sg_count == 1) {
1510 dma_map_sg(dev, src, 1, dir);
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001511 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001512 } else {
1513 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1514 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1515 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1516 len, DMA_TO_DEVICE);
1517 }
1518 } else {
1519 to_talitos_ptr_extent_clear(ptr, is_sec1);
1520
1521 sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
1522 edesc->src_chained);
1523
1524 if (sg_count == 1) {
1525 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1526 } else {
1527 sg_count = sg_to_link_tbl(src, sg_count, len,
1528 &edesc->link_tbl[0]);
1529 if (sg_count > 1) {
1530 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1531 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1532 dma_sync_single_for_device(dev,
1533 edesc->dma_link_tbl,
1534 edesc->dma_len,
1535 DMA_BIDIRECTIONAL);
1536 } else {
1537 /* Only one segment now, so no link tbl needed */
1538 to_talitos_ptr(ptr, sg_dma_address(src),
1539 is_sec1);
1540 }
LEROY Christophe032d1972015-04-17 16:31:51 +02001541 }
1542 }
1543 return sg_count;
1544}
1545
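/*
 * point a descriptor pointer at the request destination: a single segment is
 * mapped directly; on SEC1 multi-segment output lands in the bounce buffer,
 * on SEC2+ it is described by a link table
 */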
1546static void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1547 unsigned int len, struct talitos_edesc *edesc,
1548 enum dma_data_direction dir,
1549 struct talitos_ptr *ptr, int sg_count)
1550{
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001551 struct talitos_private *priv = dev_get_drvdata(dev);
1552 bool is_sec1 = has_ftr_sec1(priv);
1553
LEROY Christophe032d1972015-04-17 16:31:51 +02001554 if (dir != DMA_NONE)
1555 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
1556 dir, edesc->dst_chained);
1557
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001558 to_talitos_ptr_len(ptr, len, is_sec1);
LEROY Christophe032d1972015-04-17 16:31:51 +02001559
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001560 if (is_sec1) {
1561 if (sg_count == 1) {
1562 if (dir != DMA_NONE)
1563 dma_map_sg(dev, dst, 1, dir);
1564 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1565 } else {
1566 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1567 dma_sync_single_for_device(dev,
1568 edesc->dma_link_tbl + len,
1569 len, DMA_FROM_DEVICE);
1570 }
1571 } else {
1572 to_talitos_ptr_extent_clear(ptr, is_sec1);
1573
1574 if (sg_count == 1) {
1575 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1576 } else {
1577 struct talitos_ptr *link_tbl_ptr =
1578 &edesc->link_tbl[edesc->src_nents + 1];
1579
1580 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1581 (edesc->src_nents + 1) *
1582 sizeof(struct talitos_ptr), 0);
1583 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
Horia Geanta42e8b0d2015-05-11 20:04:56 +03001584 sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001585 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1586 edesc->dma_len,
1587 DMA_BIDIRECTIONAL);
1588 }
LEROY Christophe032d1972015-04-17 16:31:51 +02001589 }
1590}
1591
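/*
 * build and submit the descriptor for an ablkcipher request:
 * ptr[1] = IV in, ptr[2] = cipher key, ptr[3] = data in, ptr[4] = data out,
 * ptr[5] = IV out; ptr[0] and ptr[6] stay empty
 */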
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001592static int common_nonsnoop(struct talitos_edesc *edesc,
1593 struct ablkcipher_request *areq,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001594 void (*callback) (struct device *dev,
1595 struct talitos_desc *desc,
1596 void *context, int error))
1597{
1598 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1599 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1600 struct device *dev = ctx->dev;
1601 struct talitos_desc *desc = &edesc->desc;
1602 unsigned int cryptlen = areq->nbytes;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001603 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001604 int sg_count, ret;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001605 struct talitos_private *priv = dev_get_drvdata(dev);
1606 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001607
1608 /* first DWORD empty */
LEROY Christophe2529bc32015-04-17 16:31:49 +02001609 desc->ptr[0] = zero_entry;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001610
1611 /* cipher iv */
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001612 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1613 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1614 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001615
1616 /* cipher key */
1617 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001618 (char *)&ctx->key, DMA_TO_DEVICE);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001619
1620 /*
1621 * cipher in
1622 */
LEROY Christophe032d1972015-04-17 16:31:51 +02001623 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1624 (areq->src == areq->dst) ?
1625 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1626 &desc->ptr[3]);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001627
1628 /* cipher out */
LEROY Christophe032d1972015-04-17 16:31:51 +02001629 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1630 (areq->src == areq->dst) ? DMA_NONE
1631 : DMA_FROM_DEVICE,
1632 &desc->ptr[4], sg_count);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001633
1634 /* iv out */
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001635 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001636 DMA_FROM_DEVICE);
1637
1638 /* last DWORD empty */
LEROY Christophe2529bc32015-04-17 16:31:49 +02001639 desc->ptr[6] = zero_entry;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001640
Kim Phillips5228f0f2011-07-15 11:21:38 +08001641 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001642 if (ret != -EINPROGRESS) {
1643 common_nonsnoop_unmap(dev, edesc, areq);
1644 kfree(edesc);
1645 }
1646 return ret;
1647}
1648
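/* allocate and map an extended descriptor for an ablkcipher request */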
Kim Phillipse938e462009-03-29 15:53:23 +08001649static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
Horia Geanta62293a32013-11-28 15:11:17 +02001650 areq, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001651{
1652 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1653 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001654 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001655
Herbert Xuaeb4c132015-07-30 17:53:22 +08001656 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001657 areq->info, 0, areq->nbytes, 0, ivsize, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001658 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001659}
1660
1661static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1662{
1663 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1664 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1665 struct talitos_edesc *edesc;
1666
1667 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001668 edesc = ablkcipher_edesc_alloc(areq, true);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001669 if (IS_ERR(edesc))
1670 return PTR_ERR(edesc);
1671
1672 /* set encrypt */
1673 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1674
Kim Phillipsfebec542011-07-15 11:21:39 +08001675 return common_nonsnoop(edesc, areq, ablkcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001676}
1677
1678static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1679{
1680 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1681 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1682 struct talitos_edesc *edesc;
1683
1684 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001685 edesc = ablkcipher_edesc_alloc(areq, false);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001686 if (IS_ERR(edesc))
1687 return PTR_ERR(edesc);
1688
1689 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1690
Kim Phillipsfebec542011-07-15 11:21:39 +08001691 return common_nonsnoop(edesc, areq, ablkcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001692}
1693
Lee Nipper497f2e62010-05-19 19:20:36 +10001694static void common_nonsnoop_hash_unmap(struct device *dev,
1695 struct talitos_edesc *edesc,
1696 struct ahash_request *areq)
1697{
1698 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001699 struct talitos_private *priv = dev_get_drvdata(dev);
1700 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper497f2e62010-05-19 19:20:36 +10001701
1702 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1703
LEROY Christophe032d1972015-04-17 16:31:51 +02001704 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1705
Lee Nipper497f2e62010-05-19 19:20:36 +10001706 /* When using hashctx-in, must unmap it. */
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001707 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
Lee Nipper497f2e62010-05-19 19:20:36 +10001708 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1709 DMA_TO_DEVICE);
1710
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001711 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
Lee Nipper497f2e62010-05-19 19:20:36 +10001712 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1713 DMA_TO_DEVICE);
1714
Lee Nipper497f2e62010-05-19 19:20:36 +10001715 if (edesc->dma_len)
1716 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1717 DMA_BIDIRECTIONAL);
1718
1719}
1720
1721static void ahash_done(struct device *dev,
1722 struct talitos_desc *desc, void *context,
1723 int err)
1724{
1725 struct ahash_request *areq = context;
1726 struct talitos_edesc *edesc =
1727 container_of(desc, struct talitos_edesc, desc);
1728 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1729
1730 if (!req_ctx->last && req_ctx->to_hash_later) {
1731 /* Position any partial block for next update/final/finup */
1732 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
Lee Nipper5e833bc2010-06-16 15:29:15 +10001733 req_ctx->nbuf = req_ctx->to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10001734 }
1735 common_nonsnoop_hash_unmap(dev, edesc, areq);
1736
1737 kfree(edesc);
1738
1739 areq->base.complete(&areq->base, err);
1740}
1741
LEROY Christophe2d029052015-04-17 16:32:18 +02001742/*
1743 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
1744 * ourselves and submit a padded block
1745 */
1746static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1747 struct talitos_edesc *edesc,
1748 struct talitos_ptr *ptr)
1749{
1750 static u8 padded_hash[64] = {
1751 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1752 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1753 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1754 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1755 };
1756
1757 pr_err_once("Bug in SEC1, padding ourself\n");
1758 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1759 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1760 (char *)padded_hash, DMA_TO_DEVICE);
1761}
1762
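/*
 * build and submit a hash descriptor: ptr[1] = incoming hash context (unless
 * the hardware initializes it), ptr[2] = HMAC key (if any), ptr[3] = data in,
 * ptr[5] = digest out on the final operation or saved context otherwise
 */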
Lee Nipper497f2e62010-05-19 19:20:36 +10001763static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1764 struct ahash_request *areq, unsigned int length,
1765 void (*callback) (struct device *dev,
1766 struct talitos_desc *desc,
1767 void *context, int error))
1768{
1769 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1770 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1771 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1772 struct device *dev = ctx->dev;
1773 struct talitos_desc *desc = &edesc->desc;
LEROY Christophe032d1972015-04-17 16:31:51 +02001774 int ret;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001775 struct talitos_private *priv = dev_get_drvdata(dev);
1776 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper497f2e62010-05-19 19:20:36 +10001777
1778 /* first DWORD empty */
1779 desc->ptr[0] = zero_entry;
1780
Kim Phillips60f208d2010-05-19 19:21:53 +10001781 /* hash context in */
1782 if (!req_ctx->first || req_ctx->swinit) {
Lee Nipper497f2e62010-05-19 19:20:36 +10001783 map_single_talitos_ptr(dev, &desc->ptr[1],
1784 req_ctx->hw_context_size,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001785 (char *)req_ctx->hw_context,
Lee Nipper497f2e62010-05-19 19:20:36 +10001786 DMA_TO_DEVICE);
Kim Phillips60f208d2010-05-19 19:21:53 +10001787 req_ctx->swinit = 0;
Lee Nipper497f2e62010-05-19 19:20:36 +10001788 } else {
1789 desc->ptr[1] = zero_entry;
1790 /* Indicate next op is not the first. */
1791 req_ctx->first = 0;
1792 }
1793
1794 /* HMAC key */
1795 if (ctx->keylen)
1796 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001797 (char *)&ctx->key, DMA_TO_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001798 else
1799 desc->ptr[2] = zero_entry;
1800
1801 /*
1802 * data in
1803 */
LEROY Christophe032d1972015-04-17 16:31:51 +02001804 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1805 DMA_TO_DEVICE, &desc->ptr[3]);
Lee Nipper497f2e62010-05-19 19:20:36 +10001806
1807 /* fifth DWORD empty */
1808 desc->ptr[4] = zero_entry;
1809
1810 /* hash/HMAC out -or- hash context out */
1811 if (req_ctx->last)
1812 map_single_talitos_ptr(dev, &desc->ptr[5],
1813 crypto_ahash_digestsize(tfm),
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001814 areq->result, DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001815 else
1816 map_single_talitos_ptr(dev, &desc->ptr[5],
1817 req_ctx->hw_context_size,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001818 req_ctx->hw_context, DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001819
1820 /* last DWORD empty */
1821 desc->ptr[6] = zero_entry;
1822
LEROY Christophe2d029052015-04-17 16:32:18 +02001823 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1824 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1825
Kim Phillips5228f0f2011-07-15 11:21:38 +08001826 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10001827 if (ret != -EINPROGRESS) {
1828 common_nonsnoop_hash_unmap(dev, edesc, areq);
1829 kfree(edesc);
1830 }
1831 return ret;
1832}
1833
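/* allocate and map an extended descriptor for an ahash request */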
1834static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1835 unsigned int nbytes)
1836{
1837 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1838 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1839 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1840
Herbert Xuaeb4c132015-07-30 17:53:22 +08001841 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001842 nbytes, 0, 0, 0, areq->base.flags, false);
Lee Nipper497f2e62010-05-19 19:20:36 +10001843}
1844
1845static int ahash_init(struct ahash_request *areq)
1846{
1847 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1848 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1849
1850 /* Initialize the context */
Lee Nipper5e833bc2010-06-16 15:29:15 +10001851 req_ctx->nbuf = 0;
Kim Phillips60f208d2010-05-19 19:21:53 +10001852 req_ctx->first = 1; /* first indicates h/w must init its context */
1853 req_ctx->swinit = 0; /* assume h/w init of context */
Lee Nipper497f2e62010-05-19 19:20:36 +10001854 req_ctx->hw_context_size =
1855 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1856 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1857 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1858
1859 return 0;
1860}
1861
Kim Phillips60f208d2010-05-19 19:21:53 +10001862/*
1863 * on h/w without explicit sha224 support, we initialize h/w context
1864 * manually with sha224 constants, and tell it to run sha256.
1865 */
1866static int ahash_init_sha224_swinit(struct ahash_request *areq)
1867{
1868 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1869
1870 ahash_init(areq);
1871 req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1872
Kim Phillipsa7524472010-09-23 15:56:38 +08001873 req_ctx->hw_context[0] = SHA224_H0;
1874 req_ctx->hw_context[1] = SHA224_H1;
1875 req_ctx->hw_context[2] = SHA224_H2;
1876 req_ctx->hw_context[3] = SHA224_H3;
1877 req_ctx->hw_context[4] = SHA224_H4;
1878 req_ctx->hw_context[5] = SHA224_H5;
1879 req_ctx->hw_context[6] = SHA224_H6;
1880 req_ctx->hw_context[7] = SHA224_H7;
Kim Phillips60f208d2010-05-19 19:21:53 +10001881
1882 /* init 64-bit count */
1883 req_ctx->hw_context[8] = 0;
1884 req_ctx->hw_context[9] = 0;
1885
1886 return 0;
1887}
1888
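/*
 * core of ahash update/final/finup: buffer input until more than one block is
 * available, hash the full blocks now (keeping a partial or full block back
 * between updates), and stash the remainder in req_ctx->bufnext for the next
 * call
 */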
Lee Nipper497f2e62010-05-19 19:20:36 +10001889static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1890{
1891 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1892 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1893 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1894 struct talitos_edesc *edesc;
1895 unsigned int blocksize =
1896 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1897 unsigned int nbytes_to_hash;
1898 unsigned int to_hash_later;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001899 unsigned int nsg;
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001900 bool chained;
Lee Nipper497f2e62010-05-19 19:20:36 +10001901
Lee Nipper5e833bc2010-06-16 15:29:15 +10001902 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1903 /* Buffer up to one whole block */
Lee Nipper497f2e62010-05-19 19:20:36 +10001904 sg_copy_to_buffer(areq->src,
1905 sg_count(areq->src, nbytes, &chained),
Lee Nipper5e833bc2010-06-16 15:29:15 +10001906 req_ctx->buf + req_ctx->nbuf, nbytes);
1907 req_ctx->nbuf += nbytes;
Lee Nipper497f2e62010-05-19 19:20:36 +10001908 return 0;
1909 }
1910
Lee Nipper5e833bc2010-06-16 15:29:15 +10001911 /* At least (blocksize + 1) bytes are available to hash */
1912 nbytes_to_hash = nbytes + req_ctx->nbuf;
1913 to_hash_later = nbytes_to_hash & (blocksize - 1);
1914
1915 if (req_ctx->last)
1916 to_hash_later = 0;
1917 else if (to_hash_later)
1918 /* There is a partial block. Hash the full block(s) now */
1919 nbytes_to_hash -= to_hash_later;
1920 else {
1921 /* Keep one block buffered */
1922 nbytes_to_hash -= blocksize;
1923 to_hash_later = blocksize;
1924 }
1925
1926 /* Chain in any previously buffered data */
1927 if (req_ctx->nbuf) {
1928 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1929 sg_init_table(req_ctx->bufsl, nsg);
1930 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1931 if (nsg > 1)
Dan Williamsc56f6d12015-08-07 18:15:13 +02001932 sg_chain(req_ctx->bufsl, 2, areq->src);
Lee Nipper497f2e62010-05-19 19:20:36 +10001933 req_ctx->psrc = req_ctx->bufsl;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001934 } else
Lee Nipper497f2e62010-05-19 19:20:36 +10001935 req_ctx->psrc = areq->src;
Lee Nipper497f2e62010-05-19 19:20:36 +10001936
Lee Nipper5e833bc2010-06-16 15:29:15 +10001937 if (to_hash_later) {
1938 int nents = sg_count(areq->src, nbytes, &chained);
Akinobu Mitad0525722013-07-08 16:01:55 -07001939 sg_pcopy_to_buffer(areq->src, nents,
Lee Nipper5e833bc2010-06-16 15:29:15 +10001940 req_ctx->bufnext,
1941 to_hash_later,
1942 nbytes - to_hash_later);
Lee Nipper497f2e62010-05-19 19:20:36 +10001943 }
Lee Nipper5e833bc2010-06-16 15:29:15 +10001944 req_ctx->to_hash_later = to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10001945
Lee Nipper5e833bc2010-06-16 15:29:15 +10001946 /* Allocate extended descriptor */
Lee Nipper497f2e62010-05-19 19:20:36 +10001947 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1948 if (IS_ERR(edesc))
1949 return PTR_ERR(edesc);
1950
1951 edesc->desc.hdr = ctx->desc_hdr_template;
1952
1953 /* On last one, request SEC to pad; otherwise continue */
1954 if (req_ctx->last)
1955 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1956 else
1957 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1958
Kim Phillips60f208d2010-05-19 19:21:53 +10001959 /* request SEC to INIT hash. */
1960 if (req_ctx->first && !req_ctx->swinit)
Lee Nipper497f2e62010-05-19 19:20:36 +10001961 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1962
1963 /* When the tfm context has a keylen, it's an HMAC.
1964 * A first or last (i.e. not middle) descriptor must request HMAC.
1965 */
1966 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1967 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1968
1969 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1970 ahash_done);
1971}
1972
1973static int ahash_update(struct ahash_request *areq)
1974{
1975 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1976
1977 req_ctx->last = 0;
1978
1979 return ahash_process_req(areq, areq->nbytes);
1980}
1981
1982static int ahash_final(struct ahash_request *areq)
1983{
1984 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1985
1986 req_ctx->last = 1;
1987
1988 return ahash_process_req(areq, 0);
1989}
1990
1991static int ahash_finup(struct ahash_request *areq)
1992{
1993 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1994
1995 req_ctx->last = 1;
1996
1997 return ahash_process_req(areq, areq->nbytes);
1998}
1999
2000static int ahash_digest(struct ahash_request *areq)
2001{
2002 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
Kim Phillips60f208d2010-05-19 19:21:53 +10002003 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10002004
Kim Phillips60f208d2010-05-19 19:21:53 +10002005 ahash->init(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10002006 req_ctx->last = 1;
2007
2008 return ahash_process_req(areq, areq->nbytes);
2009}
2010
Lee Nipper79b3a412011-11-21 16:13:25 +08002011struct keyhash_result {
2012 struct completion completion;
2013 int err;
2014};
2015
2016static void keyhash_complete(struct crypto_async_request *req, int err)
2017{
2018 struct keyhash_result *res = req->data;
2019
2020 if (err == -EINPROGRESS)
2021 return;
2022
2023 res->err = err;
2024 complete(&res->completion);
2025}
2026
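/*
 * synchronously digest an over-long HMAC key on the same tfm (with keylen
 * temporarily zeroed so it runs as a plain hash) and wait for completion
 */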
2027static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2028 u8 *hash)
2029{
2030 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2031
2032 struct scatterlist sg[1];
2033 struct ahash_request *req;
2034 struct keyhash_result hresult;
2035 int ret;
2036
2037 init_completion(&hresult.completion);
2038
2039 req = ahash_request_alloc(tfm, GFP_KERNEL);
2040 if (!req)
2041 return -ENOMEM;
2042
2043 /* Keep tfm keylen == 0 during hash of the long key */
2044 ctx->keylen = 0;
2045 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2046 keyhash_complete, &hresult);
2047
2048 sg_init_one(&sg[0], key, keylen);
2049
2050 ahash_request_set_crypt(req, sg, hash, keylen);
2051 ret = crypto_ahash_digest(req);
2052 switch (ret) {
2053 case 0:
2054 break;
2055 case -EINPROGRESS:
2056 case -EBUSY:
2057 ret = wait_for_completion_interruptible(
2058 &hresult.completion);
2059 if (!ret)
2060 ret = hresult.err;
2061 break;
2062 default:
2063 break;
2064 }
2065 ahash_request_free(req);
2066
2067 return ret;
2068}
2069
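/*
 * HMAC setkey: keys up to one block long are used as-is, longer keys are
 * first digested down to the hash size, per the usual HMAC convention
 */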
2070static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2071 unsigned int keylen)
2072{
2073 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2074 unsigned int blocksize =
2075 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2076 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2077 unsigned int keysize = keylen;
2078 u8 hash[SHA512_DIGEST_SIZE];
2079 int ret;
2080
2081 if (keylen <= blocksize)
2082 memcpy(ctx->key, key, keysize);
2083 else {
2084 /* Must get the hash of the long key */
2085 ret = keyhash(tfm, key, keylen, hash);
2086
2087 if (ret) {
2088 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2089 return -EINVAL;
2090 }
2091
2092 keysize = digestsize;
2093 memcpy(ctx->key, hash, digestsize);
2094 }
2095
2096 ctx->keylen = keysize;
2097
2098 return 0;
2099}
2100
2101
Kim Phillips9c4a7962008-06-23 19:50:15 +08002102struct talitos_alg_template {
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002103 u32 type;
2104 union {
2105 struct crypto_alg crypto;
Lee Nipperacbf7c622010-05-19 19:19:33 +10002106 struct ahash_alg hash;
Herbert Xuaeb4c132015-07-30 17:53:22 +08002107 struct aead_alg aead;
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002108 } alg;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002109 __be32 desc_hdr_template;
2110};
2111
2112static struct talitos_alg_template driver_algs[] = {
Horia Geanta991155b2013-03-20 16:31:38 +02002113 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002114 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002115 .alg.aead = {
2116 .base = {
2117 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2118 .cra_driver_name = "authenc-hmac-sha1-"
2119 "cbc-aes-talitos",
2120 .cra_blocksize = AES_BLOCK_SIZE,
2121 .cra_flags = CRYPTO_ALG_ASYNC,
2122 },
2123 .ivsize = AES_BLOCK_SIZE,
2124 .maxauthsize = SHA1_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002125 },
Kim Phillips9c4a7962008-06-23 19:50:15 +08002126 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2127 DESC_HDR_SEL0_AESU |
2128 DESC_HDR_MODE0_AESU_CBC |
2129 DESC_HDR_SEL1_MDEUA |
2130 DESC_HDR_MODE1_MDEU_INIT |
2131 DESC_HDR_MODE1_MDEU_PAD |
2132 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper70bcaca2008-07-03 19:08:46 +08002133 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002134 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002135 .alg.aead = {
2136 .base = {
2137 .cra_name = "authenc(hmac(sha1),"
2138 "cbc(des3_ede))",
2139 .cra_driver_name = "authenc-hmac-sha1-"
2140 "cbc-3des-talitos",
2141 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2142 .cra_flags = CRYPTO_ALG_ASYNC,
2143 },
2144 .ivsize = DES3_EDE_BLOCK_SIZE,
2145 .maxauthsize = SHA1_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002146 },
Lee Nipper70bcaca2008-07-03 19:08:46 +08002147 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2148 DESC_HDR_SEL0_DEU |
2149 DESC_HDR_MODE0_DEU_CBC |
2150 DESC_HDR_MODE0_DEU_3DES |
2151 DESC_HDR_SEL1_MDEUA |
2152 DESC_HDR_MODE1_MDEU_INIT |
2153 DESC_HDR_MODE1_MDEU_PAD |
2154 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper3952f172008-07-10 18:29:18 +08002155 },
Horia Geanta357fb602012-07-03 19:16:53 +03002156 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002157 .alg.aead = {
2158 .base = {
2159 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2160 .cra_driver_name = "authenc-hmac-sha224-"
2161 "cbc-aes-talitos",
2162 .cra_blocksize = AES_BLOCK_SIZE,
2163 .cra_flags = CRYPTO_ALG_ASYNC,
2164 },
2165 .ivsize = AES_BLOCK_SIZE,
2166 .maxauthsize = SHA224_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002167 },
2168 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2169 DESC_HDR_SEL0_AESU |
2170 DESC_HDR_MODE0_AESU_CBC |
2171 DESC_HDR_SEL1_MDEUA |
2172 DESC_HDR_MODE1_MDEU_INIT |
2173 DESC_HDR_MODE1_MDEU_PAD |
2174 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2175 },
2176 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002177 .alg.aead = {
2178 .base = {
2179 .cra_name = "authenc(hmac(sha224),"
2180 "cbc(des3_ede))",
2181 .cra_driver_name = "authenc-hmac-sha224-"
2182 "cbc-3des-talitos",
2183 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2184 .cra_flags = CRYPTO_ALG_ASYNC,
2185 },
2186 .ivsize = DES3_EDE_BLOCK_SIZE,
2187 .maxauthsize = SHA224_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002188 },
2189 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2190 DESC_HDR_SEL0_DEU |
2191 DESC_HDR_MODE0_DEU_CBC |
2192 DESC_HDR_MODE0_DEU_3DES |
2193 DESC_HDR_SEL1_MDEUA |
2194 DESC_HDR_MODE1_MDEU_INIT |
2195 DESC_HDR_MODE1_MDEU_PAD |
2196 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2197 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002198 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002199 .alg.aead = {
2200 .base = {
2201 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2202 .cra_driver_name = "authenc-hmac-sha256-"
2203 "cbc-aes-talitos",
2204 .cra_blocksize = AES_BLOCK_SIZE,
2205 .cra_flags = CRYPTO_ALG_ASYNC,
2206 },
2207 .ivsize = AES_BLOCK_SIZE,
2208 .maxauthsize = SHA256_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002209 },
Lee Nipper3952f172008-07-10 18:29:18 +08002210 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2211 DESC_HDR_SEL0_AESU |
2212 DESC_HDR_MODE0_AESU_CBC |
2213 DESC_HDR_SEL1_MDEUA |
2214 DESC_HDR_MODE1_MDEU_INIT |
2215 DESC_HDR_MODE1_MDEU_PAD |
2216 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2217 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002218 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002219 .alg.aead = {
2220 .base = {
2221 .cra_name = "authenc(hmac(sha256),"
2222 "cbc(des3_ede))",
2223 .cra_driver_name = "authenc-hmac-sha256-"
2224 "cbc-3des-talitos",
2225 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2226 .cra_flags = CRYPTO_ALG_ASYNC,
2227 },
2228 .ivsize = DES3_EDE_BLOCK_SIZE,
2229 .maxauthsize = SHA256_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002230 },
Lee Nipper3952f172008-07-10 18:29:18 +08002231 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2232 DESC_HDR_SEL0_DEU |
2233 DESC_HDR_MODE0_DEU_CBC |
2234 DESC_HDR_MODE0_DEU_3DES |
2235 DESC_HDR_SEL1_MDEUA |
2236 DESC_HDR_MODE1_MDEU_INIT |
2237 DESC_HDR_MODE1_MDEU_PAD |
2238 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2239 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002240 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002241 .alg.aead = {
2242 .base = {
2243 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2244 .cra_driver_name = "authenc-hmac-sha384-"
2245 "cbc-aes-talitos",
2246 .cra_blocksize = AES_BLOCK_SIZE,
2247 .cra_flags = CRYPTO_ALG_ASYNC,
2248 },
2249 .ivsize = AES_BLOCK_SIZE,
2250 .maxauthsize = SHA384_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002251 },
2252 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2253 DESC_HDR_SEL0_AESU |
2254 DESC_HDR_MODE0_AESU_CBC |
2255 DESC_HDR_SEL1_MDEUB |
2256 DESC_HDR_MODE1_MDEU_INIT |
2257 DESC_HDR_MODE1_MDEU_PAD |
2258 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2259 },
2260 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002261 .alg.aead = {
2262 .base = {
2263 .cra_name = "authenc(hmac(sha384),"
2264 "cbc(des3_ede))",
2265 .cra_driver_name = "authenc-hmac-sha384-"
2266 "cbc-3des-talitos",
2267 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2268 .cra_flags = CRYPTO_ALG_ASYNC,
2269 },
2270 .ivsize = DES3_EDE_BLOCK_SIZE,
2271 .maxauthsize = SHA384_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002272 },
2273 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2274 DESC_HDR_SEL0_DEU |
2275 DESC_HDR_MODE0_DEU_CBC |
2276 DESC_HDR_MODE0_DEU_3DES |
2277 DESC_HDR_SEL1_MDEUB |
2278 DESC_HDR_MODE1_MDEU_INIT |
2279 DESC_HDR_MODE1_MDEU_PAD |
2280 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2281 },
2282 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002283 .alg.aead = {
2284 .base = {
2285 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2286 .cra_driver_name = "authenc-hmac-sha512-"
2287 "cbc-aes-talitos",
2288 .cra_blocksize = AES_BLOCK_SIZE,
2289 .cra_flags = CRYPTO_ALG_ASYNC,
2290 },
2291 .ivsize = AES_BLOCK_SIZE,
2292 .maxauthsize = SHA512_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002293 },
2294 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2295 DESC_HDR_SEL0_AESU |
2296 DESC_HDR_MODE0_AESU_CBC |
2297 DESC_HDR_SEL1_MDEUB |
2298 DESC_HDR_MODE1_MDEU_INIT |
2299 DESC_HDR_MODE1_MDEU_PAD |
2300 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2301 },
2302 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002303 .alg.aead = {
2304 .base = {
2305 .cra_name = "authenc(hmac(sha512),"
2306 "cbc(des3_ede))",
2307 .cra_driver_name = "authenc-hmac-sha512-"
2308 "cbc-3des-talitos",
2309 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2310 .cra_flags = CRYPTO_ALG_ASYNC,
2311 },
2312 .ivsize = DES3_EDE_BLOCK_SIZE,
2313 .maxauthsize = SHA512_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002314 },
2315 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2316 DESC_HDR_SEL0_DEU |
2317 DESC_HDR_MODE0_DEU_CBC |
2318 DESC_HDR_MODE0_DEU_3DES |
2319 DESC_HDR_SEL1_MDEUB |
2320 DESC_HDR_MODE1_MDEU_INIT |
2321 DESC_HDR_MODE1_MDEU_PAD |
2322 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2323 },
2324 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002325 .alg.aead = {
2326 .base = {
2327 .cra_name = "authenc(hmac(md5),cbc(aes))",
2328 .cra_driver_name = "authenc-hmac-md5-"
2329 "cbc-aes-talitos",
2330 .cra_blocksize = AES_BLOCK_SIZE,
2331 .cra_flags = CRYPTO_ALG_ASYNC,
2332 },
2333 .ivsize = AES_BLOCK_SIZE,
2334 .maxauthsize = MD5_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002335 },
Lee Nipper3952f172008-07-10 18:29:18 +08002336 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2337 DESC_HDR_SEL0_AESU |
2338 DESC_HDR_MODE0_AESU_CBC |
2339 DESC_HDR_SEL1_MDEUA |
2340 DESC_HDR_MODE1_MDEU_INIT |
2341 DESC_HDR_MODE1_MDEU_PAD |
2342 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2343 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002344 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002345 .alg.aead = {
2346 .base = {
2347 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2348 .cra_driver_name = "authenc-hmac-md5-"
2349 "cbc-3des-talitos",
2350 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2351 .cra_flags = CRYPTO_ALG_ASYNC,
2352 },
2353 .ivsize = DES3_EDE_BLOCK_SIZE,
2354 .maxauthsize = MD5_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002355 },
Lee Nipper3952f172008-07-10 18:29:18 +08002356 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2357 DESC_HDR_SEL0_DEU |
2358 DESC_HDR_MODE0_DEU_CBC |
2359 DESC_HDR_MODE0_DEU_3DES |
2360 DESC_HDR_SEL1_MDEUA |
2361 DESC_HDR_MODE1_MDEU_INIT |
2362 DESC_HDR_MODE1_MDEU_PAD |
2363 DESC_HDR_MODE1_MDEU_MD5_HMAC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002364 },
2365 /* ABLKCIPHER algorithms. */
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002366 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2367 .alg.crypto = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002368 .cra_name = "cbc(aes)",
2369 .cra_driver_name = "cbc-aes-talitos",
2370 .cra_blocksize = AES_BLOCK_SIZE,
2371 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2372 CRYPTO_ALG_ASYNC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002373 .cra_ablkcipher = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002374 .min_keysize = AES_MIN_KEY_SIZE,
2375 .max_keysize = AES_MAX_KEY_SIZE,
2376 .ivsize = AES_BLOCK_SIZE,
2377 }
2378 },
2379 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2380 DESC_HDR_SEL0_AESU |
2381 DESC_HDR_MODE0_AESU_CBC,
2382 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002383 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2384 .alg.crypto = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002385 .cra_name = "cbc(des3_ede)",
2386 .cra_driver_name = "cbc-3des-talitos",
2387 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2388 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2389 CRYPTO_ALG_ASYNC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002390 .cra_ablkcipher = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002391 .min_keysize = DES3_EDE_KEY_SIZE,
2392 .max_keysize = DES3_EDE_KEY_SIZE,
2393 .ivsize = DES3_EDE_BLOCK_SIZE,
2394 }
2395 },
2396 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2397 DESC_HDR_SEL0_DEU |
2398 DESC_HDR_MODE0_DEU_CBC |
2399 DESC_HDR_MODE0_DEU_3DES,
Lee Nipper497f2e62010-05-19 19:20:36 +10002400 },
2401 /* AHASH algorithms. */
2402 { .type = CRYPTO_ALG_TYPE_AHASH,
2403 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002404 .halg.digestsize = MD5_DIGEST_SIZE,
2405 .halg.base = {
2406 .cra_name = "md5",
2407 .cra_driver_name = "md5-talitos",
Martin Hicksb3988612015-03-03 08:21:34 -05002408 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
Lee Nipper497f2e62010-05-19 19:20:36 +10002409 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2410 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002411 }
2412 },
2413 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2414 DESC_HDR_SEL0_MDEUA |
2415 DESC_HDR_MODE0_MDEU_MD5,
2416 },
2417 { .type = CRYPTO_ALG_TYPE_AHASH,
2418 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002419 .halg.digestsize = SHA1_DIGEST_SIZE,
2420 .halg.base = {
2421 .cra_name = "sha1",
2422 .cra_driver_name = "sha1-talitos",
2423 .cra_blocksize = SHA1_BLOCK_SIZE,
2424 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2425 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002426 }
2427 },
2428 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2429 DESC_HDR_SEL0_MDEUA |
2430 DESC_HDR_MODE0_MDEU_SHA1,
2431 },
2432 { .type = CRYPTO_ALG_TYPE_AHASH,
2433 .alg.hash = {
Kim Phillips60f208d2010-05-19 19:21:53 +10002434 .halg.digestsize = SHA224_DIGEST_SIZE,
2435 .halg.base = {
2436 .cra_name = "sha224",
2437 .cra_driver_name = "sha224-talitos",
2438 .cra_blocksize = SHA224_BLOCK_SIZE,
2439 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2440 CRYPTO_ALG_ASYNC,
Kim Phillips60f208d2010-05-19 19:21:53 +10002441 }
2442 },
2443 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2444 DESC_HDR_SEL0_MDEUA |
2445 DESC_HDR_MODE0_MDEU_SHA224,
2446 },
2447 { .type = CRYPTO_ALG_TYPE_AHASH,
2448 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002449 .halg.digestsize = SHA256_DIGEST_SIZE,
2450 .halg.base = {
2451 .cra_name = "sha256",
2452 .cra_driver_name = "sha256-talitos",
2453 .cra_blocksize = SHA256_BLOCK_SIZE,
2454 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2455 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002456 }
2457 },
2458 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2459 DESC_HDR_SEL0_MDEUA |
2460 DESC_HDR_MODE0_MDEU_SHA256,
2461 },
2462 { .type = CRYPTO_ALG_TYPE_AHASH,
2463 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002464 .halg.digestsize = SHA384_DIGEST_SIZE,
2465 .halg.base = {
2466 .cra_name = "sha384",
2467 .cra_driver_name = "sha384-talitos",
2468 .cra_blocksize = SHA384_BLOCK_SIZE,
2469 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2470 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002471 }
2472 },
2473 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2474 DESC_HDR_SEL0_MDEUB |
2475 DESC_HDR_MODE0_MDEUB_SHA384,
2476 },
2477 { .type = CRYPTO_ALG_TYPE_AHASH,
2478 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002479 .halg.digestsize = SHA512_DIGEST_SIZE,
2480 .halg.base = {
2481 .cra_name = "sha512",
2482 .cra_driver_name = "sha512-talitos",
2483 .cra_blocksize = SHA512_BLOCK_SIZE,
2484 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2485 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002486 }
2487 },
2488 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2489 DESC_HDR_SEL0_MDEUB |
2490 DESC_HDR_MODE0_MDEUB_SHA512,
2491 },
Lee Nipper79b3a412011-11-21 16:13:25 +08002492 { .type = CRYPTO_ALG_TYPE_AHASH,
2493 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002494 .halg.digestsize = MD5_DIGEST_SIZE,
2495 .halg.base = {
2496 .cra_name = "hmac(md5)",
2497 .cra_driver_name = "hmac-md5-talitos",
Martin Hicksb3988612015-03-03 08:21:34 -05002498 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
Lee Nipper79b3a412011-11-21 16:13:25 +08002499 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2500 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002501 }
2502 },
2503 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2504 DESC_HDR_SEL0_MDEUA |
2505 DESC_HDR_MODE0_MDEU_MD5,
2506 },
2507 { .type = CRYPTO_ALG_TYPE_AHASH,
2508 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002509 .halg.digestsize = SHA1_DIGEST_SIZE,
2510 .halg.base = {
2511 .cra_name = "hmac(sha1)",
2512 .cra_driver_name = "hmac-sha1-talitos",
2513 .cra_blocksize = SHA1_BLOCK_SIZE,
2514 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2515 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002516 }
2517 },
2518 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2519 DESC_HDR_SEL0_MDEUA |
2520 DESC_HDR_MODE0_MDEU_SHA1,
2521 },
2522 { .type = CRYPTO_ALG_TYPE_AHASH,
2523 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002524 .halg.digestsize = SHA224_DIGEST_SIZE,
2525 .halg.base = {
2526 .cra_name = "hmac(sha224)",
2527 .cra_driver_name = "hmac-sha224-talitos",
2528 .cra_blocksize = SHA224_BLOCK_SIZE,
2529 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2530 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002531 }
2532 },
2533 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2534 DESC_HDR_SEL0_MDEUA |
2535 DESC_HDR_MODE0_MDEU_SHA224,
2536 },
2537 { .type = CRYPTO_ALG_TYPE_AHASH,
2538 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002539 .halg.digestsize = SHA256_DIGEST_SIZE,
2540 .halg.base = {
2541 .cra_name = "hmac(sha256)",
2542 .cra_driver_name = "hmac-sha256-talitos",
2543 .cra_blocksize = SHA256_BLOCK_SIZE,
2544 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2545 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002546 }
2547 },
2548 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2549 DESC_HDR_SEL0_MDEUA |
2550 DESC_HDR_MODE0_MDEU_SHA256,
2551 },
2552 { .type = CRYPTO_ALG_TYPE_AHASH,
2553 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002554 .halg.digestsize = SHA384_DIGEST_SIZE,
2555 .halg.base = {
2556 .cra_name = "hmac(sha384)",
2557 .cra_driver_name = "hmac-sha384-talitos",
2558 .cra_blocksize = SHA384_BLOCK_SIZE,
2559 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2560 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002561 }
2562 },
2563 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2564 DESC_HDR_SEL0_MDEUB |
2565 DESC_HDR_MODE0_MDEUB_SHA384,
2566 },
2567 { .type = CRYPTO_ALG_TYPE_AHASH,
2568 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002569 .halg.digestsize = SHA512_DIGEST_SIZE,
2570 .halg.base = {
2571 .cra_name = "hmac(sha512)",
2572 .cra_driver_name = "hmac-sha512-talitos",
2573 .cra_blocksize = SHA512_BLOCK_SIZE,
2574 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2575 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002576 }
2577 },
2578 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2579 DESC_HDR_SEL0_MDEUB |
2580 DESC_HDR_MODE0_MDEUB_SHA512,
2581 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08002582};
2583
2584struct talitos_crypto_alg {
2585 struct list_head entry;
2586 struct device *dev;
Lee Nipperacbf7c622010-05-19 19:19:33 +10002587 struct talitos_alg_template algt;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002588};
2589
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	talitos_cra_init(crypto_aead_tfm(tfm));
	return 0;
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

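/*
 * tear down in roughly the reverse order of talitos_probe(): unregister the
 * algorithms and the RNG, free the per-channel fifos, release the IRQs,
 * kill the done tasklets and unmap the registers.  This is also used as the
 * error path of talitos_probe().
 */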
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; priv->chan && i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

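/*
 * allocate a talitos_crypto_alg from a driver_algs template and fill in the
 * crypto API entry points for the algorithm type; hash templates are also
 * fixed up here for hardware that lacks HMAC or SHA-224 hardware-init
 * support.
 */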
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

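/*
 * map and request the interrupt line(s) described in the device tree node.
 * SEC1 and single-IRQ SEC2+ devices use one handler for all four channels;
 * when a second line is wired up, channels 0/2 and 1/3 are split across the
 * two IRQs.
 */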
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

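/*
 * probe: map the registers, read the SEC capabilities from the device tree,
 * set up IRQs, done tasklets and per-channel request fifos, reset the
 * hardware, then register the RNG and every algorithm the unit supports.
 */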
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

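	/*
	 * the properties read below come from the SEC node in the device
	 * tree.  An illustrative node only (values are SoC-specific; the
	 * exact numbers come from the board's fsl,sec* binding, not from
	 * this driver):
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		reg = <0x30000 0x10000>;
	 *		interrupts = <11 2>;
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
	 *	};
	 */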
	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

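	/*
	 * execution unit register blocks and the channel stride sit at
	 * different offsets on each SEC generation; a "fsl,sec1.2" node is
	 * expected to also claim "fsl,sec1.0" compatibility, so it has to be
	 * checked first.
	 */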
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

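	/*
	 * done notifications are processed in tasklet context: SEC1 uses a
	 * single tasklet for all four channels, SEC2+ uses one per IRQ line
	 * (channels 0/2 and 1/3) when two lines are wired up.
	 */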
	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

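	/*
	 * per-channel setup: point at the channel's register window,
	 * allocate its software request fifo (fifo_len entries), and start
	 * submit_count at -(chfifo_len - 1) so submissions are throttled to
	 * the hardware fifo depth.
	 */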
	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

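	/* descriptor pointers can carry 36-bit bus addresses (ptr + eptr) */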
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");