/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
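
/*
 * Usage sketch (illustrative only, not part of the driver): a caller builds
 * a dma-mapped descriptor and submits it with a completion callback, e.g.
 *
 *	ret = talitos_submit(dev, ctx->ch, &edesc->desc, my_done, req);
 *	if (ret != -EINPROGRESS)
 *		my_cleanup(edesc);	(undo mappings, free the edesc)
 *
 * where my_done() and my_cleanup() are hypothetical names; ipsec_esp() below
 * shows the real in-driver pattern, unmapping and freeing on failure.
 */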

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

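/*
 * Note on the masks tested above: on SEC2+ the per-channel "done" bits sit
 * at even positions 0/2/4/6 for channels 0-3 (the odd bits carry errors),
 * whereas on SEC1 the done bits checked above live in the upper half of the
 * register (0x10000000, 0x40000000, 0x00010000, 0x00040000).  Each
 * DEF_TALITOS*_DONE tasklet only flushes channels whose done bit is covered
 * by the mask it was instantiated with.
 */
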
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool src_chained;
	bool dst_chained;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		tbl_off += ret;

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
					 sg_link_tbl_len,
					 &edesc->link_tbl[tbl_off])) > 1) {
		tbl_off += ret;
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
	} else
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	edesc->icv_ool = false;

	if (sg_count > 1 &&
	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
					      areq->assoclen, cryptlen,
					      &edesc->link_tbl[tbl_off])) >
	    1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (edesc->src_nents + edesc->dst_nents +
				2) * sizeof(struct talitos_ptr) +
			       authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		edesc->icv_ool = true;
	} else
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0 && sg) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = sg_next(sg);
	}

	return sg_nents;
}

1247/*
Lee Nipper56af8cd2009-03-29 15:50:50 +08001248 * allocate and map the extended descriptor
Kim Phillips9c4a7962008-06-23 19:50:15 +08001249 */
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001250static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1251 struct scatterlist *src,
1252 struct scatterlist *dst,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001253 u8 *iv,
1254 unsigned int assoclen,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001255 unsigned int cryptlen,
1256 unsigned int authsize,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001257 unsigned int ivsize,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001258 int icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001259 u32 cryptoflags,
1260 bool encrypt)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001261{
Lee Nipper56af8cd2009-03-29 15:50:50 +08001262 struct talitos_edesc *edesc;
Herbert Xuaeb4c132015-07-30 17:53:22 +08001263 int src_nents, dst_nents, alloc_len, dma_len;
1264 bool src_chained = false, dst_chained = false;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001265 dma_addr_t iv_dma = 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001266 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
Kim Phillips586725f2008-07-17 20:19:18 +08001267 GFP_ATOMIC;
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001268 struct talitos_private *priv = dev_get_drvdata(dev);
1269 bool is_sec1 = has_ftr_sec1(priv);
1270 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001271
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001272 if (cryptlen + authsize > max_len) {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001273 dev_err(dev, "length exceeds h/w max limit\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08001274 return ERR_PTR(-EINVAL);
1275 }
1276
Horia Geanta935e99a2013-11-19 14:57:49 +02001277 if (ivsize)
Horia Geanta79fd31d2012-08-02 17:16:40 +03001278 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1279
Horia Geanta62293a32013-11-28 15:11:17 +02001280 if (!dst || dst == src) {
Herbert Xuaeb4c132015-07-30 17:53:22 +08001281 src_nents = sg_count(src, assoclen + cryptlen + authsize,
1282 &src_chained);
Horia Geanta62293a32013-11-28 15:11:17 +02001283 src_nents = (src_nents == 1) ? 0 : src_nents;
1284 dst_nents = dst ? src_nents : 0;
1285	} else { /* dst && dst != src */
Herbert Xuaeb4c132015-07-30 17:53:22 +08001286 src_nents = sg_count(src, assoclen + cryptlen +
1287 (encrypt ? 0 : authsize),
Horia Geanta62293a32013-11-28 15:11:17 +02001288 &src_chained);
1289 src_nents = (src_nents == 1) ? 0 : src_nents;
Herbert Xuaeb4c132015-07-30 17:53:22 +08001290 dst_nents = sg_count(dst, assoclen + cryptlen +
1291 (encrypt ? authsize : 0),
Horia Geanta62293a32013-11-28 15:11:17 +02001292 &dst_chained);
1293 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001294 }
1295
1296 /*
1297 * allocate space for base edesc plus the link tables,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001298 * allowing for two separate entries for AD and generated ICV (+ 2),
1299 * and space for two sets of ICVs (stashed and generated)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001300 */
Lee Nipper56af8cd2009-03-29 15:50:50 +08001301 alloc_len = sizeof(struct talitos_edesc);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001302 if (src_nents || dst_nents) {
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001303 if (is_sec1)
Dan Carpenter608f37d2015-05-11 13:10:09 +03001304 dma_len = (src_nents ? cryptlen : 0) +
1305 (dst_nents ? cryptlen : 0);
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001306 else
Herbert Xuaeb4c132015-07-30 17:53:22 +08001307 dma_len = (src_nents + dst_nents + 2) *
1308 sizeof(struct talitos_ptr) + authsize * 2;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001309 alloc_len += dma_len;
1310 } else {
1311 dma_len = 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001312 alloc_len += icv_stashing ? authsize : 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001313 }
1314
Kim Phillips586725f2008-07-17 20:19:18 +08001315 edesc = kmalloc(alloc_len, GFP_DMA | flags);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001316 if (!edesc) {
Horia Geanta79fd31d2012-08-02 17:16:40 +03001317 if (iv_dma)
1318 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Horia Geanta935e99a2013-11-19 14:57:49 +02001319
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001320 dev_err(dev, "could not allocate edescriptor\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08001321 return ERR_PTR(-ENOMEM);
1322 }
1323
1324 edesc->src_nents = src_nents;
1325 edesc->dst_nents = dst_nents;
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001326 edesc->src_chained = src_chained;
1327 edesc->dst_chained = dst_chained;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001328 edesc->iv_dma = iv_dma;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001329 edesc->dma_len = dma_len;
Lee Nipper497f2e62010-05-19 19:20:36 +10001330 if (dma_len)
1331 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1332 edesc->dma_len,
1333 DMA_BIDIRECTIONAL);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001334
1335 return edesc;
1336}
1337
Horia Geanta79fd31d2012-08-02 17:16:40 +03001338static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
Horia Geanta62293a32013-11-28 15:11:17 +02001339 int icv_stashing, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001340{
1341 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001342 unsigned int authsize = crypto_aead_authsize(authenc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001343 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001344 unsigned int ivsize = crypto_aead_ivsize(authenc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001345
Herbert Xuaeb4c132015-07-30 17:53:22 +08001346 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001347 iv, areq->assoclen, areq->cryptlen,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001348 authsize, ivsize, icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001349 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001350}
1351
Lee Nipper56af8cd2009-03-29 15:50:50 +08001352static int aead_encrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001353{
1354 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1355 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001356 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001357
1358 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001359 edesc = aead_edesc_alloc(req, req->iv, 0, true);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001360 if (IS_ERR(edesc))
1361 return PTR_ERR(edesc);
1362
1363 /* set encrypt */
Lee Nipper70bcaca2008-07-03 19:08:46 +08001364 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001365
Herbert Xuaeb4c132015-07-30 17:53:22 +08001366 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001367}
1368
Lee Nipper56af8cd2009-03-29 15:50:50 +08001369static int aead_decrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001370{
1371 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001372 unsigned int authsize = crypto_aead_authsize(authenc);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001373 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001374 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001375 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001376 struct scatterlist *sg;
1377 void *icvdata;
1378
1379 req->cryptlen -= authsize;
1380
1381 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001382 edesc = aead_edesc_alloc(req, req->iv, 1, false);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001383 if (IS_ERR(edesc))
1384 return PTR_ERR(edesc);
1385
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001386 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
Kim Phillipse938e462009-03-29 15:53:23 +08001387 ((!edesc->src_nents && !edesc->dst_nents) ||
1388 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
Kim Phillips9c4a7962008-06-23 19:50:15 +08001389
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001390 /* decrypt and check the ICV */
Kim Phillipse938e462009-03-29 15:53:23 +08001391 edesc->desc.hdr = ctx->desc_hdr_template |
1392 DESC_HDR_DIR_INBOUND |
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001393 DESC_HDR_MODE1_MDEU_CICV;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001394
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001395 /* reset integrity check result bits */
1396 edesc->desc.hdr_lo = 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001397
Herbert Xuaeb4c132015-07-30 17:53:22 +08001398 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001399 }
Kim Phillipse938e462009-03-29 15:53:23 +08001400
1401 /* Have to check the ICV with software */
1402 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1403
1404 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1405 if (edesc->dma_len)
Herbert Xuaeb4c132015-07-30 17:53:22 +08001406 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1407 edesc->dst_nents + 2];
Kim Phillipse938e462009-03-29 15:53:23 +08001408 else
1409 icvdata = &edesc->link_tbl[0];
1410
1411 sg = sg_last(req->src, edesc->src_nents ? : 1);
1412
Herbert Xuaeb4c132015-07-30 17:53:22 +08001413 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
Kim Phillipse938e462009-03-29 15:53:23 +08001414
Herbert Xuaeb4c132015-07-30 17:53:22 +08001415 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001416}
1417
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001418static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1419 const u8 *key, unsigned int keylen)
1420{
1421 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001422
1423 memcpy(&ctx->key, key, keylen);
1424 ctx->keylen = keylen;
1425
1426 return 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001427}
1428
LEROY Christophe032d1972015-04-17 16:31:51 +02001429static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1430 struct scatterlist *dst, unsigned int len,
1431 struct talitos_edesc *edesc)
1432{
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001433 struct talitos_private *priv = dev_get_drvdata(dev);
1434 bool is_sec1 = has_ftr_sec1(priv);
1435
1436 if (is_sec1) {
1437 if (!edesc->src_nents) {
1438 dma_unmap_sg(dev, src, 1,
1439 dst != src ? DMA_TO_DEVICE
1440 : DMA_BIDIRECTIONAL);
1441 }
1442 if (dst && edesc->dst_nents) {
1443 dma_sync_single_for_device(dev,
1444 edesc->dma_link_tbl + len,
1445 len, DMA_FROM_DEVICE);
1446 sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1447 edesc->buf + len, len);
1448 } else if (dst && dst != src) {
1449 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1450 }
1451 } else {
1452 talitos_sg_unmap(dev, edesc, src, dst);
1453 }
LEROY Christophe032d1972015-04-17 16:31:51 +02001454}
1455
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001456static void common_nonsnoop_unmap(struct device *dev,
1457 struct talitos_edesc *edesc,
1458 struct ablkcipher_request *areq)
1459{
1460 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
LEROY Christophe032d1972015-04-17 16:31:51 +02001461
1462 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001463 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1464 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1465
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001466 if (edesc->dma_len)
1467 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1468 DMA_BIDIRECTIONAL);
1469}
1470
1471static void ablkcipher_done(struct device *dev,
1472 struct talitos_desc *desc, void *context,
1473 int err)
1474{
1475 struct ablkcipher_request *areq = context;
Kim Phillips19bbbc62009-03-29 15:53:59 +08001476 struct talitos_edesc *edesc;
1477
1478 edesc = container_of(desc, struct talitos_edesc, desc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001479
1480 common_nonsnoop_unmap(dev, edesc, areq);
1481
1482 kfree(edesc);
1483
1484 areq->base.complete(&areq->base, err);
1485}
1486
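/*
 * Point a descriptor entry at the request source.  SEC1 is handled with a
 * flat bounce buffer: multi-entry scatterlists are copied into edesc->buf.
 * SEC2+ maps the scatterlist and, when more than one segment remains, builds
 * a link table and sets DESC_PTR_LNKTBL_JUMP in the pointer.
 */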
LEROY Christophe032d1972015-04-17 16:31:51 +02001487int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1488 unsigned int len, struct talitos_edesc *edesc,
1489 enum dma_data_direction dir, struct talitos_ptr *ptr)
1490{
1491 int sg_count;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001492 struct talitos_private *priv = dev_get_drvdata(dev);
1493 bool is_sec1 = has_ftr_sec1(priv);
LEROY Christophe032d1972015-04-17 16:31:51 +02001494
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001495 to_talitos_ptr_len(ptr, len, is_sec1);
LEROY Christophe032d1972015-04-17 16:31:51 +02001496
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001497 if (is_sec1) {
1498 sg_count = edesc->src_nents ? : 1;
LEROY Christophe032d1972015-04-17 16:31:51 +02001499
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001500 if (sg_count == 1) {
1501 dma_map_sg(dev, src, 1, dir);
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001502 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001503 } else {
1504 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1505 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1506 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1507 len, DMA_TO_DEVICE);
1508 }
1509 } else {
1510 to_talitos_ptr_extent_clear(ptr, is_sec1);
1511
1512 sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
1513 edesc->src_chained);
1514
1515 if (sg_count == 1) {
1516 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1517 } else {
1518 sg_count = sg_to_link_tbl(src, sg_count, len,
1519 &edesc->link_tbl[0]);
1520 if (sg_count > 1) {
1521 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1522 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1523 dma_sync_single_for_device(dev,
1524 edesc->dma_link_tbl,
1525 edesc->dma_len,
1526 DMA_BIDIRECTIONAL);
1527 } else {
1528				/* Only one segment now, so no link tbl needed */
1529 to_talitos_ptr(ptr, sg_dma_address(src),
1530 is_sec1);
1531 }
LEROY Christophe032d1972015-04-17 16:31:51 +02001532 }
1533 }
1534 return sg_count;
1535}
1536
1537void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1538 unsigned int len, struct talitos_edesc *edesc,
1539 enum dma_data_direction dir,
1540 struct talitos_ptr *ptr, int sg_count)
1541{
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001542 struct talitos_private *priv = dev_get_drvdata(dev);
1543 bool is_sec1 = has_ftr_sec1(priv);
1544
LEROY Christophe032d1972015-04-17 16:31:51 +02001545 if (dir != DMA_NONE)
1546 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
1547 dir, edesc->dst_chained);
1548
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001549 to_talitos_ptr_len(ptr, len, is_sec1);
LEROY Christophe032d1972015-04-17 16:31:51 +02001550
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001551 if (is_sec1) {
1552 if (sg_count == 1) {
1553 if (dir != DMA_NONE)
1554 dma_map_sg(dev, dst, 1, dir);
1555 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1556 } else {
1557 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1558 dma_sync_single_for_device(dev,
1559 edesc->dma_link_tbl + len,
1560 len, DMA_FROM_DEVICE);
1561 }
1562 } else {
1563 to_talitos_ptr_extent_clear(ptr, is_sec1);
1564
1565 if (sg_count == 1) {
1566 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1567 } else {
1568 struct talitos_ptr *link_tbl_ptr =
1569 &edesc->link_tbl[edesc->src_nents + 1];
1570
1571 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1572 (edesc->src_nents + 1) *
1573 sizeof(struct talitos_ptr), 0);
1574 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
Horia Geantă42e8b0d2015-05-11 20:04:56 +03001575			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001576 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1577 edesc->dma_len,
1578 DMA_BIDIRECTIONAL);
1579 }
LEROY Christophe032d1972015-04-17 16:31:51 +02001580 }
1581}
1582
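/*
 * Build the seven-dword ablkcipher descriptor: ptr[1] IV in, ptr[2] key,
 * ptr[3] data in, ptr[4] data out, ptr[5] IV out; ptr[0] and ptr[6] stay
 * empty for this descriptor type.
 */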
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001583static int common_nonsnoop(struct talitos_edesc *edesc,
1584 struct ablkcipher_request *areq,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001585 void (*callback) (struct device *dev,
1586 struct talitos_desc *desc,
1587 void *context, int error))
1588{
1589 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1590 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1591 struct device *dev = ctx->dev;
1592 struct talitos_desc *desc = &edesc->desc;
1593 unsigned int cryptlen = areq->nbytes;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001594 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001595 int sg_count, ret;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001596 struct talitos_private *priv = dev_get_drvdata(dev);
1597 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001598
1599 /* first DWORD empty */
LEROY Christophe2529bc32015-04-17 16:31:49 +02001600 desc->ptr[0] = zero_entry;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001601
1602 /* cipher iv */
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001603 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1604 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1605 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001606
1607 /* cipher key */
1608 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001609 (char *)&ctx->key, DMA_TO_DEVICE);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001610
1611 /*
1612 * cipher in
1613 */
LEROY Christophe032d1972015-04-17 16:31:51 +02001614 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1615 (areq->src == areq->dst) ?
1616 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1617 &desc->ptr[3]);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001618
1619 /* cipher out */
LEROY Christophe032d1972015-04-17 16:31:51 +02001620 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1621 (areq->src == areq->dst) ? DMA_NONE
1622 : DMA_FROM_DEVICE,
1623 &desc->ptr[4], sg_count);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001624
1625 /* iv out */
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001626 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001627 DMA_FROM_DEVICE);
1628
1629 /* last DWORD empty */
LEROY Christophe2529bc32015-04-17 16:31:49 +02001630 desc->ptr[6] = zero_entry;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001631
Kim Phillips5228f0f2011-07-15 11:21:38 +08001632 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001633 if (ret != -EINPROGRESS) {
1634 common_nonsnoop_unmap(dev, edesc, areq);
1635 kfree(edesc);
1636 }
1637 return ret;
1638}
1639
Kim Phillipse938e462009-03-29 15:53:23 +08001640static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
Horia Geanta62293a32013-11-28 15:11:17 +02001641 areq, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001642{
1643 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1644 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001645 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001646
Herbert Xuaeb4c132015-07-30 17:53:22 +08001647 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001648 areq->info, 0, areq->nbytes, 0, ivsize, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001649 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001650}
1651
1652static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1653{
1654 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1655 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1656 struct talitos_edesc *edesc;
1657
1658 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001659 edesc = ablkcipher_edesc_alloc(areq, true);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001660 if (IS_ERR(edesc))
1661 return PTR_ERR(edesc);
1662
1663 /* set encrypt */
1664 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1665
Kim Phillipsfebec542011-07-15 11:21:39 +08001666 return common_nonsnoop(edesc, areq, ablkcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001667}
1668
1669static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1670{
1671 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1672 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1673 struct talitos_edesc *edesc;
1674
1675 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001676 edesc = ablkcipher_edesc_alloc(areq, false);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001677 if (IS_ERR(edesc))
1678 return PTR_ERR(edesc);
1679
1680 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1681
Kim Phillipsfebec542011-07-15 11:21:39 +08001682 return common_nonsnoop(edesc, areq, ablkcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001683}
1684
Lee Nipper497f2e62010-05-19 19:20:36 +10001685static void common_nonsnoop_hash_unmap(struct device *dev,
1686 struct talitos_edesc *edesc,
1687 struct ahash_request *areq)
1688{
1689 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001690 struct talitos_private *priv = dev_get_drvdata(dev);
1691 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper497f2e62010-05-19 19:20:36 +10001692
1693 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1694
LEROY Christophe032d1972015-04-17 16:31:51 +02001695 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1696
Lee Nipper497f2e62010-05-19 19:20:36 +10001697 /* When using hashctx-in, must unmap it. */
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001698 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
Lee Nipper497f2e62010-05-19 19:20:36 +10001699 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1700 DMA_TO_DEVICE);
1701
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001702 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
Lee Nipper497f2e62010-05-19 19:20:36 +10001703 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1704 DMA_TO_DEVICE);
1705
Lee Nipper497f2e62010-05-19 19:20:36 +10001706 if (edesc->dma_len)
1707 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1708 DMA_BIDIRECTIONAL);
1709
1710}
1711
1712static void ahash_done(struct device *dev,
1713 struct talitos_desc *desc, void *context,
1714 int err)
1715{
1716 struct ahash_request *areq = context;
1717 struct talitos_edesc *edesc =
1718 container_of(desc, struct talitos_edesc, desc);
1719 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1720
1721 if (!req_ctx->last && req_ctx->to_hash_later) {
1722 /* Position any partial block for next update/final/finup */
1723 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
Lee Nipper5e833bc2010-06-16 15:29:15 +10001724 req_ctx->nbuf = req_ctx->to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10001725 }
1726 common_nonsnoop_hash_unmap(dev, edesc, areq);
1727
1728 kfree(edesc);
1729
1730 areq->base.complete(&areq->base, err);
1731}
1732
LEROY Christophe2d029052015-04-17 16:32:18 +02001733/*
1734 * SEC1 doesn't like hashing of a 0-sized message, so we do the padding
1735 * ourselves and submit a padded block
1736 */
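/*
 * The substitute block below is the standard MD5/SHA padding of an empty
 * message (a 1 bit, i.e. 0x80, followed by zeros); DESC_HDR_MODE0_MDEU_PAD
 * is cleared so the MDEU does not pad a second time.
 */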
1737void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1738 struct talitos_edesc *edesc,
1739 struct talitos_ptr *ptr)
1740{
1741 static u8 padded_hash[64] = {
1742 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1743 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1744 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1745 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1746 };
1747
1748 pr_err_once("Bug in SEC1, padding ourself\n");
1749 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1750 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1751 (char *)padded_hash, DMA_TO_DEVICE);
1752}
1753
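/*
 * Hash descriptor layout: ptr[1] carries the hardware context in (unless the
 * SEC is to initialize it on the first pass), ptr[2] the HMAC key if any,
 * ptr[3] the data, and ptr[5] either the final digest or the updated context.
 */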
Lee Nipper497f2e62010-05-19 19:20:36 +10001754static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1755 struct ahash_request *areq, unsigned int length,
1756 void (*callback) (struct device *dev,
1757 struct talitos_desc *desc,
1758 void *context, int error))
1759{
1760 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1761 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1762 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1763 struct device *dev = ctx->dev;
1764 struct talitos_desc *desc = &edesc->desc;
LEROY Christophe032d1972015-04-17 16:31:51 +02001765 int ret;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001766 struct talitos_private *priv = dev_get_drvdata(dev);
1767 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper497f2e62010-05-19 19:20:36 +10001768
1769 /* first DWORD empty */
1770 desc->ptr[0] = zero_entry;
1771
Kim Phillips60f208d2010-05-19 19:21:53 +10001772 /* hash context in */
1773 if (!req_ctx->first || req_ctx->swinit) {
Lee Nipper497f2e62010-05-19 19:20:36 +10001774 map_single_talitos_ptr(dev, &desc->ptr[1],
1775 req_ctx->hw_context_size,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001776 (char *)req_ctx->hw_context,
Lee Nipper497f2e62010-05-19 19:20:36 +10001777 DMA_TO_DEVICE);
Kim Phillips60f208d2010-05-19 19:21:53 +10001778 req_ctx->swinit = 0;
Lee Nipper497f2e62010-05-19 19:20:36 +10001779 } else {
1780 desc->ptr[1] = zero_entry;
1781 /* Indicate next op is not the first. */
1782 req_ctx->first = 0;
1783 }
1784
1785 /* HMAC key */
1786 if (ctx->keylen)
1787 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001788 (char *)&ctx->key, DMA_TO_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001789 else
1790 desc->ptr[2] = zero_entry;
1791
1792 /*
1793 * data in
1794 */
LEROY Christophe032d1972015-04-17 16:31:51 +02001795 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1796 DMA_TO_DEVICE, &desc->ptr[3]);
Lee Nipper497f2e62010-05-19 19:20:36 +10001797
1798 /* fifth DWORD empty */
1799 desc->ptr[4] = zero_entry;
1800
1801 /* hash/HMAC out -or- hash context out */
1802 if (req_ctx->last)
1803 map_single_talitos_ptr(dev, &desc->ptr[5],
1804 crypto_ahash_digestsize(tfm),
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001805 areq->result, DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001806 else
1807 map_single_talitos_ptr(dev, &desc->ptr[5],
1808 req_ctx->hw_context_size,
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001809 req_ctx->hw_context, DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001810
1811 /* last DWORD empty */
1812 desc->ptr[6] = zero_entry;
1813
LEROY Christophe2d029052015-04-17 16:32:18 +02001814 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1815 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1816
Kim Phillips5228f0f2011-07-15 11:21:38 +08001817 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10001818 if (ret != -EINPROGRESS) {
1819 common_nonsnoop_hash_unmap(dev, edesc, areq);
1820 kfree(edesc);
1821 }
1822 return ret;
1823}
1824
1825static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1826 unsigned int nbytes)
1827{
1828 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1829 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1830 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1831
Herbert Xuaeb4c132015-07-30 17:53:22 +08001832 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001833 nbytes, 0, 0, 0, areq->base.flags, false);
Lee Nipper497f2e62010-05-19 19:20:36 +10001834}
1835
1836static int ahash_init(struct ahash_request *areq)
1837{
1838 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1839 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1840
1841 /* Initialize the context */
Lee Nipper5e833bc2010-06-16 15:29:15 +10001842 req_ctx->nbuf = 0;
Kim Phillips60f208d2010-05-19 19:21:53 +10001843 req_ctx->first = 1; /* first indicates h/w must init its context */
1844 req_ctx->swinit = 0; /* assume h/w init of context */
Lee Nipper497f2e62010-05-19 19:20:36 +10001845 req_ctx->hw_context_size =
1846 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1847 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1848 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1849
1850 return 0;
1851}
1852
Kim Phillips60f208d2010-05-19 19:21:53 +10001853/*
1854 * on h/w without explicit sha224 support, we initialize h/w context
1855 * manually with sha224 constants, and tell it to run sha256.
1856 */
1857static int ahash_init_sha224_swinit(struct ahash_request *areq)
1858{
1859 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1860
1861 ahash_init(areq);
1862	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1863
Kim Phillipsa7524472010-09-23 15:56:38 +08001864 req_ctx->hw_context[0] = SHA224_H0;
1865 req_ctx->hw_context[1] = SHA224_H1;
1866 req_ctx->hw_context[2] = SHA224_H2;
1867 req_ctx->hw_context[3] = SHA224_H3;
1868 req_ctx->hw_context[4] = SHA224_H4;
1869 req_ctx->hw_context[5] = SHA224_H5;
1870 req_ctx->hw_context[6] = SHA224_H6;
1871 req_ctx->hw_context[7] = SHA224_H7;
Kim Phillips60f208d2010-05-19 19:21:53 +10001872
1873 /* init 64-bit count */
1874 req_ctx->hw_context[8] = 0;
1875 req_ctx->hw_context[9] = 0;
1876
1877 return 0;
1878}
1879
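/*
 * Requests are accumulated in req_ctx->buf until more than a full block is
 * available; except on the final pass, one block (or the sub-block remainder)
 * is always held back in bufnext so the hardware never sees a partial block
 * mid-stream.
 */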
Lee Nipper497f2e62010-05-19 19:20:36 +10001880static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1881{
1882 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1883 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1884 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1885 struct talitos_edesc *edesc;
1886 unsigned int blocksize =
1887 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1888 unsigned int nbytes_to_hash;
1889 unsigned int to_hash_later;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001890 unsigned int nsg;
Horia Geanta2a1cfe42012-08-02 17:16:39 +03001891 bool chained;
Lee Nipper497f2e62010-05-19 19:20:36 +10001892
Lee Nipper5e833bc2010-06-16 15:29:15 +10001893 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1894 /* Buffer up to one whole block */
Lee Nipper497f2e62010-05-19 19:20:36 +10001895 sg_copy_to_buffer(areq->src,
1896 sg_count(areq->src, nbytes, &chained),
Lee Nipper5e833bc2010-06-16 15:29:15 +10001897 req_ctx->buf + req_ctx->nbuf, nbytes);
1898 req_ctx->nbuf += nbytes;
Lee Nipper497f2e62010-05-19 19:20:36 +10001899 return 0;
1900 }
1901
Lee Nipper5e833bc2010-06-16 15:29:15 +10001902 /* At least (blocksize + 1) bytes are available to hash */
1903 nbytes_to_hash = nbytes + req_ctx->nbuf;
1904 to_hash_later = nbytes_to_hash & (blocksize - 1);
1905
1906 if (req_ctx->last)
1907 to_hash_later = 0;
1908 else if (to_hash_later)
1909 /* There is a partial block. Hash the full block(s) now */
1910 nbytes_to_hash -= to_hash_later;
1911 else {
1912 /* Keep one block buffered */
1913 nbytes_to_hash -= blocksize;
1914 to_hash_later = blocksize;
1915 }
1916
1917 /* Chain in any previously buffered data */
1918 if (req_ctx->nbuf) {
1919 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1920 sg_init_table(req_ctx->bufsl, nsg);
1921 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1922 if (nsg > 1)
1923 scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
Lee Nipper497f2e62010-05-19 19:20:36 +10001924 req_ctx->psrc = req_ctx->bufsl;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001925 } else
Lee Nipper497f2e62010-05-19 19:20:36 +10001926 req_ctx->psrc = areq->src;
Lee Nipper497f2e62010-05-19 19:20:36 +10001927
Lee Nipper5e833bc2010-06-16 15:29:15 +10001928 if (to_hash_later) {
1929 int nents = sg_count(areq->src, nbytes, &chained);
Akinobu Mitad0525722013-07-08 16:01:55 -07001930 sg_pcopy_to_buffer(areq->src, nents,
Lee Nipper5e833bc2010-06-16 15:29:15 +10001931 req_ctx->bufnext,
1932 to_hash_later,
1933 nbytes - to_hash_later);
Lee Nipper497f2e62010-05-19 19:20:36 +10001934 }
Lee Nipper5e833bc2010-06-16 15:29:15 +10001935 req_ctx->to_hash_later = to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10001936
Lee Nipper5e833bc2010-06-16 15:29:15 +10001937 /* Allocate extended descriptor */
Lee Nipper497f2e62010-05-19 19:20:36 +10001938 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1939 if (IS_ERR(edesc))
1940 return PTR_ERR(edesc);
1941
1942 edesc->desc.hdr = ctx->desc_hdr_template;
1943
1944 /* On last one, request SEC to pad; otherwise continue */
1945 if (req_ctx->last)
1946 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1947 else
1948 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1949
Kim Phillips60f208d2010-05-19 19:21:53 +10001950 /* request SEC to INIT hash. */
1951 if (req_ctx->first && !req_ctx->swinit)
Lee Nipper497f2e62010-05-19 19:20:36 +10001952 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1953
1954 /* When the tfm context has a keylen, it's an HMAC.
1955	 * A first or last (i.e. not middle) descriptor must request HMAC.
1956 */
1957 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1958 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1959
1960 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1961 ahash_done);
1962}
1963
1964static int ahash_update(struct ahash_request *areq)
1965{
1966 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1967
1968 req_ctx->last = 0;
1969
1970 return ahash_process_req(areq, areq->nbytes);
1971}
1972
1973static int ahash_final(struct ahash_request *areq)
1974{
1975 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1976
1977 req_ctx->last = 1;
1978
1979 return ahash_process_req(areq, 0);
1980}
1981
1982static int ahash_finup(struct ahash_request *areq)
1983{
1984 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1985
1986 req_ctx->last = 1;
1987
1988 return ahash_process_req(areq, areq->nbytes);
1989}
1990
1991static int ahash_digest(struct ahash_request *areq)
1992{
1993 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
Kim Phillips60f208d2010-05-19 19:21:53 +10001994 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10001995
Kim Phillips60f208d2010-05-19 19:21:53 +10001996 ahash->init(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10001997 req_ctx->last = 1;
1998
1999 return ahash_process_req(areq, areq->nbytes);
2000}
2001
Lee Nipper79b3a412011-11-21 16:13:25 +08002002struct keyhash_result {
2003 struct completion completion;
2004 int err;
2005};
2006
2007static void keyhash_complete(struct crypto_async_request *req, int err)
2008{
2009 struct keyhash_result *res = req->data;
2010
2011 if (err == -EINPROGRESS)
2012 return;
2013
2014 res->err = err;
2015 complete(&res->completion);
2016}
2017
2018static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2019 u8 *hash)
2020{
2021 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2022
2023 struct scatterlist sg[1];
2024 struct ahash_request *req;
2025 struct keyhash_result hresult;
2026 int ret;
2027
2028 init_completion(&hresult.completion);
2029
2030 req = ahash_request_alloc(tfm, GFP_KERNEL);
2031 if (!req)
2032 return -ENOMEM;
2033
2034 /* Keep tfm keylen == 0 during hash of the long key */
2035 ctx->keylen = 0;
2036 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2037 keyhash_complete, &hresult);
2038
2039 sg_init_one(&sg[0], key, keylen);
2040
2041 ahash_request_set_crypt(req, sg, hash, keylen);
2042 ret = crypto_ahash_digest(req);
2043 switch (ret) {
2044 case 0:
2045 break;
2046 case -EINPROGRESS:
2047 case -EBUSY:
2048 ret = wait_for_completion_interruptible(
2049 &hresult.completion);
2050 if (!ret)
2051 ret = hresult.err;
2052 break;
2053 default:
2054 break;
2055 }
2056 ahash_request_free(req);
2057
2058 return ret;
2059}
2060
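/*
 * Standard HMAC key handling: keys up to one block long are used as-is,
 * longer keys are first digested (via keyhash()) and the digest becomes
 * the key.
 */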
2061static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2062 unsigned int keylen)
2063{
2064 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2065 unsigned int blocksize =
2066 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2067 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2068 unsigned int keysize = keylen;
2069 u8 hash[SHA512_DIGEST_SIZE];
2070 int ret;
2071
2072 if (keylen <= blocksize)
2073 memcpy(ctx->key, key, keysize);
2074 else {
2075 /* Must get the hash of the long key */
2076 ret = keyhash(tfm, key, keylen, hash);
2077
2078 if (ret) {
2079 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2080 return -EINVAL;
2081 }
2082
2083 keysize = digestsize;
2084 memcpy(ctx->key, hash, digestsize);
2085 }
2086
2087 ctx->keylen = keysize;
2088
2089 return 0;
2090}
2091
2092
Kim Phillips9c4a7962008-06-23 19:50:15 +08002093struct talitos_alg_template {
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002094 u32 type;
2095 union {
2096 struct crypto_alg crypto;
Lee Nipperacbf7c622010-05-19 19:19:33 +10002097 struct ahash_alg hash;
Herbert Xuaeb4c132015-07-30 17:53:22 +08002098 struct aead_alg aead;
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002099 } alg;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002100 __be32 desc_hdr_template;
2101};
2102
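/*
 * Each template pairs a crypto API algorithm definition (AEAD, ablkcipher or
 * ahash) with the descriptor header that selects the execution units and
 * modes needed to run it; hw_supports() checks that header against the
 * capabilities described in the device tree node.
 */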
2103static struct talitos_alg_template driver_algs[] = {
Horia Geanta991155b2013-03-20 16:31:38 +02002104 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002105 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002106 .alg.aead = {
2107 .base = {
2108 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2109 .cra_driver_name = "authenc-hmac-sha1-"
2110 "cbc-aes-talitos",
2111 .cra_blocksize = AES_BLOCK_SIZE,
2112 .cra_flags = CRYPTO_ALG_ASYNC,
2113 },
2114 .ivsize = AES_BLOCK_SIZE,
2115 .maxauthsize = SHA1_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002116 },
Kim Phillips9c4a7962008-06-23 19:50:15 +08002117 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2118 DESC_HDR_SEL0_AESU |
2119 DESC_HDR_MODE0_AESU_CBC |
2120 DESC_HDR_SEL1_MDEUA |
2121 DESC_HDR_MODE1_MDEU_INIT |
2122 DESC_HDR_MODE1_MDEU_PAD |
2123 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper70bcaca2008-07-03 19:08:46 +08002124 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002125 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002126 .alg.aead = {
2127 .base = {
2128 .cra_name = "authenc(hmac(sha1),"
2129 "cbc(des3_ede))",
2130 .cra_driver_name = "authenc-hmac-sha1-"
2131 "cbc-3des-talitos",
2132 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2133 .cra_flags = CRYPTO_ALG_ASYNC,
2134 },
2135 .ivsize = DES3_EDE_BLOCK_SIZE,
2136 .maxauthsize = SHA1_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002137 },
Lee Nipper70bcaca2008-07-03 19:08:46 +08002138 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2139 DESC_HDR_SEL0_DEU |
2140 DESC_HDR_MODE0_DEU_CBC |
2141 DESC_HDR_MODE0_DEU_3DES |
2142 DESC_HDR_SEL1_MDEUA |
2143 DESC_HDR_MODE1_MDEU_INIT |
2144 DESC_HDR_MODE1_MDEU_PAD |
2145 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper3952f172008-07-10 18:29:18 +08002146 },
Horia Geanta357fb602012-07-03 19:16:53 +03002147 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002148 .alg.aead = {
2149 .base = {
2150 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2151 .cra_driver_name = "authenc-hmac-sha224-"
2152 "cbc-aes-talitos",
2153 .cra_blocksize = AES_BLOCK_SIZE,
2154 .cra_flags = CRYPTO_ALG_ASYNC,
2155 },
2156 .ivsize = AES_BLOCK_SIZE,
2157 .maxauthsize = SHA224_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002158 },
2159 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2160 DESC_HDR_SEL0_AESU |
2161 DESC_HDR_MODE0_AESU_CBC |
2162 DESC_HDR_SEL1_MDEUA |
2163 DESC_HDR_MODE1_MDEU_INIT |
2164 DESC_HDR_MODE1_MDEU_PAD |
2165 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2166 },
2167 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002168 .alg.aead = {
2169 .base = {
2170 .cra_name = "authenc(hmac(sha224),"
2171 "cbc(des3_ede))",
2172 .cra_driver_name = "authenc-hmac-sha224-"
2173 "cbc-3des-talitos",
2174 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2175 .cra_flags = CRYPTO_ALG_ASYNC,
2176 },
2177 .ivsize = DES3_EDE_BLOCK_SIZE,
2178 .maxauthsize = SHA224_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002179 },
2180 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2181 DESC_HDR_SEL0_DEU |
2182 DESC_HDR_MODE0_DEU_CBC |
2183 DESC_HDR_MODE0_DEU_3DES |
2184 DESC_HDR_SEL1_MDEUA |
2185 DESC_HDR_MODE1_MDEU_INIT |
2186 DESC_HDR_MODE1_MDEU_PAD |
2187 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2188 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002189 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002190 .alg.aead = {
2191 .base = {
2192 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2193 .cra_driver_name = "authenc-hmac-sha256-"
2194 "cbc-aes-talitos",
2195 .cra_blocksize = AES_BLOCK_SIZE,
2196 .cra_flags = CRYPTO_ALG_ASYNC,
2197 },
2198 .ivsize = AES_BLOCK_SIZE,
2199 .maxauthsize = SHA256_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002200 },
Lee Nipper3952f172008-07-10 18:29:18 +08002201 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2202 DESC_HDR_SEL0_AESU |
2203 DESC_HDR_MODE0_AESU_CBC |
2204 DESC_HDR_SEL1_MDEUA |
2205 DESC_HDR_MODE1_MDEU_INIT |
2206 DESC_HDR_MODE1_MDEU_PAD |
2207 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2208 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002209 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002210 .alg.aead = {
2211 .base = {
2212 .cra_name = "authenc(hmac(sha256),"
2213 "cbc(des3_ede))",
2214 .cra_driver_name = "authenc-hmac-sha256-"
2215 "cbc-3des-talitos",
2216 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2217 .cra_flags = CRYPTO_ALG_ASYNC,
2218 },
2219 .ivsize = DES3_EDE_BLOCK_SIZE,
2220 .maxauthsize = SHA256_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002221 },
Lee Nipper3952f172008-07-10 18:29:18 +08002222 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2223 DESC_HDR_SEL0_DEU |
2224 DESC_HDR_MODE0_DEU_CBC |
2225 DESC_HDR_MODE0_DEU_3DES |
2226 DESC_HDR_SEL1_MDEUA |
2227 DESC_HDR_MODE1_MDEU_INIT |
2228 DESC_HDR_MODE1_MDEU_PAD |
2229 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2230 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002231 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002232 .alg.aead = {
2233 .base = {
2234 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2235 .cra_driver_name = "authenc-hmac-sha384-"
2236 "cbc-aes-talitos",
2237 .cra_blocksize = AES_BLOCK_SIZE,
2238 .cra_flags = CRYPTO_ALG_ASYNC,
2239 },
2240 .ivsize = AES_BLOCK_SIZE,
2241 .maxauthsize = SHA384_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002242 },
2243 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2244 DESC_HDR_SEL0_AESU |
2245 DESC_HDR_MODE0_AESU_CBC |
2246 DESC_HDR_SEL1_MDEUB |
2247 DESC_HDR_MODE1_MDEU_INIT |
2248 DESC_HDR_MODE1_MDEU_PAD |
2249 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2250 },
2251 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002252 .alg.aead = {
2253 .base = {
2254 .cra_name = "authenc(hmac(sha384),"
2255 "cbc(des3_ede))",
2256 .cra_driver_name = "authenc-hmac-sha384-"
2257 "cbc-3des-talitos",
2258 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2259 .cra_flags = CRYPTO_ALG_ASYNC,
2260 },
2261 .ivsize = DES3_EDE_BLOCK_SIZE,
2262 .maxauthsize = SHA384_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002263 },
2264 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2265 DESC_HDR_SEL0_DEU |
2266 DESC_HDR_MODE0_DEU_CBC |
2267 DESC_HDR_MODE0_DEU_3DES |
2268 DESC_HDR_SEL1_MDEUB |
2269 DESC_HDR_MODE1_MDEU_INIT |
2270 DESC_HDR_MODE1_MDEU_PAD |
2271 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2272 },
2273 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002274 .alg.aead = {
2275 .base = {
2276 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2277 .cra_driver_name = "authenc-hmac-sha512-"
2278 "cbc-aes-talitos",
2279 .cra_blocksize = AES_BLOCK_SIZE,
2280 .cra_flags = CRYPTO_ALG_ASYNC,
2281 },
2282 .ivsize = AES_BLOCK_SIZE,
2283 .maxauthsize = SHA512_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002284 },
2285 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2286 DESC_HDR_SEL0_AESU |
2287 DESC_HDR_MODE0_AESU_CBC |
2288 DESC_HDR_SEL1_MDEUB |
2289 DESC_HDR_MODE1_MDEU_INIT |
2290 DESC_HDR_MODE1_MDEU_PAD |
2291 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2292 },
2293 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002294 .alg.aead = {
2295 .base = {
2296 .cra_name = "authenc(hmac(sha512),"
2297 "cbc(des3_ede))",
2298 .cra_driver_name = "authenc-hmac-sha512-"
2299 "cbc-3des-talitos",
2300 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2301 .cra_flags = CRYPTO_ALG_ASYNC,
2302 },
2303 .ivsize = DES3_EDE_BLOCK_SIZE,
2304 .maxauthsize = SHA512_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002305 },
2306 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2307 DESC_HDR_SEL0_DEU |
2308 DESC_HDR_MODE0_DEU_CBC |
2309 DESC_HDR_MODE0_DEU_3DES |
2310 DESC_HDR_SEL1_MDEUB |
2311 DESC_HDR_MODE1_MDEU_INIT |
2312 DESC_HDR_MODE1_MDEU_PAD |
2313 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2314 },
2315 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002316 .alg.aead = {
2317 .base = {
2318 .cra_name = "authenc(hmac(md5),cbc(aes))",
2319 .cra_driver_name = "authenc-hmac-md5-"
2320 "cbc-aes-talitos",
2321 .cra_blocksize = AES_BLOCK_SIZE,
2322 .cra_flags = CRYPTO_ALG_ASYNC,
2323 },
2324 .ivsize = AES_BLOCK_SIZE,
2325 .maxauthsize = MD5_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002326 },
Lee Nipper3952f172008-07-10 18:29:18 +08002327 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2328 DESC_HDR_SEL0_AESU |
2329 DESC_HDR_MODE0_AESU_CBC |
2330 DESC_HDR_SEL1_MDEUA |
2331 DESC_HDR_MODE1_MDEU_INIT |
2332 DESC_HDR_MODE1_MDEU_PAD |
2333 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2334 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002335 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002336 .alg.aead = {
2337 .base = {
2338 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2339 .cra_driver_name = "authenc-hmac-md5-"
2340 "cbc-3des-talitos",
2341 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2342 .cra_flags = CRYPTO_ALG_ASYNC,
2343 },
2344 .ivsize = DES3_EDE_BLOCK_SIZE,
2345 .maxauthsize = MD5_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002346 },
Lee Nipper3952f172008-07-10 18:29:18 +08002347 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2348 DESC_HDR_SEL0_DEU |
2349 DESC_HDR_MODE0_DEU_CBC |
2350 DESC_HDR_MODE0_DEU_3DES |
2351 DESC_HDR_SEL1_MDEUA |
2352 DESC_HDR_MODE1_MDEU_INIT |
2353 DESC_HDR_MODE1_MDEU_PAD |
2354 DESC_HDR_MODE1_MDEU_MD5_HMAC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002355 },
2356 /* ABLKCIPHER algorithms. */
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002357 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2358 .alg.crypto = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002359 .cra_name = "cbc(aes)",
2360 .cra_driver_name = "cbc-aes-talitos",
2361 .cra_blocksize = AES_BLOCK_SIZE,
2362 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2363 CRYPTO_ALG_ASYNC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002364 .cra_ablkcipher = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002365 .min_keysize = AES_MIN_KEY_SIZE,
2366 .max_keysize = AES_MAX_KEY_SIZE,
2367 .ivsize = AES_BLOCK_SIZE,
2368 }
2369 },
2370 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2371 DESC_HDR_SEL0_AESU |
2372 DESC_HDR_MODE0_AESU_CBC,
2373 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002374 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2375 .alg.crypto = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002376 .cra_name = "cbc(des3_ede)",
2377 .cra_driver_name = "cbc-3des-talitos",
2378 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2379 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2380 CRYPTO_ALG_ASYNC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002381 .cra_ablkcipher = {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002382 .min_keysize = DES3_EDE_KEY_SIZE,
2383 .max_keysize = DES3_EDE_KEY_SIZE,
2384 .ivsize = DES3_EDE_BLOCK_SIZE,
2385 }
2386 },
2387 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2388 DESC_HDR_SEL0_DEU |
2389 DESC_HDR_MODE0_DEU_CBC |
2390 DESC_HDR_MODE0_DEU_3DES,
Lee Nipper497f2e62010-05-19 19:20:36 +10002391 },
2392 /* AHASH algorithms. */
2393 { .type = CRYPTO_ALG_TYPE_AHASH,
2394 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002395 .halg.digestsize = MD5_DIGEST_SIZE,
2396 .halg.base = {
2397 .cra_name = "md5",
2398 .cra_driver_name = "md5-talitos",
Martin Hicksb3988612015-03-03 08:21:34 -05002399 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
Lee Nipper497f2e62010-05-19 19:20:36 +10002400 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2401 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002402 }
2403 },
2404 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2405 DESC_HDR_SEL0_MDEUA |
2406 DESC_HDR_MODE0_MDEU_MD5,
2407 },
2408 { .type = CRYPTO_ALG_TYPE_AHASH,
2409 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002410 .halg.digestsize = SHA1_DIGEST_SIZE,
2411 .halg.base = {
2412 .cra_name = "sha1",
2413 .cra_driver_name = "sha1-talitos",
2414 .cra_blocksize = SHA1_BLOCK_SIZE,
2415 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2416 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002417 }
2418 },
2419 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2420 DESC_HDR_SEL0_MDEUA |
2421 DESC_HDR_MODE0_MDEU_SHA1,
2422 },
2423 { .type = CRYPTO_ALG_TYPE_AHASH,
2424 .alg.hash = {
Kim Phillips60f208d2010-05-19 19:21:53 +10002425 .halg.digestsize = SHA224_DIGEST_SIZE,
2426 .halg.base = {
2427 .cra_name = "sha224",
2428 .cra_driver_name = "sha224-talitos",
2429 .cra_blocksize = SHA224_BLOCK_SIZE,
2430 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2431 CRYPTO_ALG_ASYNC,
Kim Phillips60f208d2010-05-19 19:21:53 +10002432 }
2433 },
2434 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2435 DESC_HDR_SEL0_MDEUA |
2436 DESC_HDR_MODE0_MDEU_SHA224,
2437 },
2438 { .type = CRYPTO_ALG_TYPE_AHASH,
2439 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002440 .halg.digestsize = SHA256_DIGEST_SIZE,
2441 .halg.base = {
2442 .cra_name = "sha256",
2443 .cra_driver_name = "sha256-talitos",
2444 .cra_blocksize = SHA256_BLOCK_SIZE,
2445 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2446 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002447 }
2448 },
2449 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2450 DESC_HDR_SEL0_MDEUA |
2451 DESC_HDR_MODE0_MDEU_SHA256,
2452 },
2453 { .type = CRYPTO_ALG_TYPE_AHASH,
2454 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002455 .halg.digestsize = SHA384_DIGEST_SIZE,
2456 .halg.base = {
2457 .cra_name = "sha384",
2458 .cra_driver_name = "sha384-talitos",
2459 .cra_blocksize = SHA384_BLOCK_SIZE,
2460 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2461 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002462 }
2463 },
2464 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2465 DESC_HDR_SEL0_MDEUB |
2466 DESC_HDR_MODE0_MDEUB_SHA384,
2467 },
2468 { .type = CRYPTO_ALG_TYPE_AHASH,
2469 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002470 .halg.digestsize = SHA512_DIGEST_SIZE,
2471 .halg.base = {
2472 .cra_name = "sha512",
2473 .cra_driver_name = "sha512-talitos",
2474 .cra_blocksize = SHA512_BLOCK_SIZE,
2475 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2476 CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002477 }
2478 },
2479 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2480 DESC_HDR_SEL0_MDEUB |
2481 DESC_HDR_MODE0_MDEUB_SHA512,
2482 },
Lee Nipper79b3a412011-11-21 16:13:25 +08002483 { .type = CRYPTO_ALG_TYPE_AHASH,
2484 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002485 .halg.digestsize = MD5_DIGEST_SIZE,
2486 .halg.base = {
2487 .cra_name = "hmac(md5)",
2488 .cra_driver_name = "hmac-md5-talitos",
Martin Hicksb3988612015-03-03 08:21:34 -05002489 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
Lee Nipper79b3a412011-11-21 16:13:25 +08002490 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2491 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002492 }
2493 },
2494 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2495 DESC_HDR_SEL0_MDEUA |
2496 DESC_HDR_MODE0_MDEU_MD5,
2497 },
2498 { .type = CRYPTO_ALG_TYPE_AHASH,
2499 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002500 .halg.digestsize = SHA1_DIGEST_SIZE,
2501 .halg.base = {
2502 .cra_name = "hmac(sha1)",
2503 .cra_driver_name = "hmac-sha1-talitos",
2504 .cra_blocksize = SHA1_BLOCK_SIZE,
2505 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2506 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002507 }
2508 },
2509 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2510 DESC_HDR_SEL0_MDEUA |
2511 DESC_HDR_MODE0_MDEU_SHA1,
2512 },
2513 { .type = CRYPTO_ALG_TYPE_AHASH,
2514 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002515 .halg.digestsize = SHA224_DIGEST_SIZE,
2516 .halg.base = {
2517 .cra_name = "hmac(sha224)",
2518 .cra_driver_name = "hmac-sha224-talitos",
2519 .cra_blocksize = SHA224_BLOCK_SIZE,
2520 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2521 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002522 }
2523 },
2524 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2525 DESC_HDR_SEL0_MDEUA |
2526 DESC_HDR_MODE0_MDEU_SHA224,
2527 },
2528 { .type = CRYPTO_ALG_TYPE_AHASH,
2529 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002530 .halg.digestsize = SHA256_DIGEST_SIZE,
2531 .halg.base = {
2532 .cra_name = "hmac(sha256)",
2533 .cra_driver_name = "hmac-sha256-talitos",
2534 .cra_blocksize = SHA256_BLOCK_SIZE,
2535 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2536 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002537 }
2538 },
2539 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2540 DESC_HDR_SEL0_MDEUA |
2541 DESC_HDR_MODE0_MDEU_SHA256,
2542 },
2543 { .type = CRYPTO_ALG_TYPE_AHASH,
2544 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002545 .halg.digestsize = SHA384_DIGEST_SIZE,
2546 .halg.base = {
2547 .cra_name = "hmac(sha384)",
2548 .cra_driver_name = "hmac-sha384-talitos",
2549 .cra_blocksize = SHA384_BLOCK_SIZE,
2550 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2551 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002552 }
2553 },
2554 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2555 DESC_HDR_SEL0_MDEUB |
2556 DESC_HDR_MODE0_MDEUB_SHA384,
2557 },
2558 { .type = CRYPTO_ALG_TYPE_AHASH,
2559 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002560 .halg.digestsize = SHA512_DIGEST_SIZE,
2561 .halg.base = {
2562 .cra_name = "hmac(sha512)",
2563 .cra_driver_name = "hmac-sha512-talitos",
2564 .cra_blocksize = SHA512_BLOCK_SIZE,
2565 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2566 CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002567 }
2568 },
2569 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2570 DESC_HDR_SEL0_MDEUB |
2571 DESC_HDR_MODE0_MDEUB_SHA512,
2572 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08002573};
2574
2575struct talitos_crypto_alg {
2576 struct list_head entry;
2577 struct device *dev;
Lee Nipperacbf7c622010-05-19 19:19:33 +10002578 struct talitos_alg_template algt;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002579};
2580
2581static int talitos_cra_init(struct crypto_tfm *tfm)
2582{
2583 struct crypto_alg *alg = tfm->__crt_alg;
Kim Phillips19bbbc62009-03-29 15:53:59 +08002584 struct talitos_crypto_alg *talitos_alg;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002585 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
Kim Phillips5228f0f2011-07-15 11:21:38 +08002586 struct talitos_private *priv;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002587
Lee Nipper497f2e62010-05-19 19:20:36 +10002588 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2589 talitos_alg = container_of(__crypto_ahash_alg(alg),
2590 struct talitos_crypto_alg,
2591 algt.alg.hash);
2592 else
2593 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2594 algt.alg.crypto);
Kim Phillips19bbbc62009-03-29 15:53:59 +08002595
Kim Phillips9c4a7962008-06-23 19:50:15 +08002596 /* update context with ptr to dev */
2597 ctx->dev = talitos_alg->dev;
Kim Phillips19bbbc62009-03-29 15:53:59 +08002598
Kim Phillips5228f0f2011-07-15 11:21:38 +08002599 /* assign SEC channel to tfm in round-robin fashion */
2600 priv = dev_get_drvdata(ctx->dev);
2601 ctx->ch = atomic_inc_return(&priv->last_chan) &
2602 (priv->num_channels - 1);
2603
Kim Phillips9c4a7962008-06-23 19:50:15 +08002604 /* copy descriptor header template value */
Lee Nipperacbf7c622010-05-19 19:19:33 +10002605 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002606
Kim Phillips602dba52011-07-15 11:21:39 +08002607 /* select done notification */
2608 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2609
Lee Nipper497f2e62010-05-19 19:20:36 +10002610 return 0;
2611}
2612
Herbert Xuaeb4c132015-07-30 17:53:22 +08002613static int talitos_cra_init_aead(struct crypto_aead *tfm)
Lee Nipper497f2e62010-05-19 19:20:36 +10002614{
Herbert Xuaeb4c132015-07-30 17:53:22 +08002615 talitos_cra_init(crypto_aead_tfm(tfm));
Kim Phillips9c4a7962008-06-23 19:50:15 +08002616 return 0;
2617}
2618
Lee Nipper497f2e62010-05-19 19:20:36 +10002619static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2620{
2621 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2622
2623 talitos_cra_init(tfm);
2624
2625 ctx->keylen = 0;
2626 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2627 sizeof(struct talitos_ahash_req_ctx));
2628
2629 return 0;
2630}
2631
Kim Phillips9c4a7962008-06-23 19:50:15 +08002632/*
2633 * given the alg's descriptor header template, determine whether descriptor
2634 * type and primary/secondary execution units required match the hw
2635 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

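	/*
	 * SEC1 and single-interrupt SEC2 configurations only ever map
	 * priv->irq[0]; the second slot stays zero (see talitos_probe_irq()).
	 */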
	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

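	/*
	 * wire up the type-specific entry points; the template itself only
	 * carries names, sizes and the descriptor header word.
	 */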
	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_flags |= CRYPTO_ALG_AEAD_NEW;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
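		/*
		 * without hardware SHA-224 initialization, seed the digest
		 * state in software and run the MDEU in SHA-256 mode.
		 */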
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

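	/*
	 * SEC1 exposes a single interrupt for all four channels; SEC2+ may
	 * provide either one combined line or separate lines for channels
	 * 0/2 and 1/3.
	 */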
	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* no second line: serve all four channels from the primary irq */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* request the secondary irq line for channels 1 and 3 */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

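	/*
	 * all four properties are mandatory; num_channels must be a power of
	 * two because talitos_cra_init() picks a channel by masking with
	 * num_channels - 1.
	 */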
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

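	/* EU register offsets differ between the SEC1.x and SEC2.x maps */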
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

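	/* set up the done tasklets to match the irq wiring chosen above */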
	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

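		/*
		 * submit_count is biased negative so the submit path can
		 * bound the channel FIFO depth with a single atomic op.
		 */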
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

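	/* SEC descriptor pointers are 36 bits wide, hence the 36-bit mask */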
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");