3 * sep_crypto.c - Crypto interface structures
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2010 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.02.22 Enable Kernel Crypto
35 #include <linux/init.h>
36 #include <linux/module.h>
37 #include <linux/miscdevice.h>
39 #include <linux/cdev.h>
40 #include <linux/kdev_t.h>
41 #include <linux/mutex.h>
42 #include <linux/sched.h>
44 #include <linux/poll.h>
45 #include <linux/wait.h>
46 #include <linux/pci.h>
47 #include <linux/pm_runtime.h>
48 #include <linux/err.h>
49 #include <linux/device.h>
50 #include <linux/errno.h>
51 #include <linux/interrupt.h>
52 #include <linux/kernel.h>
53 #include <linux/clk.h>
54 #include <linux/irq.h>
56 #include <linux/platform_device.h>
57 #include <linux/list.h>
58 #include <linux/dma-mapping.h>
59 #include <linux/delay.h>
60 #include <linux/jiffies.h>
61 #include <linux/workqueue.h>
62 #include <linux/crypto.h>
63 #include <crypto/internal/hash.h>
64 #include <crypto/scatterwalk.h>
65 #include <crypto/sha.h>
66 #include <crypto/md5.h>
67 #include <crypto/aes.h>
68 #include <crypto/des.h>
69 #include <crypto/hash.h>
70 #include "sep_driver_hw_defs.h"
71 #include "sep_driver_config.h"
72 #include "sep_driver_api.h"
74 #include "sep_crypto.h"
76 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
/* Serializes access to sep_queue below. */
78 /* Globals for queuing */
79 static spinlock_t queue_lock;
80 static struct crypto_queue sep_queue;
/* Forward declaration: the dequeue worker submitted via the work queue. */
82 /* Declare of dequeuer */
83 static void sep_dequeuer(void *data);
88 * @work: pointer to work_struct
89 * This is what is called by the queue; it is generic so that it
90 * can be used by any type of operation as each different callback
91 * function can use the data parameter in its own way
/* Work-queue trampoline: recover the sep_work_struct wrapper from the
 * embedded work_struct and invoke the stored callback with its data. */
93 static void sep_do_callback(struct work_struct *work)
95 struct sep_work_struct *sep_work = container_of(work,
96 struct sep_work_struct, work);
97 if (sep_work != NULL) {
98 (sep_work->callback)(sep_work->data);
/* NOTE(review): wrapper allocated in sep_submit_work() is presumably
 * freed after the callback runs - confirm against full source. */
101 pr_debug("sep crypto: do callback - NULL container\n");
107 * @work_queue: pointer to struct_workqueue
108 * @funct: pointer to function to execute
109 * @data: pointer to data; function will know
111 * This is a generic API to submit something to
112 * the queue. The callback function will depend
113 * on what operation is to be done
/* Wraps (funct, data) in a sep_work_struct and queues it; the wrapper
 * is unpacked later by sep_do_callback(). GFP_ATOMIC because this may
 * be called from non-sleepable context. */
115 static int sep_submit_work(struct workqueue_struct *work_queue,
116 void(*funct)(void *),
119 struct sep_work_struct *sep_work;
122 sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
124 if (sep_work == NULL) {
125 pr_debug("sep crypto: cant allocate work structure\n");
129 sep_work->callback = funct;
130 sep_work->data = data;
131 INIT_WORK(&sep_work->work, sep_do_callback);
/* queue_work() returns false when the work item was already pending */
132 result = queue_work(work_queue, &sep_work->work);
134 pr_debug("sep_crypto: queue_work failed\n");
142 * @sep: pointer to struct sep_device
143 * @size: total size of area
144 * @block_size: minimum size of chunks
145 * each page is minimum or modulo this size
146 * @returns: pointer to struct scatterlist for new
149 static struct scatterlist *sep_alloc_sg_buf(
150 struct sep_device *sep,
158 size_t real_page_size;
160 struct scatterlist *sg, *sg_temp;
165 dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
/* Largest chunk <= PAGE_SIZE that is still a multiple of block_size */
169 real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
171 * The size of each page must be modulo of the operation
172 * block size; increment by the modified page size until
173 * the total size is reached, then you have the number of
176 while (current_size < size) {
177 current_size += real_page_size;
/* One scatterlist entry per chunk counted above */
181 sg = kmalloc_array(nbr_pages, sizeof(struct scatterlist), GFP_ATOMIC);
185 sg_init_table(sg, nbr_pages);
189 for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
/* Zeroed page per entry so no stale data reaches the SEP */
190 buf = (void *)get_zeroed_page(GFP_ATOMIC);
192 dev_warn(&sep->pdev->dev,
193 "Cannot allocate page for new buffer\n");
198 sg_set_buf(sg_temp, buf, real_page_size);
/* Final entry carries only the remaining bytes of the request */
199 if ((size - current_size) > real_page_size) {
200 sg_temp->length = real_page_size;
201 current_size += real_page_size;
203 sg_temp->length = (size - current_size);
/* NOTE(review): advancing from the list head (sg) rather than from
 * sg_temp looks suspicious - expected sg_next(sg_temp); verify. */
206 sg_temp = sg_next(sg);
213 * @sg: pointer to struct scatterlist; points to area to free
215 static void sep_free_sg_buf(struct scatterlist *sg)
217 struct scatterlist *sg_temp = sg;
219 free_page((unsigned long)sg_virt(sg_temp));
220 sg_temp = sg_next(sg_temp);
227 * @sep: pointer to struct sep_device
228 * @sg_src: pointer to struct scatterlist for source
229 * @sg_dst: pointer to struct scatterlist for destination
230 * @size: size (in bytes) of data to copy
232 * Copy data from one scatterlist to another; both must
235 static void sep_copy_sg(
236 struct sep_device *sep,
237 struct scatterlist *sg_src,
238 struct scatterlist *sg_dst,
242 u32 in_offset, out_offset;
245 struct scatterlist *sg_src_tmp = sg_src;
246 struct scatterlist *sg_dst_tmp = sg_dst;
250 dev_dbg(&sep->pdev->dev, "sep copy sg\n");
252 if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
255 dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
257 while (count < size) {
258 if ((sg_src_tmp->length - in_offset) >
259 (sg_dst_tmp->length - out_offset))
260 seg_size = sg_dst_tmp->length - out_offset;
262 seg_size = sg_src_tmp->length - in_offset;
264 if (seg_size > (size - count))
265 seg_size = (size = count);
267 memcpy(sg_virt(sg_dst_tmp) + out_offset,
268 sg_virt(sg_src_tmp) + in_offset,
271 in_offset += seg_size;
272 out_offset += seg_size;
275 if (in_offset >= sg_src_tmp->length) {
276 sg_src_tmp = sg_next(sg_src_tmp);
280 if (out_offset >= sg_dst_tmp->length) {
281 sg_dst_tmp = sg_next(sg_dst_tmp);
288 * sep_oddball_pages -
289 * @sep: pointer to struct sep_device
290 * @sg: pointer to struct scatterlist - buffer to check
291 * @size: total data size
292 * @blocksize: minimum block size; must be multiples of this size
293 * @to_copy: 1 means do copy, 0 means do not copy
294 * @new_sg: pointer to location to put pointer to new sg area
295 * @returns: 1 if new scatterlist is needed; 0 if not needed;
296 * error value if operation failed
298 * The SEP device requires all pages to be multiples of the
299 * minimum block size appropriate for the operation
300 * This function check all pages; if any are oddball sizes
301 * (not multiple of block sizes), it creates a new scatterlist.
302 * If the to_copy parameter is set to 1, then a scatter list
303 * copy is performed. The pointer to the new scatterlist is
304 * put into the address supplied by the new_sg parameter; if
305 * no new scatterlist is needed, then a NULL is put into
306 * the location at new_sg.
309 static int sep_oddball_pages(
310 struct sep_device *sep,
311 struct scatterlist *sg,
314 struct scatterlist **new_sg,
317 struct scatterlist *sg_temp;
319 u32 nbr_pages, page_count;
321 dev_dbg(&sep->pdev->dev, "sep oddball\n");
/* Degenerate inputs never need a replacement list */
322 if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
325 dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
333 sg_temp = sg_next(sg_temp);
/* Scan until an entry whose length is not block-aligned is found */
337 while ((sg_temp) && (flag == 0)) {
339 if (sg_temp->length % block_size)
342 sg_temp = sg_next(sg_temp);
345 /* Do not process if last (or only) page is oddball */
346 if (nbr_pages == page_count)
/* Build the aligned replacement list; optionally copy data into it */
350 dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
351 *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
352 if (*new_sg == NULL) {
353 dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
358 sep_copy_sg(sep, sg, *new_sg, data_size);
367 * sep_copy_offset_sg -
368 * @sep: pointer to struct sep_device;
369 * @sg: pointer to struct scatterlist
370 * @offset: offset into scatterlist memory
371 * @dst: place to put data
372 * @len: length of data
373 * @returns: number of bytes copies
375 * This copies data from scatterlist buffer
376 * offset from beginning - it is needed for
377 * handling tail data in hash
379 static size_t sep_copy_offset_sg(
380 struct sep_device *sep,
381 struct scatterlist *sg,
388 size_t offset_within_page;
389 size_t length_within_page;
390 size_t length_remaining;
391 size_t current_offset;
393 /* Find which page is beginning of segment */
395 page_end = sg->length;
/* Walk the chain keeping running [page_start, page_end) byte ranges
 * until the entry containing @offset is reached */
396 while ((sg) && (offset > page_end)) {
397 page_start += sg->length;
400 page_end += sg->length;
406 offset_within_page = offset - page_start;
407 if ((sg->length - offset_within_page) >= len) {
408 /* All within this page */
409 memcpy(dst, sg_virt(sg) + offset_within_page, len);
412 /* Scattered multiple pages */
414 length_remaining = len;
415 while ((sg) && (current_offset < len)) {
416 length_within_page = sg->length - offset_within_page;
417 if (length_within_page >= length_remaining) {
/* Final partial chunk; terminates the loop */
418 memcpy(dst+current_offset,
419 sg_virt(sg) + offset_within_page,
421 length_remaining = 0;
422 current_offset = len;
/* Whole remainder of this entry; continue with the next one,
 * which is read from its start (offset_within_page reset) */
424 memcpy(dst+current_offset,
425 sg_virt(sg) + offset_within_page,
427 length_remaining -= length_within_page;
428 current_offset += length_within_page;
429 offset_within_page = 0;
442 * @src_ptr: source pointer
443 * @dst_ptr: destination pointer
444 * @nbytes: number of bytes
445 * @returns: 0 for success; -1 for failure
446 * We cannot have any partial overlap. Total overlap
447 * where src is the same as dst is okay
449 static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
451 /* Check for partial overlap */
452 if (src_ptr != dst_ptr) {
453 if (src_ptr < dst_ptr) {
454 if ((src_ptr + nbytes) > dst_ptr)
457 if ((dst_ptr + nbytes) > src_ptr)
/* Dumps both the vendor-context IV and the kernel-crypto walk IV for
 * the current DES-CBC or AES-CBC request, one byte per debug line.
 * Used to diagnose IV divergence (needed for dm-crypt support). */
465 /* Debug - prints only if DEBUG is defined */
466 static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
470 struct sep_aes_internal_context *aes_internal;
471 struct sep_des_internal_context *des_internal;
474 struct this_task_ctx *ta_ctx;
475 struct crypto_ablkcipher *tfm;
476 struct sep_system_ctx *sctx;
478 ta_ctx = ablkcipher_request_ctx(req);
479 tfm = crypto_ablkcipher_reqtfm(req);
480 sctx = crypto_ablkcipher_ctx(tfm);
482 dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
/* DES-CBC: dump IV held in the vendor's private context... */
483 if ((ta_ctx->current_request == DES_CBC) &&
484 (ta_ctx->des_opmode == SEP_DES_CBC)) {
486 des_internal = (struct sep_des_internal_context *)
487 sctx->des_private_ctx.ctx_buf;
489 dev_dbg(&ta_ctx->sep_used->pdev->dev,
490 "sep - vendor iv for DES\n");
491 cptr = (unsigned char *)des_internal->iv_context;
492 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
493 dev_dbg(&ta_ctx->sep_used->pdev->dev,
494 "%02x\n", *(cptr + ct1));
/* ...then the IV tracked by the ablkcipher walk */
497 dev_dbg(&ta_ctx->sep_used->pdev->dev,
498 "sep - walk from kernel crypto iv for DES\n");
499 cptr = (unsigned char *)ta_ctx->walk.iv;
500 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
501 dev_dbg(&ta_ctx->sep_used->pdev->dev,
502 "%02x\n", *(cptr + ct1));
/* AES-CBC: same two dumps from the AES private context */
503 } else if ((ta_ctx->current_request == AES_CBC) &&
504 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
506 aes_internal = (struct sep_aes_internal_context *)
507 sctx->aes_private_ctx.cbuff;
509 dev_dbg(&ta_ctx->sep_used->pdev->dev,
510 "sep - vendor iv for AES\n");
511 cptr = (unsigned char *)aes_internal->aes_ctx_iv;
512 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
513 dev_dbg(&ta_ctx->sep_used->pdev->dev,
514 "%02x\n", *(cptr + ct1));
517 dev_dbg(&ta_ctx->sep_used->pdev->dev,
518 "sep - walk from kernel crypto iv for AES\n");
519 cptr = (unsigned char *)ta_ctx->walk.iv;
520 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
521 dev_dbg(&ta_ctx->sep_used->pdev->dev,
522 "%02x\n", *(cptr + ct1));
527 * RFC2451: Weak key check
528 * Returns: 1 (weak), 0 (not weak)
/* Standard DES weak/semi-weak key detection: folds the parity class of
 * each of the first 7 key bytes into a 32-bit signature n, then
 * compares against the known weak-key signatures via a decision tree.
 * Mirrors the check in the kernel's generic DES implementation. */
530 static int sep_weak_key(const u8 *key, unsigned int keylen)
532 static const u8 parity[] = {
533 8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
534 0, 8, 8, 0, 8, 0, 0, 8, 8,
536 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
537 8, 0, 0, 8, 0, 8, 8, 0, 0,
539 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
540 8, 0, 0, 8, 0, 8, 8, 0, 0,
542 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
543 0, 8, 8, 0, 8, 0, 0, 8, 8,
545 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
546 8, 0, 0, 8, 0, 8, 8, 0, 0,
548 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
549 0, 8, 8, 0, 8, 0, 0, 8, 8,
551 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
552 0, 8, 8, 0, 8, 0, 0, 8, 8,
554 4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
555 8, 5, 0, 8, 0, 8, 8, 0, 0,
/* Pack one parity nibble per key byte into n */
561 n = parity[key[0]]; n <<= 4;
562 n |= parity[key[1]]; n <<= 4;
563 n |= parity[key[2]]; n <<= 4;
564 n |= parity[key[3]]; n <<= 4;
565 n |= parity[key[4]]; n <<= 4;
566 n |= parity[key[5]]; n <<= 4;
567 n |= parity[key[6]]; n <<= 4;
571 /* 1 in 10^10 keys passes this test */
572 if (!((n - (w >> 3)) & w)) {
/* Binary search over the 16 weak/semi-weak key signatures; the byte
 * patterns for each candidate are noted alongside each branch */
573 if (n < 0x41415151) {
574 if (n < 0x31312121) {
575 if (n < 0x14141515) {
576 /* 01 01 01 01 01 01 01 01 */
579 /* 01 1F 01 1F 01 0E 01 0E */
583 /* 01 E0 01 E0 01 F1 01 F1 */
586 /* 01 FE 01 FE 01 FE 01 FE */
591 if (n < 0x34342525) {
592 /* 1F 01 1F 01 0E 01 0E 01 */
595 /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
599 /* 1F E0 1F E0 0E F1 0E F1 */
602 /* 1F FE 1F FE 0E FE 0E FE */
608 if (n < 0x61616161) {
609 if (n < 0x44445555) {
610 /* E0 01 E0 01 F1 01 F1 01 */
613 /* E0 1F E0 1F F1 0E F1 0E */
617 /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
620 /* E0 FE E0 FE F1 FE F1 FE */
625 if (n < 0x64646565) {
626 /* FE 01 FE 01 FE 01 FE 01 */
629 /* FE 1F FE 1F FE 0E FE 0E */
633 /* FE E0 FE E0 FE F1 FE F1 */
636 /* FE FE FE FE FE FE FE FE */
/* Counts the entries in a scatterlist chain (body elided in this
 * excerpt; presumably walks via sg_next() - confirm upstream). */
650 static u32 sep_sg_nents(struct scatterlist *sg)
663 * @ta_ctx: pointer to struct this_task_ctx
664 * @returns: offset to place for the next word in the message
665 * Set up pointer in message pool for new message
/* Zeroes the message area, reserves the first two words (length/CRC
 * header filled in later by sep_end_msg), and writes the start token. */
667 static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
670 ta_ctx->msg_len_words = 2;
671 ta_ctx->msgptr = ta_ctx->msg;
672 memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
673 ta_ctx->msgptr += sizeof(u32) * 2;
674 word_ptr = (u32 *)ta_ctx->msgptr;
675 *word_ptr = SEP_START_MSG_TOKEN;
/* Caller continues writing at offset 8 (two u32 words in) */
676 return sizeof(u32) * 2;
681 * @ta_ctx: pointer to struct this_task_ctx
682 * @messages_offset: current message offset
683 * Returns: 0 for success; <0 otherwise
684 * End message; set length and CRC; and
685 * send interrupt to the SEP
687 static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
690 /* Msg size goes into msg after token */
/* Total length in words, +1 to account for the trailing CRC word */
691 ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
692 word_ptr = (u32 *)ta_ctx->msgptr;
694 *word_ptr = ta_ctx->msg_len_words;
696 /* CRC (currently 0) goes at end of msg */
697 word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
702 * sep_start_inbound_msg -
703 * @ta_ctx: pointer to struct this_task_ctx
704 * @msg_offset: offset to place for the next word in the message
705 * @returns: 0 for success; error value for failure
706 * Set up pointer in message pool for inbound message
/* Validates the start token of a reply from the SEP and records its
 * length; on success *msg_offset points past the two header words. */
708 static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
714 *msg_offset = sizeof(u32) * 2;
715 word_ptr = (u32 *)ta_ctx->msgptr;
717 ta_ctx->msg_len_words = *(word_ptr + 1);
719 if (token != SEP_START_MSG_TOKEN) {
720 error = SEP_INVALID_START;
731 * @ta_ctx: pointer to struct this_task_ctx
732 * @in_addr: pointer to start of parameter
733 * @size: size of parameter to copy (in bytes)
734 * @max_size: size to move up offset; SEP mesg is in word sizes
735 * @msg_offset: pointer to current offset (is updated)
736 * @byte_array: flag ti indicate whether endian must be changed
737 * Copies data into the message area from caller
739 static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
740 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
744 void_ptr = ta_ctx->msgptr + *msg_offset;
745 word_ptr = (u32 *)void_ptr;
746 memcpy(void_ptr, in_addr, size);
/* Offset advances by max_size (word-padded slot), not by size */
747 *msg_offset += max_size;
749 /* Do we need to manipulate endian? */
/* Byte arrays are stored big-endian in the SEP message; swap each
 * word in place (rounding size up to whole words) */
752 for (i = 0; i < ((size + 3) / 4); i += 1)
753 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
759 * @ta_ctx: pointer to struct this_task_ctx
760 * @msg_offset: pointer to current offset (is updated)
761 * @op_code: op code to put into message
762 * Puts op code into message and updates offset
/* Starts a fresh outbound message (via sep_start_msg) and writes the
 * opcode word immediately after the header. */
764 static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
769 *msg_offset = sep_start_msg(ta_ctx);
770 word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
772 *msg_offset += sizeof(u32);
779 * @ta_ctx: pointer to struct this_task_ctx
780 * @in_addr: pointer to start of parameter
781 * @size: size of parameter to copy (in bytes)
782 * @max_size: size to move up offset; SEP mesg is in word sizes
783 * @msg_offset: pointer to current offset (is updated)
784 * @byte_array: flag ti indicate whether endian must be changed
785 * Copies data out of the message area to caller
787 static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
788 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
792 void_ptr = ta_ctx->msgptr + *msg_offset;
793 word_ptr = (u32 *)void_ptr;
795 /* Do we need to manipulate endian? */
/* Mirror of sep_write_msg: byte arrays are swapped word-by-word in
 * the message area before being copied out to the caller */
798 for (i = 0; i < ((size + 3) / 4); i += 1)
799 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
802 memcpy(in_addr, void_ptr, size);
/* Offset advances by the word-padded slot size, not the data size */
803 *msg_offset += max_size;
808 * @ta_ctx: pointer to struct this_task_ctx
809 * @op_code: expected op_code
810 * @msg_offset: pointer to current offset (is updated)
811 * @returns: 0 for success; error for failure
/* Validates a reply from the SEP: checks the inbound header, then
 * reads two words - the echoed opcode and the SEP status - and
 * rejects mismatched opcodes or non-SEP_OK status. */
813 static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
819 struct sep_device *sep = ta_ctx->sep_used;
821 dev_dbg(&sep->pdev->dev, "dumping return message\n");
822 error = sep_start_inbound_msg(ta_ctx, msg_offset);
824 dev_warn(&sep->pdev->dev,
825 "sep_start_inbound_msg error\n");
/* in_ary[0] = echoed opcode, in_ary[1] = SEP status word */
829 sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
832 if (in_ary[0] != op_code) {
833 dev_warn(&sep->pdev->dev,
834 "sep got back wrong opcode\n");
835 dev_warn(&sep->pdev->dev,
836 "got back %x; expected %x\n",
838 return SEP_WRONG_OPCODE;
841 if (in_ary[1] != SEP_OK) {
842 dev_warn(&sep->pdev->dev,
843 "sep execution error\n");
844 dev_warn(&sep->pdev->dev,
845 "got back %x; expected %x\n",
855 * @ta_ctx: pointer to struct this_task_ctx
856 * @msg_offset: point to current place in SEP msg; is updated
857 * @dst: pointer to place to put the context
858 * @len: size of the context structure (differs for crypro/hash)
859 * This function reads the context from the msg area
860 * There is a special way the vendor needs to have the maximum
861 * length calculated so that the msg_offset is updated properly;
862 * it skips over some words in the msg area depending on the size
865 static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
/* max_length = len rounded up to a whole number of u32 words */
868 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
869 sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
873 * sep_write_context -
874 * @ta_ctx: pointer to struct this_task_ctx
875 * @msg_offset: point to current place in SEP msg; is updated
876 * @src: pointer to the current context
877 * @len: size of the context structure (differs for crypro/hash)
878 * This function writes the context to the msg area
879 * There is a special way the vendor needs to have the maximum
880 * length calculated so that the msg_offset is updated properly;
881 * it skips over some words in the msg area depending on the size
884 static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
/* max_length = len rounded up to a whole number of u32 words */
887 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
888 sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
893 * @ta_ctx: pointer to struct this_task_ctx
894 * Clear out crypto related values in sep device structure
895 * to enable device to be used by anyone; either kernel
896 * crypto or userspace app via middleware
898 static void sep_clear_out(struct this_task_ctx *ta_ctx)
/* Free any bounce scatterlists built for oddball-sized pages */
900 if (ta_ctx->src_sg_hold) {
901 sep_free_sg_buf(ta_ctx->src_sg_hold);
902 ta_ctx->src_sg_hold = NULL;
905 if (ta_ctx->dst_sg_hold) {
906 sep_free_sg_buf(ta_ctx->dst_sg_hold);
907 ta_ctx->dst_sg_hold = NULL;
910 ta_ctx->src_sg = NULL;
911 ta_ctx->dst_sg = NULL;
913 sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
/* Only the owner of the device performs the release sequence below */
915 if (ta_ctx->i_own_sep) {
917 * The following unlocks the sep and makes it available
918 * to any other application
919 * First, null out crypto entries in sep before releasing it
921 ta_ctx->sep_used->current_hash_req = NULL;
922 ta_ctx->sep_used->current_cypher_req = NULL;
923 ta_ctx->sep_used->current_request = 0;
924 ta_ctx->sep_used->current_hash_stage = 0;
925 ta_ctx->sep_used->ta_ctx = NULL;
926 ta_ctx->sep_used->in_kernel = 0;
928 ta_ctx->call_status.status = 0;
/* Scrub the shared message area so no key/IV material is visible
 * to the next user of the device */
930 /* Remove anything confidential */
931 memset(ta_ctx->sep_used->shared_addr, 0,
932 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
934 sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
936 #ifdef SEP_ENABLE_RUNTIME_PM
937 ta_ctx->sep_used->in_use = 0;
938 pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
939 pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
/* Drop both transaction locks, then wake the next waiter */
942 clear_bit(SEP_WORKING_LOCK_BIT,
943 &ta_ctx->sep_used->in_use_flags);
944 ta_ctx->sep_used->pid_doing_transaction = 0;
946 dev_dbg(&ta_ctx->sep_used->pdev->dev,
947 "[PID%d] waking up next transaction\n",
950 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
951 &ta_ctx->sep_used->in_use_flags);
952 wake_up(&ta_ctx->sep_used->event_transactions);
954 ta_ctx->i_own_sep = 0;
959 * Release crypto infrastructure from EINPROGRESS and
960 * clear sep_dev so that SEP is available to anyone
962 static void sep_crypto_release(struct sep_system_ctx *sctx,
963 struct this_task_ctx *ta_ctx, u32 error)
965 struct ahash_request *hash_req = ta_ctx->current_hash_req;
966 struct ablkcipher_request *cypher_req =
967 ta_ctx->current_cypher_req;
968 struct sep_device *sep = ta_ctx->sep_used;
/* Tear down DMA/sg state and release device ownership first */
970 sep_clear_out(ta_ctx);
973 * This may not yet exist depending when we
974 * chose to bail out. If it does exist, set
977 if (ta_ctx->are_we_done_yet != NULL)
978 *ta_ctx->are_we_done_yet = 1;
/* Complete the cipher request only once the key has been sent, or
 * when a real error (not -EINPROGRESS) forces early completion */
980 if (cypher_req != NULL) {
981 if ((sctx->key_sent == 1) ||
982 ((error != 0) && (error != -EINPROGRESS))) {
983 if (cypher_req->base.complete == NULL) {
984 dev_dbg(&sep->pdev->dev,
985 "release is null for cypher!");
987 cypher_req->base.complete(
988 &cypher_req->base, error);
/* Hash requests are always completed here */
993 if (hash_req != NULL) {
994 if (hash_req->base.complete == NULL) {
995 dev_dbg(&sep->pdev->dev,
996 "release is null for hash!");
998 hash_req->base.complete(
999 &hash_req->base, error);
1005 * This is where we grab the sep itself and tell it to do something.
1006 * It will sleep if the sep is currently busy
1007 * and it will return 0 if sep is now ours; error value if there
1010 static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
1012 struct sep_device *sep = ta_ctx->sep_used;
1014 struct sep_msgarea_hdr *my_msg_header;
1016 my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
1018 /* add to status queue */
1019 ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
1020 ta_ctx->nbytes, current->pid,
1021 current->comm, sizeof(current->comm));
1023 if (!ta_ctx->queue_elem) {
1024 dev_dbg(&sep->pdev->dev,
1025 "[PID%d] updating queue status error\n", current->pid);
1029 /* get the device; this can sleep */
1030 result = sep_wait_transaction(sep);
/* Keep the device powered for the duration of the transaction */
1034 if (sep_dev->power_save_setup == 1)
1035 pm_runtime_get_sync(&sep_dev->pdev->dev);
1037 /* Copy in the message */
1038 memcpy(sep->shared_addr, ta_ctx->msg,
1039 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1041 /* Copy in the dcb information if there is any */
1042 if (ta_ctx->dcb_region) {
1043 result = sep_activate_dcb_dmatables_context(sep,
1044 &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
1050 /* Mark the device so we know how to finish the job in the tasklet */
1051 if (ta_ctx->current_hash_req)
1052 sep->current_hash_req = ta_ctx->current_hash_req;
1054 sep->current_cypher_req = ta_ctx->current_cypher_req;
1056 sep->current_request = ta_ctx->current_request;
1057 sep->current_hash_stage = ta_ctx->current_hash_stage;
1058 sep->ta_ctx = ta_ctx;
1060 ta_ctx->i_own_sep = 1;
1062 /* need to set bit first to avoid race condition with interrupt */
1063 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
1065 result = sep_send_command_handler(sep);
1067 dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
1071 dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
/* On send failure, undo the done-bit set above */
1074 dev_dbg(&sep->pdev->dev, "[PID%d]: cant send command\n",
1076 clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1077 &ta_ctx->call_status.status);
1084 * This function sets things up for a crypto data block process
1085 * This does all preparation, but does not try to grab the
1087 * @req: pointer to struct ablkcipher_request
1088 * returns: 0 if all went well, non zero if error
1090 static int sep_crypto_block_data(struct ablkcipher_request *req)
1099 static char small_buf[100];
1100 ssize_t copy_result;
1103 struct scatterlist *new_sg;
1104 struct this_task_ctx *ta_ctx;
1105 struct crypto_ablkcipher *tfm;
1106 struct sep_system_ctx *sctx;
1108 struct sep_des_internal_context *des_internal;
1109 struct sep_aes_internal_context *aes_internal;
1111 ta_ctx = ablkcipher_request_ctx(req);
1112 tfm = crypto_ablkcipher_reqtfm(req);
1113 sctx = crypto_ablkcipher_ctx(tfm);
1115 /* start the walk on scatterlists */
1116 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1117 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
1120 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1122 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1127 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1128 "crypto block: src is %lx dst is %lx\n",
1129 (unsigned long)req->src, (unsigned long)req->dst);
1131 /* Make sure all pages are even block */
/* to_copy=1 for source: data must be copied into the bounce sg */
1132 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
1133 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
1135 if (int_error < 0) {
1136 dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
1138 } else if (int_error == 1) {
/* *_sg_hold records ownership so sep_clear_out() can free it */
1139 ta_ctx->src_sg = new_sg;
1140 ta_ctx->src_sg_hold = new_sg;
1142 ta_ctx->src_sg = req->src;
1143 ta_ctx->src_sg_hold = NULL;
/* to_copy=0 for destination: output only, no copy-in needed */
1146 int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
1147 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
1149 if (int_error < 0) {
1150 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1153 } else if (int_error == 1) {
1154 ta_ctx->dst_sg = new_sg;
1155 ta_ctx->dst_sg_hold = new_sg;
1157 ta_ctx->dst_sg = req->dst;
1158 ta_ctx->dst_sg_hold = NULL;
1161 /* set nbytes for queue status */
1162 ta_ctx->nbytes = req->nbytes;
1164 /* Key already done; this is for data */
1165 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
1167 /* check for valid data and proper spacing */
1168 src_ptr = sg_virt(ta_ctx->src_sg);
1169 dst_ptr = sg_virt(ta_ctx->dst_sg);
/* Data length must be an exact multiple of the cipher block size */
1171 if (!src_ptr || !dst_ptr ||
1172 (ta_ctx->current_cypher_req->nbytes %
1173 crypto_ablkcipher_blocksize(tfm))) {
1175 dev_warn(&ta_ctx->sep_used->pdev->dev,
1176 "cipher block size odd\n");
1177 dev_warn(&ta_ctx->sep_used->pdev->dev,
1178 "cipher block size is %x\n",
1179 crypto_ablkcipher_blocksize(tfm));
1180 dev_warn(&ta_ctx->sep_used->pdev->dev,
1181 "cipher data size is %x\n",
1182 ta_ctx->current_cypher_req->nbytes);
/* Partially overlapping src/dst cannot be handled; identical is OK */
1186 if (partial_overlap(src_ptr, dst_ptr,
1187 ta_ctx->current_cypher_req->nbytes)) {
1188 dev_warn(&ta_ctx->sep_used->pdev->dev,
1189 "block partial overlap\n");
1193 /* Put together the message */
1194 sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
1196 /* If des, and size is 1 block, put directly in msg */
1197 if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
1198 (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
1200 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1201 "writing out one block des\n");
1203 copy_result = sg_copy_to_buffer(
1204 ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
1205 small_buf, crypto_ablkcipher_blocksize(tfm));
1207 if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
1208 dev_warn(&ta_ctx->sep_used->pdev->dev,
1209 "des block copy failed\n");
1213 /* Put data into message */
1214 sep_write_msg(ta_ctx, small_buf,
1215 crypto_ablkcipher_blocksize(tfm),
1216 crypto_ablkcipher_blocksize(tfm) * 2,
1219 /* Put size into message */
1220 sep_write_msg(ta_ctx, &req->nbytes,
1221 sizeof(u32), sizeof(u32), &msg_offset, 0);
1223 /* Otherwise, fill out dma tables */
1224 ta_ctx->dcb_input_data.app_in_address = src_ptr;
1225 ta_ctx->dcb_input_data.data_in_size = req->nbytes;
1226 ta_ctx->dcb_input_data.app_out_address = dst_ptr;
1227 ta_ctx->dcb_input_data.block_size =
1228 crypto_ablkcipher_blocksize(tfm);
1229 ta_ctx->dcb_input_data.tail_block_size = 0;
1230 ta_ctx->dcb_input_data.is_applet = 0;
1231 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
1232 ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
1234 result = sep_create_dcb_dmatables_context_kernel(
1236 &ta_ctx->dcb_region,
1237 &ta_ctx->dmatables_region,
1239 &ta_ctx->dcb_input_data,
1242 dev_warn(&ta_ctx->sep_used->pdev->dev,
1243 "crypto dma table create failed\n");
1247 /* Portion of msg is nulled (no data) */
1253 sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
1254 sizeof(u32) * 5, &msg_offset, 0);
1258 * Before we write the message, we need to overwrite the
1259 * vendor's IV with the one from our own ablkcipher walk
1260 * iv because this is needed for dm-crypt
1262 sep_dump_ivs(req, "sending data block to sep\n");
1263 if ((ta_ctx->current_request == DES_CBC) &&
1264 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1266 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1267 "overwrite vendor iv on DES\n");
1268 des_internal = (struct sep_des_internal_context *)
1269 sctx->des_private_ctx.ctx_buf;
1270 memcpy((void *)des_internal->iv_context,
1271 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1272 } else if ((ta_ctx->current_request == AES_CBC) &&
1273 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1275 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1276 "overwrite vendor iv on AES\n");
1277 aes_internal = (struct sep_aes_internal_context *)
1278 sctx->aes_private_ctx.cbuff;
1279 memcpy((void *)aes_internal->aes_ctx_iv,
1280 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1283 /* Write context into message */
1284 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1285 sep_write_context(ta_ctx, &msg_offset,
1286 &sctx->des_private_ctx,
1287 sizeof(struct sep_des_private_context));
1289 sep_write_context(ta_ctx, &msg_offset,
1290 &sctx->aes_private_ctx,
1291 sizeof(struct sep_aes_private_context));
1294 /* conclude message */
1295 sep_end_msg(ta_ctx, msg_offset);
1297 /* Parent (caller) is now ready to tell the sep to do ahead */
1303 * This function sets things up for a crypto key submit process
1304 * This does all preparation, but does not try to grab the
1306 * @req: pointer to struct ablkcipher_request
1307 * returns: 0 if all went well, non zero if error
1309 static int sep_crypto_send_key(struct ablkcipher_request *req)
1317 struct this_task_ctx *ta_ctx;
1318 struct crypto_ablkcipher *tfm;
1319 struct sep_system_ctx *sctx;
1321 ta_ctx = ablkcipher_request_ctx(req);
1322 tfm = crypto_ablkcipher_reqtfm(req);
1323 sctx = crypto_ablkcipher_ctx(tfm);
1325 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
1327 /* start the walk on scatterlists */
1328 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1329 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1330 "sep crypto block data size of %x\n", req->nbytes);
1332 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1334 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
/* CBC modes require an IV from the walk; snapshot it into ta_ctx */
1340 if ((ta_ctx->current_request == DES_CBC) &&
1341 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1342 if (!ta_ctx->walk.iv) {
1343 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1347 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1350 if ((ta_ctx->current_request == AES_CBC) &&
1351 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1352 if (!ta_ctx->walk.iv) {
1353 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1357 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1360 /* put together message to SEP */
1361 /* Start with op code */
1362 sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
1364 /* now deal with IV */
/* Non-CBC modes skip the IV slot but still advance the offset so
 * the key lands at the fixed position the SEP expects */
1365 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1366 if (ta_ctx->des_opmode == SEP_DES_CBC) {
1367 sep_write_msg(ta_ctx, ta_ctx->iv,
1368 SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
1372 msg_offset += 4 * sizeof(u32);
1375 max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
1376 sizeof(u32)) * sizeof(u32);
1377 if (ta_ctx->aes_opmode == SEP_AES_CBC) {
1378 sep_write_msg(ta_ctx, ta_ctx->iv,
1379 SEP_AES_IV_SIZE_BYTES, max_length,
1383 msg_offset += max_length;
/* Key material followed by its mode/size descriptor words */
1388 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1389 sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
1390 sizeof(u32) * 8, sizeof(u32) * 8,
1393 msg[0] = (u32)sctx->des_nbr_keys;
1394 msg[1] = (u32)ta_ctx->des_encmode;
1395 msg[2] = (u32)ta_ctx->des_opmode;
1397 sep_write_msg(ta_ctx, (void *)msg,
1398 sizeof(u32) * 3, sizeof(u32) * 3,
1401 sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
1403 SEP_AES_MAX_KEY_SIZE_BYTES,
1406 msg[0] = (u32)sctx->aes_key_size;
1407 msg[1] = (u32)ta_ctx->aes_encmode;
1408 msg[2] = (u32)ta_ctx->aes_opmode;
1409 msg[3] = (u32)0; /* Secret key is not used */
1410 sep_write_msg(ta_ctx, (void *)msg,
1411 sizeof(u32) * 4, sizeof(u32) * 4,
1415 /* conclude message */
1416 sep_end_msg(ta_ctx, msg_offset);
1418 /* Parent (caller) is now ready to tell the sep to do ahead */
1423 /* This needs to be run as a work queue as it can be put asleep */
/*
 * sep_crypto_block - workqueue handler for one ablkcipher request
 * @data: the struct ablkcipher_request being serviced
 *
 * Two-phase operation: if the key has not yet been sent to the SEP
 * (sctx->key_sent == 0) it first builds and submits the key-init
 * message and polls for its completion, then builds and submits the
 * data-block message and polls again.  Completion is signalled by the
 * interrupt-side post-op code through *ta_ctx->are_we_done_yet.
 */
1424 static void sep_crypto_block(void *data)
1426 unsigned long end_time;
1430 struct ablkcipher_request *req;
1431 struct this_task_ctx *ta_ctx;
1432 struct crypto_ablkcipher *tfm;
1433 struct sep_system_ctx *sctx;
1434 int are_we_done_yet;
1436 req = (struct ablkcipher_request *)data;
1437 ta_ctx = ablkcipher_request_ctx(req);
1438 tfm = crypto_ablkcipher_reqtfm(req);
1439 sctx = crypto_ablkcipher_ctx(tfm);
/* completion flag lives on this stack frame; post-op writes it via ta_ctx */
1441 ta_ctx->are_we_done_yet = &are_we_done_yet;
1443 pr_debug("sep_crypto_block\n");
1444 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
1446 pr_debug("key_sent is %d\n", sctx->key_sent);
1448 /* do we need to send the key */
1449 if (sctx->key_sent == 0) {
1450 are_we_done_yet = 0;
1451 result = sep_crypto_send_key(req); /* prep to send key */
1453 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1454 "could not prep key %x\n", result);
1455 sep_crypto_release(sctx, ta_ctx, result);
1459 result = sep_crypto_take_sep(ta_ctx);
1461 dev_warn(&ta_ctx->sep_used->pdev->dev,
1462 "sep_crypto_take_sep for key send failed\n");
1463 sep_crypto_release(sctx, ta_ctx, result);
1467 /* now we sit and wait up to a fixed time for completion */
1468 end_time = jiffies + (WAIT_TIME * HZ);
1469 while ((time_before(jiffies, end_time)) &&
1470 (are_we_done_yet == 0))
1473 /* Done waiting; still not done yet? */
1474 if (are_we_done_yet == 0) {
1475 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1476 "Send key job never got done\n");
1477 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1481 /* Set the key sent variable so this can be skipped later */
1485 /* Key sent (or maybe not if we did not have to), now send block */
1486 are_we_done_yet = 0;
1488 result = sep_crypto_block_data(req);
1491 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1492 "could prep not send block %x\n", result);
1493 sep_crypto_release(sctx, ta_ctx, result);
1497 result = sep_crypto_take_sep(ta_ctx);
1499 dev_warn(&ta_ctx->sep_used->pdev->dev,
1500 "sep_crypto_take_sep for block send failed\n");
1501 sep_crypto_release(sctx, ta_ctx, result);
1505 /* now we sit and wait up to a fixed time for completion */
1506 end_time = jiffies + (WAIT_TIME * HZ);
1507 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
1510 /* Done waiting; still not done yet? */
1511 if (are_we_done_yet == 0) {
1512 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1513 "Send block job never got done\n");
1514 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1518 /* That's it; entire thing done, get out of queue */
1520 pr_debug("crypto_block leaving\n");
1521 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
1525 * Post operation (after interrupt) for crypto block
/*
 * Runs (from the sep_finish tasklet path) once the SEP has answered a
 * cipher message.  Distinguishes a key-init reply (sctx->key_sent == 0)
 * from a data-block reply, copies results/contexts back from the shared
 * message area, and releases the request.
 */
1527 static u32 crypto_post_op(struct sep_device *sep)
1533 ssize_t copy_result;
/*
 * NOTE(review): function-local static buffer shared across calls;
 * assumes only one cipher request is outstanding at a time - confirm.
 */
1534 static char small_buf[100];
1536 struct ablkcipher_request *req;
1537 struct this_task_ctx *ta_ctx;
1538 struct sep_system_ctx *sctx;
1539 struct crypto_ablkcipher *tfm;
1541 struct sep_des_internal_context *des_internal;
1542 struct sep_aes_internal_context *aes_internal;
1544 if (!sep->current_cypher_req)
1547 /* hold req since we need to submit work after clearing sep */
1548 req = sep->current_cypher_req;
1550 ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
1551 tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
1552 sctx = crypto_ablkcipher_ctx(tfm);
1554 pr_debug("crypto_post op\n");
1555 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1556 sctx->key_sent, tfm, sctx, ta_ctx);
1558 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
1559 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
1561 /* first bring msg from shared area to local area */
1562 memcpy(ta_ctx->msg, sep->shared_addr,
1563 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1565 /* Is this the result of performing init (key to SEP)? */
1566 if (sctx->key_sent == 0) {
1568 /* Did SEP do it okay */
1569 u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
1572 dev_warn(&ta_ctx->sep_used->pdev->dev,
1573 "aes init error %x\n", u32_error);
1574 sep_crypto_release(sctx, ta_ctx, u32_error);
/* Read the vendor crypto context back so it can be resent later */
1579 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1580 sep_read_context(ta_ctx, &msg_offset,
1581 &sctx->des_private_ctx,
1582 sizeof(struct sep_des_private_context));
1584 sep_read_context(ta_ctx, &msg_offset,
1585 &sctx->aes_private_ctx,
1586 sizeof(struct sep_aes_private_context));
1589 sep_dump_ivs(req, "after sending key to sep\n");
1591 /* key sent went okay; release sep, and set are_we_done_yet */
/* -EINPROGRESS tells the caller the data phase still has to run */
1593 sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
1598 * This is the result of a block request
1600 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1601 "crypto_post_op block response\n");
1603 u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
1607 dev_warn(&ta_ctx->sep_used->pdev->dev,
1608 "sep block error %x\n", u32_error);
1609 sep_crypto_release(sctx, ta_ctx, u32_error);
1613 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1615 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1616 "post op for DES\n");
1618 /* special case for 1 block des */
/*
 * Single-block DES results come back inline in the message
 * rather than via DMA; pull them out and copy into dst sg.
 */
1619 if (sep->current_cypher_req->nbytes ==
1620 crypto_ablkcipher_blocksize(tfm)) {
1622 sep_read_msg(ta_ctx, small_buf,
1623 crypto_ablkcipher_blocksize(tfm),
1624 crypto_ablkcipher_blocksize(tfm) * 2,
1627 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1628 "reading in block des\n");
1630 copy_result = sg_copy_from_buffer(
1632 sep_sg_nents(ta_ctx->dst_sg),
1634 crypto_ablkcipher_blocksize(tfm));
1637 crypto_ablkcipher_blocksize(tfm)) {
1639 dev_warn(&ta_ctx->sep_used->pdev->dev,
1640 "des block copy failed\n");
1641 sep_crypto_release(sctx, ta_ctx,
1648 sep_read_context(ta_ctx, &msg_offset,
1649 &sctx->des_private_ctx,
1650 sizeof(struct sep_des_private_context));
1653 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1654 "post op for AES\n");
1656 /* Skip the MAC Output */
1657 msg_offset += (sizeof(u32) * 4);
1660 sep_read_context(ta_ctx, &msg_offset,
1661 &sctx->aes_private_ctx,
1662 sizeof(struct sep_aes_private_context));
1665 /* Copy to correct sg if this block had oddball pages */
1666 if (ta_ctx->dst_sg_hold)
1667 sep_copy_sg(ta_ctx->sep_used,
1669 ta_ctx->current_cypher_req->dst,
1670 ta_ctx->current_cypher_req->nbytes);
1673 * Copy the iv's back to the walk.iv
1674 * This is required for dm_crypt
1676 sep_dump_ivs(req, "got data block from sep\n");
1677 if ((ta_ctx->current_request == DES_CBC) &&
1678 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1680 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1681 "returning result iv to walk on DES\n");
1682 des_internal = (struct sep_des_internal_context *)
1683 sctx->des_private_ctx.ctx_buf;
1684 memcpy(ta_ctx->walk.iv,
1685 (void *)des_internal->iv_context,
1686 crypto_ablkcipher_ivsize(tfm));
1687 } else if ((ta_ctx->current_request == AES_CBC) &&
1688 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1690 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1691 "returning result iv to walk on AES\n");
1692 aes_internal = (struct sep_aes_internal_context *)
1693 sctx->aes_private_ctx.cbuff;
1694 memcpy(ta_ctx->walk.iv,
1695 (void *)aes_internal->aes_ctx_iv,
1696 crypto_ablkcipher_ivsize(tfm));
1699 /* finished, release everything */
1700 sep_crypto_release(sctx, ta_ctx, 0);
1702 pr_debug("crypto_post_op done\n");
1703 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1704 sctx->key_sent, tfm, sctx, ta_ctx);
/*
 * Post-interrupt handler for a hash-init reply: verifies the SEP
 * answered SEP_HASH_INIT_OPCODE, reads the hash private context back
 * into sctx, then signals completion via sep_crypto_release().
 */
1709 static u32 hash_init_post_op(struct sep_device *sep)
1713 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1714 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1715 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1716 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1717 "hash init post op\n");
1719 /* first bring msg from shared area to local area */
1720 memcpy(ta_ctx->msg, sep->shared_addr,
1721 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1723 u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
1727 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1729 sep_crypto_release(sctx, ta_ctx, u32_error);
/* Keep the SEP's hash context so later update/finish can resend it */
1734 sep_read_context(ta_ctx, &msg_offset,
1735 &sctx->hash_private_ctx,
1736 sizeof(struct sep_hash_private_context));
1738 /* Signal to crypto infrastructure and clear out */
1739 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
1740 sep_crypto_release(sctx, ta_ctx, 0);
/*
 * Post-interrupt handler for a hash-update reply: verifies the opcode,
 * reads the hash context back, and - when this update was the data half
 * of a finup - re-enqueues the request to run the finish half.
 */
1744 static u32 hash_update_post_op(struct sep_device *sep)
1748 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1749 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1750 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1751 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1752 "hash update post op\n");
1754 /* first bring msg from shared area to local area */
1755 memcpy(ta_ctx->msg, sep->shared_addr,
1756 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1758 u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
/* log text says "init" but this is the update path */
1762 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1764 sep_crypto_release(sctx, ta_ctx, u32_error);
1769 sep_read_context(ta_ctx, &msg_offset,
1770 &sctx->hash_private_ctx,
1771 sizeof(struct sep_hash_private_context));
1774 * Following is only for finup; if we just completed the
1775 * data portion of finup, we now need to kick off the
1776 * finish portion of finup.
1779 if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1781 /* first reset stage to HASH_FINUP_FINISH */
1782 ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1784 /* now enqueue the finish operation */
1785 spin_lock_irq(&queue_lock);
1786 u32_error = crypto_enqueue_request(&sep_queue,
1787 &ta_ctx->sep_used->current_hash_req->base);
1788 spin_unlock_irq(&queue_lock);
1790 if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1791 dev_warn(&ta_ctx->sep_used->pdev->dev,
1792 "spe cypher post op cant queue\n");
1793 sep_crypto_release(sctx, ta_ctx, u32_error);
1797 /* schedule the data send */
1798 u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1799 sep_dequeuer, (void *)&sep_queue);
1802 dev_warn(&ta_ctx->sep_used->pdev->dev,
1803 "cant submit work sep_crypto_block\n");
1804 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1809 /* Signal to crypto infrastructure and clear out */
1810 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1811 sep_crypto_release(sctx, ta_ctx, 0);
/*
 * Post-interrupt handler for a hash-finish reply: verifies the opcode
 * and copies the digest out of the message into req->result.
 */
1815 static u32 hash_final_post_op(struct sep_device *sep)
1820 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1821 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1822 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1823 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1824 "hash final post op\n");
1826 /* first bring msg from shared area to local area */
1827 memcpy(ta_ctx->msg, sep->shared_addr,
1828 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1830 u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1834 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1836 sep_crypto_release(sctx, ta_ctx, u32_error);
1840 /* Grab the result */
1841 if (ta_ctx->current_hash_req->result == NULL) {
1842 /* Oops, null buffer; error out here */
1843 dev_warn(&ta_ctx->sep_used->pdev->dev,
1844 "hash finish null buffer\n");
/* NOTE(review): -ENOMEM for a null result buffer; -EINVAL may fit better */
1845 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
/* result slot in the message is rounded up to whole u32 words */
1849 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1850 sizeof(u32)) * sizeof(u32);
1852 sep_read_msg(ta_ctx,
1853 ta_ctx->current_hash_req->result,
1854 crypto_ahash_digestsize(tfm), max_length,
1857 /* Signal to crypto infrastructure and clear out */
1858 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1859 sep_crypto_release(sctx, ta_ctx, 0);
/*
 * Post-interrupt handler for a single-shot hash (digest) reply:
 * verifies SEP_HASH_SINGLE_OPCODE and copies the digest into
 * req->result.  Mirrors hash_final_post_op for the one-message case.
 */
1863 static u32 hash_digest_post_op(struct sep_device *sep)
1868 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1869 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1870 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1871 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1872 "hash digest post op\n");
1874 /* first bring msg from shared area to local area */
1875 memcpy(ta_ctx->msg, sep->shared_addr,
1876 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1878 u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1882 dev_warn(&ta_ctx->sep_used->pdev->dev,
1883 "hash digest finish error %x\n", u32_error);
1885 sep_crypto_release(sctx, ta_ctx, u32_error);
1889 /* Grab the result */
1890 if (ta_ctx->current_hash_req->result == NULL) {
1891 /* Oops, null buffer; error out here */
1892 dev_warn(&ta_ctx->sep_used->pdev->dev,
1893 "hash digest finish null buffer\n");
1894 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
/* result slot in the message is rounded up to whole u32 words */
1898 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1899 sizeof(u32)) * sizeof(u32);
1901 sep_read_msg(ta_ctx,
1902 ta_ctx->current_hash_req->result,
1903 crypto_ahash_digestsize(tfm), max_length,
1906 /* Signal to crypto infrastructure and clear out */
1907 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1908 "hash digest finish post op done\n");
1910 sep_crypto_release(sctx, ta_ctx, 0);
1915 * The sep_finish function is the function that is scheduled (via tasklet)
1916 * by the interrupt service routine when the SEP sends an interrupt
1917 * This is only called by the interrupt handler as a tasklet.
1919 static void sep_finish(unsigned long data)
1921 struct sep_device *sep_dev;
1927 pr_debug("sep_finish called with null data\n");
1931 sep_dev = (struct sep_device *)data;
1932 if (sep_dev == NULL) {
1933 pr_debug("sep_finish; sep_dev is NULL\n");
1937 if (sep_dev->in_kernel == (u32)0) {
1938 dev_warn(&sep_dev->pdev->dev,
1939 "sep_finish; not in kernel operation\n");
1943 /* Did we really do a sep command prior to this? */
1944 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1945 &sep_dev->ta_ctx->call_status.status)) {
1947 dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
/* reply counter must have caught up with the send counter */
1952 if (sep_dev->send_ct != sep_dev->reply_ct) {
1953 dev_warn(&sep_dev->pdev->dev,
1954 "[PID%d] poll; no message came back\n",
1959 /* Check for error (In case time ran out) */
1960 if ((res != 0x0) && (res != 0x8)) {
1961 dev_warn(&sep_dev->pdev->dev,
1962 "[PID%d] poll; poll error GPR3 is %x\n",
1967 /* What kind of interrupt from sep was this? */
1968 res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
1970 dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
1973 /* Print request? */
/* bit 30 of GPR2 set means the SEP asked for a debug print */
1974 if ((res >> 30) & 0x1) {
1975 dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
1977 dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
1979 (char *)(sep_dev->shared_addr +
1980 SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
1984 /* Request for daemon (not currently in POR)? */
1986 dev_dbg(&sep_dev->pdev->dev,
1987 "[PID%d] sep request; ignoring\n",
1992 /* If we got here, then we have a reply to a sep command */
1994 dev_dbg(&sep_dev->pdev->dev,
1995 "[PID%d] sep reply to command; processing request: %x\n",
1996 current->pid, sep_dev->current_request);
/* Dispatch to the matching post-op handler for the finished request */
1998 switch (sep_dev->current_request) {
2003 res = crypto_post_op(sep_dev);
2009 switch (sep_dev->current_hash_stage) {
2011 res = hash_init_post_op(sep_dev);
2014 case HASH_FINUP_DATA:
2015 res = hash_update_post_op(sep_dev);
2017 case HASH_FINUP_FINISH:
2019 res = hash_final_post_op(sep_dev);
2022 res = hash_digest_post_op(sep_dev);
2025 pr_debug("sep - invalid stage for hash finish\n");
2029 pr_debug("sep - invalid request for finish\n");
2033 pr_debug("sep - finish returned error %x\n", res);
/*
 * Transform-init hook for the hash algorithms: reserves room for the
 * per-request task context (struct this_task_ctx) in each ahash request.
 */
2036 static int sep_hash_cra_init(struct crypto_tfm *tfm)
2038 const char *alg_name = crypto_tfm_alg_name(tfm);
2040 pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2042 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2043 sizeof(struct this_task_ctx));
/* Transform-exit hook; nothing to tear down, log only */
2047 static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2049 pr_debug("sep_hash_cra_exit\n");
/*
 * sep_hash_init - workqueue handler for the hash-init stage
 * @data: the struct ahash_request being serviced
 *
 * Builds the SEP_HASH_INIT_OPCODE message (opcode + hash mode word),
 * submits it, then polls up to WAIT_TIME seconds for the interrupt-side
 * post-op to flag completion through *ta_ctx->are_we_done_yet.
 */
2052 static void sep_hash_init(void *data)
2056 struct ahash_request *req;
2057 struct crypto_ahash *tfm;
2058 struct this_task_ctx *ta_ctx;
2059 struct sep_system_ctx *sctx;
2060 unsigned long end_time;
2061 int are_we_done_yet;
2063 req = (struct ahash_request *)data;
2064 tfm = crypto_ahash_reqtfm(req);
2065 sctx = crypto_ahash_ctx(tfm);
2066 ta_ctx = ahash_request_ctx(req);
2067 ta_ctx->sep_used = sep_dev;
/* completion flag lives on this stack frame; post-op writes it via ta_ctx */
2069 ta_ctx->are_we_done_yet = &are_we_done_yet;
2071 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2073 ta_ctx->current_hash_stage = HASH_INIT;
2074 /* opcode and mode */
2075 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2076 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2077 sizeof(u32), sizeof(u32), &msg_offset, 0);
2078 sep_end_msg(ta_ctx, msg_offset);
2080 are_we_done_yet = 0;
2081 result = sep_crypto_take_sep(ta_ctx);
2083 dev_warn(&ta_ctx->sep_used->pdev->dev,
2084 "sep_hash_init take sep failed\n");
2085 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2088 /* now we sit and wait up to a fixed time for completion */
2089 end_time = jiffies + (WAIT_TIME * HZ);
2090 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2093 /* Done waiting; still not done yet? */
2094 if (are_we_done_yet == 0) {
2095 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2096 "hash init never got done\n");
2097 sep_crypto_release(sctx, ta_ctx, -EINVAL);
/*
 * sep_hash_update - workqueue handler for the hash-update stage
 * @data: the struct ahash_request being serviced
 *
 * Splits the input into a head remainder (to top up the SEP's partial
 * block from the previous update), a DMA-able middle that is a whole
 * number of blocks, and a tail remainder.  Head and tail bytes are sent
 * inline in the message; the middle goes through the DCB/DMA tables.
 * Ends by submitting the message and polling for completion.
 */
2103 static void sep_hash_update(void *data)
2108 struct sep_hash_internal_context *int_ctx;
2112 int are_we_done_yet;
/* NOTE(review): static scratch buffer shared across calls - assumes
 * a single outstanding hash request; confirm before reuse. */
2115 static char small_buf[100];
2117 struct scatterlist *new_sg;
2118 ssize_t copy_result;
2119 struct ahash_request *req;
2120 struct crypto_ahash *tfm;
2121 struct this_task_ctx *ta_ctx;
2122 struct sep_system_ctx *sctx;
2123 unsigned long end_time;
2125 req = (struct ahash_request *)data;
2126 tfm = crypto_ahash_reqtfm(req);
2127 sctx = crypto_ahash_ctx(tfm);
2128 ta_ctx = ahash_request_ctx(req);
2129 ta_ctx->sep_used = sep_dev;
2131 ta_ctx->are_we_done_yet = &are_we_done_yet;
2133 /* length for queue status */
2134 ta_ctx->nbytes = req->nbytes;
2136 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2137 "sep_hash_update\n");
2138 ta_ctx->current_hash_stage = HASH_UPDATE;
2141 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
/* provisional tail; recomputed below once head_len is known */
2142 tail_len = req->nbytes % block_size;
2143 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
2144 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2145 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2147 /* Compute header/tail sizes */
/* head_len = bytes needed to complete the SEP's buffered partial block */
2148 int_ctx = (struct sep_hash_internal_context *)&sctx->
2149 hash_private_ctx.internal_context;
2150 head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2151 tail_len = (req->nbytes - head_len) % block_size;
2153 /* Make sure all pages are an even block */
2154 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2156 block_size, &new_sg, 1);
2158 if (int_error < 0) {
2159 dev_warn(&ta_ctx->sep_used->pdev->dev,
2160 "oddball pages error in crash update\n");
2161 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2163 } else if (int_error == 1) {
/* substitute sg built; remember it in src_sg_hold so it gets freed */
2164 ta_ctx->src_sg = new_sg;
2165 ta_ctx->src_sg_hold = new_sg;
2167 ta_ctx->src_sg = req->src;
2168 ta_ctx->src_sg_hold = NULL;
2171 src_ptr = sg_virt(ta_ctx->src_sg);
2173 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
/* DMA the whole-block middle portion only; head/tail go inline */
2178 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2179 ta_ctx->dcb_input_data.data_in_size =
2180 req->nbytes - (head_len + tail_len);
2181 ta_ctx->dcb_input_data.app_out_address = NULL;
2182 ta_ctx->dcb_input_data.block_size = block_size;
2183 ta_ctx->dcb_input_data.tail_block_size = 0;
2184 ta_ctx->dcb_input_data.is_applet = 0;
2185 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2186 ta_ctx->dcb_input_data.dst_sg = NULL;
2188 int_error = sep_create_dcb_dmatables_context_kernel(
2190 &ta_ctx->dcb_region,
2191 &ta_ctx->dmatables_region,
2193 &ta_ctx->dcb_input_data,
2196 dev_warn(&ta_ctx->sep_used->pdev->dev,
2197 "hash update dma table create failed\n");
2198 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2202 /* Construct message to SEP */
2203 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2209 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2212 /* Handle remainders */
/* head remainder: length word followed by a fixed 32-u32 data slot */
2215 sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2216 sizeof(u32), &msg_offset, 0);
2219 copy_result = sg_copy_to_buffer(
2221 sep_sg_nents(ta_ctx->src_sg),
2222 small_buf, head_len);
2224 if (copy_result != head_len) {
2225 dev_warn(&ta_ctx->sep_used->pdev->dev,
2226 "sg head copy failure in hash block\n");
2227 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2231 sep_write_msg(ta_ctx, small_buf, head_len,
2232 sizeof(u32) * 32, &msg_offset, 1);
2234 msg_offset += sizeof(u32) * 32;
/* tail remainder: same layout, data taken from the end of the input */
2238 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2239 sizeof(u32), &msg_offset, 0);
2242 copy_result = sep_copy_offset_sg(
2245 req->nbytes - tail_len,
2246 small_buf, tail_len);
2248 if (copy_result != tail_len) {
2249 dev_warn(&ta_ctx->sep_used->pdev->dev,
2250 "sg tail copy failure in hash block\n");
2251 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2255 sep_write_msg(ta_ctx, small_buf, tail_len,
2256 sizeof(u32) * 32, &msg_offset, 1);
2258 msg_offset += sizeof(u32) * 32;
/* resend the SEP's hash context saved by the previous stage */
2262 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2263 sizeof(struct sep_hash_private_context));
2265 sep_end_msg(ta_ctx, msg_offset);
2266 are_we_done_yet = 0;
2267 int_error = sep_crypto_take_sep(ta_ctx);
2269 dev_warn(&ta_ctx->sep_used->pdev->dev,
2270 "sep_hash_update take sep failed\n");
2271 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2274 /* now we sit and wait up to a fixed time for completion */
2275 end_time = jiffies + (WAIT_TIME * HZ);
2276 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2279 /* Done waiting; still not done yet? */
2280 if (are_we_done_yet == 0) {
2281 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2282 "hash update never got done\n");
2283 sep_crypto_release(sctx, ta_ctx, -EINVAL);
/*
 * sep_hash_final - workqueue handler for the hash-finish stage
 * @data: the struct ahash_request being serviced
 *
 * Sends SEP_HASH_FINISH_OPCODE together with the saved hash private
 * context; the digest itself is collected by hash_final_post_op once
 * the SEP interrupts.  Polls up to WAIT_TIME seconds for completion.
 */
2289 static void sep_hash_final(void *data)
2292 struct ahash_request *req;
2293 struct crypto_ahash *tfm;
2294 struct this_task_ctx *ta_ctx;
2295 struct sep_system_ctx *sctx;
2297 unsigned long end_time;
2298 int are_we_done_yet;
2300 req = (struct ahash_request *)data;
2301 tfm = crypto_ahash_reqtfm(req);
2302 sctx = crypto_ahash_ctx(tfm);
2303 ta_ctx = ahash_request_ctx(req);
2304 ta_ctx->sep_used = sep_dev;
2306 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2307 "sep_hash_final\n");
2308 ta_ctx->current_hash_stage = HASH_FINISH;
/* completion flag lives on this stack frame; post-op writes it via ta_ctx */
2310 ta_ctx->are_we_done_yet = &are_we_done_yet;
2312 /* opcode and mode */
2313 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
/* resend the SEP's hash context saved by the previous stage */
2316 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2317 sizeof(struct sep_hash_private_context));
2319 sep_end_msg(ta_ctx, msg_offset);
2320 are_we_done_yet = 0;
2321 result = sep_crypto_take_sep(ta_ctx);
2323 dev_warn(&ta_ctx->sep_used->pdev->dev,
2324 "sep_hash_final take sep failed\n");
2325 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2328 /* now we sit and wait up to a fixed time for completion */
2329 end_time = jiffies + (WAIT_TIME * HZ);
2330 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2333 /* Done waiting; still not done yet? */
2334 if (are_we_done_yet == 0) {
2335 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2336 "hash final job never got done\n");
2337 sep_crypto_release(sctx, ta_ctx, -EINVAL);
/*
 * sep_hash_digest - workqueue handler for a one-shot digest
 * @data: the struct ahash_request being serviced
 *
 * Single-message variant of update+finish: DMAs the whole-block body,
 * sends the tail remainder inline, and lets hash_digest_post_op copy
 * the digest out on completion.  No saved context is involved since
 * the SEP performs the entire hash in one operation.
 */
2343 static void sep_hash_digest(void *data)
2351 int are_we_done_yet;
/* NOTE(review): static scratch buffer shared across calls - assumes
 * a single outstanding hash request; confirm before reuse. */
2353 static char small_buf[100];
2354 struct scatterlist *new_sg;
2357 struct ahash_request *req;
2358 struct crypto_ahash *tfm;
2359 struct this_task_ctx *ta_ctx;
2360 struct sep_system_ctx *sctx;
2361 unsigned long end_time;
2363 req = (struct ahash_request *)data;
2364 tfm = crypto_ahash_reqtfm(req);
2365 sctx = crypto_ahash_ctx(tfm);
2366 ta_ctx = ahash_request_ctx(req);
2367 ta_ctx->sep_used = sep_dev;
2369 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2370 "sep_hash_digest\n");
2371 ta_ctx->current_hash_stage = HASH_DIGEST;
2373 ta_ctx->are_we_done_yet = &are_we_done_yet;
2375 /* length for queue status */
2376 ta_ctx->nbytes = req->nbytes;
2378 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2379 tail_len = req->nbytes % block_size;
2380 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2381 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2382 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2384 /* Make sure all pages are an even block */
2385 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2387 block_size, &new_sg, 1);
2389 if (int_error < 0) {
2390 dev_warn(&ta_ctx->sep_used->pdev->dev,
2391 "oddball pages error in crash update\n");
2392 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2394 } else if (int_error == 1) {
/* substitute sg built; remember it in src_sg_hold so it gets freed */
2395 ta_ctx->src_sg = new_sg;
2396 ta_ctx->src_sg_hold = new_sg;
2398 ta_ctx->src_sg = req->src;
2399 ta_ctx->src_sg_hold = NULL;
2402 src_ptr = sg_virt(ta_ctx->src_sg);
2404 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
/* DMA the whole-block portion only; the tail goes inline */
2409 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2410 ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2411 ta_ctx->dcb_input_data.app_out_address = NULL;
2412 ta_ctx->dcb_input_data.block_size = block_size;
2413 ta_ctx->dcb_input_data.tail_block_size = 0;
2414 ta_ctx->dcb_input_data.is_applet = 0;
2415 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2416 ta_ctx->dcb_input_data.dst_sg = NULL;
2418 int_error = sep_create_dcb_dmatables_context_kernel(
2420 &ta_ctx->dcb_region,
2421 &ta_ctx->dmatables_region,
2423 &ta_ctx->dcb_input_data,
2426 dev_warn(&ta_ctx->sep_used->pdev->dev,
2427 "hash update dma table create failed\n");
2428 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2432 /* Construct message to SEP */
2433 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2434 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2435 sizeof(u32), sizeof(u32), &msg_offset, 0);
2441 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
/* tail remainder: length word followed by a fixed 32-u32 data slot */
2445 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2446 sizeof(u32), &msg_offset, 0);
2449 copy_result = sep_copy_offset_sg(
2452 req->nbytes - tail_len,
2453 small_buf, tail_len);
2455 if (copy_result != tail_len) {
2456 dev_warn(&ta_ctx->sep_used->pdev->dev,
2457 "sg tail copy failure in hash block\n");
2458 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2462 sep_write_msg(ta_ctx, small_buf, tail_len,
2463 sizeof(u32) * 32, &msg_offset, 1);
2465 msg_offset += sizeof(u32) * 32;
2468 sep_end_msg(ta_ctx, msg_offset);
2470 are_we_done_yet = 0;
2471 result = sep_crypto_take_sep(ta_ctx);
2473 dev_warn(&ta_ctx->sep_used->pdev->dev,
2474 "sep_hash_digest take sep failed\n");
2475 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2478 /* now we sit and wait up to a fixed time for completion */
2479 end_time = jiffies + (WAIT_TIME * HZ);
2480 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2483 /* Done waiting; still not done yet? */
2484 if (are_we_done_yet == 0) {
2485 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2486 "hash digest job never got done\n");
2487 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2494 * This is what is called by each of the API's provided
2495 * in the kernel crypto descriptors. It is run in a process
2496 * context using the kernel workqueues. Therefore it can
/*
 * Pops one request off the shared sep_queue (under queue_lock),
 * distinguishes cipher from hash via cra_type, and dispatches to the
 * matching sep_crypto_block / sep_hash_* handler for the current stage.
 */
2499 static void sep_dequeuer(void *data)
2501 struct crypto_queue *this_queue;
2502 struct crypto_async_request *async_req;
2503 struct crypto_async_request *backlog;
2504 struct ablkcipher_request *cypher_req;
2505 struct ahash_request *hash_req;
2506 struct sep_system_ctx *sctx;
2507 struct crypto_ahash *hash_tfm;
2508 struct this_task_ctx *ta_ctx;
2511 this_queue = (struct crypto_queue *)data;
2513 spin_lock_irq(&queue_lock);
2514 backlog = crypto_get_backlog(this_queue);
2515 async_req = crypto_dequeue_request(this_queue);
2516 spin_unlock_irq(&queue_lock);
2519 pr_debug("sep crypto queue is empty\n");
/* a backlogged request gets notified that it is now in progress */
2524 pr_debug("sep crypto backlog set\n");
2525 if (backlog->complete)
2526 backlog->complete(backlog, -EINPROGRESS);
/* defensively validate the tfm chain before dereferencing cra_type */
2530 if (!async_req->tfm) {
2531 pr_debug("sep crypto queue null tfm\n");
2535 if (!async_req->tfm->__crt_alg) {
2536 pr_debug("sep crypto queue null __crt_alg\n");
2540 if (!async_req->tfm->__crt_alg->cra_type) {
2541 pr_debug("sep crypto queue null cra_type\n");
2545 /* we have stuff in the queue */
2546 if (async_req->tfm->__crt_alg->cra_type !=
2547 &crypto_ahash_type) {
2548 /* This is for a cypher */
2549 pr_debug("sep crypto queue doing cipher\n");
2550 cypher_req = container_of(async_req,
2551 struct ablkcipher_request,
2554 pr_debug("sep crypto queue null cypher_req\n");
2558 sep_crypto_block((void *)cypher_req);
2561 /* This is a hash */
2562 pr_debug("sep crypto queue doing hash\n");
2564 * This is a bit more complex than cipher; we
2565 * need to figure out what type of operation
2567 hash_req = ahash_request_cast(async_req);
2569 pr_debug("sep crypto queue null hash_req\n");
2573 hash_tfm = crypto_ahash_reqtfm(hash_req);
2575 pr_debug("sep crypto queue null hash_tfm\n");
2580 sctx = crypto_ahash_ctx(hash_tfm);
2582 pr_debug("sep crypto queue null sctx\n");
2586 ta_ctx = ahash_request_ctx(hash_req);
/* dispatch on the stage recorded by the sep_sha1_* entry points */
2588 if (ta_ctx->current_hash_stage == HASH_INIT) {
2589 pr_debug("sep crypto queue hash init\n");
2590 sep_hash_init((void *)hash_req);
2592 } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2593 pr_debug("sep crypto queue hash update\n");
2594 sep_hash_update((void *)hash_req);
2596 } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2597 pr_debug("sep crypto queue hash final\n");
2598 sep_hash_final((void *)hash_req);
2600 } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2601 pr_debug("sep crypto queue hash digest\n");
2602 sep_hash_digest((void *)hash_req);
/* finup reuses the update handler for its data half... */
2604 } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2605 pr_debug("sep crypto queue hash digest\n");
2606 sep_hash_update((void *)hash_req);
/* ...and the final handler for its finish half */
2608 } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2609 pr_debug("sep crypto queue hash digest\n");
2610 sep_hash_final((void *)hash_req);
2613 pr_debug("sep crypto queue hash oops nothing\n");
/*
 * ahash .init entry point for SHA-1: records the request/stage in the
 * per-request task context, enqueues the request on sep_queue, and
 * kicks the dequeuer workqueue.  Returns the crypto_enqueue_request
 * result (typically -EINPROGRESS) to the caller.
 */
2619 static int sep_sha1_init(struct ahash_request *req)
2623 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2625 pr_debug("sep - doing sha1 init\n");
2627 /* Clear out task context */
2628 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2630 ta_ctx->sep_used = sep_dev;
2631 ta_ctx->current_request = SHA1;
2632 ta_ctx->current_hash_req = req;
2633 ta_ctx->current_cypher_req = NULL;
2634 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2635 ta_ctx->current_hash_stage = HASH_INIT;
2637 /* lock necessary so that only one entity touches the queues */
2638 spin_lock_irq(&queue_lock);
2639 error = crypto_enqueue_request(&sep_queue, &req->base);
2641 if ((error != 0) && (error != -EINPROGRESS))
2642 pr_debug(" sep - crypto enqueue failed: %x\n",
2644 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2645 sep_dequeuer, (void *)&sep_queue);
2647 pr_debug(" sep - workqueue submit failed: %x\n",
2649 spin_unlock_irq(&queue_lock);
2650 /* We return result of crypto enqueue */
2654 static int sep_sha1_update(struct ahash_request *req)
2658 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2660 pr_debug("sep - doing sha1 update\n");
2662 ta_ctx->sep_used = sep_dev;
2663 ta_ctx->current_request = SHA1;
2664 ta_ctx->current_hash_req = req;
2665 ta_ctx->current_cypher_req = NULL;
2666 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2667 ta_ctx->current_hash_stage = HASH_UPDATE;
2669 /* lock necessary so that only one entity touches the queues */
2670 spin_lock_irq(&queue_lock);
2671 error = crypto_enqueue_request(&sep_queue, &req->base);
2673 if ((error != 0) && (error != -EINPROGRESS))
2674 pr_debug(" sep - crypto enqueue failed: %x\n",
2676 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2677 sep_dequeuer, (void *)&sep_queue);
2679 pr_debug(" sep - workqueue submit failed: %x\n",
2681 spin_unlock_irq(&queue_lock);
2682 /* We return result of crypto enqueue */
2686 static int sep_sha1_final(struct ahash_request *req)
2690 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2691 pr_debug("sep - doing sha1 final\n");
2693 ta_ctx->sep_used = sep_dev;
2694 ta_ctx->current_request = SHA1;
2695 ta_ctx->current_hash_req = req;
2696 ta_ctx->current_cypher_req = NULL;
2697 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2698 ta_ctx->current_hash_stage = HASH_FINISH;
2700 /* lock necessary so that only one entity touches the queues */
2701 spin_lock_irq(&queue_lock);
2702 error = crypto_enqueue_request(&sep_queue, &req->base);
2704 if ((error != 0) && (error != -EINPROGRESS))
2705 pr_debug(" sep - crypto enqueue failed: %x\n",
2707 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2708 sep_dequeuer, (void *)&sep_queue);
2710 pr_debug(" sep - workqueue submit failed: %x\n",
2712 spin_unlock_irq(&queue_lock);
2713 /* We return result of crypto enqueue */
2717 static int sep_sha1_digest(struct ahash_request *req)
2721 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2722 pr_debug("sep - doing sha1 digest\n");
2724 /* Clear out task context */
2725 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2727 ta_ctx->sep_used = sep_dev;
2728 ta_ctx->current_request = SHA1;
2729 ta_ctx->current_hash_req = req;
2730 ta_ctx->current_cypher_req = NULL;
2731 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2732 ta_ctx->current_hash_stage = HASH_DIGEST;
2734 /* lock necessary so that only one entity touches the queues */
2735 spin_lock_irq(&queue_lock);
2736 error = crypto_enqueue_request(&sep_queue, &req->base);
2738 if ((error != 0) && (error != -EINPROGRESS))
2739 pr_debug(" sep - crypto enqueue failed: %x\n",
2741 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2742 sep_dequeuer, (void *)&sep_queue);
2744 pr_debug(" sep - workqueue submit failed: %x\n",
2746 spin_unlock_irq(&queue_lock);
2747 /* We return result of crypto enqueue */
2751 static int sep_sha1_finup(struct ahash_request *req)
2755 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2756 pr_debug("sep - doing sha1 finup\n");
2758 ta_ctx->sep_used = sep_dev;
2759 ta_ctx->current_request = SHA1;
2760 ta_ctx->current_hash_req = req;
2761 ta_ctx->current_cypher_req = NULL;
2762 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2763 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2765 /* lock necessary so that only one entity touches the queues */
2766 spin_lock_irq(&queue_lock);
2767 error = crypto_enqueue_request(&sep_queue, &req->base);
2769 if ((error != 0) && (error != -EINPROGRESS))
2770 pr_debug(" sep - crypto enqueue failed: %x\n",
2772 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2773 sep_dequeuer, (void *)&sep_queue);
2775 pr_debug(" sep - workqueue submit failed: %x\n",
2777 spin_unlock_irq(&queue_lock);
2778 /* We return result of crypto enqueue */
2782 static int sep_md5_init(struct ahash_request *req)
2786 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2787 pr_debug("sep - doing md5 init\n");
2789 /* Clear out task context */
2790 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2792 ta_ctx->sep_used = sep_dev;
2793 ta_ctx->current_request = MD5;
2794 ta_ctx->current_hash_req = req;
2795 ta_ctx->current_cypher_req = NULL;
2796 ta_ctx->hash_opmode = SEP_HASH_MD5;
2797 ta_ctx->current_hash_stage = HASH_INIT;
2799 /* lock necessary so that only one entity touches the queues */
2800 spin_lock_irq(&queue_lock);
2801 error = crypto_enqueue_request(&sep_queue, &req->base);
2803 if ((error != 0) && (error != -EINPROGRESS))
2804 pr_debug(" sep - crypto enqueue failed: %x\n",
2806 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2807 sep_dequeuer, (void *)&sep_queue);
2809 pr_debug(" sep - workqueue submit failed: %x\n",
2811 spin_unlock_irq(&queue_lock);
2812 /* We return result of crypto enqueue */
2816 static int sep_md5_update(struct ahash_request *req)
2820 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2821 pr_debug("sep - doing md5 update\n");
2823 ta_ctx->sep_used = sep_dev;
2824 ta_ctx->current_request = MD5;
2825 ta_ctx->current_hash_req = req;
2826 ta_ctx->current_cypher_req = NULL;
2827 ta_ctx->hash_opmode = SEP_HASH_MD5;
2828 ta_ctx->current_hash_stage = HASH_UPDATE;
2830 /* lock necessary so that only one entity touches the queues */
2831 spin_lock_irq(&queue_lock);
2832 error = crypto_enqueue_request(&sep_queue, &req->base);
2834 if ((error != 0) && (error != -EINPROGRESS))
2835 pr_debug(" sep - crypto enqueue failed: %x\n",
2837 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2838 sep_dequeuer, (void *)&sep_queue);
2840 pr_debug(" sep - workqueue submit failed: %x\n",
2842 spin_unlock_irq(&queue_lock);
2843 /* We return result of crypto enqueue */
2847 static int sep_md5_final(struct ahash_request *req)
2851 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2852 pr_debug("sep - doing md5 final\n");
2854 ta_ctx->sep_used = sep_dev;
2855 ta_ctx->current_request = MD5;
2856 ta_ctx->current_hash_req = req;
2857 ta_ctx->current_cypher_req = NULL;
2858 ta_ctx->hash_opmode = SEP_HASH_MD5;
2859 ta_ctx->current_hash_stage = HASH_FINISH;
2861 /* lock necessary so that only one entity touches the queues */
2862 spin_lock_irq(&queue_lock);
2863 error = crypto_enqueue_request(&sep_queue, &req->base);
2865 if ((error != 0) && (error != -EINPROGRESS))
2866 pr_debug(" sep - crypto enqueue failed: %x\n",
2868 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2869 sep_dequeuer, (void *)&sep_queue);
2871 pr_debug(" sep - workqueue submit failed: %x\n",
2873 spin_unlock_irq(&queue_lock);
2874 /* We return result of crypto enqueue */
2878 static int sep_md5_digest(struct ahash_request *req)
2882 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2884 pr_debug("sep - doing md5 digest\n");
2886 /* Clear out task context */
2887 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2889 ta_ctx->sep_used = sep_dev;
2890 ta_ctx->current_request = MD5;
2891 ta_ctx->current_hash_req = req;
2892 ta_ctx->current_cypher_req = NULL;
2893 ta_ctx->hash_opmode = SEP_HASH_MD5;
2894 ta_ctx->current_hash_stage = HASH_DIGEST;
2896 /* lock necessary so that only one entity touches the queues */
2897 spin_lock_irq(&queue_lock);
2898 error = crypto_enqueue_request(&sep_queue, &req->base);
2900 if ((error != 0) && (error != -EINPROGRESS))
2901 pr_debug(" sep - crypto enqueue failed: %x\n",
2903 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2904 sep_dequeuer, (void *)&sep_queue);
2906 pr_debug(" sep - workqueue submit failed: %x\n",
2908 spin_unlock_irq(&queue_lock);
2909 /* We return result of crypto enqueue */
2913 static int sep_md5_finup(struct ahash_request *req)
2917 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2919 pr_debug("sep - doing md5 finup\n");
2921 ta_ctx->sep_used = sep_dev;
2922 ta_ctx->current_request = MD5;
2923 ta_ctx->current_hash_req = req;
2924 ta_ctx->current_cypher_req = NULL;
2925 ta_ctx->hash_opmode = SEP_HASH_MD5;
2926 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2928 /* lock necessary so that only one entity touches the queues */
2929 spin_lock_irq(&queue_lock);
2930 error = crypto_enqueue_request(&sep_queue, &req->base);
2932 if ((error != 0) && (error != -EINPROGRESS))
2933 pr_debug(" sep - crypto enqueue failed: %x\n",
2935 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2936 sep_dequeuer, (void *)&sep_queue);
2938 pr_debug(" sep - workqueue submit failed: %x\n",
2940 spin_unlock_irq(&queue_lock);
2941 /* We return result of crypto enqueue */
2945 static int sep_sha224_init(struct ahash_request *req)
2949 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2950 pr_debug("sep - doing sha224 init\n");
2952 /* Clear out task context */
2953 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2955 ta_ctx->sep_used = sep_dev;
2956 ta_ctx->current_request = SHA224;
2957 ta_ctx->current_hash_req = req;
2958 ta_ctx->current_cypher_req = NULL;
2959 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2960 ta_ctx->current_hash_stage = HASH_INIT;
2962 /* lock necessary so that only one entity touches the queues */
2963 spin_lock_irq(&queue_lock);
2964 error = crypto_enqueue_request(&sep_queue, &req->base);
2966 if ((error != 0) && (error != -EINPROGRESS))
2967 pr_debug(" sep - crypto enqueue failed: %x\n",
2969 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2970 sep_dequeuer, (void *)&sep_queue);
2972 pr_debug(" sep - workqueue submit failed: %x\n",
2974 spin_unlock_irq(&queue_lock);
2975 /* We return result of crypto enqueue */
2979 static int sep_sha224_update(struct ahash_request *req)
2983 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2984 pr_debug("sep - doing sha224 update\n");
2986 ta_ctx->sep_used = sep_dev;
2987 ta_ctx->current_request = SHA224;
2988 ta_ctx->current_hash_req = req;
2989 ta_ctx->current_cypher_req = NULL;
2990 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2991 ta_ctx->current_hash_stage = HASH_UPDATE;
2993 /* lock necessary so that only one entity touches the queues */
2994 spin_lock_irq(&queue_lock);
2995 error = crypto_enqueue_request(&sep_queue, &req->base);
2997 if ((error != 0) && (error != -EINPROGRESS))
2998 pr_debug(" sep - crypto enqueue failed: %x\n",
3000 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3001 sep_dequeuer, (void *)&sep_queue);
3003 pr_debug(" sep - workqueue submit failed: %x\n",
3005 spin_unlock_irq(&queue_lock);
3006 /* We return result of crypto enqueue */
3010 static int sep_sha224_final(struct ahash_request *req)
3014 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3015 pr_debug("sep - doing sha224 final\n");
3017 ta_ctx->sep_used = sep_dev;
3018 ta_ctx->current_request = SHA224;
3019 ta_ctx->current_hash_req = req;
3020 ta_ctx->current_cypher_req = NULL;
3021 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3022 ta_ctx->current_hash_stage = HASH_FINISH;
3024 /* lock necessary so that only one entity touches the queues */
3025 spin_lock_irq(&queue_lock);
3026 error = crypto_enqueue_request(&sep_queue, &req->base);
3028 if ((error != 0) && (error != -EINPROGRESS))
3029 pr_debug(" sep - crypto enqueue failed: %x\n",
3031 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3032 sep_dequeuer, (void *)&sep_queue);
3034 pr_debug(" sep - workqueue submit failed: %x\n",
3036 spin_unlock_irq(&queue_lock);
3037 /* We return result of crypto enqueue */
3041 static int sep_sha224_digest(struct ahash_request *req)
3045 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3047 pr_debug("sep - doing sha224 digest\n");
3049 /* Clear out task context */
3050 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3052 ta_ctx->sep_used = sep_dev;
3053 ta_ctx->current_request = SHA224;
3054 ta_ctx->current_hash_req = req;
3055 ta_ctx->current_cypher_req = NULL;
3056 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3057 ta_ctx->current_hash_stage = HASH_DIGEST;
3059 /* lock necessary so that only one entity touches the queues */
3060 spin_lock_irq(&queue_lock);
3061 error = crypto_enqueue_request(&sep_queue, &req->base);
3063 if ((error != 0) && (error != -EINPROGRESS))
3064 pr_debug(" sep - crypto enqueue failed: %x\n",
3066 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3067 sep_dequeuer, (void *)&sep_queue);
3069 pr_debug(" sep - workqueue submit failed: %x\n",
3071 spin_unlock_irq(&queue_lock);
3072 /* We return result of crypto enqueue */
3076 static int sep_sha224_finup(struct ahash_request *req)
3080 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3082 pr_debug("sep - doing sha224 finup\n");
3084 ta_ctx->sep_used = sep_dev;
3085 ta_ctx->current_request = SHA224;
3086 ta_ctx->current_hash_req = req;
3087 ta_ctx->current_cypher_req = NULL;
3088 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3089 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3091 /* lock necessary so that only one entity touches the queues */
3092 spin_lock_irq(&queue_lock);
3093 error = crypto_enqueue_request(&sep_queue, &req->base);
3095 if ((error != 0) && (error != -EINPROGRESS))
3096 pr_debug(" sep - crypto enqueue failed: %x\n",
3098 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3099 sep_dequeuer, (void *)&sep_queue);
3101 pr_debug(" sep - workqueue submit failed: %x\n",
3103 spin_unlock_irq(&queue_lock);
3104 /* We return result of crypto enqueue */
3108 static int sep_sha256_init(struct ahash_request *req)
3112 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3113 pr_debug("sep - doing sha256 init\n");
3115 /* Clear out task context */
3116 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3118 ta_ctx->sep_used = sep_dev;
3119 ta_ctx->current_request = SHA256;
3120 ta_ctx->current_hash_req = req;
3121 ta_ctx->current_cypher_req = NULL;
3122 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3123 ta_ctx->current_hash_stage = HASH_INIT;
3125 /* lock necessary so that only one entity touches the queues */
3126 spin_lock_irq(&queue_lock);
3127 error = crypto_enqueue_request(&sep_queue, &req->base);
3129 if ((error != 0) && (error != -EINPROGRESS))
3130 pr_debug(" sep - crypto enqueue failed: %x\n",
3132 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3133 sep_dequeuer, (void *)&sep_queue);
3135 pr_debug(" sep - workqueue submit failed: %x\n",
3137 spin_unlock_irq(&queue_lock);
3138 /* We return result of crypto enqueue */
3142 static int sep_sha256_update(struct ahash_request *req)
3146 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3147 pr_debug("sep - doing sha256 update\n");
3149 ta_ctx->sep_used = sep_dev;
3150 ta_ctx->current_request = SHA256;
3151 ta_ctx->current_hash_req = req;
3152 ta_ctx->current_cypher_req = NULL;
3153 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3154 ta_ctx->current_hash_stage = HASH_UPDATE;
3156 /* lock necessary so that only one entity touches the queues */
3157 spin_lock_irq(&queue_lock);
3158 error = crypto_enqueue_request(&sep_queue, &req->base);
3160 if ((error != 0) && (error != -EINPROGRESS))
3161 pr_debug(" sep - crypto enqueue failed: %x\n",
3163 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3164 sep_dequeuer, (void *)&sep_queue);
3166 pr_debug(" sep - workqueue submit failed: %x\n",
3168 spin_unlock_irq(&queue_lock);
3169 /* We return result of crypto enqueue */
3173 static int sep_sha256_final(struct ahash_request *req)
3177 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3178 pr_debug("sep - doing sha256 final\n");
3180 ta_ctx->sep_used = sep_dev;
3181 ta_ctx->current_request = SHA256;
3182 ta_ctx->current_hash_req = req;
3183 ta_ctx->current_cypher_req = NULL;
3184 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3185 ta_ctx->current_hash_stage = HASH_FINISH;
3187 /* lock necessary so that only one entity touches the queues */
3188 spin_lock_irq(&queue_lock);
3189 error = crypto_enqueue_request(&sep_queue, &req->base);
3191 if ((error != 0) && (error != -EINPROGRESS))
3192 pr_debug(" sep - crypto enqueue failed: %x\n",
3194 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3195 sep_dequeuer, (void *)&sep_queue);
3197 pr_debug(" sep - workqueue submit failed: %x\n",
3199 spin_unlock_irq(&queue_lock);
3200 /* We return result of crypto enqueue */
3204 static int sep_sha256_digest(struct ahash_request *req)
3208 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3210 pr_debug("sep - doing sha256 digest\n");
3212 /* Clear out task context */
3213 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3215 ta_ctx->sep_used = sep_dev;
3216 ta_ctx->current_request = SHA256;
3217 ta_ctx->current_hash_req = req;
3218 ta_ctx->current_cypher_req = NULL;
3219 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3220 ta_ctx->current_hash_stage = HASH_DIGEST;
3222 /* lock necessary so that only one entity touches the queues */
3223 spin_lock_irq(&queue_lock);
3224 error = crypto_enqueue_request(&sep_queue, &req->base);
3226 if ((error != 0) && (error != -EINPROGRESS))
3227 pr_debug(" sep - crypto enqueue failed: %x\n",
3229 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3230 sep_dequeuer, (void *)&sep_queue);
3232 pr_debug(" sep - workqueue submit failed: %x\n",
3234 spin_unlock_irq(&queue_lock);
3235 /* We return result of crypto enqueue */
3239 static int sep_sha256_finup(struct ahash_request *req)
3243 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3245 pr_debug("sep - doing sha256 finup\n");
3247 ta_ctx->sep_used = sep_dev;
3248 ta_ctx->current_request = SHA256;
3249 ta_ctx->current_hash_req = req;
3250 ta_ctx->current_cypher_req = NULL;
3251 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3252 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3254 /* lock necessary so that only one entity touches the queues */
3255 spin_lock_irq(&queue_lock);
3256 error = crypto_enqueue_request(&sep_queue, &req->base);
3258 if ((error != 0) && (error != -EINPROGRESS))
3259 pr_debug(" sep - crypto enqueue failed: %x\n",
3261 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3262 sep_dequeuer, (void *)&sep_queue);
3264 pr_debug(" sep - workqueue submit failed: %x\n",
3266 spin_unlock_irq(&queue_lock);
3267 /* We return result of crypto enqueue */
3271 static int sep_crypto_init(struct crypto_tfm *tfm)
3273 const char *alg_name = crypto_tfm_alg_name(tfm);
3275 if (alg_name == NULL)
3276 pr_debug("sep_crypto_init alg is NULL\n");
3278 pr_debug("sep_crypto_init alg is %s\n", alg_name);
3280 tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
/* tfm exit hook: nothing to release, trace only. */
static void sep_crypto_exit(struct crypto_tfm *tfm)
{
	pr_debug("sep_crypto_exit\n");
}
3289 static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3290 unsigned int keylen)
3292 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3294 pr_debug("sep aes setkey\n");
3296 pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
3298 case SEP_AES_KEY_128_SIZE:
3299 sctx->aes_key_size = AES_128;
3301 case SEP_AES_KEY_192_SIZE:
3302 sctx->aes_key_size = AES_192;
3304 case SEP_AES_KEY_256_SIZE:
3305 sctx->aes_key_size = AES_256;
3307 case SEP_AES_KEY_512_SIZE:
3308 sctx->aes_key_size = AES_512;
3311 pr_debug("invalid sep aes key size %x\n",
3316 memset(&sctx->key.aes, 0, sizeof(u32) *
3317 SEP_AES_MAX_KEY_SIZE_WORDS);
3318 memcpy(&sctx->key.aes, key, keylen);
3319 sctx->keylen = keylen;
3320 /* Indicate to encrypt/decrypt function to send key to SEP */
3326 static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3330 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3332 pr_debug("sep - doing aes ecb encrypt\n");
3334 /* Clear out task context */
3335 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3337 ta_ctx->sep_used = sep_dev;
3338 ta_ctx->current_request = AES_ECB;
3339 ta_ctx->current_hash_req = NULL;
3340 ta_ctx->current_cypher_req = req;
3341 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3342 ta_ctx->aes_opmode = SEP_AES_ECB;
3343 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3344 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3346 /* lock necessary so that only one entity touches the queues */
3347 spin_lock_irq(&queue_lock);
3348 error = crypto_enqueue_request(&sep_queue, &req->base);
3350 if ((error != 0) && (error != -EINPROGRESS))
3351 pr_debug(" sep - crypto enqueue failed: %x\n",
3353 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3354 sep_dequeuer, (void *)&sep_queue);
3356 pr_debug(" sep - workqueue submit failed: %x\n",
3358 spin_unlock_irq(&queue_lock);
3359 /* We return result of crypto enqueue */
3363 static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3367 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3369 pr_debug("sep - doing aes ecb decrypt\n");
3371 /* Clear out task context */
3372 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3374 ta_ctx->sep_used = sep_dev;
3375 ta_ctx->current_request = AES_ECB;
3376 ta_ctx->current_hash_req = NULL;
3377 ta_ctx->current_cypher_req = req;
3378 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3379 ta_ctx->aes_opmode = SEP_AES_ECB;
3380 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3381 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3383 /* lock necessary so that only one entity touches the queues */
3384 spin_lock_irq(&queue_lock);
3385 error = crypto_enqueue_request(&sep_queue, &req->base);
3387 if ((error != 0) && (error != -EINPROGRESS))
3388 pr_debug(" sep - crypto enqueue failed: %x\n",
3390 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3391 sep_dequeuer, (void *)&sep_queue);
3393 pr_debug(" sep - workqueue submit failed: %x\n",
3395 spin_unlock_irq(&queue_lock);
3396 /* We return result of crypto enqueue */
3400 static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3404 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3405 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3406 crypto_ablkcipher_reqtfm(req));
3408 pr_debug("sep - doing aes cbc encrypt\n");
3410 /* Clear out task context */
3411 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3413 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3414 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3416 ta_ctx->sep_used = sep_dev;
3417 ta_ctx->current_request = AES_CBC;
3418 ta_ctx->current_hash_req = NULL;
3419 ta_ctx->current_cypher_req = req;
3420 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3421 ta_ctx->aes_opmode = SEP_AES_CBC;
3422 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3423 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3425 /* lock necessary so that only one entity touches the queues */
3426 spin_lock_irq(&queue_lock);
3427 error = crypto_enqueue_request(&sep_queue, &req->base);
3429 if ((error != 0) && (error != -EINPROGRESS))
3430 pr_debug(" sep - crypto enqueue failed: %x\n",
3432 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3433 sep_dequeuer, (void *)&sep_queue);
3435 pr_debug(" sep - workqueue submit failed: %x\n",
3437 spin_unlock_irq(&queue_lock);
3438 /* We return result of crypto enqueue */
3442 static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3446 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3447 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3448 crypto_ablkcipher_reqtfm(req));
3450 pr_debug("sep - doing aes cbc decrypt\n");
3452 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3453 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3455 /* Clear out task context */
3456 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3458 ta_ctx->sep_used = sep_dev;
3459 ta_ctx->current_request = AES_CBC;
3460 ta_ctx->current_hash_req = NULL;
3461 ta_ctx->current_cypher_req = req;
3462 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3463 ta_ctx->aes_opmode = SEP_AES_CBC;
3464 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3465 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3467 /* lock necessary so that only one entity touches the queues */
3468 spin_lock_irq(&queue_lock);
3469 error = crypto_enqueue_request(&sep_queue, &req->base);
3471 if ((error != 0) && (error != -EINPROGRESS))
3472 pr_debug(" sep - crypto enqueue failed: %x\n",
3474 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3475 sep_dequeuer, (void *)&sep_queue);
3477 pr_debug(" sep - workqueue submit failed: %x\n",
3479 spin_unlock_irq(&queue_lock);
3480 /* We return result of crypto enqueue */
3484 static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3485 unsigned int keylen)
3487 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3488 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
3489 u32 *flags = &ctfm->crt_flags;
3491 pr_debug("sep des setkey\n");
3495 sctx->des_nbr_keys = DES_KEY_1;
3497 case DES_KEY_SIZE * 2:
3498 sctx->des_nbr_keys = DES_KEY_2;
3500 case DES_KEY_SIZE * 3:
3501 sctx->des_nbr_keys = DES_KEY_3;
3504 pr_debug("invalid key size %x\n",
3509 if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
3510 (sep_weak_key(key, keylen))) {
3512 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
3513 pr_debug("weak key\n");
3517 memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
3518 memcpy(&sctx->key.des.key1, key, keylen);
3519 sctx->keylen = keylen;
3520 /* Indicate to encrypt/decrypt function to send key to SEP */
3526 static int sep_des_ebc_encrypt(struct ablkcipher_request *req)
3530 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3532 pr_debug("sep - doing des ecb encrypt\n");
3534 /* Clear out task context */
3535 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3537 ta_ctx->sep_used = sep_dev;
3538 ta_ctx->current_request = DES_ECB;
3539 ta_ctx->current_hash_req = NULL;
3540 ta_ctx->current_cypher_req = req;
3541 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3542 ta_ctx->des_opmode = SEP_DES_ECB;
3543 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3544 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3546 /* lock necessary so that only one entity touches the queues */
3547 spin_lock_irq(&queue_lock);
3548 error = crypto_enqueue_request(&sep_queue, &req->base);
3550 if ((error != 0) && (error != -EINPROGRESS))
3551 pr_debug(" sep - crypto enqueue failed: %x\n",
3553 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3554 sep_dequeuer, (void *)&sep_queue);
3556 pr_debug(" sep - workqueue submit failed: %x\n",
3558 spin_unlock_irq(&queue_lock);
3559 /* We return result of crypto enqueue */
3563 static int sep_des_ebc_decrypt(struct ablkcipher_request *req)
3567 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3569 pr_debug("sep - doing des ecb decrypt\n");
3571 /* Clear out task context */
3572 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3574 ta_ctx->sep_used = sep_dev;
3575 ta_ctx->current_request = DES_ECB;
3576 ta_ctx->current_hash_req = NULL;
3577 ta_ctx->current_cypher_req = req;
3578 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3579 ta_ctx->des_opmode = SEP_DES_ECB;
3580 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3581 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3583 /* lock necessary so that only one entity touches the queues */
3584 spin_lock_irq(&queue_lock);
3585 error = crypto_enqueue_request(&sep_queue, &req->base);
3587 if ((error != 0) && (error != -EINPROGRESS))
3588 pr_debug(" sep - crypto enqueue failed: %x\n",
3590 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3591 sep_dequeuer, (void *)&sep_queue);
3593 pr_debug(" sep - workqueue submit failed: %x\n",
3595 spin_unlock_irq(&queue_lock);
3596 /* We return result of crypto enqueue */
3600 static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
3604 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3606 pr_debug("sep - doing des cbc encrypt\n");
3608 /* Clear out task context */
3609 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3611 ta_ctx->sep_used = sep_dev;
3612 ta_ctx->current_request = DES_CBC;
3613 ta_ctx->current_hash_req = NULL;
3614 ta_ctx->current_cypher_req = req;
3615 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3616 ta_ctx->des_opmode = SEP_DES_CBC;
3617 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3618 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3620 /* lock necessary so that only one entity touches the queues */
3621 spin_lock_irq(&queue_lock);
3622 error = crypto_enqueue_request(&sep_queue, &req->base);
3624 if ((error != 0) && (error != -EINPROGRESS))
3625 pr_debug(" sep - crypto enqueue failed: %x\n",
3627 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3628 sep_dequeuer, (void *)&sep_queue);
3630 pr_debug(" sep - workqueue submit failed: %x\n",
3632 spin_unlock_irq(&queue_lock);
3633 /* We return result of crypto enqueue */
3637 static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
3641 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3643 pr_debug("sep - doing des ecb decrypt\n");
3645 /* Clear out task context */
3646 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3648 ta_ctx->sep_used = sep_dev;
3649 ta_ctx->current_request = DES_CBC;
3650 ta_ctx->current_hash_req = NULL;
3651 ta_ctx->current_cypher_req = req;
3652 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3653 ta_ctx->des_opmode = SEP_DES_CBC;
3654 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3655 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3657 /* lock necessary so that only one entity touches the queues */
3658 spin_lock_irq(&queue_lock);
3659 error = crypto_enqueue_request(&sep_queue, &req->base);
3661 if ((error != 0) && (error != -EINPROGRESS))
3662 pr_debug(" sep - crypto enqueue failed: %x\n",
3664 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3665 sep_dequeuer, (void *)&sep_queue);
3667 pr_debug(" sep - workqueue submit failed: %x\n",
3669 spin_unlock_irq(&queue_lock);
3670 /* We return result of crypto enqueue */
3674 static struct ahash_alg hash_algs[] = {
3676 .init = sep_sha1_init,
3677 .update = sep_sha1_update,
3678 .final = sep_sha1_final,
3679 .digest = sep_sha1_digest,
3680 .finup = sep_sha1_finup,
3682 .digestsize = SHA1_DIGEST_SIZE,
3685 .cra_driver_name = "sha1-sep",
3686 .cra_priority = 100,
3687 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3689 .cra_blocksize = SHA1_BLOCK_SIZE,
3690 .cra_ctxsize = sizeof(struct sep_system_ctx),
3692 .cra_module = THIS_MODULE,
3693 .cra_init = sep_hash_cra_init,
3694 .cra_exit = sep_hash_cra_exit,
3699 .init = sep_md5_init,
3700 .update = sep_md5_update,
3701 .final = sep_md5_final,
3702 .digest = sep_md5_digest,
3703 .finup = sep_md5_finup,
3705 .digestsize = MD5_DIGEST_SIZE,
3708 .cra_driver_name = "md5-sep",
3709 .cra_priority = 100,
3710 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3712 .cra_blocksize = SHA1_BLOCK_SIZE,
3713 .cra_ctxsize = sizeof(struct sep_system_ctx),
3715 .cra_module = THIS_MODULE,
3716 .cra_init = sep_hash_cra_init,
3717 .cra_exit = sep_hash_cra_exit,
3722 .init = sep_sha224_init,
3723 .update = sep_sha224_update,
3724 .final = sep_sha224_final,
3725 .digest = sep_sha224_digest,
3726 .finup = sep_sha224_finup,
3728 .digestsize = SHA224_DIGEST_SIZE,
3730 .cra_name = "sha224",
3731 .cra_driver_name = "sha224-sep",
3732 .cra_priority = 100,
3733 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3735 .cra_blocksize = SHA224_BLOCK_SIZE,
3736 .cra_ctxsize = sizeof(struct sep_system_ctx),
3738 .cra_module = THIS_MODULE,
3739 .cra_init = sep_hash_cra_init,
3740 .cra_exit = sep_hash_cra_exit,
3745 .init = sep_sha256_init,
3746 .update = sep_sha256_update,
3747 .final = sep_sha256_final,
3748 .digest = sep_sha256_digest,
3749 .finup = sep_sha256_finup,
3751 .digestsize = SHA256_DIGEST_SIZE,
3753 .cra_name = "sha256",
3754 .cra_driver_name = "sha256-sep",
3755 .cra_priority = 100,
3756 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3758 .cra_blocksize = SHA256_BLOCK_SIZE,
3759 .cra_ctxsize = sizeof(struct sep_system_ctx),
3761 .cra_module = THIS_MODULE,
3762 .cra_init = sep_hash_cra_init,
3763 .cra_exit = sep_hash_cra_exit,
3769 static struct crypto_alg crypto_algs[] = {
3771 .cra_name = "ecb(aes)",
3772 .cra_driver_name = "ecb-aes-sep",
3773 .cra_priority = 100,
3774 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3775 .cra_blocksize = AES_BLOCK_SIZE,
3776 .cra_ctxsize = sizeof(struct sep_system_ctx),
3778 .cra_type = &crypto_ablkcipher_type,
3779 .cra_module = THIS_MODULE,
3780 .cra_init = sep_crypto_init,
3781 .cra_exit = sep_crypto_exit,
3782 .cra_u.ablkcipher = {
3783 .min_keysize = AES_MIN_KEY_SIZE,
3784 .max_keysize = AES_MAX_KEY_SIZE,
3785 .setkey = sep_aes_setkey,
3786 .encrypt = sep_aes_ecb_encrypt,
3787 .decrypt = sep_aes_ecb_decrypt,
3791 .cra_name = "cbc(aes)",
3792 .cra_driver_name = "cbc-aes-sep",
3793 .cra_priority = 100,
3794 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3795 .cra_blocksize = AES_BLOCK_SIZE,
3796 .cra_ctxsize = sizeof(struct sep_system_ctx),
3798 .cra_type = &crypto_ablkcipher_type,
3799 .cra_module = THIS_MODULE,
3800 .cra_init = sep_crypto_init,
3801 .cra_exit = sep_crypto_exit,
3802 .cra_u.ablkcipher = {
3803 .min_keysize = AES_MIN_KEY_SIZE,
3804 .max_keysize = AES_MAX_KEY_SIZE,
3805 .setkey = sep_aes_setkey,
3806 .encrypt = sep_aes_cbc_encrypt,
3807 .ivsize = AES_BLOCK_SIZE,
3808 .decrypt = sep_aes_cbc_decrypt,
3812 .cra_name = "ebc(des)",
3813 .cra_driver_name = "ebc-des-sep",
3814 .cra_priority = 100,
3815 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3816 .cra_blocksize = DES_BLOCK_SIZE,
3817 .cra_ctxsize = sizeof(struct sep_system_ctx),
3819 .cra_type = &crypto_ablkcipher_type,
3820 .cra_module = THIS_MODULE,
3821 .cra_init = sep_crypto_init,
3822 .cra_exit = sep_crypto_exit,
3823 .cra_u.ablkcipher = {
3824 .min_keysize = DES_KEY_SIZE,
3825 .max_keysize = DES_KEY_SIZE,
3826 .setkey = sep_des_setkey,
3827 .encrypt = sep_des_ebc_encrypt,
3828 .decrypt = sep_des_ebc_decrypt,
3832 .cra_name = "cbc(des)",
3833 .cra_driver_name = "cbc-des-sep",
3834 .cra_priority = 100,
3835 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3836 .cra_blocksize = DES_BLOCK_SIZE,
3837 .cra_ctxsize = sizeof(struct sep_system_ctx),
3839 .cra_type = &crypto_ablkcipher_type,
3840 .cra_module = THIS_MODULE,
3841 .cra_init = sep_crypto_init,
3842 .cra_exit = sep_crypto_exit,
3843 .cra_u.ablkcipher = {
3844 .min_keysize = DES_KEY_SIZE,
3845 .max_keysize = DES_KEY_SIZE,
3846 .setkey = sep_des_setkey,
3847 .encrypt = sep_des_cbc_encrypt,
3848 .ivsize = DES_BLOCK_SIZE,
3849 .decrypt = sep_des_cbc_decrypt,
3853 .cra_name = "ebc(des3-ede)",
3854 .cra_driver_name = "ebc-des3-ede-sep",
3855 .cra_priority = 100,
3856 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3857 .cra_blocksize = DES_BLOCK_SIZE,
3858 .cra_ctxsize = sizeof(struct sep_system_ctx),
3860 .cra_type = &crypto_ablkcipher_type,
3861 .cra_module = THIS_MODULE,
3862 .cra_init = sep_crypto_init,
3863 .cra_exit = sep_crypto_exit,
3864 .cra_u.ablkcipher = {
3865 .min_keysize = DES3_EDE_KEY_SIZE,
3866 .max_keysize = DES3_EDE_KEY_SIZE,
3867 .setkey = sep_des_setkey,
3868 .encrypt = sep_des_ebc_encrypt,
3869 .decrypt = sep_des_ebc_decrypt,
3873 .cra_name = "cbc(des3-ede)",
3874 .cra_driver_name = "cbc-des3--ede-sep",
3875 .cra_priority = 100,
3876 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3877 .cra_blocksize = DES_BLOCK_SIZE,
3878 .cra_ctxsize = sizeof(struct sep_system_ctx),
3880 .cra_type = &crypto_ablkcipher_type,
3881 .cra_module = THIS_MODULE,
3882 .cra_init = sep_crypto_init,
3883 .cra_exit = sep_crypto_exit,
3884 .cra_u.ablkcipher = {
3885 .min_keysize = DES3_EDE_KEY_SIZE,
3886 .max_keysize = DES3_EDE_KEY_SIZE,
3887 .setkey = sep_des_setkey,
3888 .encrypt = sep_des_cbc_encrypt,
3889 .decrypt = sep_des_cbc_decrypt,
3894 int sep_crypto_setup(void)
3897 tasklet_init(&sep_dev->finish_tasklet, sep_finish,
3898 (unsigned long)sep_dev);
3900 crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
3902 sep_dev->workqueue = create_singlethread_workqueue(
3903 "sep_crypto_workqueue");
3904 if (!sep_dev->workqueue) {
3905 dev_warn(&sep_dev->pdev->dev, "cant create workqueue\n");
3909 spin_lock_init(&queue_lock);
3912 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
3913 err = crypto_register_ahash(&hash_algs[i]);
3919 for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
3920 err = crypto_register_alg(&crypto_algs[j]);
3922 goto err_crypto_algs;
3928 for (k = 0; k < i; k++)
3929 crypto_unregister_ahash(&hash_algs[k]);
3933 for (k = 0; k < j; k++)
3934 crypto_unregister_alg(&crypto_algs[k]);
3938 void sep_crypto_takedown(void)
3943 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
3944 crypto_unregister_ahash(&hash_algs[i]);
3945 for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
3946 crypto_unregister_alg(&crypto_algs[i]);
3948 tasklet_kill(&sep_dev->finish_tasklet);