crypto: omap-aes - Add support for cases of unaligned lengths
author Joel Fernandes <joelf@ti.com>
Sun, 18 Aug 2013 02:42:32 +0000 (21:42 -0500)
committer Herbert Xu <herbert@gondor.apana.org.au>
Wed, 21 Aug 2013 11:28:05 +0000 (21:28 +1000)
For cases where the offset or length of any page of the input SG is not
aligned to AES_BLOCK_SIZE, we copy all the pages from the input SG list
into a contiguous buffer and prepare a single-element SG list for this
buffer, with its length set to the total bytes to crypt.
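
The gather step can be pictured with the stock scatterlist helpers (a
minimal sketch, not the driver code itself; linearize_sg() and its
parameters are invented for this illustration):

    #include <linux/gfp.h>
    #include <linux/scatterlist.h>

    /*
     * Gather a scattered payload of "total" bytes into one linear
     * buffer and describe that buffer with a single-entry SG list.
     */
    static void *linearize_sg(struct scatterlist *sg, unsigned int nents,
                              size_t total, struct scatterlist *one)
    {
            void *buf = (void *)__get_free_pages(GFP_ATOMIC,
                                                 get_order(total));

            if (!buf)
                    return NULL;

            sg_copy_to_buffer(sg, nents, buf, total);
            sg_init_one(one, buf, total);
            return buf;
    }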

This is required for cases such as when an SG list with a total size of
16 bytes contains 16 pages, each holding 1 byte. DMA using the direct
buffers of such instances is not possible.
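
Such a list could be constructed like this (hypothetical test code;
bufs[] is assumed to hold 16 one-byte allocations):

    struct scatterlist sg[16];
    int i;

    sg_init_table(sg, 16);
    for (i = 0; i < 16; i++)
            sg_set_buf(&sg[i], bufs[i], 1);    /* 1 byte per entry */

Each entry's length fails IS_ALIGNED(length, AES_BLOCK_SIZE), so the
copy path is taken.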

For this purpose, we first detect the unaligned case, then allocate
enough pages to satisfy the request and prepare SG lists. We copy data
into the buffer before crypting, and copy it back out of the buffer on
completion.
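
The copy-out on completion is the inverse of the gather step, roughly
(again a sketch with invented names, mirroring what
omap_aes_done_task() does below):

    /* Scatter the linear result back into the caller's original
     * destination SG list, then release the bounce pages. */
    sg_copy_from_buffer(orig_dst, orig_nents, buf_out, total);
    free_pages((unsigned long)buf_in, get_order(total));
    free_pages((unsigned long)buf_out, get_order(total));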

Signed-off-by: Joel Fernandes <joelf@ti.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/omap-aes.c

index 7a08a152838a1a95c6923149442ff73764153929..2fd22ca6a58f754e5b89e0ae5b7b1a5f09e12ea2 100644 (file)
@@ -158,9 +158,23 @@ struct omap_aes_dev {
        struct tasklet_struct   queue_task;
 
        struct ablkcipher_request       *req;
+
+       /*
+        * total is used by PIO mode for bookkeeping, so introduce
+        * total_save, which we need to calculate the page order.
+        */
        size_t                          total;
+       size_t                          total_save;
+
        struct scatterlist              *in_sg;
        struct scatterlist              *out_sg;
+
+       /* Buffers used for copying in the unaligned cases */
+       struct scatterlist              in_sgl;
+       struct scatterlist              out_sgl;
+       struct scatterlist              *orig_out;
+       int                             sgs_copied;
+
        struct scatter_walk             in_walk;
        struct scatter_walk             out_walk;
        int                     dma_in;
@@ -537,12 +551,51 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
        dmaengine_terminate_all(dd->dma_lch_in);
        dmaengine_terminate_all(dd->dma_lch_out);
 
-       dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
-       dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
-
        return err;
 }
 
+static int omap_aes_check_aligned(struct scatterlist *sg)
+{
+       while (sg) {
+               if (!IS_ALIGNED(sg->offset, 4))
+                       return -1;
+               if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+                       return -1;
+               sg = sg_next(sg);
+       }
+       return 0;
+}
+
+static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+{
+       void *buf_in, *buf_out;
+       int pages;
+
+       pages = get_order(dd->total);
+
+       buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+       buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+       if (!buf_in || !buf_out) {
+               pr_err("Couldn't allocate pages for unaligned cases.\n");
+               free_pages((unsigned long)buf_in, pages);
+               free_pages((unsigned long)buf_out, pages);
+               return -1;
+       }
+
+       dd->orig_out = dd->out_sg;
+
+       sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
+
+       sg_init_table(&dd->in_sgl, 1);
+       sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+       dd->in_sg = &dd->in_sgl;
+
+       sg_init_table(&dd->out_sgl, 1);
+       sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+       dd->out_sg = &dd->out_sgl;
+
+       return 0;
+}
+
 static int omap_aes_handle_queue(struct omap_aes_dev *dd,
                               struct ablkcipher_request *req)
 {
@@ -576,9 +629,19 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
+       dd->total_save = req->nbytes;
        dd->in_sg = req->src;
        dd->out_sg = req->dst;
 
+       if (omap_aes_check_aligned(dd->in_sg) ||
+           omap_aes_check_aligned(dd->out_sg)) {
+               if (omap_aes_copy_sgs(dd))
+                       pr_err("Failed to copy SGs for unaligned cases\n");
+               dd->sgs_copied = 1;
+       } else {
+               dd->sgs_copied = 0;
+       }
+
        dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
        dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
        BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
@@ -606,14 +669,31 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 static void omap_aes_done_task(unsigned long data)
 {
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+       void *buf_in, *buf_out;
+       int pages;
 
        pr_debug("enter done_task\n");
 
        if (!dd->pio_only) {
                dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
                                       DMA_FROM_DEVICE);
+               dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+               dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+                            DMA_FROM_DEVICE);
                omap_aes_crypt_dma_stop(dd);
        }
+
+       if (dd->sgs_copied) {
+               buf_in = sg_virt(&dd->in_sgl);
+               buf_out = sg_virt(&dd->out_sgl);
+
+               sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
+
+               pages = get_order(dd->total_save);
+               free_pages((unsigned long)buf_in, pages);
+               free_pages((unsigned long)buf_out, pages);
+       }
+
        omap_aes_finish_req(dd, 0);
        omap_aes_handle_queue(dd, NULL);