// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}

/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 skip = req->assoclen + req->cryptlen;

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}

/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: Device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 *
 * Return:
 * Number of entries in the scatterlist
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	*lbytes = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}

	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dev: Device object
 * @dest: Buffer to copy to/from
 * @sg: SG list
 * @to_skip: Number of bytes to skip before copying
 * @end: Offset of last byte to copy
 * @direct: Transfer direction (true == from SG list to buffer, false == from
 *          buffer to SG list)
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}

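/**
 * cc_render_buff_to_mlli() - Add MLLI entries for one contiguous DMA buffer,
 * splitting buffers larger than CC_MAX_MLLI_ENTRY_SIZE across multiple
 * entries.
 *
 * @dev: Device object
 * @buff_dma: DMA address of the buffer
 * @buff_size: Buffer size in bytes
 * @curr_nents: [IN/OUT] Running count of MLLI entries written so far
 * @mlli_entry_pp: [IN/OUT] Pointer to the next free MLLI entry slot
 *
 * Return: 0 on success, -ENOMEM if the entry count would exceed
 * MAX_NUM_OF_TOTAL_MLLI_ENTRIES.
 */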
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/* handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}

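/**
 * cc_render_sg_to_mlli() - Walk a DMA-mapped scatterlist and emit MLLI
 * entries for each mapped segment, starting @sgl_offset bytes into the
 * first one.
 *
 * @dev: Device object
 * @sgl: DMA-mapped scatterlist to render
 * @sgl_data_len: Number of data bytes to cover
 * @sgl_offset: Byte offset into the first entry
 * @curr_nents: [IN/OUT] Running count of MLLI entries written so far
 * @mlli_entry_pp: [IN/OUT] Pointer to the next free MLLI entry slot
 *
 * Return: 0 on success, or the error from cc_render_buff_to_mlli().
 */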
static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

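/**
 * cc_generate_mlli() - Build a single MLLI table covering all the buffers
 * accumulated in @sg_data. The table is allocated from the driver's DMA
 * pool, and each buffer's mlli_nents counter (when provided) is updated
 * with the number of entries that buffer contributed.
 *
 * @dev: Device object
 * @sg_data: Accumulated list of buffers/SGLs to link into one table
 * @mlli_params: [OUT] Receives the table's virtual/DMA addresses and length
 * @flags: Allocation flags for dma_pool_alloc()
 *
 * Return: 0 on success, -ENOMEM on pool allocation failure.
 */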
static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = mlli_params->mlli_virt_addr;
	/* go over all SGs and link them into one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
					  &total_nents, &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}

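/**
 * cc_add_sg_entry() - Append a scatterlist to the buffer_array accumulator
 * for a later cc_generate_mlli() call.
 *
 * @dev: Device object
 * @sgl_data: Buffer array being accumulated
 * @nents: Number of entries in @sgl
 * @sgl: Scatterlist to add
 * @data_len: Number of data bytes in @sgl
 * @data_offset: Byte offset at which the data starts within @sgl
 * @is_last_table: True if this is the last table in the MLLI chain
 * @mlli_nents: [OUT] Optional counter, zeroed here and later updated with
 *              the number of MLLI entries this buffer produced
 */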
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

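/**
 * cc_map_sg() - DMA-map a scatterlist, bounding the number of fragments.
 *
 * @dev: Device object
 * @sg: Scatterlist to map
 * @nbytes: Number of data bytes in @sg (zero is valid and maps nothing)
 * @direction: DMA direction
 * @nents: [OUT] Number of entries covering @nbytes
 * @max_sg_nents: Maximum number of fragments allowed
 * @lbytes: [OUT] Number of bytes in the last entry
 * @mapped_nents: [OUT] Number of entries actually mapped
 *
 * Return: 0 on success, -ENOMEM on too many fragments or mapping failure.
 */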
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	int ret = 0;

	if (!nbytes) {
		*mapped_nents = 0;
		*lbytes = 0;
		*nents = 0;
		return 0;
	}

	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
	if (*nents > max_sg_nents) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			*nents, max_sg_nents);
		*nents = 0;
		return -ENOMEM;
	}

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	ret = dma_map_sg(dev, sg, *nents, direction);
	if (ret == 0) {
		*nents = 0;
		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
		return -ENOMEM;
	}

	*mapped_nents = ret;

	return 0;
}

static int
cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
		     u8 *config_data, struct buffer_array *sg_data,
		     unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}

static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}

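/**
 * cc_unmap_cipher_request() - Release the DMA resources taken by
 * cc_map_cipher_request(): the IV mapping, the MLLI table (if one was
 * allocated) and the src/dst scatterlist mappings.
 *
 * @dev: Device object
 * @ctx: Cipher request context (struct cipher_req_ctx)
 * @ivsize: Size of the mapped IV, in bytes
 * @src: Source scatterlist
 * @dst: Destination scatterlist (may equal @src for in-place operations)
 */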
void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}

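/**
 * cc_map_cipher_request() - DMA-map the IV and the src/dst scatterlists of
 * a cipher request, and build an MLLI table when either list is fragmented.
 *
 * @drvdata: Driver private data
 * @ctx: Cipher request context (struct cipher_req_ctx)
 * @ivsize: IV size in bytes (may be zero)
 * @nbytes: Data size in bytes
 * @info: IV buffer
 * @src: Source scatterlist
 * @dst: Destination scatterlist (may equal @src for in-place operations)
 * @flags: Allocation flags for the MLLI table
 *
 * Return: 0 on success; on failure everything mapped so far is unmapped.
 */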
int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}

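/**
 * cc_unmap_aead_request() - Release all DMA resources taken by
 * cc_map_aead_request(): the MAC buffer, the GCM/CCM block mappings, the
 * IV copy, the MLLI table and the src/dst scatterlist mappings. On coherent
 * platforms it also copies the MAC back from the temporary buffer for
 * in-place decrypt operations.
 *
 * @dev: Device object
 * @req: AEAD request object
 */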
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
		     DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back the MAC from its temporary location to deal with
		 * a possible data overwrite caused by a cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}

static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}

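/**
 * cc_aead_chain_iv() - Copy the request IV to a DMA-safe buffer and map it
 * for the HW (req->iv may live on the caller's stack, so it cannot be
 * mapped directly).
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Unused here; kept so all chain helpers share a signature
 * @is_last: Unused here; kept so all chain helpers share a signature
 * @do_chain: Unused here; kept so all chain helpers share a signature
 *
 * Return: 0 on success, -ENOMEM on allocation or mapping failure.
 */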
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);

chain_iv_exit:
	return rc;
}

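/**
 * cc_aead_chain_assoc() - Account for the associated data of an AEAD
 * request and decide whether it can be described as DLLI (single fragment,
 * no CCM header) or must go through an MLLI table, adding it to @sg_data
 * in the latter case.
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Accumulated buffer array for MLLI generation
 * @is_last: True if this is the last table in the MLLI chain
 * @do_chain: Force MLLI chaining even for a single fragment
 *
 * Return: 0 on success, -EINVAL/-ENOMEM on error.
 */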
static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	int mapped_nents = 0;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in the CCM case we have an additional entry for
	 * the CCM header configuration
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}

static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	ssize_t offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}

static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/* INPLACE */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
				 * already copying ICV for any
				 * INPLACE-DECRYPT operation, hence
				 * we must neglect this code.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case where the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case where the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}

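/**
 * cc_aead_chain_data() - Map and account for the src/dst data of an AEAD
 * request: skip over the associated data, bound the number of fragments,
 * pick DLLI vs. MLLI and locate the ICV (fragmented or contiguous).
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Accumulated buffer array for MLLI generation
 * @is_last_table: True if this is the last table in the MLLI chain
 * @do_chain: Force MLLI chaining even for single fragments
 *
 * Return: 0 on success, -EINVAL/-ENOMEM on error.
 */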
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	u32 sg_index = 0;
	u32 size_to_skip = req->assoclen;
	struct scatterlist *sgl;

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	/* check where the data starts */
	while (src_mapped_nents && (sg_index <= size_to_skip)) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;

		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
		else
			size_for_map -= authsize;

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.mapped_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	/* check where the data starts */
	while (dst_mapped_nents && sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}

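/**
 * cc_update_aead_mlli_nents() - Lay out the assoc/src/dst MLLI tables in
 * the MLLI SRAM area and, for double-pass (non-single-pass) flows, fold the
 * data tables' entry counts into the assoc table's count.
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 */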
static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/* Inplace case dst nents equal to src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
								curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
					DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
						drvdata->mlli_sram_addr +
								curr_mlli_size;
				areq_ctx->dst.sram_addr =
						areq_ctx->src.sram_addr +
						areq_ctx->src.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
						drvdata->mlli_sram_addr +
								curr_mlli_size;
				areq_ctx->src.sram_addr =
						areq_ctx->dst.sram_addr +
						areq_ctx->dst.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}

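/**
 * cc_map_aead_request() - Map all DMA resources of an AEAD request: MAC
 * buffer, CCM/GCM config blocks, IV, src/dst data, and build the MLLI
 * table(s) for single-pass or double-pass flows as needed.
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 *
 * Return: 0 on success; on failure all mappings taken so far are released
 * via cc_unmap_aead_request().
 */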
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy the MAC to a temporary location to deal with a possible
	 * data overwrite caused by a cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for the cipher: remove the ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
				DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	/* If we do in-place encryption, we also need the auth tag */
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
	    (req->src == req->dst)) {
		size_to_map += authsize;
	}

	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.mapped_nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 * Note: IV is a contiguous buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* MLLI support - start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}

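/**
 * cc_map_hash_request_final() - Map the data for a final hash operation:
 * any bytes left over in the context buffer plus the new request data,
 * generating an MLLI table when more than one fragment is involved.
 *
 * @drvdata: Driver private data
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source scatterlist
 * @nbytes: Number of new data bytes in @src
 * @do_update: True if the new data should actually be hashed
 * @flags: Allocation flags for the MLLI table
 *
 * Return: 0 on success, negative error code otherwise.
 */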
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/* map the previous buffer */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/* build mlli */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

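/**
 * cc_map_hash_request_update() - Map the data for an update hash operation.
 * Data is only hashed in multiples of the block size; anything below a full
 * block is buffered, and the residue of a larger update is copied to the
 * "next" buffer for the following call.
 *
 * @drvdata: Driver private data
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source scatterlist
 * @nbytes: Number of new data bytes in @src
 * @block_size: Hash block size in bytes
 * @flags: Allocation flags for the MLLI table
 *
 * Return: 0 on success, 1 if all data was buffered and no HW operation is
 * needed, negative error code otherwise.
 */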
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

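/**
 * cc_unmap_hash_request() - Release the DMA resources taken by the hash
 * mapping functions: the MLLI table (if a pool was set), the source
 * scatterlist and the context buffer mapping.
 *
 * @dev: Device object
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source scatterlist
 * @do_revert: True to restore the buffer index (failure path); false to
 *             clear the previous data length (normal completion)
 */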
void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);

	drvdata->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!drvdata->mlli_buffs_pool)
		return -ENOMEM;

	return 0;
}

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	dma_pool_destroy(drvdata->mlli_buffs_pool);
	return 0;
}