Commit | Line | Data |
---|---|---|
85a7f0ac SAS |
1 | /* |
2 | * Support for Marvell's crypto engine which can be found on some Orion5X | |
3 | * boards. | |
4 | * | |
5 | * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > | |
6 | * License: GPLv2 | |
7 | * | |
8 | */ | |
9 | #include <crypto/aes.h> | |
10 | #include <crypto/algapi.h> | |
11 | #include <linux/crypto.h> | |
12 | #include <linux/interrupt.h> | |
13 | #include <linux/io.h> | |
14 | #include <linux/kthread.h> | |
15 | #include <linux/platform_device.h> | |
16 | #include <linux/scatterlist.h> | |
17 | ||
18 | #include "mv_cesa.h" | |
19 | /* | |
20 | * STM: | |
21 | * /---------------------------------------\ | |
22 | * | | request complete | |
23 | * \./ | | |
24 | * IDLE -> new request -> BUSY -> done -> DEQUEUE | |
25 | * /°\ | | |
26 | * | | more scatter entries | |
27 | * \________________/ | |
28 | */ | |
/* Engine states for the state machine sketched above. */
enum engine_status {
	ENGINE_IDLE,		/* waiting for a new request */
	ENGINE_BUSY,		/* hardware is processing a chunk */
	ENGINE_W_DEQUEUE,	/* chunk done, waiting for worker to collect */
};
34 | ||
/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:	sg iterator for src
 * @dst_sg_it:	sg iterator for dst
 * @complete:	called once the whole request has been processed
 * @process:	starts processing of the next chunk (arg: is_first)
 * @sg_src_left: bytes left in src to process (scatter list)
 * @src_start:	offset to add to src start position (scatter list)
 * @crypt_len:	length of current crypt process
 * @hw_nbytes:	total bytes to process in hw for this request
 * @copy_back:	whether to copy data back (crypt) or not (hash)
 * @sg_dst_left: bytes left dst to process in this scatter list
 * @dst_start:	offset to add to dst start position (scatter list)
 * @hw_processed_bytes: number of bytes processed by hw (request).
 *
 * sg helper are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};
69 | ||
/* Global driver state; a single engine instance is supported (see cpg). */
struct crypto_priv {
	void __iomem *reg;	/* engine control registers */
	void __iomem *sram;	/* on-chip SRAM used as data staging area */
	int irq;
	struct task_struct *queue_th;	/* worker thread (queue_manag) */

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;	/* request owned by engine */
	struct req_progress p;
	int max_req_size;	/* SRAM bytes usable for data per chunk */
	int sram_size;
};
85 | ||
/* The one and only engine instance; mv_probe() rejects a second device. */
static struct crypto_priv *cpg;

/* Per-tfm AES context: encryption key plus lazily derived decryption key. */
struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	/* set by setkey; cleared once compute_aes_dec_key() has run */
	u32 need_calc_aes_dkey;
};
94 | ||
/* Cipher modes implemented by this driver. */
enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};
99 | ||
/* Per-request context: selected mode and direction. */
struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;	/* nonzero for decryption */
};
104 | ||
/*
 * Derive the AES decryption key material the engine expects from the
 * encryption key, caching the result in ctx->aes_dec_key.
 *
 * The full schedule is expanded in software and the tail of key_enc is
 * copied out; key_pos = key_len + 24 words indexes the block needed first,
 * with 192/256-bit keys additionally needing an earlier block (hence the
 * key_pos -= 2 steps).  NOTE(review): the exact offsets encode the
 * hardware's expected layout — confirm against the CESA datasheet before
 * touching them.
 *
 * No-op if the cached decryption key is already up to date.
 */
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}
129 | ||
130 | static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, | |
131 | unsigned int len) | |
132 | { | |
133 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | |
134 | struct mv_ctx *ctx = crypto_tfm_ctx(tfm); | |
135 | ||
136 | switch (len) { | |
137 | case AES_KEYSIZE_128: | |
138 | case AES_KEYSIZE_192: | |
139 | case AES_KEYSIZE_256: | |
140 | break; | |
141 | default: | |
142 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
143 | return -EINVAL; | |
144 | } | |
145 | ctx->key_len = len; | |
146 | ctx->need_calc_aes_dkey = 1; | |
147 | ||
148 | memcpy(ctx->aes_enc_key, key, AES_KEY_LEN); | |
149 | return 0; | |
150 | } | |
151 | ||
15d4dd35 | 152 | static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) |
85a7f0ac SAS |
153 | { |
154 | int ret; | |
15d4dd35 US |
155 | void *sbuf; |
156 | int copied = 0; | |
85a7f0ac | 157 | |
15d4dd35 US |
158 | while (1) { |
159 | if (!p->sg_src_left) { | |
160 | ret = sg_miter_next(&p->src_sg_it); | |
161 | BUG_ON(!ret); | |
162 | p->sg_src_left = p->src_sg_it.length; | |
163 | p->src_start = 0; | |
164 | } | |
85a7f0ac | 165 | |
15d4dd35 US |
166 | sbuf = p->src_sg_it.addr + p->src_start; |
167 | ||
168 | if (p->sg_src_left <= len - copied) { | |
169 | memcpy(dbuf + copied, sbuf, p->sg_src_left); | |
170 | copied += p->sg_src_left; | |
171 | p->sg_src_left = 0; | |
172 | if (copied >= len) | |
173 | break; | |
174 | } else { | |
175 | int copy_len = len - copied; | |
176 | memcpy(dbuf + copied, sbuf, copy_len); | |
177 | p->src_start += copy_len; | |
178 | p->sg_src_left -= copy_len; | |
179 | break; | |
180 | } | |
181 | } | |
182 | } | |
85a7f0ac | 183 | |
3b61a905 | 184 | static void setup_data_in(void) |
15d4dd35 US |
185 | { |
186 | struct req_progress *p = &cpg->p; | |
187 | p->crypt_len = | |
7a5f691e | 188 | min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size); |
15d4dd35 US |
189 | copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START, |
190 | p->crypt_len); | |
85a7f0ac SAS |
191 | } |
192 | ||
/*
 * Program the accelerator for the next chunk of cpg->cur_req and start it.
 * @first_block: nonzero for the first chunk of a request; used to load the
 *		 caller-provided IV into SRAM for CBC mode.
 *
 * Builds a sec_accel_config descriptor (mode, direction, key, IV and
 * in/out SRAM offsets), copies key material and input data into SRAM,
 * then kicks the engine.  Completion is signalled asynchronously via
 * crypto_int().
 */
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		/* only the first chunk uses the caller's IV; later chunks
		 * chain from the IV the engine left in SRAM */
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	/* stage input data; sets cpg->p.crypt_len for this chunk */
	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}
252 | ||
253 | static void mv_crypto_algo_completion(void) | |
254 | { | |
3b61a905 | 255 | struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); |
85a7f0ac SAS |
256 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); |
257 | ||
a58094ac US |
258 | sg_miter_stop(&cpg->p.src_sg_it); |
259 | sg_miter_stop(&cpg->p.dst_sg_it); | |
260 | ||
85a7f0ac SAS |
261 | if (req_ctx->op != COP_AES_CBC) |
262 | return ; | |
263 | ||
264 | memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); | |
265 | } | |
266 | ||
/*
 * Runs in the worker thread after the engine signalled completion of a
 * chunk.  Copies the processed data from SRAM back into the destination
 * scatterlist (skipped when copy_back is clear, e.g. for hashing), then
 * either starts the next chunk or completes the whole request.
 */
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;
	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;
		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				/* current dst sg entry is full: advance */
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}


	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		/* crypto completion callbacks expect BHs disabled */
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}
315 | ||
316 | static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) | |
317 | { | |
318 | int i = 0; | |
15d4dd35 US |
319 | size_t cur_len; |
320 | ||
321 | while (1) { | |
322 | cur_len = sl[i].length; | |
323 | ++i; | |
324 | if (total_bytes > cur_len) | |
325 | total_bytes -= cur_len; | |
326 | else | |
327 | break; | |
328 | } | |
85a7f0ac SAS |
329 | |
330 | return i; | |
331 | } | |
332 | ||
333 | static void mv_enqueue_new_req(struct ablkcipher_request *req) | |
334 | { | |
3b61a905 | 335 | struct req_progress *p = &cpg->p; |
85a7f0ac SAS |
336 | int num_sgs; |
337 | ||
3b61a905 US |
338 | cpg->cur_req = &req->base; |
339 | memset(p, 0, sizeof(struct req_progress)); | |
340 | p->hw_nbytes = req->nbytes; | |
a58094ac US |
341 | p->complete = mv_crypto_algo_completion; |
342 | p->process = mv_process_current_q; | |
f0d03dea | 343 | p->copy_back = 1; |
85a7f0ac SAS |
344 | |
345 | num_sgs = count_sgs(req->src, req->nbytes); | |
3b61a905 | 346 | sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); |
85a7f0ac SAS |
347 | |
348 | num_sgs = count_sgs(req->dst, req->nbytes); | |
3b61a905 US |
349 | sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); |
350 | ||
85a7f0ac SAS |
351 | mv_process_current_q(1); |
352 | } | |
353 | ||
354 | static int queue_manag(void *data) | |
355 | { | |
356 | cpg->eng_st = ENGINE_IDLE; | |
357 | do { | |
358 | struct ablkcipher_request *req; | |
359 | struct crypto_async_request *async_req = NULL; | |
360 | struct crypto_async_request *backlog; | |
361 | ||
362 | __set_current_state(TASK_INTERRUPTIBLE); | |
363 | ||
364 | if (cpg->eng_st == ENGINE_W_DEQUEUE) | |
365 | dequeue_complete_req(); | |
366 | ||
367 | spin_lock_irq(&cpg->lock); | |
368 | if (cpg->eng_st == ENGINE_IDLE) { | |
369 | backlog = crypto_get_backlog(&cpg->queue); | |
370 | async_req = crypto_dequeue_request(&cpg->queue); | |
371 | if (async_req) { | |
372 | BUG_ON(cpg->eng_st != ENGINE_IDLE); | |
373 | cpg->eng_st = ENGINE_BUSY; | |
374 | } | |
375 | } | |
376 | spin_unlock_irq(&cpg->lock); | |
377 | ||
378 | if (backlog) { | |
379 | backlog->complete(backlog, -EINPROGRESS); | |
380 | backlog = NULL; | |
381 | } | |
382 | ||
383 | if (async_req) { | |
384 | req = container_of(async_req, | |
385 | struct ablkcipher_request, base); | |
386 | mv_enqueue_new_req(req); | |
387 | async_req = NULL; | |
388 | } | |
389 | ||
390 | schedule(); | |
391 | ||
392 | } while (!kthread_should_stop()); | |
393 | return 0; | |
394 | } | |
395 | ||
3b61a905 | 396 | static int mv_handle_req(struct crypto_async_request *req) |
85a7f0ac SAS |
397 | { |
398 | unsigned long flags; | |
399 | int ret; | |
400 | ||
401 | spin_lock_irqsave(&cpg->lock, flags); | |
3b61a905 | 402 | ret = crypto_enqueue_request(&cpg->queue, req); |
85a7f0ac SAS |
403 | spin_unlock_irqrestore(&cpg->lock, flags); |
404 | wake_up_process(cpg->queue_th); | |
405 | return ret; | |
406 | } | |
407 | ||
408 | static int mv_enc_aes_ecb(struct ablkcipher_request *req) | |
409 | { | |
410 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | |
411 | ||
412 | req_ctx->op = COP_AES_ECB; | |
413 | req_ctx->decrypt = 0; | |
414 | ||
3b61a905 | 415 | return mv_handle_req(&req->base); |
85a7f0ac SAS |
416 | } |
417 | ||
418 | static int mv_dec_aes_ecb(struct ablkcipher_request *req) | |
419 | { | |
420 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | |
421 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | |
422 | ||
423 | req_ctx->op = COP_AES_ECB; | |
424 | req_ctx->decrypt = 1; | |
425 | ||
426 | compute_aes_dec_key(ctx); | |
3b61a905 | 427 | return mv_handle_req(&req->base); |
85a7f0ac SAS |
428 | } |
429 | ||
430 | static int mv_enc_aes_cbc(struct ablkcipher_request *req) | |
431 | { | |
432 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | |
433 | ||
434 | req_ctx->op = COP_AES_CBC; | |
435 | req_ctx->decrypt = 0; | |
436 | ||
3b61a905 | 437 | return mv_handle_req(&req->base); |
85a7f0ac SAS |
438 | } |
439 | ||
440 | static int mv_dec_aes_cbc(struct ablkcipher_request *req) | |
441 | { | |
442 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | |
443 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | |
444 | ||
445 | req_ctx->op = COP_AES_CBC; | |
446 | req_ctx->decrypt = 1; | |
447 | ||
448 | compute_aes_dec_key(ctx); | |
3b61a905 | 449 | return mv_handle_req(&req->base); |
85a7f0ac SAS |
450 | } |
451 | ||
452 | static int mv_cra_init(struct crypto_tfm *tfm) | |
453 | { | |
454 | tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx); | |
455 | return 0; | |
456 | } | |
457 | ||
/*
 * Interrupt handler: acknowledge the accelerator-done interrupt and hand
 * the completed chunk over to the worker thread (ENGINE_W_DEQUEUE).
 */
irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	/* NOTE(review): FPGA_INT_STATUS is presumably a mirror register on
	 * FPGA-based development boards — confirm against mv_cesa.h */
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}
474 | ||
475 | struct crypto_alg mv_aes_alg_ecb = { | |
476 | .cra_name = "ecb(aes)", | |
477 | .cra_driver_name = "mv-ecb-aes", | |
478 | .cra_priority = 300, | |
479 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | |
480 | .cra_blocksize = 16, | |
481 | .cra_ctxsize = sizeof(struct mv_ctx), | |
482 | .cra_alignmask = 0, | |
483 | .cra_type = &crypto_ablkcipher_type, | |
484 | .cra_module = THIS_MODULE, | |
485 | .cra_init = mv_cra_init, | |
486 | .cra_u = { | |
487 | .ablkcipher = { | |
488 | .min_keysize = AES_MIN_KEY_SIZE, | |
489 | .max_keysize = AES_MAX_KEY_SIZE, | |
490 | .setkey = mv_setkey_aes, | |
491 | .encrypt = mv_enc_aes_ecb, | |
492 | .decrypt = mv_dec_aes_ecb, | |
493 | }, | |
494 | }, | |
495 | }; | |
496 | ||
/* cbc(aes) algorithm descriptor registered with the crypto API. */
struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		=	AES_BLOCK_SIZE,
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	mv_setkey_aes,
			.encrypt	=	mv_enc_aes_cbc,
			.decrypt	=	mv_dec_aes_cbc,
		},
	},
};
519 | ||
520 | static int mv_probe(struct platform_device *pdev) | |
521 | { | |
522 | struct crypto_priv *cp; | |
523 | struct resource *res; | |
524 | int irq; | |
525 | int ret; | |
526 | ||
527 | if (cpg) { | |
528 | printk(KERN_ERR "Second crypto dev?\n"); | |
529 | return -EEXIST; | |
530 | } | |
531 | ||
532 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | |
533 | if (!res) | |
534 | return -ENXIO; | |
535 | ||
536 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | |
537 | if (!cp) | |
538 | return -ENOMEM; | |
539 | ||
540 | spin_lock_init(&cp->lock); | |
541 | crypto_init_queue(&cp->queue, 50); | |
542 | cp->reg = ioremap(res->start, res->end - res->start + 1); | |
543 | if (!cp->reg) { | |
544 | ret = -ENOMEM; | |
545 | goto err; | |
546 | } | |
547 | ||
548 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); | |
549 | if (!res) { | |
550 | ret = -ENXIO; | |
551 | goto err_unmap_reg; | |
552 | } | |
553 | cp->sram_size = res->end - res->start + 1; | |
554 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; | |
555 | cp->sram = ioremap(res->start, cp->sram_size); | |
556 | if (!cp->sram) { | |
557 | ret = -ENOMEM; | |
558 | goto err_unmap_reg; | |
559 | } | |
560 | ||
561 | irq = platform_get_irq(pdev, 0); | |
562 | if (irq < 0 || irq == NO_IRQ) { | |
563 | ret = irq; | |
564 | goto err_unmap_sram; | |
565 | } | |
566 | cp->irq = irq; | |
567 | ||
568 | platform_set_drvdata(pdev, cp); | |
569 | cpg = cp; | |
570 | ||
571 | cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); | |
572 | if (IS_ERR(cp->queue_th)) { | |
573 | ret = PTR_ERR(cp->queue_th); | |
574 | goto err_thread; | |
575 | } | |
576 | ||
577 | ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), | |
578 | cp); | |
579 | if (ret) | |
580 | goto err_unmap_sram; | |
581 | ||
582 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); | |
583 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); | |
584 | ||
585 | ret = crypto_register_alg(&mv_aes_alg_ecb); | |
586 | if (ret) | |
587 | goto err_reg; | |
588 | ||
589 | ret = crypto_register_alg(&mv_aes_alg_cbc); | |
590 | if (ret) | |
591 | goto err_unreg_ecb; | |
592 | return 0; | |
593 | err_unreg_ecb: | |
594 | crypto_unregister_alg(&mv_aes_alg_ecb); | |
595 | err_thread: | |
596 | free_irq(irq, cp); | |
597 | err_reg: | |
598 | kthread_stop(cp->queue_th); | |
599 | err_unmap_sram: | |
600 | iounmap(cp->sram); | |
601 | err_unmap_reg: | |
602 | iounmap(cp->reg); | |
603 | err: | |
604 | kfree(cp); | |
605 | cpg = NULL; | |
606 | platform_set_drvdata(pdev, NULL); | |
607 | return ret; | |
608 | } | |
609 | ||
610 | static int mv_remove(struct platform_device *pdev) | |
611 | { | |
612 | struct crypto_priv *cp = platform_get_drvdata(pdev); | |
613 | ||
614 | crypto_unregister_alg(&mv_aes_alg_ecb); | |
615 | crypto_unregister_alg(&mv_aes_alg_cbc); | |
616 | kthread_stop(cp->queue_th); | |
617 | free_irq(cp->irq, cp); | |
618 | memset(cp->sram, 0, cp->sram_size); | |
619 | iounmap(cp->sram); | |
620 | iounmap(cp->reg); | |
621 | kfree(cp); | |
622 | cpg = NULL; | |
623 | return 0; | |
624 | } | |
625 | ||
/* Platform driver glue; bound by name to the "mv_crypto" device. */
static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");
635 | ||
/* Module init: register the platform driver. */
static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);
641 | ||
/* Module exit: unregister the platform driver. */
static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);
647 | ||
648 | MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>"); | |
649 | MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); | |
650 | MODULE_LICENSE("GPL"); |