/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_RSD0				0x40
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
	struct img_hash_dev *hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long flags;
	size_t digsize;

	dma_addr_t dma_addr;
	size_t dma_ct;

	/* sg root */
	struct scatterlist *sgfirst;
	/* walk state */
	struct scatterlist *sg;
	size_t nents;
	size_t offset;
	unsigned int total;
	size_t sent;

	unsigned long op;

	size_t bufcnt;
	struct ahash_request fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[0] __aligned(sizeof(u32));
};

struct img_hash_ctx {
	struct img_hash_dev *hdev;
	unsigned long flags;
	struct crypto_ahash *fallback;
};

struct img_hash_dev {
	struct list_head list;
	struct device *dev;
	struct clk *hash_clk;
	struct clk *sys_clk;
	void __iomem *io_base;

	phys_addr_t bus_addr;
	void __iomem *cpu_addr;

	spinlock_t lock;
	int err;
	struct tasklet_struct done_task;
	struct tasklet_struct dma_task;

	unsigned long flags;
	struct crypto_queue queue;
	struct ahash_request *req;

	struct dma_chan *dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non-DMA mode. To
	 * ensure the first data write is not grouped in a burst with the
	 * control register write, a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err = -EINVAL;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req || !ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU\n");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}
	return err;
}

static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}

	spin_unlock(&img_hash.lock);
	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = -ENOMEM;

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		err = PTR_ERR(ctx->fallback);
		goto err;
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;

err:
	return err;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	hdev->io_base = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		dev_err(dev, "can't ioremap, returned %d\n", err);

		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		dev_err(dev, "can't ioremap write port\n");
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_prepare_enable(hdev->hash_clk);
	clk_prepare_enable(hdev->sys_clk);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table	= of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");