Commit | Line | Data |
---|---|---|
63b94509 TL |
1 | /* |
2 | * AMD Cryptographic Coprocessor (CCP) driver | |
3 | * | |
553d2374 | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
63b94509 TL |
5 | * |
6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #ifndef __CCP_DEV_H__ | |
14 | #define __CCP_DEV_H__ | |
15 | ||
16 | #include <linux/device.h> | |
17 | #include <linux/pci.h> | |
18 | #include <linux/spinlock.h> | |
19 | #include <linux/mutex.h> | |
20 | #include <linux/list.h> | |
21 | #include <linux/wait.h> | |
22 | #include <linux/dmapool.h> | |
23 | #include <linux/hw_random.h> | |
8db88467 | 24 | #include <linux/bitops.h> |
63b94509 | 25 | |
#define MAX_CCP_NAME_LEN		16
#define MAX_DMAPOOL_NAME_LEN		32

#define MAX_HW_QUEUES			5
#define MAX_CMD_QLEN			100

/* Number of times to poll TRNG_OUT_REG before giving up on the TRNG */
#define TRNG_RETRIES			10

/* AXI DMA cache settings (written to CMD_Q_CACHE_BASE registers) */
#define CACHE_NONE			0x00
#define CACHE_WB_NO_ALLOC		0xb7
/****** Register Mappings ******/
#define Q_MASK_REG			0x000
#define TRNG_OUT_REG			0x00c
#define IRQ_MASK_REG			0x040
#define IRQ_STATUS_REG			0x200

#define DEL_CMD_Q_JOB			0x124
#define DEL_Q_ACTIVE			0x00000200
#define DEL_Q_ID_SHIFT			6

/* CMD_REQx registers are spaced CMD_REQ_INCR bytes apart from CMD_REQ0 */
#define CMD_REQ0			0x180
#define CMD_REQ_INCR			0x04

/* Per-queue status registers, spaced CMD_Q_STATUS_INCR bytes per queue */
#define CMD_Q_STATUS_BASE		0x210
#define CMD_Q_INT_STATUS_BASE		0x214
#define CMD_Q_STATUS_INCR		0x20

#define CMD_Q_CACHE_BASE		0x228
#define CMD_Q_CACHE_INC			0x20

/* Extract the error code (low 6 bits) and queue depth (bits 12-15)
 * from a queue status value
 */
#define CMD_Q_ERROR(__qs)		((__qs) & 0x0000003f)
#define CMD_Q_DEPTH(__qs)		(((__qs) >> 12) & 0x0000000f)

/****** REQ0 Related Values ******/
#define REQ0_WAIT_FOR_WRITE		0x00000004
#define REQ0_INT_ON_COMPLETE		0x00000002
#define REQ0_STOP_ON_COMPLETE		0x00000001

#define REQ0_CMD_Q_SHIFT		9
#define REQ0_JOBID_SHIFT		3

/****** REQ1 Related Values ******/
#define REQ1_PROTECT_SHIFT		27
#define REQ1_ENGINE_SHIFT		23
#define REQ1_KEY_KSB_SHIFT		2

#define REQ1_EOM			0x00000002
#define REQ1_INIT			0x00000001

/* AES Related Values */
#define REQ1_AES_TYPE_SHIFT		21
#define REQ1_AES_MODE_SHIFT		18
#define REQ1_AES_ACTION_SHIFT		17
#define REQ1_AES_CFB_SIZE_SHIFT		10

/* XTS-AES Related Values */
#define REQ1_XTS_AES_SIZE_SHIFT		10

/* SHA Related Values */
#define REQ1_SHA_TYPE_SHIFT		21

/* RSA Related Values */
#define REQ1_RSA_MOD_SIZE_SHIFT		10

/* Pass-Through Related Values */
#define REQ1_PT_BW_SHIFT		12
#define REQ1_PT_BS_SHIFT		10

/* ECC Related Values */
#define REQ1_ECC_AFFINE_CONVERT		0x00200000
#define REQ1_ECC_FUNCTION_SHIFT		18

/****** REQ4 Related Values ******/
#define REQ4_KSB_SHIFT			18
#define REQ4_MEMTYPE_SHIFT		16

/****** REQ6 Related Values ******/
#define REQ6_MEMTYPE_SHIFT		16
/****** Key Storage Block ******/
#define KSB_START			77
#define KSB_END				127
#define KSB_COUNT			(KSB_END - KSB_START + 1)
#define CCP_KSB_BITS			256
#define CCP_KSB_BYTES			32

/* Job ids occupy the low 6 bits (matches CMD_Q_ERROR width) */
#define CCP_JOBID_MASK			0x0000003f

#define CCP_DMAPOOL_MAX_SIZE		64
#define CCP_DMAPOOL_ALIGN		BIT(5)

#define CCP_REVERSE_BUF_SIZE		64

/* Number of KSB entries each operation type reserves per queue */
#define CCP_AES_KEY_KSB_COUNT		1
#define CCP_AES_CTX_KSB_COUNT		1

#define CCP_XTS_AES_KEY_KSB_COUNT	1
#define CCP_XTS_AES_CTX_KSB_COUNT	1

#define CCP_SHA_KSB_COUNT		1

#define CCP_RSA_MAX_WIDTH		4096

#define CCP_PASSTHRU_BLOCKSIZE		256
#define CCP_PASSTHRU_MASKSIZE		32
#define CCP_PASSTHRU_KSB_COUNT		1

#define CCP_ECC_MODULUS_BYTES		48	/* 384-bits */
#define CCP_ECC_MAX_OPERANDS		6
#define CCP_ECC_MAX_OUTPUTS		3
#define CCP_ECC_SRC_BUF_SIZE		448
#define CCP_ECC_DST_BUF_SIZE		192
#define CCP_ECC_OPERAND_SIZE		64
#define CCP_ECC_OUTPUT_SIZE		64
#define CCP_ECC_RESULT_OFFSET		60
#define CCP_ECC_RESULT_SUCCESS		0x0001
struct ccp_op;

/* Structure for computation functions that are device-specific */
struct ccp_actions {
	/* Engine operation entry points for this device version */
	int (*perform_aes)(struct ccp_op *);
	int (*perform_xts_aes)(struct ccp_op *);
	int (*perform_sha)(struct ccp_op *);
	int (*perform_rsa)(struct ccp_op *);
	int (*perform_passthru)(struct ccp_op *);
	int (*perform_ecc)(struct ccp_op *);
	/* Device setup/teardown and interrupt handling */
	int (*init)(struct ccp_device *);
	void (*destroy)(struct ccp_device *);
	irqreturn_t (*irqhandler)(int, void *);
};
158 | ||
/* Structure to hold CCP version-specific values */
struct ccp_vdata {
	unsigned int version;
	struct ccp_actions *perform;	/* version-specific operation table */
};

/* Version data for the v3 CCP (defined in the v3-specific source) */
extern struct ccp_vdata ccpv3;

struct ccp_device;
struct ccp_cmd;

/* Per-hardware-queue state; one instance per CCP command queue */
struct ccp_cmd_queue {
	struct ccp_device *ccp;		/* owning device */

	/* Queue identifier */
	u32 id;

	/* Queue dma pool */
	struct dma_pool *dma_pool;

	/* Queue reserved KSB regions */
	u32 ksb_key;
	u32 ksb_ctx;

	/* Queue processing thread */
	struct task_struct *kthread;
	unsigned int active;
	unsigned int suspended;

	/* Number of free command slots available */
	unsigned int free_slots;

	/* Interrupt masks */
	u32 int_ok;
	u32 int_err;

	/* Register addresses for queue */
	void __iomem *reg_status;
	void __iomem *reg_int_status;

	/* Status values from job */
	u32 int_status;
	u32 q_status;
	u32 q_int_status;
	u32 cmd_error;

	/* Interrupt wait queue */
	wait_queue_head_t int_queue;
	unsigned int int_rcvd;
} ____cacheline_aligned;
209 | ||
/* Per-device state for one CCP instance */
struct ccp_device {
	/* Entry on the global device list (see ccp_add_device/ccp_del_device) */
	struct list_head entry;

	/* Version-specific data and operation table */
	struct ccp_vdata *vdata;
	unsigned int ord;		/* device ordinal, used in names below */
	char name[MAX_CCP_NAME_LEN];
	char rngname[MAX_CCP_NAME_LEN];

	struct device *dev;

	/*
	 * Bus specific device information
	 */
	void *dev_specific;
	int (*get_irq)(struct ccp_device *ccp);
	void (*free_irq)(struct ccp_device *ccp);
	unsigned int irq;

	/*
	 * I/O area used for device communication. The register mapping
	 * starts at an offset into the mapped bar.
	 * The CMD_REQx registers and the Delete_Cmd_Queue_Job register
	 * need to be protected while a command queue thread is accessing
	 * them.
	 */
	struct mutex req_mutex ____cacheline_aligned;
	void __iomem *io_map;
	void __iomem *io_regs;

	/*
	 * Master lists that all cmds are queued on. Because there can be
	 * more than one CCP command queue that can process a cmd a separate
	 * backlog list is needed so that the backlog completion call
	 * completes before the cmd is available for execution.
	 */
	spinlock_t cmd_lock ____cacheline_aligned;
	unsigned int cmd_count;
	struct list_head cmd;
	struct list_head backlog;

	/*
	 * The command queues. These represent the queues available on the
	 * CCP that are available for processing cmds
	 */
	struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
	unsigned int cmd_q_count;

	/*
	 * Support for the CCP True RNG
	 */
	struct hwrng hwrng;
	unsigned int hwrng_retries;

	/*
	 * A counter used to generate job-ids for cmds submitted to the CCP
	 */
	atomic_t current_id ____cacheline_aligned;

	/*
	 * The CCP uses key storage blocks (KSB) to maintain context for certain
	 * operations. To prevent multiple cmds from using the same KSB range
	 * a command queue reserves a KSB range for the duration of the cmd.
	 * Each queue, will however, reserve 2 KSB blocks for operations that
	 * only require single KSB entries (eg. AES context/iv and key) in order
	 * to avoid allocation contention. This will reserve at most 10 KSB
	 * entries, leaving 40 KSB entries available for dynamic allocation.
	 */
	struct mutex ksb_mutex ____cacheline_aligned;
	DECLARE_BITMAP(ksb, KSB_COUNT);
	wait_queue_head_t ksb_queue;
	unsigned int ksb_avail;
	unsigned int ksb_count;
	u32 ksb_start;

	/* Suspend support */
	unsigned int suspending;
	wait_queue_head_t suspend_queue;

	/* DMA caching attribute support */
	unsigned int axcache;
};
291 | ||
/* Memory locations a CCP operation can address */
enum ccp_memtype {
	CCP_MEMTYPE_SYSTEM = 0,
	CCP_MEMTYPE_KSB,
	CCP_MEMTYPE_LOCAL,
	CCP_MEMTYPE__LAST,
};
298 | ||
/* A DMA-mapped region: base address plus current offset/length */
struct ccp_dma_info {
	dma_addr_t address;
	unsigned int offset;
	unsigned int length;
	enum dma_data_direction dir;
};
305 | ||
/* A CPU-addressable work buffer together with its DMA mapping */
struct ccp_dm_workarea {
	struct device *dev;
	struct dma_pool *dma_pool;	/* pool the buffer came from, if any */
	unsigned int length;

	u8 *address;			/* CPU virtual address of the buffer */
	struct ccp_dma_info dma;
};
314 | ||
/* Scatter-gather walk state used while streaming data through the CCP */
struct ccp_sg_workarea {
	struct scatterlist *sg;
	int nents;

	struct scatterlist *dma_sg;
	struct device *dma_dev;
	unsigned int dma_count;
	enum dma_data_direction dma_dir;

	/* Bytes consumed from the current sg entry */
	unsigned int sg_used;

	u64 bytes_left;
};
328 | ||
/* Pairs an sg walk with a bounce/work buffer for one data stream */
struct ccp_data {
	struct ccp_sg_workarea sg_wa;
	struct ccp_dm_workarea dm_wa;
};
333 | ||
/* A source or destination for an operation: DMA region or KSB slot */
struct ccp_mem {
	enum ccp_memtype type;
	union {
		struct ccp_dma_info dma;	/* when type selects system memory */
		u32 ksb;			/* KSB index when type is CCP_MEMTYPE_KSB */
	} u;
};
341 | ||
/* Per-engine operation parameters, selected via the union in ccp_op */

struct ccp_aes_op {
	enum ccp_aes_type type;
	enum ccp_aes_mode mode;
	enum ccp_aes_action action;
};

struct ccp_xts_aes_op {
	enum ccp_aes_action action;
	enum ccp_xts_aes_unit_size unit_size;
};

struct ccp_sha_op {
	enum ccp_sha_type type;
	u64 msg_bits;		/* total message length in bits (for final block) */
};

struct ccp_rsa_op {
	u32 mod_size;
	u32 input_len;
};

struct ccp_passthru_op {
	enum ccp_passthru_bitwise bit_mod;
	enum ccp_passthru_byteswap byte_swap;
};

struct ccp_ecc_op {
	enum ccp_ecc_function function;
};
371 | ||
/* A single operation submitted to a CCP command queue */
struct ccp_op {
	struct ccp_cmd_queue *cmd_q;

	u32 jobid;
	u32 ioc;	/* interrupt on completion */
	u32 soc;	/* stop on completion */
	u32 ksb_key;	/* reserved KSB slot for the key */
	u32 ksb_ctx;	/* reserved KSB slot for the context */
	u32 init;
	u32 eom;	/* end of message */

	struct ccp_mem src;
	struct ccp_mem dst;

	/* Engine-specific parameters; the active member depends on the engine */
	union {
		struct ccp_aes_op aes;
		struct ccp_xts_aes_op xts;
		struct ccp_sha_op sha;
		struct ccp_rsa_op rsa;
		struct ccp_passthru_op passthru;
		struct ccp_ecc_op ecc;
	} u;
};
395 | ||
396 | static inline u32 ccp_addr_lo(struct ccp_dma_info *info) | |
397 | { | |
398 | return lower_32_bits(info->address + info->offset); | |
399 | } | |
400 | ||
401 | static inline u32 ccp_addr_hi(struct ccp_dma_info *info) | |
402 | { | |
403 | return upper_32_bits(info->address + info->offset) & 0x0000ffff; | |
404 | } | |
405 | ||
/* PCI transport registration (ccp-pci.c) */
int ccp_pci_init(void);
void ccp_pci_exit(void);

/* Platform-device transport registration (ccp-platform.c) */
int ccp_platform_init(void);
void ccp_platform_exit(void);

/* Add/remove a device on the global device list */
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);

/* Allocate and minimally initialize a ccp_device for the given device */
struct ccp_device *ccp_alloc_struct(struct device *dev);
bool ccp_queues_suspended(struct ccp_device *ccp);
/* Command queue processing thread entry point (data is a ccp_cmd_queue) */
int ccp_cmd_queue_thread(void *data);

/* Execute a cmd on the given command queue */
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);

#endif
421 | #endif |