crypto: ccree - Relocate driver irq registration after clk init
[linux-block.git] / drivers/crypto/ccree/cc_driver.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_driver.h
 * ARM CryptoCell Linux Crypto Driver
 */

#ifndef __CC_DRIVER_H__
#define __CC_DRIVER_H__

#ifdef COMP_IN_WQ
#include <linux/workqueue.h>
#else
#include <linux/interrupt.h>
#endif
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/version.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
#define CC_DEV_SHA_MAX 512
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"

extern bool cc_dump_desc;
extern bool cc_dump_bytes;

#define DRV_MODULE_VERSION "5.0"

enum cc_hw_rev {
	CC_HW_REV_630 = 630,
	CC_HW_REV_710 = 710,
	CC_HW_REV_712 = 712,
	CC_HW_REV_713 = 713
};

enum cc_std_body {
	CC_STD_NIST = 0x1,
	CC_STD_OSCCA = 0x2,
	CC_STD_ALL = 0x3
};

#define CC_COHERENT_CACHE_PARAMS 0xEEE

/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48

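/*
 * Illustrative sketch (not part of the upstream header): probe code would
 * typically apply this limit through the standard DMA API, e.g.
 *
 *	if (dma_set_mask_and_coherent(&plat_dev->dev,
 *				      DMA_BIT_MASK(DMA_BIT_MASK_LEN)))
 *		dev_err(&plat_dev->dev, "failed to set DMA mask\n");
 *
 * so that all descriptor and buffer addresses stay within the IP's
 * 48-bit reach.
 */
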
#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))

#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)

#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)

#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)

#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)

#define CC_CPP_AES_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))

#define CC_CPP_SM4_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))

/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET

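/*
 * Usage sketch: CC_REG() pastes a short register name onto the offset
 * constants generated in cc_host_regs.h, so a call such as
 *
 *	cc_ioread(drvdata, CC_REG(HOST_IRR));
 *
 * (using the accessor defined below) expands to
 * cc_ioread(drvdata, CC_HOST_IRR_REG_OFFSET).
 */
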
/* TEE FIPS status interrupt */
#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)

#define CC_CRA_PRIO 400

#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */

#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080

/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
#define AXI_ID 0
/* AXI_ID is not actually the AXI ID of the transaction but the value of the
 * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
 */

struct cc_cpp_req {
	bool is_cpp;
	enum cc_cpp_alg alg;
	u8 slot;
};

#define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
	void (*user_cb)(struct device *dev, void *req, int err);
	void *user_arg;
	dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
	/* For the first 'ivgen_dma_addr_len' addresses of this array,
	 * send_request() writes the generated IV. The same generated IV is
	 * used for all of these addresses!
	 */
	/* Amount of 'ivgen_dma_addr' elements to be filled. */
	unsigned int ivgen_dma_addr_len;
	/* The generated IV size required, 8/16 B allowed. */
	unsigned int ivgen_size;
	struct completion seq_compl; /* request completion */
	struct cc_cpp_req cpp;
};

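/*
 * Illustrative sketch (hypothetical callback name): a submitter fills
 * user_cb/user_arg so the request manager can complete the crypto API
 * request once the HW descriptor sequence finishes:
 *
 *	static void my_cipher_complete(struct device *dev, void *cc_req,
 *				       int err)
 *	{
 *		struct skcipher_request *req = cc_req;
 *
 *		skcipher_request_complete(req, err);
 *	}
 *
 *	cc_req.user_cb = my_cipher_complete;
 *	cc_req.user_arg = req;
 */
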
/**
 * struct cc_drvdata - driver private data context
 * @cc_base: virt address of the CC registers
 * @irq: device IRQ number
 * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
 */
struct cc_drvdata {
	void __iomem *cc_base;
	int irq;
	u32 irq_mask;
	struct completion hw_queue_avail; /* wait for HW queue availability */
	struct platform_device *plat_dev;
	cc_sram_addr_t mlli_sram_addr;
	void *buff_mgr_handle;
	void *cipher_handle;
	void *hash_handle;
	void *aead_handle;
	void *request_mgr_handle;
	void *fips_handle;
	void *ivgen_handle;
	void *sram_mgr_handle;
	void *debugfs;
	struct clk *clk;
	bool coherent;
	char *hw_rev_name;
	enum cc_hw_rev hw_rev;
	u32 axim_mon_offset;
	u32 sig_offset;
	u32 ver_offset;
	int std_bodies;
	bool sec_disabled;
	u32 comp_mask;
};

struct cc_crypto_alg {
	struct list_head entry;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	unsigned int data_unit;
	struct cc_drvdata *drvdata;
	struct skcipher_alg skcipher_alg;
	struct aead_alg aead_alg;
};

struct cc_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	union {
		struct skcipher_alg skcipher;
		struct aead_alg aead;
	} template_u;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	u32 min_hw_rev;
	enum cc_std_body std_body;
	bool sec_func;
	unsigned int data_unit;
	struct cc_drvdata *drvdata;
};

struct async_gen_req_ctx {
	dma_addr_t iv_dma_addr;
	u8 *iv;
	enum drv_crypto_direction op_type;
};

static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
{
	return &drvdata->plat_dev->dev;
}

void __dump_byte_array(const char *name, const u8 *buf, size_t len);
static inline void dump_byte_array(const char *name, const u8 *the_array,
				   size_t size)
{
	if (cc_dump_bytes)
		__dump_byte_array(name, the_array, size);
}

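/*
 * Usage sketch: dumping is gated at runtime by the cc_dump_bytes module
 * parameter, so call sites (names here are illustrative) can stay
 * unconditional:
 *
 *	dump_byte_array("IV", iv, ivsize);
 */
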
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
void cc_clk_off(struct cc_drvdata *drvdata);
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);

static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
	iowrite32(val, drvdata->cc_base + reg);
}

static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
{
	return ioread32(drvdata->cc_base + reg);
}

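/*
 * Usage sketch: combined with CC_REG(), these accessors give symbolic
 * register access; e.g. an IRQ handler might acknowledge pending
 * completions with something like
 *
 *	u32 irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
 *
 *	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr & CC_COMP_IRQ_MASK);
 */
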
static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
{
	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		GFP_KERNEL : GFP_ATOMIC;
}

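/*
 * Usage sketch: per-request allocations should honor the crypto API's
 * may-sleep flag, e.g. (assuming 'req' is an skcipher_request):
 *
 *	gfp_t flags = cc_gfp_flags(&req->base);
 *	u8 *buf = kmalloc(len, flags);
 *
 * so callers in atomic context get GFP_ATOMIC rather than GFP_KERNEL.
 */
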
static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
				      struct cc_hw_desc *pdesc)
{
	if (drvdata->hw_rev >= CC_HW_REV_712)
		set_queue_last_ind_bit(pdesc);
}

#endif /*__CC_DRIVER_H__*/