// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <crypto/ctr.h>
#include "cc_driver.h"
#include "cc_ivgen.h"
#include "cc_request_mgr.h"
#include "cc_sram_mgr.h"
#include "cc_buffer_mgr.h"

/* The maximum size of the pool *MUST* be <= the total SRAM size */
#define CC_IVPOOL_SIZE 1024
/* The first 32 bytes of the pool are dedicated to the
 * next encryption key & IV used for pool regeneration
 */
#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
#define CC_IVPOOL_GEN_SEQ_LEN 4

/**
 * struct cc_ivgen_ctx - IV pool generation context
 * @pool: start address of the IV pool in internal SRAM
 * @ctr_key: SRAM address of the pool's encryption key material
 * @ctr_iv: SRAM address of the pool's counter IV
 * @next_iv_ofs: offset of the next available IV in the pool
 * @pool_meta: virtual address of the initial encryption key/IV
 * @pool_meta_dma: DMA address of the initial encryption key/IV
 */
struct cc_ivgen_ctx {
	cc_sram_addr_t pool;
	cc_sram_addr_t ctr_key;
	cc_sram_addr_t ctr_iv;
	u32 next_iv_ofs;
	u8 *pool_meta;
	dma_addr_t pool_meta_dma;
};
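
/*
 * For reference, a sketch of the resulting SRAM pool layout, derived from
 * cc_init_iv_sram() and cc_gen_iv_pool() below (offsets follow from
 * AES_KEYSIZE_128 == 16 and CC_AES_IV_SIZE == 16):
 *
 *	offset    0 ..   15	AES-128 key for the next pool regeneration
 *	offset   16 ..   31	CTR IV for the next pool regeneration
 *	offset   32 .. 1023	pool of ready-to-use IVs, handed out in
 *				iv_out_size chunks via cc_get_iv()
 */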

/*!
 * Generates CC_IVPOOL_SIZE bytes of pseudorandom data by
 * encrypting zeros using AES-128 in CTR mode.
 *
 * \param ivgen_ctx iv-pool context
 * \param iv_seq IN/OUT array of the descriptor sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 */
static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
			  struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
	unsigned int idx = *iv_seq_len;

	if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
		/* The sequence would be longer than allowed */
		return -EINVAL;
	}
	/* Setup key */
	hw_desc_init(&iv_seq[idx]);
	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
	set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
	idx++;

	/* Setup cipher state */
	hw_desc_init(&iv_seq[idx]);
	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
	set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
	idx++;

	/* Perform dummy encrypt to skip first block */
	hw_desc_init(&iv_seq[idx]);
	set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
	idx++;

	/* Generate IV pool */
	hw_desc_init(&iv_seq[idx]);
	set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
	idx++;

	*iv_seq_len = idx; /* Update sequence length */

	/* Queue ordering assures pool readiness */
	ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;

	return 0;
}
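
/*
 * Note: since the pool is generated by encrypting all-zero input with
 * AES-128-CTR, its contents are simply the raw keystream blocks
 * E_K(ctr + 1), E_K(ctr + 2), ... for the key and counter loaded above
 * (the dummy descriptor discards the first keystream block). The first
 * 32 bytes become the key/IV of the *next* regeneration, so the pool
 * effectively re-keys itself on every refill.
 */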

/*!
 * Generates the initial pool in SRAM.
 * This function should be invoked when resuming the driver.
 *
 * \param drvdata Driver's private context
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_init_iv_sram(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
	unsigned int iv_seq_len = 0;
	int rc;

	/* Generate initial encryption key/IV */
	get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);

	/* The first 32 bytes are reserved for the encryption key/IV */
	ivgen_ctx->ctr_key = ivgen_ctx->pool;
	ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;

	/* Copy the initial encryption key and IV to SRAM in a single
	 * descriptor
	 */
	hw_desc_init(&iv_seq[iv_seq_len]);
	set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
		     CC_IVPOOL_META_SIZE, NS_BIT);
	set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
		      CC_IVPOOL_META_SIZE);
	set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
	iv_seq_len++;

	/* Generate initial pool */
	rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
	if (rc)
		return rc;

	/* Fire-and-forget */
	return send_request_init(drvdata, iv_seq, iv_seq_len);
}
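
/*
 * A minimal sketch of a power-management resume path using this helper,
 * assuming a cc_pm_resume()-style callback (hypothetical here; the real
 * ccree PM hooks live elsewhere in the driver):
 *
 *	static int cc_pm_resume(struct device *dev)
 *	{
 *		struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 *
 *		// SRAM contents are lost across suspend, so the pool
 *		// must be regenerated before any IV is handed out.
 *		return cc_init_iv_sram(drvdata);
 *	}
 */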

/*!
 * Frees the iv-pool and the ivgen context.
 *
 * \param drvdata Driver's private context
 */
void cc_ivgen_fini(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	struct device *device = &drvdata->plat_dev->dev;

	if (!ivgen_ctx)
		return;

	if (ivgen_ctx->pool_meta) {
		memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
		dma_free_coherent(device, CC_IVPOOL_META_SIZE,
				  ivgen_ctx->pool_meta,
				  ivgen_ctx->pool_meta_dma);
	}

	ivgen_ctx->pool = NULL_SRAM_ADDR;

	/* Release "this" context and clear the stale handle */
	kfree(ivgen_ctx);
	drvdata->ivgen_handle = NULL;
}

/*!
 * Allocates the iv-pool and maps resources.
 * This function generates the first IV pool.
 *
 * \param drvdata Driver's private context
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_ivgen_init(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx;
	struct device *device = &drvdata->plat_dev->dev;
	int rc;

	/* Allocate "this" context */
	ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
	if (!ivgen_ctx)
		return -ENOMEM;

	/* Publish the context early so the error path below can free it
	 * through cc_ivgen_fini()
	 */
	drvdata->ivgen_handle = ivgen_ctx;

	/* Allocate the pool's header for the initial encryption key/IV */
	ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
						  &ivgen_ctx->pool_meta_dma,
						  GFP_KERNEL);
	if (!ivgen_ctx->pool_meta) {
		dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
			CC_IVPOOL_META_SIZE);
		rc = -ENOMEM;
		goto out;
	}
	/* Allocate IV pool in SRAM */
	ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
	if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
		dev_err(device, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto out;
	}

	return cc_init_iv_sram(drvdata);

out:
	cc_ivgen_fini(drvdata);
	return rc;
}
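
/*
 * A minimal sketch of the expected init/teardown pairing, assuming
 * probe()/remove()-style callers (hypothetical names; the real call
 * sites are in the ccree driver core):
 *
 *	static int my_probe(struct cc_drvdata *drvdata)
 *	{
 *		int rc = cc_ivgen_init(drvdata);
 *
 *		if (rc)
 *			return rc;	// init cleans up after itself
 *		...
 *	}
 *
 *	static void my_remove(struct cc_drvdata *drvdata)
 *	{
 *		cc_ivgen_fini(drvdata);	// safe even if init failed
 *	}
 */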

/*!
 * Acquires IVs from the iv-pool.
 *
 * \param drvdata Driver's private context
 * \param iv_out_dma Array of DMA addresses to write the IVs to
 * \param iv_out_dma_len Number of entries in the iv_out_dma array
 *	(additional elements of the array are ignored)
 * \param iv_out_size Size of each IV: may be 8 or 16 bytes
 * \param iv_seq IN/OUT array of the descriptor sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
	      unsigned int iv_out_dma_len, unsigned int iv_out_size,
	      struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	unsigned int idx = *iv_seq_len;
	struct device *dev = drvdata_to_dev(drvdata);
	unsigned int t;

	if (iv_out_size != CC_AES_IV_SIZE &&
	    iv_out_size != CTR_RFC3686_IV_SIZE) {
		return -EINVAL;
	}
	if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
		/* The sequence would be longer than allowed */
		return -EINVAL;
	}

	/* Ensure the number of requested IVs does not exceed the maximum
	 * number of DMA addresses per request
	 */
	if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
		/* The sequence would be longer than allowed */
		return -EINVAL;
	}

	for (t = 0; t < iv_out_dma_len; t++) {
		/* Acquire IV from pool */
		hw_desc_init(&iv_seq[idx]);
		set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
					    ivgen_ctx->next_iv_ofs),
			     iv_out_size);
		set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
			      NS_BIT, 0);
		set_flow_mode(&iv_seq[idx], BYPASS);
		idx++;
	}

	/* The bypass operation is followed by the crypto sequence, hence we
	 * must assure completion of the bypass write transaction with a
	 * memory-barrier (no-DMA) descriptor
	 */
	hw_desc_init(&iv_seq[idx]);
	set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
	set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
	idx++;

	*iv_seq_len = idx; /* Update sequence length */

	/* Update IV index */
	ivgen_ctx->next_iv_ofs += iv_out_size;

	if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
		dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
		/* Pool is drained - regenerate it */
		return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
	}

	return 0;
}
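
/*
 * A minimal sketch of a caller, assuming a single-IV request (hypothetical
 * names; real call sites build the full crypto sequence around this):
 *
 *	struct cc_hw_desc seq[CC_IVPOOL_SEQ_LEN];
 *	unsigned int seq_len = 0;
 *	dma_addr_t iv_dma[1] = { req_ctx->iv_dma_addr };
 *	int rc;
 *
 *	rc = cc_get_iv(drvdata, iv_dma, 1, CC_AES_IV_SIZE, seq, &seq_len);
 *	if (rc)
 *		return rc;
 *	// seq now holds the BYPASS copy descriptor, the barrier descriptor
 *	// and, if the pool ran low, a regeneration sequence; queue it ahead
 *	// of the cipher descriptors for this request.
 */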