Commit | Line | Data |
---|---|---|
bdf86d0e RW |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Intel MAX10 Board Management Controller Secure Update Driver | |
4 | * | |
5 | * Copyright (C) 2019-2022 Intel Corporation. All rights reserved. | |
6 | * | |
7 | */ | |
8 | #include <linux/bitfield.h> | |
9 | #include <linux/device.h> | |
10 | #include <linux/firmware.h> | |
11 | #include <linux/mfd/intel-m10-bmc.h> | |
12 | #include <linux/mod_devicetable.h> | |
13 | #include <linux/module.h> | |
14 | #include <linux/platform_device.h> | |
15 | #include <linux/slab.h> | |
16 | ||
struct m10bmc_sec;

/*
 * Per-BMC-generation operations.
 *
 * @rsu_status: read the RSU status field for this board generation;
 *              returns the non-negative status value or a negative errno.
 */
struct m10bmc_sec_ops {
	int (*rsu_status)(struct m10bmc_sec *sec);
};
22 | ||
/* Driver state for one secure-update instance */
struct m10bmc_sec {
	struct device *dev;		/* this secure-update device */
	struct intel_m10bmc *m10bmc;	/* parent MAX10 BMC device */
	struct fw_upload *fwl;		/* firmware-upload framework handle */
	char *fw_name;			/* unique name registered with fw_upload */
	u32 fw_name_id;			/* id from fw_upload_xa backing fw_name */
	bool cancel_request;		/* set asynchronously to abort an update */
	const struct m10bmc_sec_ops *ops;	/* generation-specific callbacks */
};
32 | ||
/* Allocator for unique secure-update instance ids (see fw_name_id) */
static DEFINE_XARRAY_ALLOC(fw_upload_xa);

/* Root Entry Hash (REH) support */
#define REH_SHA256_SIZE		32
#define REH_SHA384_SIZE		48
#define REH_MAGIC		GENMASK(15, 0)
#define REH_SHA_NUM_BYTES	GENMASK(31, 16)
40 | ||
3e10c805 IJ |
41 | static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size) |
42 | { | |
43 | struct intel_m10bmc *m10bmc = sec->m10bmc; | |
44 | unsigned int stride = regmap_get_reg_stride(m10bmc->regmap); | |
45 | u32 write_count = size / stride; | |
46 | u32 leftover_offset = write_count * stride; | |
47 | u32 leftover_size = size - leftover_offset; | |
48 | u32 leftover_tmp = 0; | |
49 | int ret; | |
50 | ||
acf63c45 IJ |
51 | if (sec->m10bmc->flash_bulk_ops) |
52 | return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size); | |
53 | ||
3e10c805 IJ |
54 | if (WARN_ON_ONCE(stride > sizeof(leftover_tmp))) |
55 | return -EINVAL; | |
56 | ||
57 | ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset, | |
58 | buf + offset, write_count); | |
59 | if (ret) | |
60 | return ret; | |
61 | ||
62 | /* If size is not aligned to stride, handle the remainder bytes with regmap_write() */ | |
63 | if (leftover_size) { | |
64 | memcpy(&leftover_tmp, buf + leftover_offset, leftover_size); | |
65 | ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset, | |
66 | leftover_tmp); | |
67 | if (ret) | |
68 | return ret; | |
69 | } | |
70 | ||
71 | return 0; | |
72 | } | |
73 | ||
74 | static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size) | |
75 | { | |
76 | struct intel_m10bmc *m10bmc = sec->m10bmc; | |
77 | unsigned int stride = regmap_get_reg_stride(m10bmc->regmap); | |
78 | u32 read_count = size / stride; | |
79 | u32 leftover_offset = read_count * stride; | |
80 | u32 leftover_size = size - leftover_offset; | |
81 | u32 leftover_tmp; | |
82 | int ret; | |
83 | ||
acf63c45 IJ |
84 | if (sec->m10bmc->flash_bulk_ops) |
85 | return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size); | |
86 | ||
3e10c805 IJ |
87 | if (WARN_ON_ONCE(stride > sizeof(leftover_tmp))) |
88 | return -EINVAL; | |
89 | ||
90 | ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count); | |
91 | if (ret) | |
92 | return ret; | |
93 | ||
94 | /* If size is not aligned to stride, handle the remainder bytes with regmap_read() */ | |
95 | if (leftover_size) { | |
96 | ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp); | |
97 | if (ret) | |
98 | return ret; | |
99 | memcpy(buf + leftover_offset, &leftover_tmp, leftover_size); | |
100 | } | |
101 | ||
102 | return 0; | |
103 | } | |
104 | ||
105 | ||
bdf86d0e RW |
106 | static ssize_t |
107 | show_root_entry_hash(struct device *dev, u32 exp_magic, | |
108 | u32 prog_addr, u32 reh_addr, char *buf) | |
109 | { | |
110 | struct m10bmc_sec *sec = dev_get_drvdata(dev); | |
111 | int sha_num_bytes, i, ret, cnt = 0; | |
112 | u8 hash[REH_SHA384_SIZE]; | |
bdf86d0e RW |
113 | u32 magic; |
114 | ||
3e10c805 | 115 | ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic)); |
bdf86d0e RW |
116 | if (ret) |
117 | return ret; | |
118 | ||
119 | if (FIELD_GET(REH_MAGIC, magic) != exp_magic) | |
120 | return sysfs_emit(buf, "hash not programmed\n"); | |
121 | ||
122 | sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8; | |
3e10c805 IJ |
123 | if (sha_num_bytes != REH_SHA256_SIZE && |
124 | sha_num_bytes != REH_SHA384_SIZE) { | |
bdf86d0e RW |
125 | dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__, |
126 | sha_num_bytes); | |
127 | return -EINVAL; | |
128 | } | |
129 | ||
3e10c805 | 130 | ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes); |
bdf86d0e | 131 | if (ret) { |
3e10c805 | 132 | dev_err(dev, "failed to read root entry hash\n"); |
bdf86d0e RW |
133 | return ret; |
134 | } | |
135 | ||
136 | for (i = 0; i < sha_num_bytes; i++) | |
137 | cnt += sprintf(buf + cnt, "%02x", hash[i]); | |
138 | cnt += sprintf(buf + cnt, "\n"); | |
139 | ||
140 | return cnt; | |
141 | } | |
142 | ||
/*
 * Define a read-only sysfs attribute <_name>_root_entry_hash backed by the
 * per-board CSR map addresses (magic/prog/reh) for that hash region.
 */
#define DEVICE_ATTR_SEC_REH_RO(_name) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
					    struct device_attribute *attr, \
					    char *buf) \
{ \
	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
	\
	return show_root_entry_hash(dev, csr_map->_name##_magic, \
				    csr_map->_name##_prog_addr, \
				    csr_map->_name##_reh_addr, \
				    buf); \
} \
static DEVICE_ATTR_RO(_name##_root_entry_hash)

DEVICE_ATTR_SEC_REH_RO(bmc);	/* BMC image root entry hash */
DEVICE_ATTR_SEC_REH_RO(sr);	/* static region root entry hash */
DEVICE_ATTR_SEC_REH_RO(pr);	/* partial reconfiguration root entry hash */
bdf86d0e | 161 | |
7f03d84a RW |
162 | #define CSK_BIT_LEN 128U |
163 | #define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32) | |
164 | ||
165 | static ssize_t | |
166 | show_canceled_csk(struct device *dev, u32 addr, char *buf) | |
167 | { | |
3e10c805 | 168 | unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32); |
7f03d84a RW |
169 | struct m10bmc_sec *sec = dev_get_drvdata(dev); |
170 | DECLARE_BITMAP(csk_map, CSK_BIT_LEN); | |
171 | __le32 csk_le32[CSK_32ARRAY_SIZE]; | |
172 | u32 csk32[CSK_32ARRAY_SIZE]; | |
173 | int ret; | |
174 | ||
3e10c805 | 175 | ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size); |
7f03d84a | 176 | if (ret) { |
3e10c805 | 177 | dev_err(sec->dev, "failed to read CSK vector\n"); |
7f03d84a RW |
178 | return ret; |
179 | } | |
180 | ||
181 | for (i = 0; i < CSK_32ARRAY_SIZE; i++) | |
182 | csk32[i] = le32_to_cpu(((csk_le32[i]))); | |
183 | ||
184 | bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN); | |
185 | bitmap_complement(csk_map, csk_map, CSK_BIT_LEN); | |
186 | return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN); | |
187 | } | |
188 | ||
/*
 * Define a read-only sysfs attribute <_name>_canceled_csks reporting the
 * canceled key vector located CSK_VEC_OFFSET bytes into the region's
 * programming area.
 */
#define DEVICE_ATTR_SEC_CSK_RO(_name) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
					  struct device_attribute *attr, \
					  char *buf) \
{ \
	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
	\
	return show_canceled_csk(dev, \
				 csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
				 buf); \
} \
static DEVICE_ATTR_RO(_name##_canceled_csks)

/* Offset of the CSK cancellation vector within a programming region */
#define CSK_VEC_OFFSET 0x34

DEVICE_ATTR_SEC_CSK_RO(bmc);
DEVICE_ATTR_SEC_CSK_RO(sr);
DEVICE_ATTR_SEC_CSK_RO(pr);
7f03d84a | 208 | |
154afa5c RW |
209 | #define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */ |
210 | ||
211 | static ssize_t flash_count_show(struct device *dev, | |
212 | struct device_attribute *attr, char *buf) | |
213 | { | |
214 | struct m10bmc_sec *sec = dev_get_drvdata(dev); | |
6052a005 | 215 | const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; |
3e10c805 | 216 | unsigned int num_bits; |
154afa5c RW |
217 | u8 *flash_buf; |
218 | int cnt, ret; | |
219 | ||
154afa5c RW |
220 | num_bits = FLASH_COUNT_SIZE * 8; |
221 | ||
468c9d92 RW |
222 | flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL); |
223 | if (!flash_buf) | |
224 | return -ENOMEM; | |
225 | ||
3e10c805 IJ |
226 | ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter, |
227 | FLASH_COUNT_SIZE); | |
154afa5c | 228 | if (ret) { |
3e10c805 | 229 | dev_err(sec->dev, "failed to read flash count\n"); |
154afa5c RW |
230 | goto exit_free; |
231 | } | |
232 | cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits); | |
233 | ||
234 | exit_free: | |
235 | kfree(flash_buf); | |
236 | ||
237 | return ret ? : sysfs_emit(buf, "%u\n", cnt); | |
238 | } | |
239 | static DEVICE_ATTR_RO(flash_count); | |
240 | ||
static struct attribute *m10bmc_security_attrs[] = {
	&dev_attr_flash_count.attr,
	&dev_attr_bmc_root_entry_hash.attr,
	&dev_attr_sr_root_entry_hash.attr,
	&dev_attr_pr_root_entry_hash.attr,
	&dev_attr_sr_canceled_csks.attr,
	&dev_attr_pr_canceled_csks.attr,
	&dev_attr_bmc_canceled_csks.attr,
	NULL,
};

/* All security attributes live under a "security" sysfs subdirectory */
static struct attribute_group m10bmc_security_attr_group = {
	.name = "security",
	.attrs = m10bmc_security_attrs,
};

static const struct attribute_group *m10bmc_sec_attr_groups[] = {
	&m10bmc_security_attr_group,
	NULL,
};
261 | ||
5cd339b3 RW |
262 | static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell) |
263 | { | |
6052a005 | 264 | const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; |
5cd339b3 RW |
265 | u32 auth_result; |
266 | ||
001a734a | 267 | dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell); |
5cd339b3 | 268 | |
6052a005 | 269 | if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result)) |
5cd339b3 RW |
270 | dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result); |
271 | } | |
272 | ||
001a734a IJ |
273 | static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec) |
274 | { | |
275 | const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; | |
276 | u32 doorbell; | |
277 | int ret; | |
278 | ||
279 | ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell); | |
280 | if (ret) | |
281 | return ret; | |
282 | ||
283 | return FIELD_GET(DRBL_RSU_STATUS, doorbell); | |
284 | } | |
285 | ||
acf63c45 IJ |
286 | static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec) |
287 | { | |
288 | const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; | |
289 | u32 auth_result; | |
290 | int ret; | |
291 | ||
292 | ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result); | |
293 | if (ret) | |
294 | return ret; | |
295 | ||
296 | return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result); | |
297 | } | |
298 | ||
da04fa8c IJ |
299 | static bool rsu_status_ok(u32 status) |
300 | { | |
301 | return (status == RSU_STAT_NORMAL || | |
302 | status == RSU_STAT_NIOS_OK || | |
303 | status == RSU_STAT_USER_OK || | |
304 | status == RSU_STAT_FACTORY_OK); | |
305 | } | |
306 | ||
307 | static bool rsu_progress_done(u32 progress) | |
308 | { | |
309 | return (progress == RSU_PROG_IDLE || | |
310 | progress == RSU_PROG_RSU_DONE); | |
311 | } | |
312 | ||
313 | static bool rsu_progress_busy(u32 progress) | |
314 | { | |
315 | return (progress == RSU_PROG_AUTHENTICATING || | |
316 | progress == RSU_PROG_COPYING || | |
317 | progress == RSU_PROG_UPDATE_CANCEL || | |
318 | progress == RSU_PROG_PROGRAM_KEY_HASH); | |
319 | } | |
320 | ||
/*
 * Read the doorbell register and the generation-specific RSU status in one
 * call. On success, *doorbell_reg holds the raw doorbell value, *progress
 * the progress field decoded from it, and *status the RSU status.
 * Returns 0 or a negative errno.
 */
static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
				      u32 *progress, u32 *status)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
	if (ret)
		return ret;

	/* rsu_status() returns the status value or a negative errno */
	ret = sec->ops->rsu_status(sec);
	if (ret < 0)
		return ret;

	*status = ret;
	*progress = rsu_prog(*doorbell_reg);

	return 0;
}
340 | ||
5cd339b3 RW |
341 | static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec) |
342 | { | |
6052a005 | 343 | const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; |
5cd339b3 RW |
344 | u32 doorbell; |
345 | int ret; | |
346 | ||
6052a005 | 347 | ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell); |
5cd339b3 RW |
348 | if (ret) |
349 | return FW_UPLOAD_ERR_RW_ERROR; | |
350 | ||
da04fa8c | 351 | if (!rsu_progress_done(rsu_prog(doorbell))) { |
5cd339b3 RW |
352 | log_error_regs(sec, doorbell); |
353 | return FW_UPLOAD_ERR_BUSY; | |
354 | } | |
355 | ||
356 | return FW_UPLOAD_ERR_NONE; | |
357 | } | |
358 | ||
001a734a | 359 | static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status) |
5cd339b3 | 360 | { |
001a734a | 361 | if (doorbell_reg & DRBL_RSU_REQUEST) |
5cd339b3 RW |
362 | return false; |
363 | ||
5cd339b3 RW |
364 | if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT) |
365 | return true; | |
366 | ||
da04fa8c | 367 | if (!rsu_progress_done(progress)) |
5cd339b3 RW |
368 | return true; |
369 | ||
370 | return false; | |
371 | } | |
372 | ||
/*
 * Start an update: raise DRBL_RSU_REQUEST with host status IDLE, then poll
 * until the BMC acknowledges the request (rsu_start_done()) or the
 * handshake times out.
 */
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell_reg, progress, status;
	int ret, err;

	ret = regmap_update_bits(sec->m10bmc->regmap,
				 csr_map->base + csr_map->doorbell,
				 DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
				 DRBL_RSU_REQUEST |
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_IDLE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	/* err carries the per-read result; ret the poll outcome (-ETIMEDOUT) */
	ret = read_poll_timeout(m10bmc_sec_progress_status, err,
				err < 0 || rsu_start_done(doorbell_reg, progress, status),
				NIOS_HANDSHAKE_INTERVAL_US,
				NIOS_HANDSHAKE_TIMEOUT_US,
				false,
				sec, &doorbell_reg, &progress, &status);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (err) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	/* WEAROUT is a warning to the user; ERASE_FAIL is a hard failure */
	if (status == RSU_STAT_WEAROUT) {
		dev_warn(sec->dev, "Excessive flash update count detected\n");
		return FW_UPLOAD_ERR_WEAROUT;
	} else if (status == RSU_STAT_ERASE_FAIL) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
412 | ||
/*
 * Wait for the BMC to leave RSU_PROG_PREPARE and reach RSU_PROG_READY,
 * polling every RSU_PREP_INTERVAL_MS for up to RSU_PREP_TIMEOUT_MS.
 */
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	unsigned long poll_timeout;
	u32 doorbell, progress;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS);
	while (rsu_prog(doorbell) == RSU_PROG_PREPARE) {
		msleep(RSU_PREP_INTERVAL_MS);
		if (time_after(jiffies, poll_timeout))
			break;

		ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
		if (ret)
			return FW_UPLOAD_ERR_RW_ERROR;
	}

	/* Still in PREPARE after the timeout window => timed out */
	progress = rsu_prog(doorbell);
	if (progress == RSU_PROG_PREPARE) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (progress != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
446 | ||
/*
 * Tell the BMC the staging area is fully written (HOST_STATUS_WRITE_DONE),
 * wait for it to accept the data (progress leaves RSU_PROG_READY), then
 * verify the resulting RSU status.
 */
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell_reg, status;
	int ret;

	ret = regmap_update_bits(sec->m10bmc->regmap,
				 csr_map->base + csr_map->doorbell,
				 DRBL_HOST_STATUS,
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_WRITE_DONE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
				       csr_map->base + csr_map->doorbell,
				       doorbell_reg,
				       rsu_prog(doorbell_reg) != RSU_PROG_READY,
				       NIOS_HANDSHAKE_INTERVAL_US,
				       NIOS_HANDSHAKE_TIMEOUT_US);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	/* rsu_status() returns the status value or a negative errno */
	ret = sec->ops->rsu_status(sec);
	if (ret < 0)
		return FW_UPLOAD_ERR_HW_ERROR;
	status = ret;

	if (!rsu_status_ok(status)) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
487 | ||
001a734a | 488 | static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg) |
5cd339b3 | 489 | { |
001a734a | 490 | u32 progress, status; |
6052a005 | 491 | |
001a734a | 492 | if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status)) |
5cd339b3 RW |
493 | return -EIO; |
494 | ||
001a734a | 495 | if (!rsu_status_ok(status)) |
5cd339b3 | 496 | return -EINVAL; |
5cd339b3 | 497 | |
001a734a | 498 | if (rsu_progress_done(progress)) |
5cd339b3 | 499 | return 0; |
da04fa8c | 500 | |
001a734a | 501 | if (rsu_progress_busy(progress)) |
5cd339b3 | 502 | return -EAGAIN; |
da04fa8c IJ |
503 | |
504 | return -EINVAL; | |
5cd339b3 RW |
505 | } |
506 | ||
507 | static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec) | |
508 | { | |
6052a005 | 509 | const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; |
5cd339b3 RW |
510 | u32 doorbell; |
511 | int ret; | |
512 | ||
6052a005 | 513 | ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell); |
5cd339b3 RW |
514 | if (ret) |
515 | return FW_UPLOAD_ERR_RW_ERROR; | |
516 | ||
517 | if (rsu_prog(doorbell) != RSU_PROG_READY) | |
518 | return FW_UPLOAD_ERR_BUSY; | |
519 | ||
520 | ret = regmap_update_bits(sec->m10bmc->regmap, | |
6052a005 | 521 | csr_map->base + csr_map->doorbell, |
5cd339b3 RW |
522 | DRBL_HOST_STATUS, |
523 | FIELD_PREP(DRBL_HOST_STATUS, | |
524 | HOST_STATUS_ABORT_RSU)); | |
525 | if (ret) | |
526 | return FW_UPLOAD_ERR_RW_ERROR; | |
527 | ||
528 | return FW_UPLOAD_ERR_CANCELED; | |
529 | } | |
530 | ||
/*
 * fw_upload .prepare op: validate the image size, take the flash write lock
 * (when the BMC provides bulk flash ops), and run the RSU start handshake.
 * On success the lock stays held for the whole update and is released in
 * m10bmc_sec_cleanup(); every error path drops it here.
 */
static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
					     const u8 *data, u32 size)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	u32 ret;

	sec->cancel_request = false;

	if (!size || size > M10BMC_STAGING_SIZE)
		return FW_UPLOAD_ERR_INVALID_SIZE;

	if (sec->m10bmc->flash_bulk_ops)
		if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc))
			return FW_UPLOAD_ERR_BUSY;

	ret = rsu_check_idle(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto unlock_flash;

	ret = rsu_update_init(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto unlock_flash;

	ret = rsu_prog_ready(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto unlock_flash;

	/* A cancel may have arrived asynchronously during the handshake */
	if (sec->cancel_request) {
		ret = rsu_cancel(sec);
		goto unlock_flash;
	}

	return FW_UPLOAD_ERR_NONE;

unlock_flash:
	if (sec->m10bmc->flash_bulk_ops)
		sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
	return ret;
}
570 | ||
#define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */

/*
 * fw_upload .write op: copy up to WRITE_BLOCK_SIZE bytes of the image at
 * @offset into the staging area (m10bmc_sec_write() indexes @data by
 * @offset, so @data is the image base). *written reports the amount
 * consumed by this call.
 */
static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
					      u32 offset, u32 size, u32 *written)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	u32 blk_size, doorbell;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	/* The BMC must still be waiting for data in RSU_PROG_READY */
	ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
	if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	/* m10bmc_sec_write() expects a stride-aligned block size */
	WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
	blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
	ret = m10bmc_sec_write(sec, data, offset, blk_size);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	*written = blk_size;
	return FW_UPLOAD_ERR_NONE;
}
602 | ||
/*
 * fw_upload .poll_complete op: signal write-done via rsu_send_data(), then
 * poll rsu_check_complete() until the update finishes, fails, or
 * RSU_COMPLETE_TIMEOUT_MS elapses.
 */
static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	unsigned long poll_timeout;
	u32 doorbell, result;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	result = rsu_send_data(sec);
	if (result != FW_UPLOAD_ERR_NONE)
		return result;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS);
	do {
		msleep(RSU_COMPLETE_INTERVAL_MS);
		ret = rsu_check_complete(sec, &doorbell);
	} while (ret == -EAGAIN && !time_after(jiffies, poll_timeout));

	/* Map rsu_check_complete() errnos onto fw_upload error codes */
	if (ret == -EAGAIN) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret == -EIO) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (ret) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
635 | ||
/*
 * m10bmc_sec_cancel() may be called asynchronously with an on-going update.
 * All other functions are called sequentially in a single thread. To avoid
 * contention on register accesses, m10bmc_sec_cancel() must only update
 * the cancel_request flag. Other functions will check this flag and handle
 * the cancel request synchronously.
 */
static void m10bmc_sec_cancel(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	/* Synchronous paths (prepare/write/poll_complete) act on this flag */
	sec->cancel_request = true;
}
649 | ||
/*
 * fw_upload .cleanup op: best-effort abort of any in-progress RSU and
 * release of the flash write lock taken in m10bmc_sec_prepare().
 */
static void m10bmc_sec_cleanup(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	/* Result deliberately ignored: there may be nothing to cancel */
	(void)rsu_cancel(sec);

	if (sec->m10bmc->flash_bulk_ops)
		sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
}
659 | ||
/* fw_upload framework callbacks implementing the secure update flow */
static const struct fw_upload_ops m10bmc_ops = {
	.prepare = m10bmc_sec_prepare,
	.write = m10bmc_sec_fw_write,
	.poll_complete = m10bmc_sec_poll_complete,
	.cancel = m10bmc_sec_cancel,
	.cleanup = m10bmc_sec_cleanup,
};
667 | ||
/* N3000/D5005: RSU status read from the doorbell register */
static const struct m10bmc_sec_ops m10sec_n3000_ops = {
	.rsu_status = m10bmc_sec_n3000_rsu_status,
};

/* N6000: RSU status read from the auth result register */
static const struct m10bmc_sec_ops m10sec_n6000_ops = {
	.rsu_status = m10bmc_sec_n6000_rsu_status,
};
675 | ||
#define SEC_UPDATE_LEN_MAX 32
/*
 * Allocate per-device state, pick the generation-specific ops from the
 * platform device id, and register a uniquely named ("secure-updateN")
 * firmware-upload interface. Unwinds the id allocation and the name copy
 * on failure.
 */
static int m10bmc_sec_probe(struct platform_device *pdev)
{
	char buf[SEC_UPDATE_LEN_MAX];
	struct m10bmc_sec *sec;
	struct fw_upload *fwl;
	unsigned int len;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	sec->dev = &pdev->dev;
	sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
	sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
	dev_set_drvdata(&pdev->dev, sec);

	/* Reserve a unique id so concurrent instances get distinct names */
	ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
		       xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
			sec->fw_name_id);
	sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
	if (!sec->fw_name) {
		ret = -ENOMEM;
		goto fw_name_fail;
	}

	fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
				       &m10bmc_ops, sec);
	if (IS_ERR(fwl)) {
		dev_err(sec->dev, "Firmware Upload driver failed to start\n");
		ret = PTR_ERR(fwl);
		goto fw_uploader_fail;
	}

	sec->fwl = fwl;
	return 0;

fw_uploader_fail:
	kfree(sec->fw_name);
fw_name_fail:
	xa_erase(&fw_upload_xa, sec->fw_name_id);
	return ret;
}
724 | ||
/* Tear down in reverse order of probe; devm frees the state itself. */
static int m10bmc_sec_remove(struct platform_device *pdev)
{
	struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);

	firmware_upload_unregister(sec->fwl);
	kfree(sec->fw_name);
	xa_erase(&fw_upload_xa, sec->fw_name_id);

	return 0;
}
735 | ||
/* Match table; driver_data selects the generation-specific ops */
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
	{
		.name = "n3000bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
	},
	{
		/* D5005 shares the doorbell-based status with N3000 */
		.name = "d5005bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
	},
	{
		.name = "n6000bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n6000_ops,
	},
	{ }
};
MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
752 | ||
static struct platform_driver intel_m10bmc_sec_driver = {
	.probe = m10bmc_sec_probe,
	.remove = m10bmc_sec_remove,
	.driver = {
		.name = "intel-m10bmc-sec-update",
		/* exposes the "security" sysfs group on the device */
		.dev_groups = m10bmc_sec_attr_groups,
	},
	.id_table = intel_m10bmc_sec_ids,
};
module_platform_driver(intel_m10bmc_sec_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");