// SPDX-FileComment: drivers/fpga/intel-m10-bmc-sec-update.c
/*
 * Source extracted from a git web viewer ("fpga: m10bmc-sec: expose max10
 * canceled keys in sysfs"); viewer navigation residue removed.
 */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Intel MAX10 Board Management Controller Secure Update Driver
4 *
5 * Copyright (C) 2019-2022 Intel Corporation. All rights reserved.
6 *
7 */
8#include <linux/bitfield.h>
9#include <linux/device.h>
10#include <linux/firmware.h>
11#include <linux/mfd/intel-m10-bmc.h>
12#include <linux/mod_devicetable.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/slab.h>
16
/* Per-device driver state, stored as platform device drvdata in probe(). */
struct m10bmc_sec {
	struct device *dev;		/* this secure-update platform device */
	struct intel_m10bmc *m10bmc;	/* parent MAX10 BMC (drvdata of parent) */
};
21
/* Root Entry Hash (REH) support */
#define REH_SHA256_SIZE		32	/* SHA-256 digest, bytes */
#define REH_SHA384_SIZE		48	/* SHA-384 digest, bytes */
#define REH_MAGIC		GENMASK(15, 0)	/* magic field in program word */
#define REH_SHA_NUM_BYTES	GENMASK(31, 16)	/* digest length field, in bits */
27
28static ssize_t
29show_root_entry_hash(struct device *dev, u32 exp_magic,
30 u32 prog_addr, u32 reh_addr, char *buf)
31{
32 struct m10bmc_sec *sec = dev_get_drvdata(dev);
33 int sha_num_bytes, i, ret, cnt = 0;
34 u8 hash[REH_SHA384_SIZE];
35 unsigned int stride;
36 u32 magic;
37
38 stride = regmap_get_reg_stride(sec->m10bmc->regmap);
39 ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
40 if (ret)
41 return ret;
42
43 if (FIELD_GET(REH_MAGIC, magic) != exp_magic)
44 return sysfs_emit(buf, "hash not programmed\n");
45
46 sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
47 if ((sha_num_bytes % stride) ||
48 (sha_num_bytes != REH_SHA256_SIZE &&
49 sha_num_bytes != REH_SHA384_SIZE)) {
50 dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
51 sha_num_bytes);
52 return -EINVAL;
53 }
54
55 ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
56 hash, sha_num_bytes / stride);
57 if (ret) {
58 dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
59 reh_addr, sha_num_bytes / stride, ret);
60 return ret;
61 }
62
63 for (i = 0; i < sha_num_bytes; i++)
64 cnt += sprintf(buf + cnt, "%02x", hash[i]);
65 cnt += sprintf(buf + cnt, "\n");
66
67 return cnt;
68}
69
/*
 * Generate a read-only sysfs attribute "<_name>_root_entry_hash" whose
 * show() callback delegates to show_root_entry_hash() with the
 * image-specific magic and register addresses.
 */
#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
					    struct device_attribute *attr, \
					    char *buf) \
{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
static DEVICE_ATTR_RO(_name##_root_entry_hash)

/* One hash attribute each for the BMC, static region (SR) and PR images. */
DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
80
/* Canceled Code Signing Key (CSK) support */
81#define CSK_BIT_LEN 128U
82#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32)
83
84static ssize_t
85show_canceled_csk(struct device *dev, u32 addr, char *buf)
86{
87 unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
88 struct m10bmc_sec *sec = dev_get_drvdata(dev);
89 DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
90 __le32 csk_le32[CSK_32ARRAY_SIZE];
91 u32 csk32[CSK_32ARRAY_SIZE];
92 int ret;
93
94 stride = regmap_get_reg_stride(sec->m10bmc->regmap);
95 if (size % stride) {
96 dev_err(sec->dev,
97 "CSK vector size (0x%x) not aligned to stride (0x%x)\n",
98 size, stride);
99 WARN_ON_ONCE(1);
100 return -EINVAL;
101 }
102
103 ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
104 size / stride);
105 if (ret) {
106 dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
107 addr, size / stride, ret);
108 return ret;
109 }
110
111 for (i = 0; i < CSK_32ARRAY_SIZE; i++)
112 csk32[i] = le32_to_cpu(((csk_le32[i])));
113
114 bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN);
115 bitmap_complement(csk_map, csk_map, CSK_BIT_LEN);
116 return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
117}
118
/*
 * Generate a read-only sysfs attribute "<_name>_canceled_csks" whose
 * show() callback delegates to show_canceled_csk() at the given address.
 */
#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
					  struct device_attribute *attr, \
					  char *buf) \
{ return show_canceled_csk(dev, _addr, buf); } \
static DEVICE_ATTR_RO(_name##_canceled_csks)

/* Offset of the CSK cancellation vector within each program region. */
#define CSK_VEC_OFFSET 0x34

/* One canceled-CSK attribute each for the BMC, SR and PR images. */
DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);
131
/* Flash update count support */
132#define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */
133
134static ssize_t flash_count_show(struct device *dev,
135 struct device_attribute *attr, char *buf)
136{
137 struct m10bmc_sec *sec = dev_get_drvdata(dev);
138 unsigned int stride, num_bits;
139 u8 *flash_buf;
140 int cnt, ret;
141
142 stride = regmap_get_reg_stride(sec->m10bmc->regmap);
143 num_bits = FLASH_COUNT_SIZE * 8;
144
145 flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
146 if (!flash_buf)
147 return -ENOMEM;
148
149 if (FLASH_COUNT_SIZE % stride) {
150 dev_err(sec->dev,
151 "FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
152 FLASH_COUNT_SIZE, stride);
153 WARN_ON_ONCE(1);
154 return -EINVAL;
155 }
156
157 ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
158 flash_buf, FLASH_COUNT_SIZE / stride);
159 if (ret) {
160 dev_err(sec->dev,
161 "failed to read flash count: %x cnt %x: %d\n",
162 STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
163 goto exit_free;
164 }
165 cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);
166
167exit_free:
168 kfree(flash_buf);
169
170 return ret ? : sysfs_emit(buf, "%u\n", cnt);
171}
172static DEVICE_ATTR_RO(flash_count);
173
bdf86d0e 174static struct attribute *m10bmc_security_attrs[] = {
154afa5c 175 &dev_attr_flash_count.attr,
bdf86d0e
RW
176 &dev_attr_bmc_root_entry_hash.attr,
177 &dev_attr_sr_root_entry_hash.attr,
178 &dev_attr_pr_root_entry_hash.attr,
7f03d84a
RW
179 &dev_attr_sr_canceled_csks.attr,
180 &dev_attr_pr_canceled_csks.attr,
181 &dev_attr_bmc_canceled_csks.attr,
bdf86d0e
RW
182 NULL,
183};
184
185static struct attribute_group m10bmc_security_attr_group = {
186 .name = "security",
187 .attrs = m10bmc_security_attrs,
188};
189
190static const struct attribute_group *m10bmc_sec_attr_groups[] = {
191 &m10bmc_security_attr_group,
192 NULL,
193};
194
195#define SEC_UPDATE_LEN_MAX 32
196static int m10bmc_sec_probe(struct platform_device *pdev)
197{
198 struct m10bmc_sec *sec;
199
200 sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
201 if (!sec)
202 return -ENOMEM;
203
204 sec->dev = &pdev->dev;
205 sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
206 dev_set_drvdata(&pdev->dev, sec);
207
208 return 0;
209}
210
/* Platform device IDs this driver binds to (created by the parent MFD). */
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
	{
		.name = "n3000bmc-sec-update",
	},
	{ }
};
MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
218
static struct platform_driver intel_m10bmc_sec_driver = {
	.probe = m10bmc_sec_probe,
	.driver = {
		.name = "intel-m10bmc-sec-update",
		/* dev_groups: driver core creates/removes the sysfs group */
		.dev_groups = m10bmc_sec_attr_groups,
	},
	.id_table = intel_m10bmc_sec_ids,
};
module_platform_driver(intel_m10bmc_sec_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");