1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
5 */
6 #include <linux/crc32.h>
7 #include <linux/base64.h>
8 #include <linux/prandom.h>
9 #include <asm/unaligned.h>
10 #include <crypto/hash.h>
11 #include <crypto/dh.h>
14 #include <linux/nvme-auth.h>
16 #define CHAP_BUF_SIZE 4096
17 static struct kmem_cache *nvme_chap_buf_cache;
18 static mempool_t *nvme_chap_buf_pool;
20 struct nvme_dhchap_queue_context {
21 struct list_head entry;
22 struct work_struct auth_work;
23 struct nvme_ctrl *ctrl;
24 struct crypto_shash *shash_tfm;
25 struct crypto_kpp *dh_tfm;
26 struct nvme_dhchap_key *transformed_key;
49 static struct workqueue_struct *nvme_auth_wq;
51 #define nvme_auth_flags_from_qid(qid) \
52 (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
53 #define nvme_auth_queue_from_qid(ctrl, qid) \
54 (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
56 static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
58 return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
59 ctrl->opts->nr_poll_queues + 1;
62 static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
63 void *data, size_t data_len, bool auth_send)
65 struct nvme_command cmd = {};
66 blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
67 struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
70 cmd.auth_common.opcode = nvme_fabrics_command;
71 cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
72 cmd.auth_common.spsp0 = 0x01;
73 cmd.auth_common.spsp1 = 0x01;
75 cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
76 cmd.auth_send.tl = cpu_to_le32(data_len);
78 cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
79 cmd.auth_receive.al = cpu_to_le32(data_len);
82 ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
83 qid == 0 ? NVME_QID_ANY : qid,
86 dev_warn(ctrl->device,
87 "qid %d auth_send failed with status %d\n", qid, ret);
90 "qid %d auth_send failed with error %d\n", qid, ret);
94 static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
95 struct nvmf_auth_dhchap_failure_data *data,
96 u16 transaction, u8 expected_msg)
98 dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
99 __func__, qid, data->auth_type, data->auth_id);
101 if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
102 data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
103 return data->rescode_exp;
105 if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
106 data->auth_id != expected_msg) {
107 dev_warn(ctrl->device,
108 "qid %d invalid message %02x/%02x\n",
109 qid, data->auth_type, data->auth_id);
110 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
112 if (le16_to_cpu(data->t_id) != transaction) {
113 dev_warn(ctrl->device,
114 "qid %d invalid transaction ID %d\n",
115 qid, le16_to_cpu(data->t_id));
116 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
121 static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
122 struct nvme_dhchap_queue_context *chap)
124 struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
125 size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
127 if (size > CHAP_BUF_SIZE) {
128 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
131 memset((u8 *)chap->buf, 0, size);
132 data->auth_type = NVME_AUTH_COMMON_MESSAGES;
133 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
134 data->t_id = cpu_to_le16(chap->transaction);
135 data->sc_c = 0; /* No secure channel concatenation */
137 data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
138 data->auth_protocol[0].dhchap.halen = 3;
139 data->auth_protocol[0].dhchap.dhlen = 6;
140 data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
141 data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
142 data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
143 data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
144 data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
145 data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
146 data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
147 data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
148 data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
153 static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
154 struct nvme_dhchap_queue_context *chap)
156 struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
157 u16 dhvlen = le16_to_cpu(data->dhvlen);
158 size_t size = sizeof(*data) + data->hl + dhvlen;
159 const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
160 const char *hmac_name, *kpp_name;
162 if (size > CHAP_BUF_SIZE) {
163 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
167 hmac_name = nvme_auth_hmac_name(data->hashid);
169 dev_warn(ctrl->device,
170 "qid %d: invalid HASH ID %d\n",
171 chap->qid, data->hashid);
172 chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
176 if (chap->hash_id == data->hashid && chap->shash_tfm &&
177 !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
178 crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
179 dev_dbg(ctrl->device,
180 "qid %d: reuse existing hash %s\n",
181 chap->qid, hmac_name);
185 /* Reset if hash cannot be reused */
186 if (chap->shash_tfm) {
187 crypto_free_shash(chap->shash_tfm);
191 chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
192 CRYPTO_ALG_ALLOCATES_MEMORY);
193 if (IS_ERR(chap->shash_tfm)) {
194 dev_warn(ctrl->device,
195 "qid %d: failed to allocate hash %s, error %ld\n",
196 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
197 chap->shash_tfm = NULL;
198 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
202 if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
203 dev_warn(ctrl->device,
204 "qid %d: invalid hash length %d\n",
205 chap->qid, data->hl);
206 crypto_free_shash(chap->shash_tfm);
207 chap->shash_tfm = NULL;
208 chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
212 chap->hash_id = data->hashid;
213 chap->hash_len = data->hl;
214 dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
215 chap->qid, hmac_name);
218 kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
220 dev_warn(ctrl->device,
221 "qid %d: invalid DH group id %d\n",
222 chap->qid, data->dhgid);
223 chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
224 /* Leave previous dh_tfm intact */
228 if (chap->dhgroup_id == data->dhgid &&
229 (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
230 dev_dbg(ctrl->device,
231 "qid %d: reuse existing DH group %s\n",
232 chap->qid, gid_name);
236 /* Reset dh_tfm if it can't be reused */
238 crypto_free_kpp(chap->dh_tfm);
242 if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
244 dev_warn(ctrl->device,
245 "qid %d: empty DH value\n",
247 chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
251 chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
252 if (IS_ERR(chap->dh_tfm)) {
253 int ret = PTR_ERR(chap->dh_tfm);
255 dev_warn(ctrl->device,
256 "qid %d: error %d initializing DH group %s\n",
257 chap->qid, ret, gid_name);
258 chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
262 dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
263 chap->qid, gid_name);
264 } else if (dhvlen != 0) {
265 dev_warn(ctrl->device,
266 "qid %d: invalid DH value for NULL DH\n",
268 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
271 chap->dhgroup_id = data->dhgid;
274 chap->s1 = le32_to_cpu(data->seqnum);
275 memcpy(chap->c1, data->cval, chap->hash_len);
277 chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
278 if (!chap->ctrl_key) {
279 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
282 chap->ctrl_key_len = dhvlen;
283 memcpy(chap->ctrl_key, data->cval + chap->hash_len,
285 dev_dbg(ctrl->device, "ctrl public key %*ph\n",
286 (int)chap->ctrl_key_len, chap->ctrl_key);
292 static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
293 struct nvme_dhchap_queue_context *chap)
295 struct nvmf_auth_dhchap_reply_data *data = chap->buf;
296 size_t size = sizeof(*data);
298 size += 2 * chap->hash_len;
300 if (chap->host_key_len)
301 size += chap->host_key_len;
303 if (size > CHAP_BUF_SIZE) {
304 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
308 memset(chap->buf, 0, size);
309 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
310 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
311 data->t_id = cpu_to_le16(chap->transaction);
312 data->hl = chap->hash_len;
313 data->dhvlen = cpu_to_le16(chap->host_key_len);
314 memcpy(data->rval, chap->response, chap->hash_len);
315 if (ctrl->ctrl_key) {
316 chap->bi_directional = true;
317 get_random_bytes(chap->c2, chap->hash_len);
319 memcpy(data->rval + chap->hash_len, chap->c2,
321 dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
322 __func__, chap->qid, (int)chap->hash_len, chap->c2);
324 memset(chap->c2, 0, chap->hash_len);
326 chap->s2 = nvme_auth_get_seqnum();
327 data->seqnum = cpu_to_le32(chap->s2);
328 if (chap->host_key_len) {
329 dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
331 chap->host_key_len, chap->host_key);
332 memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
339 static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
340 struct nvme_dhchap_queue_context *chap)
342 struct nvmf_auth_dhchap_success1_data *data = chap->buf;
343 size_t size = sizeof(*data) + chap->hash_len;
345 if (size > CHAP_BUF_SIZE) {
346 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
350 if (data->hl != chap->hash_len) {
351 dev_warn(ctrl->device,
352 "qid %d: invalid hash length %u\n",
353 chap->qid, data->hl);
354 chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
358 /* Just print out information for the admin queue */
360 dev_info(ctrl->device,
361 "qid 0: authenticated with hash %s dhgroup %s\n",
362 nvme_auth_hmac_name(chap->hash_id),
363 nvme_auth_dhgroup_name(chap->dhgroup_id));
368 /* Validate controller response */
369 if (memcmp(chap->response, data->rval, data->hl)) {
370 dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
371 __func__, chap->qid, (int)chap->hash_len, data->rval);
372 dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
373 __func__, chap->qid, (int)chap->hash_len,
375 dev_warn(ctrl->device,
376 "qid %d: controller authentication failed\n",
378 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
379 return -ECONNREFUSED;
382 /* Just print out information for the admin queue */
384 dev_info(ctrl->device,
385 "qid 0: controller authenticated\n");
389 static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
390 struct nvme_dhchap_queue_context *chap)
392 struct nvmf_auth_dhchap_success2_data *data = chap->buf;
393 size_t size = sizeof(*data);
395 memset(chap->buf, 0, size);
396 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
397 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
398 data->t_id = cpu_to_le16(chap->transaction);
403 static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
404 struct nvme_dhchap_queue_context *chap)
406 struct nvmf_auth_dhchap_failure_data *data = chap->buf;
407 size_t size = sizeof(*data);
409 memset(chap->buf, 0, size);
410 data->auth_type = NVME_AUTH_COMMON_MESSAGES;
411 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
412 data->t_id = cpu_to_le16(chap->transaction);
413 data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
414 data->rescode_exp = chap->status;
419 static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
420 struct nvme_dhchap_queue_context *chap)
422 SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
423 u8 buf[4], *challenge = chap->c1;
426 dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
427 __func__, chap->qid, chap->s1, chap->transaction);
429 if (!chap->transformed_key) {
430 chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
431 ctrl->opts->host->nqn);
432 if (IS_ERR(chap->transformed_key)) {
433 ret = PTR_ERR(chap->transformed_key);
434 chap->transformed_key = NULL;
438 dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
439 __func__, chap->qid);
442 ret = crypto_shash_setkey(chap->shash_tfm,
443 chap->transformed_key->key, chap->transformed_key->len);
445 dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
451 challenge = kmalloc(chap->hash_len, GFP_KERNEL);
456 ret = nvme_auth_augmented_challenge(chap->hash_id,
465 shash->tfm = chap->shash_tfm;
466 ret = crypto_shash_init(shash);
469 ret = crypto_shash_update(shash, challenge, chap->hash_len);
472 put_unaligned_le32(chap->s1, buf);
473 ret = crypto_shash_update(shash, buf, 4);
476 put_unaligned_le16(chap->transaction, buf);
477 ret = crypto_shash_update(shash, buf, 2);
480 memset(buf, 0, sizeof(buf));
481 ret = crypto_shash_update(shash, buf, 1);
484 ret = crypto_shash_update(shash, "HostHost", 8);
487 ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
488 strlen(ctrl->opts->host->nqn));
491 ret = crypto_shash_update(shash, buf, 1);
494 ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
495 strlen(ctrl->opts->subsysnqn));
498 ret = crypto_shash_final(shash, chap->response);
500 if (challenge != chap->c1)
505 static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
506 struct nvme_dhchap_queue_context *chap)
508 SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
509 struct nvme_dhchap_key *transformed_key;
510 u8 buf[4], *challenge = chap->c2;
513 transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
514 ctrl->opts->subsysnqn);
515 if (IS_ERR(transformed_key)) {
516 ret = PTR_ERR(transformed_key);
520 ret = crypto_shash_setkey(chap->shash_tfm,
521 transformed_key->key, transformed_key->len);
523 dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
529 challenge = kmalloc(chap->hash_len, GFP_KERNEL);
534 ret = nvme_auth_augmented_challenge(chap->hash_id,
542 dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
543 __func__, chap->qid, chap->s2, chap->transaction);
544 dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
545 __func__, chap->qid, (int)chap->hash_len, challenge);
546 dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
547 __func__, chap->qid, ctrl->opts->subsysnqn);
548 dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
549 __func__, chap->qid, ctrl->opts->host->nqn);
550 shash->tfm = chap->shash_tfm;
551 ret = crypto_shash_init(shash);
554 ret = crypto_shash_update(shash, challenge, chap->hash_len);
557 put_unaligned_le32(chap->s2, buf);
558 ret = crypto_shash_update(shash, buf, 4);
561 put_unaligned_le16(chap->transaction, buf);
562 ret = crypto_shash_update(shash, buf, 2);
566 ret = crypto_shash_update(shash, buf, 1);
569 ret = crypto_shash_update(shash, "Controller", 10);
572 ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
573 strlen(ctrl->opts->subsysnqn));
576 ret = crypto_shash_update(shash, buf, 1);
579 ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
580 strlen(ctrl->opts->host->nqn));
583 ret = crypto_shash_final(shash, chap->response);
585 if (challenge != chap->c2)
587 nvme_auth_free_key(transformed_key);
591 static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
592 struct nvme_dhchap_queue_context *chap)
596 if (chap->host_key && chap->host_key_len) {
597 dev_dbg(ctrl->device,
598 "qid %d: reusing host key\n", chap->qid);
601 ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
603 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
607 chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);
609 chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
610 if (!chap->host_key) {
611 chap->host_key_len = 0;
612 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
615 ret = nvme_auth_gen_pubkey(chap->dh_tfm,
616 chap->host_key, chap->host_key_len);
618 dev_dbg(ctrl->device,
619 "failed to generate public key, error %d\n", ret);
620 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
625 chap->sess_key_len = chap->host_key_len;
626 chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
627 if (!chap->sess_key) {
628 chap->sess_key_len = 0;
629 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
633 ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
634 chap->ctrl_key, chap->ctrl_key_len,
635 chap->sess_key, chap->sess_key_len);
637 dev_dbg(ctrl->device,
638 "failed to generate shared secret, error %d\n", ret);
639 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
642 dev_dbg(ctrl->device, "shared secret %*ph\n",
643 (int)chap->sess_key_len, chap->sess_key);
647 static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
649 nvme_auth_free_key(chap->transformed_key);
650 chap->transformed_key = NULL;
651 kfree_sensitive(chap->host_key);
652 chap->host_key = NULL;
653 chap->host_key_len = 0;
654 kfree_sensitive(chap->ctrl_key);
655 chap->ctrl_key = NULL;
656 chap->ctrl_key_len = 0;
657 kfree_sensitive(chap->sess_key);
658 chap->sess_key = NULL;
659 chap->sess_key_len = 0;
664 chap->bi_directional = false;
665 chap->transaction = 0;
666 memset(chap->c1, 0, sizeof(chap->c1));
667 memset(chap->c2, 0, sizeof(chap->c2));
668 mempool_free(chap->buf, nvme_chap_buf_pool);
672 static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
674 nvme_auth_reset_dhchap(chap);
676 crypto_free_shash(chap->shash_tfm);
678 crypto_free_kpp(chap->dh_tfm);
681 static void nvme_queue_auth_work(struct work_struct *work)
683 struct nvme_dhchap_queue_context *chap =
684 container_of(work, struct nvme_dhchap_queue_context, auth_work);
685 struct nvme_ctrl *ctrl = chap->ctrl;
690 * Allocate a large enough buffer for the entire negotiation:
691 * 4k is enough to ffdhe8192.
693 chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
695 chap->error = -ENOMEM;
699 chap->transaction = ctrl->transaction++;
701 /* DH-HMAC-CHAP Step 1: send negotiate */
702 dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
703 __func__, chap->qid);
704 ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
710 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
716 /* DH-HMAC-CHAP Step 2: receive challenge */
717 dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
718 __func__, chap->qid);
720 memset(chap->buf, 0, CHAP_BUF_SIZE);
721 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
724 dev_warn(ctrl->device,
725 "qid %d failed to receive challenge, %s %d\n",
726 chap->qid, ret < 0 ? "error" : "nvme status", ret);
730 ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
731 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
734 chap->error = -ECONNREFUSED;
738 ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
740 /* Invalid challenge parameters */
745 if (chap->ctrl_key_len) {
746 dev_dbg(ctrl->device,
747 "%s: qid %d DH exponential\n",
748 __func__, chap->qid);
749 ret = nvme_auth_dhchap_exponential(ctrl, chap);
756 dev_dbg(ctrl->device, "%s: qid %d host response\n",
757 __func__, chap->qid);
758 mutex_lock(&ctrl->dhchap_auth_mutex);
759 ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
760 mutex_unlock(&ctrl->dhchap_auth_mutex);
766 /* DH-HMAC-CHAP Step 3: send reply */
767 dev_dbg(ctrl->device, "%s: qid %d send reply\n",
768 __func__, chap->qid);
769 ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
776 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
782 /* DH-HMAC-CHAP Step 4: receive success1 */
783 dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
784 __func__, chap->qid);
786 memset(chap->buf, 0, CHAP_BUF_SIZE);
787 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
790 dev_warn(ctrl->device,
791 "qid %d failed to receive success1, %s %d\n",
792 chap->qid, ret < 0 ? "error" : "nvme status", ret);
796 ret = nvme_auth_receive_validate(ctrl, chap->qid,
797 chap->buf, chap->transaction,
798 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
801 chap->error = -ECONNREFUSED;
805 mutex_lock(&ctrl->dhchap_auth_mutex);
806 if (ctrl->ctrl_key) {
807 dev_dbg(ctrl->device,
808 "%s: qid %d controller response\n",
809 __func__, chap->qid);
810 ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
812 mutex_unlock(&ctrl->dhchap_auth_mutex);
817 mutex_unlock(&ctrl->dhchap_auth_mutex);
819 ret = nvme_auth_process_dhchap_success1(ctrl, chap);
821 /* Controller authentication failed */
822 chap->error = -ECONNREFUSED;
826 if (chap->bi_directional) {
827 /* DH-HMAC-CHAP Step 5: send success2 */
828 dev_dbg(ctrl->device, "%s: qid %d send success2\n",
829 __func__, chap->qid);
830 tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
831 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
841 if (chap->status == 0)
842 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
843 dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
844 __func__, chap->qid, chap->status);
845 tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
846 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
848 * only update error if send failure2 failed and no other
849 * error had been set during authentication.
851 if (ret && !chap->error)
855 int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
857 struct nvme_dhchap_queue_context *chap;
859 if (!ctrl->host_key) {
860 dev_warn(ctrl->device, "qid %d: no key\n", qid);
864 if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
865 dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
869 chap = &ctrl->dhchap_ctxs[qid];
870 cancel_work_sync(&chap->auth_work);
871 queue_work(nvme_auth_wq, &chap->auth_work);
874 EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
876 int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
878 struct nvme_dhchap_queue_context *chap;
881 chap = &ctrl->dhchap_ctxs[qid];
882 flush_work(&chap->auth_work);
884 /* clear sensitive info */
885 nvme_auth_reset_dhchap(chap);
888 EXPORT_SYMBOL_GPL(nvme_auth_wait);
890 static void nvme_ctrl_auth_work(struct work_struct *work)
892 struct nvme_ctrl *ctrl =
893 container_of(work, struct nvme_ctrl, dhchap_auth_work);
897 * If the ctrl is no connected, bail as reconnect will handle
900 if (ctrl->state != NVME_CTRL_LIVE)
903 /* Authenticate admin queue first */
904 ret = nvme_auth_negotiate(ctrl, 0);
906 dev_warn(ctrl->device,
907 "qid 0: error %d setting up authentication\n", ret);
910 ret = nvme_auth_wait(ctrl, 0);
912 dev_warn(ctrl->device,
913 "qid 0: authentication failed\n");
917 for (q = 1; q < ctrl->queue_count; q++) {
918 ret = nvme_auth_negotiate(ctrl, q);
920 dev_warn(ctrl->device,
921 "qid %d: error %d setting up authentication\n",
928 * Failure is a soft-state; credentials remain valid until
929 * the controller terminates the connection.
931 for (q = 1; q < ctrl->queue_count; q++) {
932 ret = nvme_auth_wait(ctrl, q);
934 dev_warn(ctrl->device,
935 "qid %d: authentication failed\n", q);
939 int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
941 struct nvme_dhchap_queue_context *chap;
944 mutex_init(&ctrl->dhchap_auth_mutex);
945 INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
948 ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
952 ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
955 goto err_free_dhchap_secret;
957 if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
960 ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
961 sizeof(*chap), GFP_KERNEL);
962 if (!ctrl->dhchap_ctxs) {
964 goto err_free_dhchap_ctrl_secret;
967 for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
968 chap = &ctrl->dhchap_ctxs[i];
971 INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
975 err_free_dhchap_ctrl_secret:
976 nvme_auth_free_key(ctrl->ctrl_key);
977 ctrl->ctrl_key = NULL;
978 err_free_dhchap_secret:
979 nvme_auth_free_key(ctrl->host_key);
980 ctrl->host_key = NULL;
983 EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
985 void nvme_auth_stop(struct nvme_ctrl *ctrl)
987 cancel_work_sync(&ctrl->dhchap_auth_work);
989 EXPORT_SYMBOL_GPL(nvme_auth_stop);
991 void nvme_auth_free(struct nvme_ctrl *ctrl)
995 if (ctrl->dhchap_ctxs) {
996 for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
997 nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
998 kfree(ctrl->dhchap_ctxs);
1000 if (ctrl->host_key) {
1001 nvme_auth_free_key(ctrl->host_key);
1002 ctrl->host_key = NULL;
1004 if (ctrl->ctrl_key) {
1005 nvme_auth_free_key(ctrl->ctrl_key);
1006 ctrl->ctrl_key = NULL;
1009 EXPORT_SYMBOL_GPL(nvme_auth_free);
1011 int __init nvme_init_auth(void)
1013 nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
1014 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
1018 nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
1019 CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
1020 if (!nvme_chap_buf_cache)
1021 goto err_destroy_workqueue;
1023 nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
1024 mempool_free_slab, nvme_chap_buf_cache);
1025 if (!nvme_chap_buf_pool)
1026 goto err_destroy_chap_buf_cache;
1029 err_destroy_chap_buf_cache:
1030 kmem_cache_destroy(nvme_chap_buf_cache);
1031 err_destroy_workqueue:
1032 destroy_workqueue(nvme_auth_wq);
1036 void __exit nvme_exit_auth(void)
1038 mempool_destroy(nvme_chap_buf_pool);
1039 kmem_cache_destroy(nvme_chap_buf_cache);
1040 destroy_workqueue(nvme_auth_wq);