nvme: store a struct device pointer in struct nvme_dev
linux-2.6-block.git: drivers/block/nvme-scsi.c
1/*
2 * NVM Express device driver
3 * Copyright (c) 2011-2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14
15/*
16 * Refer to the SCSI-NVMe Translation spec for details on how
17 * each command is translated.
18 */
19
20#include <linux/nvme.h>
21#include <linux/bio.h>
22#include <linux/bitops.h>
23#include <linux/blkdev.h>
24#include <linux/compat.h>
25#include <linux/delay.h>
26#include <linux/errno.h>
27#include <linux/fs.h>
28#include <linux/genhd.h>
29#include <linux/idr.h>
30#include <linux/init.h>
31#include <linux/interrupt.h>
32#include <linux/io.h>
33#include <linux/kdev_t.h>
34#include <linux/kthread.h>
35#include <linux/kernel.h>
36#include <linux/mm.h>
37#include <linux/module.h>
38#include <linux/moduleparam.h>
39#include <linux/pci.h>
40#include <linux/poison.h>
41#include <linux/sched.h>
42#include <linux/slab.h>
43#include <linux/types.h>
44#include <scsi/sg.h>
45#include <scsi/scsi.h>
46
47
48static int sg_version_num = 30534; /* 2 digits for each component */
49
50#define SNTI_TRANSLATION_SUCCESS 0
51#define SNTI_INTERNAL_ERROR 1
52
53/* VPD Page Codes */
54#define VPD_SUPPORTED_PAGES 0x00
55#define VPD_SERIAL_NUMBER 0x80
56#define VPD_DEVICE_IDENTIFIERS 0x83
57#define VPD_EXTENDED_INQUIRY 0x86
58#define VPD_BLOCK_LIMITS 0xB0
59#define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1
60
61/* CDB offsets */
62#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET 6
63#define REPORT_LUNS_SR_OFFSET 2
64#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET 10
65#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET 4
66#define REQUEST_SENSE_DESC_OFFSET 1
67#define REQUEST_SENSE_DESC_MASK 0x01
68#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE 1
69#define INQUIRY_EVPD_BYTE_OFFSET 1
70#define INQUIRY_PAGE_CODE_BYTE_OFFSET 2
71#define INQUIRY_EVPD_BIT_MASK 1
72#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET 3
73#define START_STOP_UNIT_CDB_IMMED_OFFSET 1
74#define START_STOP_UNIT_CDB_IMMED_MASK 0x1
75#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET 3
76#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK 0xF
77#define START_STOP_UNIT_CDB_POWER_COND_OFFSET 4
78#define START_STOP_UNIT_CDB_POWER_COND_MASK 0xF0
79#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET 4
80#define START_STOP_UNIT_CDB_NO_FLUSH_MASK 0x4
81#define START_STOP_UNIT_CDB_START_OFFSET 4
82#define START_STOP_UNIT_CDB_START_MASK 0x1
83#define WRITE_BUFFER_CDB_MODE_OFFSET 1
84#define WRITE_BUFFER_CDB_MODE_MASK 0x1F
85#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET 2
86#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET 3
87#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET 6
88#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET 1
89#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK 0xC0
90#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT 6
91#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET 1
92#define FORMAT_UNIT_CDB_LONG_LIST_MASK 0x20
93#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET 1
94#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK 0x10
95#define FORMAT_UNIT_SHORT_PARM_LIST_LEN 4
96#define FORMAT_UNIT_LONG_PARM_LIST_LEN 8
97#define FORMAT_UNIT_PROT_INT_OFFSET 3
98#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET 0
99#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK 0x07
100#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET 7
101
102/* Misc. defines */
103#define NIBBLE_SHIFT 4
104#define FIXED_SENSE_DATA 0x70
105#define DESC_FORMAT_SENSE_DATA 0x72
106#define FIXED_SENSE_DATA_ADD_LENGTH 10
107#define LUN_ENTRY_SIZE 8
108#define LUN_DATA_HEADER_SIZE 8
109#define ALL_LUNS_RETURNED 0x02
110#define ALL_WELL_KNOWN_LUNS_RETURNED 0x01
111#define RESTRICTED_LUNS_RETURNED 0x00
112#define NVME_POWER_STATE_START_VALID 0x00
113#define NVME_POWER_STATE_ACTIVE 0x01
114#define NVME_POWER_STATE_IDLE 0x02
115#define NVME_POWER_STATE_STANDBY 0x03
116#define NVME_POWER_STATE_LU_CONTROL 0x07
117#define POWER_STATE_0 0
118#define POWER_STATE_1 1
119#define POWER_STATE_2 2
120#define POWER_STATE_3 3
121#define DOWNLOAD_SAVE_ACTIVATE 0x05
122#define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E
123#define ACTIVATE_DEFERRED_MICROCODE 0x0F
124#define FORMAT_UNIT_IMMED_MASK 0x2
125#define FORMAT_UNIT_IMMED_OFFSET 1
126#define KELVIN_TEMP_FACTOR 273
127#define FIXED_FMT_SENSE_DATA_SIZE 18
128#define DESC_FMT_SENSE_DATA_SIZE 8
129
130/* SCSI/NVMe defines and bit masks */
131#define INQ_STANDARD_INQUIRY_PAGE 0x00
132#define INQ_SUPPORTED_VPD_PAGES_PAGE 0x00
133#define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80
134#define INQ_DEVICE_IDENTIFICATION_PAGE 0x83
135#define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86
136#define INQ_BDEV_LIMITS_PAGE 0xB0
137#define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1
138#define INQ_SERIAL_NUMBER_LENGTH 0x14
139#define INQ_NUM_SUPPORTED_VPD_PAGES 6
140#define VERSION_SPC_4 0x06
141#define ACA_UNSUPPORTED 0
142#define STANDARD_INQUIRY_LENGTH 36
143#define ADDITIONAL_STD_INQ_LENGTH 31
144#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH 0x3C
145#define RESERVED_FIELD 0
146
147/* SCSI READ/WRITE Defines */
148#define IO_CDB_WP_MASK 0xE0
149#define IO_CDB_WP_SHIFT 5
150#define IO_CDB_FUA_MASK 0x8
151#define IO_6_CDB_LBA_OFFSET 0
152#define IO_6_CDB_LBA_MASK 0x001FFFFF
153#define IO_6_CDB_TX_LEN_OFFSET 4
154#define IO_6_DEFAULT_TX_LEN 256
155#define IO_10_CDB_LBA_OFFSET 2
156#define IO_10_CDB_TX_LEN_OFFSET 7
157#define IO_10_CDB_WP_OFFSET 1
158#define IO_10_CDB_FUA_OFFSET 1
159#define IO_12_CDB_LBA_OFFSET 2
160#define IO_12_CDB_TX_LEN_OFFSET 6
161#define IO_12_CDB_WP_OFFSET 1
162#define IO_12_CDB_FUA_OFFSET 1
163#define IO_16_CDB_FUA_OFFSET 1
164#define IO_16_CDB_WP_OFFSET 1
165#define IO_16_CDB_LBA_OFFSET 2
166#define IO_16_CDB_TX_LEN_OFFSET 10
167
168/* Mode Sense/Select defines */
169#define MODE_PAGE_INFO_EXCEP 0x1C
170#define MODE_PAGE_CACHING 0x08
171#define MODE_PAGE_CONTROL 0x0A
172#define MODE_PAGE_POWER_CONDITION 0x1A
173#define MODE_PAGE_RETURN_ALL 0x3F
174#define MODE_PAGE_BLK_DES_LEN 0x08
175#define MODE_PAGE_LLBAA_BLK_DES_LEN 0x10
176#define MODE_PAGE_CACHING_LEN 0x14
177#define MODE_PAGE_CONTROL_LEN 0x0C
178#define MODE_PAGE_POW_CND_LEN 0x28
179#define MODE_PAGE_INF_EXC_LEN 0x0C
180#define MODE_PAGE_ALL_LEN 0x54
181#define MODE_SENSE6_MPH_SIZE 4
182#define MODE_SENSE6_ALLOC_LEN_OFFSET 4
183#define MODE_SENSE_PAGE_CONTROL_OFFSET 2
184#define MODE_SENSE_PAGE_CONTROL_MASK 0xC0
185#define MODE_SENSE_PAGE_CODE_OFFSET 2
186#define MODE_SENSE_PAGE_CODE_MASK 0x3F
187#define MODE_SENSE_LLBAA_OFFSET 1
188#define MODE_SENSE_LLBAA_MASK 0x10
189#define MODE_SENSE_LLBAA_SHIFT 4
190#define MODE_SENSE_DBD_OFFSET 1
191#define MODE_SENSE_DBD_MASK 8
192#define MODE_SENSE_DBD_SHIFT 3
193#define MODE_SENSE10_MPH_SIZE 8
194#define MODE_SENSE10_ALLOC_LEN_OFFSET 7
195#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET 1
196#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET 1
197#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET 4
198#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET 7
199#define MODE_SELECT_CDB_PAGE_FORMAT_MASK 0x10
200#define MODE_SELECT_CDB_SAVE_PAGES_MASK 0x1
201#define MODE_SELECT_6_BD_OFFSET 3
202#define MODE_SELECT_10_BD_OFFSET 6
203#define MODE_SELECT_10_LLBAA_OFFSET 4
204#define MODE_SELECT_10_LLBAA_MASK 1
205#define MODE_SELECT_6_MPH_SIZE 4
206#define MODE_SELECT_10_MPH_SIZE 8
207#define CACHING_MODE_PAGE_WCE_MASK 0x04
208#define MODE_SENSE_BLK_DESC_ENABLED 0
209#define MODE_SENSE_BLK_DESC_COUNT 1
210#define MODE_SELECT_PAGE_CODE_MASK 0x3F
211#define SHORT_DESC_BLOCK 8
212#define LONG_DESC_BLOCK 16
213#define MODE_PAGE_POW_CND_LEN_FIELD 0x26
214#define MODE_PAGE_INF_EXC_LEN_FIELD 0x0A
215#define MODE_PAGE_CACHING_LEN_FIELD 0x12
216#define MODE_PAGE_CONTROL_LEN_FIELD 0x0A
217#define MODE_SENSE_PC_CURRENT_VALUES 0
218
219/* Log Sense defines */
220#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE 0x00
221#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH 0x07
222#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE 0x2F
223#define LOG_PAGE_TEMPERATURE_PAGE 0x0D
224#define LOG_SENSE_CDB_SP_OFFSET 1
225#define LOG_SENSE_CDB_SP_NOT_ENABLED 0
226#define LOG_SENSE_CDB_PC_OFFSET 2
227#define LOG_SENSE_CDB_PC_MASK 0xC0
228#define LOG_SENSE_CDB_PC_SHIFT 6
229#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES 1
230#define LOG_SENSE_CDB_PAGE_CODE_MASK 0x3F
231#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET 7
232#define REMAINING_INFO_EXCP_PAGE_LENGTH 0x8
233#define LOG_INFO_EXCP_PAGE_LENGTH 0xC
234#define REMAINING_TEMP_PAGE_LENGTH 0xC
235#define LOG_TEMP_PAGE_LENGTH 0x10
236#define LOG_TEMP_UNKNOWN 0xFF
237#define SUPPORTED_LOG_PAGES_PAGE_LENGTH 0x3
238
239/* Read Capacity defines */
240#define READ_CAP_10_RESP_SIZE 8
241#define READ_CAP_16_RESP_SIZE 32
242
243/* NVMe Namespace and Command Defines */
244#define BYTES_TO_DWORDS 4
245#define NVME_MAX_FIRMWARE_SLOT 7
246
247/* Report LUNs defines */
248#define REPORT_LUNS_FIRST_LUN_OFFSET 8
249
250/* SCSI ADDITIONAL SENSE Codes */
251
252#define SCSI_ASC_NO_SENSE 0x00
253#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03
254#define SCSI_ASC_LUN_NOT_READY 0x04
255#define SCSI_ASC_WARNING 0x0B
256#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10
257#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10
258#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10
259#define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11
260#define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D
261#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20
262#define SCSI_ASC_ILLEGAL_COMMAND 0x20
263#define SCSI_ASC_ILLEGAL_BLOCK 0x21
264#define SCSI_ASC_INVALID_CDB 0x24
265#define SCSI_ASC_INVALID_LUN 0x25
266#define SCSI_ASC_INVALID_PARAMETER 0x26
267#define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31
268#define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44
269
270/* SCSI ADDITIONAL SENSE Code Qualifiers */
271
272#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00
273#define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01
274#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01
275#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02
276#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03
277#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04
278#define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08
279#define SCSI_ASCQ_INVALID_LUN_ID 0x09
280
281/**
282 * DEVICE_SPECIFIC_PARAMETER in the mode parameter header (see sbc2r16):
283 * set to 0x10 to advertise DPOFUA support; 0 leaves it disabled.
284 */
285#define DEVICE_SPECIFIC_PARAMETER 0
286#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)
287
288/* MACROs to extract information from CDBs */
289
290#define GET_OPCODE(cdb) cdb[0]
291
292#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0)
293
294#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0))
295
296#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \
297(cdb[index + 1] << 8) | \
298(cdb[index + 2] << 0))
299
300#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \
301(cdb[index + 1] << 16) | \
302(cdb[index + 2] << 8) | \
303(cdb[index + 3] << 0))
304
305#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \
306(((u64)cdb[index + 1]) << 48) | \
307(((u64)cdb[index + 2]) << 40) | \
308(((u64)cdb[index + 3]) << 32) | \
309(((u64)cdb[index + 4]) << 24) | \
310(((u64)cdb[index + 5]) << 16) | \
311(((u64)cdb[index + 6]) << 8) | \
312(((u64)cdb[index + 7]) << 0))
313
314/* Inquiry Helper Macros */
315#define GET_INQ_EVPD_BIT(cdb) \
316((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \
317INQUIRY_EVPD_BIT_MASK) ? 1 : 0)
318
319#define GET_INQ_PAGE_CODE(cdb) \
320(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))
321
322#define GET_INQ_ALLOC_LENGTH(cdb) \
323(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))
324
325/* Report LUNs Helper Macros */
326#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \
327(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))
328
329/* Read Capacity Helper Macros */
330#define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \
331(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))
332
333#define IS_READ_CAP_16(cdb) \
334((cdb[0] == SERVICE_ACTION_IN_16 && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
335
336/* Request Sense Helper Macros */
337#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
338(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))
339
340/* Mode Sense Helper Macros */
341#define GET_MODE_SENSE_DBD(cdb) \
342((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \
343MODE_SENSE_DBD_SHIFT)
344
345#define GET_MODE_SENSE_LLBAA(cdb) \
346((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \
347MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)
348
349#define GET_MODE_SENSE_MPH_SIZE(cdb10) \
350(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)
351
352
353/* Struct to gather data that needs to be extracted from a SCSI CDB.
354 Not conforming to any particular CDB variant, but compatible with all. */
355
356struct nvme_trans_io_cdb {
357 u8 fua;
358 u8 prot_info;
359 u64 lba;
360 u32 xfer_len;
361};
362
363
364/* Internal Helper Functions */
365
366
367/* Copy data to userspace memory */
368
369static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
370 unsigned long n)
371{
372 int res = SNTI_TRANSLATION_SUCCESS;
373 unsigned long not_copied;
374 int i;
375 void *index = from;
376 size_t remaining = n;
377 size_t xfer_len;
378
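	/* If an iovec list was supplied, scatter the response buffer across its user-space segments */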
379 if (hdr->iovec_count > 0) {
380 struct sg_iovec sgl;
381
382 for (i = 0; i < hdr->iovec_count; i++) {
383 not_copied = copy_from_user(&sgl, hdr->dxferp +
384 i * sizeof(struct sg_iovec),
385 sizeof(struct sg_iovec));
386 if (not_copied)
387 return -EFAULT;
388 xfer_len = min(remaining, sgl.iov_len);
389 not_copied = copy_to_user(sgl.iov_base, index,
390 xfer_len);
391 if (not_copied) {
392 res = -EFAULT;
393 break;
394 }
395 index += xfer_len;
396 remaining -= xfer_len;
397 if (remaining == 0)
398 break;
399 }
400 return res;
401 }
402 not_copied = copy_to_user(hdr->dxferp, from, n);
403 if (not_copied)
404 res = -EFAULT;
405 return res;
406}
407
408/* Copy data from userspace memory */
409
410static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
411 unsigned long n)
412{
413 int res = SNTI_TRANSLATION_SUCCESS;
414 unsigned long not_copied;
415 int i;
416 void *index = to;
417 size_t remaining = n;
418 size_t xfer_len;
419
420 if (hdr->iovec_count > 0) {
421 struct sg_iovec sgl;
422
423 for (i = 0; i < hdr->iovec_count; i++) {
424 not_copied = copy_from_user(&sgl, hdr->dxferp +
425 i * sizeof(struct sg_iovec),
426 sizeof(struct sg_iovec));
427 if (not_copied)
428 return -EFAULT;
429 xfer_len = min(remaining, sgl.iov_len);
430 not_copied = copy_from_user(index, sgl.iov_base,
431 xfer_len);
432 if (not_copied) {
433 res = -EFAULT;
434 break;
435 }
436 index += xfer_len;
437 remaining -= xfer_len;
438 if (remaining == 0)
439 break;
440 }
441 return res;
442 }
443
444 not_copied = copy_from_user(to, hdr->dxferp, n);
445 if (not_copied)
446 res = -EFAULT;
447 return res;
448}
449
450/* Status/Sense Buffer Writeback */
451
452static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
453 u8 asc, u8 ascq)
454{
455 int res = SNTI_TRANSLATION_SUCCESS;
456 u8 xfer_len;
457 u8 resp[DESC_FMT_SENSE_DATA_SIZE];
458
459 if (scsi_status_is_good(status)) {
460 hdr->status = SAM_STAT_GOOD;
461 hdr->masked_status = GOOD;
462 hdr->host_status = DID_OK;
463 hdr->driver_status = DRIVER_OK;
464 hdr->sb_len_wr = 0;
465 } else {
466 hdr->status = status;
467 hdr->masked_status = status >> 1;
468 hdr->host_status = DID_OK;
469 hdr->driver_status = DRIVER_OK;
470
471 memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
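	/* Build minimal descriptor-format sense data: response code, sense key, ASC and ASCQ */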
472 resp[0] = DESC_FORMAT_SENSE_DATA;
473 resp[1] = sense_key;
474 resp[2] = asc;
475 resp[3] = ascq;
476
477 xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
478 hdr->sb_len_wr = xfer_len;
479 if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
480 res = -EFAULT;
481 }
482
483 return res;
484}
485
486static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
487{
488 u8 status, sense_key, asc, ascq;
489 int res = SNTI_TRANSLATION_SUCCESS;
490
491 /* For non-nvme (Linux) errors, simply return the error code */
492 if (nvme_sc < 0)
493 return nvme_sc;
494
495 /* Mask DNR, More, and reserved fields */
496 nvme_sc &= 0x7FF;
497
498 switch (nvme_sc) {
499 /* Generic Command Status */
500 case NVME_SC_SUCCESS:
501 status = SAM_STAT_GOOD;
502 sense_key = NO_SENSE;
503 asc = SCSI_ASC_NO_SENSE;
504 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
505 break;
506 case NVME_SC_INVALID_OPCODE:
507 status = SAM_STAT_CHECK_CONDITION;
508 sense_key = ILLEGAL_REQUEST;
509 asc = SCSI_ASC_ILLEGAL_COMMAND;
510 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
511 break;
512 case NVME_SC_INVALID_FIELD:
513 status = SAM_STAT_CHECK_CONDITION;
514 sense_key = ILLEGAL_REQUEST;
515 asc = SCSI_ASC_INVALID_CDB;
516 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
517 break;
518 case NVME_SC_DATA_XFER_ERROR:
519 status = SAM_STAT_CHECK_CONDITION;
520 sense_key = MEDIUM_ERROR;
521 asc = SCSI_ASC_NO_SENSE;
522 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
523 break;
524 case NVME_SC_POWER_LOSS:
525 status = SAM_STAT_TASK_ABORTED;
526 sense_key = ABORTED_COMMAND;
527 asc = SCSI_ASC_WARNING;
528 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
529 break;
530 case NVME_SC_INTERNAL:
531 status = SAM_STAT_CHECK_CONDITION;
532 sense_key = HARDWARE_ERROR;
533 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
534 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
535 break;
536 case NVME_SC_ABORT_REQ:
537 status = SAM_STAT_TASK_ABORTED;
538 sense_key = ABORTED_COMMAND;
539 asc = SCSI_ASC_NO_SENSE;
540 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
541 break;
542 case NVME_SC_ABORT_QUEUE:
543 status = SAM_STAT_TASK_ABORTED;
544 sense_key = ABORTED_COMMAND;
545 asc = SCSI_ASC_NO_SENSE;
546 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
547 break;
548 case NVME_SC_FUSED_FAIL:
549 status = SAM_STAT_TASK_ABORTED;
550 sense_key = ABORTED_COMMAND;
551 asc = SCSI_ASC_NO_SENSE;
552 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
553 break;
554 case NVME_SC_FUSED_MISSING:
555 status = SAM_STAT_TASK_ABORTED;
556 sense_key = ABORTED_COMMAND;
557 asc = SCSI_ASC_NO_SENSE;
558 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
559 break;
560 case NVME_SC_INVALID_NS:
561 status = SAM_STAT_CHECK_CONDITION;
562 sense_key = ILLEGAL_REQUEST;
563 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
564 ascq = SCSI_ASCQ_INVALID_LUN_ID;
565 break;
566 case NVME_SC_LBA_RANGE:
567 status = SAM_STAT_CHECK_CONDITION;
568 sense_key = ILLEGAL_REQUEST;
569 asc = SCSI_ASC_ILLEGAL_BLOCK;
570 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
571 break;
572 case NVME_SC_CAP_EXCEEDED:
573 status = SAM_STAT_CHECK_CONDITION;
574 sense_key = MEDIUM_ERROR;
575 asc = SCSI_ASC_NO_SENSE;
576 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
577 break;
578 case NVME_SC_NS_NOT_READY:
579 status = SAM_STAT_CHECK_CONDITION;
580 sense_key = NOT_READY;
581 asc = SCSI_ASC_LUN_NOT_READY;
582 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
583 break;
584
585 /* Command Specific Status */
586 case NVME_SC_INVALID_FORMAT:
587 status = SAM_STAT_CHECK_CONDITION;
588 sense_key = ILLEGAL_REQUEST;
589 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
590 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
591 break;
592 case NVME_SC_BAD_ATTRIBUTES:
593 status = SAM_STAT_CHECK_CONDITION;
594 sense_key = ILLEGAL_REQUEST;
595 asc = SCSI_ASC_INVALID_CDB;
596 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
597 break;
598
599 /* Media Errors */
600 case NVME_SC_WRITE_FAULT:
601 status = SAM_STAT_CHECK_CONDITION;
602 sense_key = MEDIUM_ERROR;
603 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
604 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
605 break;
606 case NVME_SC_READ_ERROR:
607 status = SAM_STAT_CHECK_CONDITION;
608 sense_key = MEDIUM_ERROR;
609 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
610 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
611 break;
612 case NVME_SC_GUARD_CHECK:
613 status = SAM_STAT_CHECK_CONDITION;
614 sense_key = MEDIUM_ERROR;
615 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
616 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
617 break;
618 case NVME_SC_APPTAG_CHECK:
619 status = SAM_STAT_CHECK_CONDITION;
620 sense_key = MEDIUM_ERROR;
621 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
622 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
623 break;
624 case NVME_SC_REFTAG_CHECK:
625 status = SAM_STAT_CHECK_CONDITION;
626 sense_key = MEDIUM_ERROR;
627 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
628 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
629 break;
630 case NVME_SC_COMPARE_FAILED:
631 status = SAM_STAT_CHECK_CONDITION;
632 sense_key = MISCOMPARE;
633 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
634 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
635 break;
636 case NVME_SC_ACCESS_DENIED:
637 status = SAM_STAT_CHECK_CONDITION;
638 sense_key = ILLEGAL_REQUEST;
639 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
640 ascq = SCSI_ASCQ_INVALID_LUN_ID;
641 break;
642
643 /* Unspecified/Default */
644 case NVME_SC_CMDID_CONFLICT:
645 case NVME_SC_CMD_SEQ_ERROR:
646 case NVME_SC_CQ_INVALID:
647 case NVME_SC_QID_INVALID:
648 case NVME_SC_QUEUE_SIZE:
649 case NVME_SC_ABORT_LIMIT:
650 case NVME_SC_ABORT_MISSING:
651 case NVME_SC_ASYNC_LIMIT:
652 case NVME_SC_FIRMWARE_SLOT:
653 case NVME_SC_FIRMWARE_IMAGE:
654 case NVME_SC_INVALID_VECTOR:
655 case NVME_SC_INVALID_LOG_PAGE:
656 default:
657 status = SAM_STAT_CHECK_CONDITION;
658 sense_key = ILLEGAL_REQUEST;
659 asc = SCSI_ASC_NO_SENSE;
660 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
661 break;
662 }
663
664 res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
665
666 return res;
667}
668
669/* INQUIRY Helper Functions */
670
671static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
672 struct sg_io_hdr *hdr, u8 *inq_response,
673 int alloc_len)
674{
675 struct nvme_dev *dev = ns->dev;
676 dma_addr_t dma_addr;
677 void *mem;
678 struct nvme_id_ns *id_ns;
679 int res = SNTI_TRANSLATION_SUCCESS;
680 int nvme_sc;
681 int xfer_len;
682 u8 resp_data_format = 0x02;
683 u8 protect;
684 u8 cmdque = 0x01 << 1;
685 u8 fw_offset = sizeof(dev->firmware_rev);
686
687 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
688 &dma_addr, GFP_KERNEL);
689 if (mem == NULL) {
690 res = -ENOMEM;
691 goto out_dma;
692 }
693
694 /* nvme ns identify - use DPS value for PROTECT field */
695 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
696 res = nvme_trans_status_code(hdr, nvme_sc);
697 /*
698 * If nvme_sc was -ve, res will be -ve here.
699 * If nvme_sc was +ve, the status would have been translated, and res
700 * can only be 0 or -ve.
701 * - If 0 && nvme_sc > 0, then go into next if where res gets nvme_sc
702 * - If -ve, return because its a Linux error.
703 */
704 if (res)
705 goto out_free;
706 if (nvme_sc) {
707 res = nvme_sc;
708 goto out_free;
709 }
710 id_ns = mem;
711 (id_ns->dps) ? (protect = 0x01) : (protect = 0);
712
713 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
714 inq_response[2] = VERSION_SPC_4;
715 inq_response[3] = resp_data_format; /*normaca=0 | hisup=0 */
716 inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
717 inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
718 inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
719 strncpy(&inq_response[8], "NVMe ", 8);
720 strncpy(&inq_response[16], dev->model, 16);
721
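	/* Skip trailing spaces and report the last four characters of the firmware revision (the INQUIRY product revision field is four bytes) */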
722 while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
723 fw_offset--;
724 fw_offset -= 4;
725 strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);
726
727 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
728 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
729
730 out_free:
731 dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
732 out_dma:
733 return res;
734}
735
736static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
737 struct sg_io_hdr *hdr, u8 *inq_response,
738 int alloc_len)
739{
740 int res = SNTI_TRANSLATION_SUCCESS;
741 int xfer_len;
742
743 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
744 inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE; /* Page Code */
745 inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES; /* Page Length */
746 inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
747 inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
748 inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
749 inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
750 inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
751 inq_response[9] = INQ_BDEV_LIMITS_PAGE;
752
753 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
754 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
755
756 return res;
757}
758
759static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
760 struct sg_io_hdr *hdr, u8 *inq_response,
761 int alloc_len)
762{
763 struct nvme_dev *dev = ns->dev;
764 int res = SNTI_TRANSLATION_SUCCESS;
765 int xfer_len;
766
767 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
768 inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
769 inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */
770 strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);
771
772 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
773 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
774
775 return res;
776}
777
778static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
779 u8 *inq_response, int alloc_len)
780{
781 struct nvme_dev *dev = ns->dev;
782 dma_addr_t dma_addr;
783 void *mem;
784 int res = SNTI_TRANSLATION_SUCCESS;
785 int nvme_sc;
786 int xfer_len;
787 __be32 tmp_id = cpu_to_be32(ns->ns_id);
788
789 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
790 &dma_addr, GFP_KERNEL);
791 if (mem == NULL) {
792 res = -ENOMEM;
793 goto out_dma;
794 }
795
796 memset(inq_response, 0, alloc_len);
797 inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
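	/* On NVMe 1.1+ report the namespace EUI-64 (or the NGUID on 1.2+) as an EUI-64 designator; otherwise fall back to a designator built from the PCI vendor id, model, namespace id and serial number */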
798 if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
799 struct nvme_id_ns *id_ns = mem;
800 void *eui = id_ns->eui64;
801 int len = sizeof(id_ns->eui64);
802
803 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
804 res = nvme_trans_status_code(hdr, nvme_sc);
805 if (res)
806 goto out_free;
807 if (nvme_sc) {
808 res = nvme_sc;
809 goto out_free;
810 }
811
812 if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
813 if (bitmap_empty(eui, len * 8)) {
814 eui = id_ns->nguid;
815 len = sizeof(id_ns->nguid);
816 }
817 }
818 if (bitmap_empty(eui, len * 8))
819 goto scsi_string;
820
821 inq_response[3] = 4 + len; /* Page Length */
822 /* Designation Descriptor start */
823 inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
824 inq_response[5] = 0x02; /* PIV=0b | Asso=00b | Designator Type=2h */
825 inq_response[6] = 0x00; /* Rsvd */
826 inq_response[7] = len; /* Designator Length */
827 memcpy(&inq_response[8], eui, len);
828 } else {
829 scsi_string:
830 if (alloc_len < 72) {
831 res = nvme_trans_completion(hdr,
832 SAM_STAT_CHECK_CONDITION,
833 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
834 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
835 goto out_free;
836 }
837 inq_response[3] = 0x48; /* Page Length */
838 /* Designation Descriptor start */
839 inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */
840 inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */
841 inq_response[6] = 0x00; /* Rsvd */
842 inq_response[7] = 0x44; /* Designator Length */
843
844 sprintf(&inq_response[8], "%04x", to_pci_dev(dev->dev)->vendor);
845 memcpy(&inq_response[12], dev->model, sizeof(dev->model));
846 sprintf(&inq_response[52], "%04x", tmp_id);
847 memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
848 }
849 xfer_len = alloc_len;
850 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
851
852 out_free:
853 dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
854 out_dma:
855 return res;
856}
857
858static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
859 int alloc_len)
860{
861 u8 *inq_response;
862 int res = SNTI_TRANSLATION_SUCCESS;
863 int nvme_sc;
864 struct nvme_dev *dev = ns->dev;
865 dma_addr_t dma_addr;
866 void *mem;
867 struct nvme_id_ctrl *id_ctrl;
868 struct nvme_id_ns *id_ns;
869 int xfer_len;
870 u8 microcode = 0x80;
871 u8 spt;
872 u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
873 u8 grd_chk, app_chk, ref_chk, protect;
874 u8 uask_sup = 0x20;
875 u8 v_sup;
876 u8 luiclr = 0x01;
877
878 inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
879 if (inq_response == NULL) {
880 res = -ENOMEM;
881 goto out_mem;
882 }
883
884 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
885 &dma_addr, GFP_KERNEL);
886 if (mem == NULL) {
887 res = -ENOMEM;
888 goto out_dma;
889 }
890
891 /* nvme ns identify */
892 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
893 res = nvme_trans_status_code(hdr, nvme_sc);
894 if (res)
895 goto out_free;
896 if (nvme_sc) {
897 res = nvme_sc;
898 goto out_free;
899 }
900 id_ns = mem;
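	/* Translate the namespace's end-to-end protection capabilities (types 1-3) into the SCSI SPT field (byte 4, bits 5:3) */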
901 spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
902 (id_ns->dps) ? (protect = 0x01) : (protect = 0);
903 grd_chk = protect << 2;
904 app_chk = protect << 1;
905 ref_chk = protect;
906
907 /* nvme controller identify */
908 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
909 res = nvme_trans_status_code(hdr, nvme_sc);
910 if (res)
911 goto out_free;
912 if (nvme_sc) {
913 res = nvme_sc;
914 goto out_free;
915 }
916 id_ctrl = mem;
917 v_sup = id_ctrl->vwc;
918
919 memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
920 inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */
921 inq_response[2] = 0x00; /* Page Length MSB */
922 inq_response[3] = 0x3C; /* Page Length LSB */
923 inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
924 inq_response[5] = uask_sup;
925 inq_response[6] = v_sup;
926 inq_response[7] = luiclr;
927 inq_response[8] = 0;
928 inq_response[9] = 0;
929
930 xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
931 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
932
933 out_free:
934 dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
935 out_dma:
936 kfree(inq_response);
937 out_mem:
938 return res;
939}
940
941static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
942 u8 *inq_response, int alloc_len)
943{
944 __be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue));
945 __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
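	/* Fixed MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT advertised when discard is supported */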
946 __be32 discard_desc_count = cpu_to_be32(0x100);
947
948 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
949 inq_response[1] = VPD_BLOCK_LIMITS;
950 inq_response[3] = 0x3c; /* Page Length */
951 memcpy(&inq_response[8], &max_sectors, sizeof(u32));
952 memcpy(&inq_response[20], &max_discard, sizeof(u32));
953
954 if (max_discard)
955 memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));
956
957 return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
958}
959
960static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
961 int alloc_len)
962{
963 u8 *inq_response;
964 int res = SNTI_TRANSLATION_SUCCESS;
965 int xfer_len;
966
967 inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
968 if (inq_response == NULL) {
969 res = -ENOMEM;
970 goto out_mem;
971 }
972
973 inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */
974 inq_response[2] = 0x00; /* Page Length MSB */
975 inq_response[3] = 0x3C; /* Page Length LSB */
976 inq_response[4] = 0x00; /* Medium Rotation Rate MSB */
977 inq_response[5] = 0x01; /* Medium Rotation Rate LSB */
978 inq_response[6] = 0x00; /* Form Factor */
979
980 xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
981 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
982
983 kfree(inq_response);
984 out_mem:
985 return res;
986}
987
988/* LOG SENSE Helper Functions */
989
990static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
991 int alloc_len)
992{
993 int res = SNTI_TRANSLATION_SUCCESS;
994 int xfer_len;
995 u8 *log_response;
996
997 log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
998 if (log_response == NULL) {
999 res = -ENOMEM;
1000 goto out_mem;
1001 }
1002
1003 log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
1004 /* Subpage=0x00, Page Length MSB=0 */
1005 log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
1006 log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
1007 log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
1008 log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;
1009
1010 xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
1011 res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
1012
1013 kfree(log_response);
1014 out_mem:
1015 return res;
1016}
1017
1018static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
1019 struct sg_io_hdr *hdr, int alloc_len)
1020{
1021 int res = SNTI_TRANSLATION_SUCCESS;
1022 int xfer_len;
1023 u8 *log_response;
1024 struct nvme_command c;
1025 struct nvme_dev *dev = ns->dev;
1026 struct nvme_smart_log *smart_log;
1027 dma_addr_t dma_addr;
1028 void *mem;
1029 u8 temp_c;
1030 u16 temp_k;
1031
1032 log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
1033 if (log_response == NULL) {
1034 res = -ENOMEM;
1035 goto out_mem;
1036 }
1037
1038 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
1039 &dma_addr, GFP_KERNEL);
1040 if (mem == NULL) {
1041 res = -ENOMEM;
1042 goto out_dma;
1043 }
1044
1045 /* Get SMART Log Page */
1046 memset(&c, 0, sizeof(c));
1047 c.common.opcode = nvme_admin_get_log_page;
1048 c.common.nsid = cpu_to_le32(0xFFFFFFFF);
1049 c.common.prp1 = cpu_to_le64(dma_addr);
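	/* CDW10: 0-based dword count of the SMART log in the upper 16 bits, log page identifier (SMART / health information) in the low byte */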
1050 c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
1051 BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
1052 res = nvme_submit_sync_cmd(dev->admin_q, &c);
1053 if (res != NVME_SC_SUCCESS) {
1054 temp_c = LOG_TEMP_UNKNOWN;
1055 } else {
1056 smart_log = mem;
1057 temp_k = (smart_log->temperature[1] << 8) +
1058 (smart_log->temperature[0]);
1059 temp_c = temp_k - KELVIN_TEMP_FACTOR;
1060 }
1061
1062 log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
1063 /* Subpage=0x00, Page Length MSB=0 */
1064 log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
1065 /* Informational Exceptions Log Parameter 1 Start */
1066 /* Parameter Code=0x0000 bytes 4,5 */
1067 log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
1068 log_response[7] = 0x04; /* PARAMETER LENGTH */
1069 /* Additional sense code and qualifier = 0x00 each */
1070 /* Use Temperature from NVMe Get Log Page, convert to C from K */
1071 log_response[10] = temp_c;
1072
1073 xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
1074 res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
1075
1076 dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
1077 mem, dma_addr);
1078 out_dma:
1079 kfree(log_response);
1080 out_mem:
1081 return res;
1082}
1083
1084static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1085 int alloc_len)
1086{
1087 int res = SNTI_TRANSLATION_SUCCESS;
1088 int xfer_len;
1089 u8 *log_response;
1090 struct nvme_command c;
1091 struct nvme_dev *dev = ns->dev;
1092 struct nvme_smart_log *smart_log;
1093 dma_addr_t dma_addr;
1094 void *mem;
1095 u32 feature_resp;
1096 u8 temp_c_cur, temp_c_thresh;
1097 u16 temp_k;
1098
1099 log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
1100 if (log_response == NULL) {
1101 res = -ENOMEM;
1102 goto out_mem;
1103 }
1104
1105 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
1106 &dma_addr, GFP_KERNEL);
1107 if (mem == NULL) {
1108 res = -ENOMEM;
1109 goto out_dma;
1110 }
1111
1112 /* Get SMART Log Page */
1113 memset(&c, 0, sizeof(c));
1114 c.common.opcode = nvme_admin_get_log_page;
1115 c.common.nsid = cpu_to_le32(0xFFFFFFFF);
1116 c.common.prp1 = cpu_to_le64(dma_addr);
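	/* CDW10: 0-based dword count of the SMART log in the upper 16 bits, log page identifier (SMART / health information) in the low byte */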
1117 c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
1118 BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
1119 res = nvme_submit_sync_cmd(dev->admin_q, &c);
1120 if (res != NVME_SC_SUCCESS) {
1121 temp_c_cur = LOG_TEMP_UNKNOWN;
1122 } else {
1123 smart_log = mem;
1124 temp_k = (smart_log->temperature[1] << 8) +
1125 (smart_log->temperature[0]);
1126 temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
1127 }
1128
1129 /* Get Features for Temp Threshold */
1130 res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
1131 &feature_resp);
1132 if (res != NVME_SC_SUCCESS)
1133 temp_c_thresh = LOG_TEMP_UNKNOWN;
1134 else
1135 temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;
1136
1137 log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
1138 /* Subpage=0x00, Page Length MSB=0 */
1139 log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
1140 /* Temperature Log Parameter 1 (Temperature) Start */
1141 /* Parameter Code = 0x0000 */
1142 log_response[6] = 0x01; /* Format and Linking = 01b */
1143 log_response[7] = 0x02; /* Parameter Length */
1144 /* Use Temperature from NVMe Get Log Page, convert to C from K */
1145 log_response[9] = temp_c_cur;
1146 /* Temperature Log Parameter 2 (Reference Temperature) Start */
1147 log_response[11] = 0x01; /* Parameter Code = 0x0001 */
1148 log_response[12] = 0x01; /* Format and Linking = 01b */
1149 log_response[13] = 0x02; /* Parameter Length */
1150 /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
1151 log_response[15] = temp_c_thresh;
1152
1153 xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
1154 res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
1155
1156 dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
1157 mem, dma_addr);
1158 out_dma:
1159 kfree(log_response);
1160 out_mem:
1161 return res;
1162}
1163
1164/* MODE SENSE Helper Functions */
1165
1166static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
1167 u16 mode_data_length, u16 blk_desc_len)
1168{
1169 /* Quick check to make sure I don't stomp on my own memory... */
1170 if ((cdb10 && len < 8) || (!cdb10 && len < 4))
1171 return SNTI_INTERNAL_ERROR;
1172
1173 if (cdb10) {
1174 resp[0] = (mode_data_length & 0xFF00) >> 8;
1175 resp[1] = (mode_data_length & 0x00FF);
1176 /* resp[2] and [3] are zero */
1177 resp[4] = llbaa;
1178 resp[5] = RESERVED_FIELD;
1179 resp[6] = (blk_desc_len & 0xFF00) >> 8;
1180 resp[7] = (blk_desc_len & 0x00FF);
1181 } else {
1182 resp[0] = (mode_data_length & 0x00FF);
1183 /* resp[1] and [2] are zero */
1184 resp[3] = (blk_desc_len & 0x00FF);
1185 }
1186
1187 return SNTI_TRANSLATION_SUCCESS;
1188}
1189
1190static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1191 u8 *resp, int len, u8 llbaa)
1192{
1193 int res = SNTI_TRANSLATION_SUCCESS;
1194 int nvme_sc;
1195 struct nvme_dev *dev = ns->dev;
1196 dma_addr_t dma_addr;
1197 void *mem;
1198 struct nvme_id_ns *id_ns;
1199 u8 flbas;
1200 u32 lba_length;
1201
1202 if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
1203 return SNTI_INTERNAL_ERROR;
1204 else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
1205 return SNTI_INTERNAL_ERROR;
1206
1207 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
1208 &dma_addr, GFP_KERNEL);
1209 if (mem == NULL) {
1210 res = -ENOMEM;
1211 goto out;
1212 }
1213
1214 /* nvme ns identify */
1215 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
1216 res = nvme_trans_status_code(hdr, nvme_sc);
1217 if (res)
1218 goto out_dma;
1219 if (nvme_sc) {
1220 res = nvme_sc;
1221 goto out_dma;
1222 }
1223 id_ns = mem;
1224 flbas = (id_ns->flbas) & 0x0F;
1225 lba_length = (1 << (id_ns->lbaf[flbas].ds));
1226
1227 if (llbaa == 0) {
1228 __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
1229 /* Byte 4 is reserved */
1230 __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);
1231
1232 memcpy(resp, &tmp_cap, sizeof(u32));
1233 memcpy(&resp[4], &tmp_len, sizeof(u32));
1234 } else {
1235 __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
1236 __be32 tmp_len = cpu_to_be32(lba_length);
1237
1238 memcpy(resp, &tmp_cap, sizeof(u64));
1239 /* Bytes 8, 9, 10, 11 are reserved */
1240 memcpy(&resp[12], &tmp_len, sizeof(u32));
1241 }
1242
1243 out_dma:
1244 dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
1245 out:
1246 return res;
1247}
1248
1249static int nvme_trans_fill_control_page(struct nvme_ns *ns,
1250 struct sg_io_hdr *hdr, u8 *resp,
1251 int len)
1252{
1253 if (len < MODE_PAGE_CONTROL_LEN)
1254 return SNTI_INTERNAL_ERROR;
1255
1256 resp[0] = MODE_PAGE_CONTROL;
1257 resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
1258 resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1,
1259 * D_SENSE=1, GLTSD=1, RLEC=0 */
1260 resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
1261 /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */
1262 resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
1263 /* resp[6] and [7] are obsolete, thus zero */
1264 resp[8] = 0xFF; /* Busy timeout period = 0xffff */
1265 resp[9] = 0xFF;
1266 /* Bytes 10,11: Extended selftest completion time = 0x0000 */
1267
1268 return SNTI_TRANSLATION_SUCCESS;
1269}
1270
1271static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
1272 struct sg_io_hdr *hdr,
1273 u8 *resp, int len)
1274{
1275 int res = SNTI_TRANSLATION_SUCCESS;
1276 int nvme_sc;
1277 struct nvme_dev *dev = ns->dev;
1278 u32 feature_resp;
1279 u8 vwc;
1280
1281 if (len < MODE_PAGE_CACHING_LEN)
1282 return SNTI_INTERNAL_ERROR;
1283
1284 nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
1285 &feature_resp);
1286 res = nvme_trans_status_code(hdr, nvme_sc);
1287 if (res)
1288 goto out;
1289 if (nvme_sc) {
1290 res = nvme_sc;
1291 goto out;
1292 }
1293 vwc = feature_resp & 0x00000001;
1294
1295 resp[0] = MODE_PAGE_CACHING;
1296 resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
1297 resp[2] = vwc << 2;
1298
1299 out:
1300 return res;
1301}
1302
1303static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
1304 struct sg_io_hdr *hdr, u8 *resp,
1305 int len)
1306{
1307 int res = SNTI_TRANSLATION_SUCCESS;
1308
1309 if (len < MODE_PAGE_POW_CND_LEN)
1310 return SNTI_INTERNAL_ERROR;
1311
1312 resp[0] = MODE_PAGE_POWER_CONDITION;
1313 resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
1314 /* All other bytes are zero */
1315
1316 return res;
1317}
1318
1319static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
1320 struct sg_io_hdr *hdr, u8 *resp,
1321 int len)
1322{
1323 int res = SNTI_TRANSLATION_SUCCESS;
1324
1325 if (len < MODE_PAGE_INF_EXC_LEN)
1326 return SNTI_INTERNAL_ERROR;
1327
1328 resp[0] = MODE_PAGE_INFO_EXCEP;
1329 resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
1330 resp[2] = 0x88;
1331 /* All other bytes are zero */
1332
1333 return res;
1334}
1335
1336static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1337 u8 *resp, int len)
1338{
1339 int res = SNTI_TRANSLATION_SUCCESS;
1340 u16 mode_pages_offset_1 = 0;
1341 u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;
1342
1343 mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
1344 mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
1345 mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;
1346
1347 res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
1348 MODE_PAGE_CACHING_LEN);
1349 if (res != SNTI_TRANSLATION_SUCCESS)
1350 goto out;
1351 res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
1352 MODE_PAGE_CONTROL_LEN);
1353 if (res != SNTI_TRANSLATION_SUCCESS)
1354 goto out;
1355 res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
1356 MODE_PAGE_POW_CND_LEN);
1357 if (res != SNTI_TRANSLATION_SUCCESS)
1358 goto out;
1359 res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
1360 MODE_PAGE_INF_EXC_LEN);
1361 if (res != SNTI_TRANSLATION_SUCCESS)
1362 goto out;
1363
1364 out:
1365 return res;
1366}
1367
1368static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
1369{
1370 if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
1371 /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
1372 return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
1373 } else {
1374 return 0;
1375 }
1376}
1377
1378static int nvme_trans_mode_page_create(struct nvme_ns *ns,
1379 struct sg_io_hdr *hdr, u8 *cmd,
1380 u16 alloc_len, u8 cdb10,
1381 int (*mode_page_fill_func)
1382 (struct nvme_ns *,
1383 struct sg_io_hdr *hdr, u8 *, int),
1384 u16 mode_pages_tot_len)
1385{
1386 int res = SNTI_TRANSLATION_SUCCESS;
1387 int xfer_len;
1388 u8 *response;
1389 u8 dbd, llbaa;
1390 u16 resp_size;
1391 int mph_size;
1392 u16 mode_pages_offset_1;
1393 u16 blk_desc_len, blk_desc_offset, mode_data_length;
1394
1395 dbd = GET_MODE_SENSE_DBD(cmd);
1396 llbaa = GET_MODE_SENSE_LLBAA(cmd);
1397 mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
1398 blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);
1399
1400 resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
1401 /* Refer spc4r34 Table 440 for calculation of Mode data Length field */
1402 mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
1403
1404 blk_desc_offset = mph_size;
1405 mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
1406
1407 response = kzalloc(resp_size, GFP_KERNEL);
1408 if (response == NULL) {
1409 res = -ENOMEM;
1410 goto out_mem;
1411 }
1412
1413 res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
1414 llbaa, mode_data_length, blk_desc_len);
1415 if (res != SNTI_TRANSLATION_SUCCESS)
1416 goto out_free;
1417 if (blk_desc_len > 0) {
1418 res = nvme_trans_fill_blk_desc(ns, hdr,
1419 &response[blk_desc_offset],
1420 blk_desc_len, llbaa);
1421 if (res != SNTI_TRANSLATION_SUCCESS)
1422 goto out_free;
1423 }
1424 res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
1425 mode_pages_tot_len);
1426 if (res != SNTI_TRANSLATION_SUCCESS)
1427 goto out_free;
1428
1429 xfer_len = min(alloc_len, resp_size);
1430 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
1431
1432 out_free:
1433 kfree(response);
1434 out_mem:
1435 return res;
1436}
1437
1438/* Read Capacity Helper Functions */
1439
1440static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
1441 u8 cdb16)
1442{
1443 u8 flbas;
1444 u32 lba_length;
1445 u64 rlba;
1446 u8 prot_en;
1447 u8 p_type_lut[4] = {0, 0, 1, 2};
1448 __be64 tmp_rlba;
1449 __be32 tmp_rlba_32;
1450 __be32 tmp_len;
1451
1452 flbas = (id_ns->flbas) & 0x0F;
1453 lba_length = (1 << (id_ns->lbaf[flbas].ds));
1454 rlba = le64_to_cpup(&id_ns->nsze) - 1;
1455 (id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0);
1456
1457 if (!cdb16) {
1458 if (rlba > 0xFFFFFFFF)
1459 rlba = 0xFFFFFFFF;
1460 tmp_rlba_32 = cpu_to_be32(rlba);
1461 tmp_len = cpu_to_be32(lba_length);
1462 memcpy(response, &tmp_rlba_32, sizeof(u32));
1463 memcpy(&response[4], &tmp_len, sizeof(u32));
1464 } else {
1465 tmp_rlba = cpu_to_be64(rlba);
1466 tmp_len = cpu_to_be32(lba_length);
1467 memcpy(response, &tmp_rlba, sizeof(u64));
1468 memcpy(&response[8], &tmp_len, sizeof(u32));
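	/* NVMe protection types 1-3 (DPS bits 2:0) map to SCSI P_TYPE values 0-2 */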
1469 response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
1470 /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
1471 /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
1472 /* Bytes 16-31 - Reserved */
1473 }
1474}
1475
1476/* Start Stop Unit Helper Functions */
1477
1478static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1479 u8 pc, u8 pcmod, u8 start)
1480{
1481 int res = SNTI_TRANSLATION_SUCCESS;
1482 int nvme_sc;
1483 struct nvme_dev *dev = ns->dev;
1484 dma_addr_t dma_addr;
1485 void *mem;
1486 struct nvme_id_ctrl *id_ctrl;
1487 int lowest_pow_st; /* max npss = lowest power consumption */
1488 unsigned ps_desired = 0;
1489
1490 /* NVMe Controller Identify */
1491 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
1492 &dma_addr, GFP_KERNEL);
1493 if (mem == NULL) {
1494 res = -ENOMEM;
1495 goto out;
1496 }
1497 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
1498 res = nvme_trans_status_code(hdr, nvme_sc);
1499 if (res)
1500 goto out_dma;
1501 if (nvme_sc) {
1502 res = nvme_sc;
1503 goto out_dma;
1504 }
1505 id_ctrl = mem;
1506 lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
1507
1508 switch (pc) {
1509 case NVME_POWER_STATE_START_VALID:
1510 /* Action unspecified if POWER CONDITION MODIFIER != 0 */
1511 if (pcmod == 0 && start == 0x1)
1512 ps_desired = POWER_STATE_0;
1513 if (pcmod == 0 && start == 0x0)
1514 ps_desired = lowest_pow_st;
1515 break;
1516 case NVME_POWER_STATE_ACTIVE:
1517 /* Action unspecified if POWER CONDITION MODIFIER != 0 */
1518 if (pcmod == 0)
1519 ps_desired = POWER_STATE_0;
1520 break;
1521 case NVME_POWER_STATE_IDLE:
1522 /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
1523 if (pcmod == 0x0)
1524 ps_desired = POWER_STATE_1;
1525 else if (pcmod == 0x1)
1526 ps_desired = POWER_STATE_2;
1527 else if (pcmod == 0x2)
1528 ps_desired = POWER_STATE_3;
1529 break;
1530 case NVME_POWER_STATE_STANDBY:
1531 /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
1532 if (pcmod == 0x0)
1533 ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
1534 else if (pcmod == 0x1)
1535 ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
1536 break;
1537 case NVME_POWER_STATE_LU_CONTROL:
1538 default:
1539 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1540 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1541 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1542 break;
1543 }
1544 nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
1545 NULL);
1546 res = nvme_trans_status_code(hdr, nvme_sc);
1547 if (res)
1548 goto out_dma;
1549 if (nvme_sc)
1550 res = nvme_sc;
1551 out_dma:
1552 dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
1553 out:
1554 return res;
1555}
1556
1557/* Write Buffer Helper Functions */
1558/* Also using this for Format Unit with hdr passed as NULL, and buffer_id, 0 */
1559
1560static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1561 u8 opcode, u32 tot_len, u32 offset,
1562 u8 buffer_id)
1563{
1564 int res = SNTI_TRANSLATION_SUCCESS;
1565 int nvme_sc;
1566 struct nvme_dev *dev = ns->dev;
1567 struct nvme_command c;
1568 struct nvme_iod *iod = NULL;
1569 unsigned length;
1570
1571 memset(&c, 0, sizeof(c));
1572 c.common.opcode = opcode;
1573 if (opcode == nvme_admin_download_fw) {
1574 if (hdr->iovec_count > 0) {
1575 /* Assuming SGL is not allowed for this command */
1576 res = nvme_trans_completion(hdr,
1577 SAM_STAT_CHECK_CONDITION,
1578 ILLEGAL_REQUEST,
1579 SCSI_ASC_INVALID_CDB,
1580 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1581 goto out;
1582 }
1583 iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
1584 (unsigned long)hdr->dxferp, tot_len);
1585 if (IS_ERR(iod)) {
1586 res = PTR_ERR(iod);
1587 goto out;
1588 }
1589 length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
1590 if (length != tot_len) {
1591 res = -ENOMEM;
1592 goto out_unmap;
1593 }
1594
1595 c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1596 c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
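	/* Firmware Image Download takes a 0-based dword count (NUMD) and a dword-granularity offset */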
1597 c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
1598 c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
1599 } else if (opcode == nvme_admin_activate_fw) {
1600 u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
1601 c.common.cdw10[0] = cpu_to_le32(cdw10);
1602 }
1603
1604 nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c);
1605 res = nvme_trans_status_code(hdr, nvme_sc);
1606 if (res)
1607 goto out_unmap;
1608 if (nvme_sc)
1609 res = nvme_sc;
1610
1611 out_unmap:
1612 if (opcode == nvme_admin_download_fw) {
1613 nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
1614 nvme_free_iod(dev, iod);
1615 }
1616 out:
1617 return res;
1618}
1619
1620/* Mode Select Helper Functions */
1621
1622static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
1623 u16 *bd_len, u8 *llbaa)
1624{
1625 if (cdb10) {
1626 /* 10 Byte CDB */
1627 *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
1628 parm_list[MODE_SELECT_10_BD_OFFSET + 1];
1629 *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
1630 MODE_SELECT_10_LLBAA_MASK;
1631 } else {
1632 /* 6 Byte CDB */
1633 *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
1634 }
1635}
1636
1637static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
1638 u16 idx, u16 bd_len, u8 llbaa)
1639{
1640 u16 bd_num;
1641
1642 bd_num = bd_len / ((llbaa == 0) ?
1643 SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
1644 /* Store block descriptor info if a FORMAT UNIT comes later */
1645 /* TODO Saving 1st BD info; what to do if multiple BD received? */
1646 if (llbaa == 0) {
1647 /* Standard Block Descriptor - spc4r34 7.5.5.1 */
1648 ns->mode_select_num_blocks =
1649 (parm_list[idx + 1] << 16) +
1650 (parm_list[idx + 2] << 8) +
1651 (parm_list[idx + 3]);
1652
1653 ns->mode_select_block_len =
1654 (parm_list[idx + 5] << 16) +
1655 (parm_list[idx + 6] << 8) +
1656 (parm_list[idx + 7]);
1657 } else {
1658 /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
1659 ns->mode_select_num_blocks =
1660 (((u64)parm_list[idx + 0]) << 56) +
1661 (((u64)parm_list[idx + 1]) << 48) +
1662 (((u64)parm_list[idx + 2]) << 40) +
1663 (((u64)parm_list[idx + 3]) << 32) +
1664 (((u64)parm_list[idx + 4]) << 24) +
1665 (((u64)parm_list[idx + 5]) << 16) +
1666 (((u64)parm_list[idx + 6]) << 8) +
1667 ((u64)parm_list[idx + 7]);
1668
1669 ns->mode_select_block_len =
1670 (parm_list[idx + 12] << 24) +
1671 (parm_list[idx + 13] << 16) +
1672 (parm_list[idx + 14] << 8) +
1673 (parm_list[idx + 15]);
1674 }
1675}
1676
1677static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1678 u8 *mode_page, u8 page_code)
1679{
1680 int res = SNTI_TRANSLATION_SUCCESS;
1681 int nvme_sc;
1682 struct nvme_dev *dev = ns->dev;
1683 unsigned dword11;
1684
1685 switch (page_code) {
1686 case MODE_PAGE_CACHING:
1687 dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
1688 nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
1689 0, NULL);
1690 res = nvme_trans_status_code(hdr, nvme_sc);
1691 if (res)
1692 break;
1693 if (nvme_sc) {
1694 res = nvme_sc;
1695 break;
1696 }
1697 break;
1698 case MODE_PAGE_CONTROL:
1699 break;
1700 case MODE_PAGE_POWER_CONDITION:
1701 /* Verify the OS is not trying to set timers */
1702 if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
1703 res = nvme_trans_completion(hdr,
1704 SAM_STAT_CHECK_CONDITION,
1705 ILLEGAL_REQUEST,
1706 SCSI_ASC_INVALID_PARAMETER,
1707 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1708 if (!res)
1709 res = SNTI_INTERNAL_ERROR;
1710 break;
1711 }
1712 break;
1713 default:
1714 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1715 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1716 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1717 if (!res)
1718 res = SNTI_INTERNAL_ERROR;
1719 break;
1720 }
1721
1722 return res;
1723}
1724
1725static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1726 u8 *cmd, u16 parm_list_len, u8 pf,
1727 u8 sp, u8 cdb10)
1728{
1729 int res = SNTI_TRANSLATION_SUCCESS;
1730 u8 *parm_list;
1731 u16 bd_len;
1732 u8 llbaa = 0;
1733 u16 index, saved_index;
1734 u8 page_code;
1735 u16 mp_size;
1736
1737 /* Get parm list from data-in/out buffer */
1738 parm_list = kmalloc(parm_list_len, GFP_KERNEL);
1739 if (parm_list == NULL) {
1740 res = -ENOMEM;
1741 goto out;
1742 }
1743
1744 res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
1745 if (res != SNTI_TRANSLATION_SUCCESS)
1746 goto out_mem;
1747
1748 nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
1749 index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);
1750
1751 if (bd_len != 0) {
1752 /* Block Descriptors present, parse */
1753 nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
1754 index += bd_len;
1755 }
1756 saved_index = index;
1757
1758	/* Multiple mode pages may be present; iterate through them all */
1759	/* 1st iteration: don't issue NVMe commands, only check for CDB errors */
1760 do {
1761 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
1762 mp_size = parm_list[index + 1] + 2;
1763 if ((page_code != MODE_PAGE_CACHING) &&
1764 (page_code != MODE_PAGE_CONTROL) &&
1765 (page_code != MODE_PAGE_POWER_CONDITION)) {
1766 res = nvme_trans_completion(hdr,
1767 SAM_STAT_CHECK_CONDITION,
1768 ILLEGAL_REQUEST,
1769 SCSI_ASC_INVALID_CDB,
1770 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1771 goto out_mem;
1772 }
1773 index += mp_size;
1774 } while (index < parm_list_len);
1775
1776	/* 2nd iteration: issue the NVMe commands */
1777 index = saved_index;
1778 do {
1779 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
1780 mp_size = parm_list[index + 1] + 2;
1781 res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
1782 page_code);
1783 if (res != SNTI_TRANSLATION_SUCCESS)
1784 break;
1785 index += mp_size;
1786 } while (index < parm_list_len);
1787
1788 out_mem:
1789 kfree(parm_list);
1790 out:
1791 return res;
1792}
1793
1794/* Format Unit Helper Functions */
1795
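/*
 * Resolve the block size and block count to be used by FORMAT UNIT.  If a
 * preceding MODE SELECT did not supply them, fall back to the namespace's
 * current values from Identify Namespace (ncap and the active LBA format).
 */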
1796static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
1797 struct sg_io_hdr *hdr)
1798{
1799 int res = SNTI_TRANSLATION_SUCCESS;
1800 int nvme_sc;
1801 struct nvme_dev *dev = ns->dev;
1802 dma_addr_t dma_addr;
1803 void *mem;
1804 struct nvme_id_ns *id_ns;
1805 u8 flbas;
1806
1807	/*
1808	 * SCSI expects that a MODE SELECT has been issued prior to
1809	 * a FORMAT UNIT, and that the block size and count are taken
1810	 * from the block descriptor in it. If no MODE SELECT has
1811	 * been issued, FORMAT shall use the current values for both.
1812	 */
1813
1814 if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
e75ec752 1815 mem = dma_alloc_coherent(dev->dev,
5d0f6131
VV
1816 sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
1817 if (mem == NULL) {
1818 res = -ENOMEM;
1819 goto out;
1820 }
1821 /* nvme ns identify */
1822 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
1823 res = nvme_trans_status_code(hdr, nvme_sc);
1824 if (res)
1825 goto out_dma;
1826 if (nvme_sc) {
1827 res = nvme_sc;
1828 goto out_dma;
1829 }
1830 id_ns = mem;
1831
1832 if (ns->mode_select_num_blocks == 0)
8741ee4c 1833 ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
5d0f6131
VV
1834 if (ns->mode_select_block_len == 0) {
1835 flbas = (id_ns->flbas) & 0x0F;
1836 ns->mode_select_block_len =
1837 (1 << (id_ns->lbaf[flbas].ds));
1838 }
1839 out_dma:
e75ec752 1840 dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns),
5d0f6131
VV
1841 mem, dma_addr);
1842 }
1843 out:
1844 return res;
1845}
1846
1847static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
1848 u8 format_prot_info, u8 *nvme_pf_code)
1849{
1850 int res = SNTI_TRANSLATION_SUCCESS;
1851 u8 *parm_list;
1852 u8 pf_usage, pf_code;
1853
1854 parm_list = kmalloc(len, GFP_KERNEL);
1855 if (parm_list == NULL) {
1856 res = -ENOMEM;
1857 goto out;
1858 }
1859 res = nvme_trans_copy_from_user(hdr, parm_list, len);
1860 if (res != SNTI_TRANSLATION_SUCCESS)
1861 goto out_mem;
1862
1863 if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
1864 FORMAT_UNIT_IMMED_MASK) != 0) {
1865 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1866 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1867 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1868 goto out_mem;
1869 }
1870
1871 if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
1872 (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
1873 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1874 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1875 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1876 goto out_mem;
1877 }
1878 pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
1879 FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
1880 pf_code = (pf_usage << 2) | format_prot_info;
1881 switch (pf_code) {
1882 case 0:
1883 *nvme_pf_code = 0;
1884 break;
1885 case 2:
1886 *nvme_pf_code = 1;
1887 break;
1888 case 3:
1889 *nvme_pf_code = 2;
1890 break;
1891 case 7:
1892 *nvme_pf_code = 3;
1893 break;
1894 default:
1895 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1896 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1897 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1898 break;
1899 }
1900
1901 out_mem:
1902 kfree(parm_list);
1903 out:
1904 return res;
1905}
1906
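/*
 * Issue the NVMe Format NVM command for a translated FORMAT UNIT: find the
 * LBA format whose data size matches the requested block size, then encode
 * the selected LBAF and protection information into cdw10.
 */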
1907static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1908 u8 prot_info)
1909{
1910 int res = SNTI_TRANSLATION_SUCCESS;
1911 int nvme_sc;
1912 struct nvme_dev *dev = ns->dev;
1913 dma_addr_t dma_addr;
1914 void *mem;
1915 struct nvme_id_ns *id_ns;
1916 u8 i;
1917 u8 flbas, nlbaf;
1918 u8 selected_lbaf = 0xFF;
1919 u32 cdw10 = 0;
1920 struct nvme_command c;
1921
1922	/* Loop through the LBAFs in id_ns to find the required LBAF; put it in cdw10 */
e75ec752 1923 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
5d0f6131
VV
1924 &dma_addr, GFP_KERNEL);
1925 if (mem == NULL) {
1926 res = -ENOMEM;
1927 goto out;
1928 }
1929 /* nvme ns identify */
1930 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
1931 res = nvme_trans_status_code(hdr, nvme_sc);
1932 if (res)
1933 goto out_dma;
1934 if (nvme_sc) {
1935 res = nvme_sc;
1936 goto out_dma;
1937 }
1938 id_ns = mem;
1939 flbas = (id_ns->flbas) & 0x0F;
1940 nlbaf = id_ns->nlbaf;
1941
1942 for (i = 0; i < nlbaf; i++) {
1943 if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
1944 selected_lbaf = i;
1945 break;
1946 }
1947 }
1948 if (selected_lbaf > 0x0F) {
1949 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1950 ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
1951 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1952 }
8741ee4c 1953 if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
5d0f6131
VV
1954 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1955 ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
1956 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1957 }
1958
1959 cdw10 |= prot_info << 5;
1960 cdw10 |= selected_lbaf & 0x0F;
1961 memset(&c, 0, sizeof(c));
1962 c.format.opcode = nvme_admin_format_nvm;
8741ee4c 1963 c.format.nsid = cpu_to_le32(ns->ns_id);
5d0f6131
VV
1964 c.format.cdw10 = cpu_to_le32(cdw10);
1965
f705f837 1966 nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c);
5d0f6131
VV
1967 res = nvme_trans_status_code(hdr, nvme_sc);
1968 if (res)
1969 goto out_dma;
1970 if (nvme_sc)
1971 res = nvme_sc;
1972
1973 out_dma:
e75ec752 1974 dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
5d0f6131
VV
1975 out:
1976 return res;
1977}
1978
1979/* Read/Write Helper Functions */
1980
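/*
 * The following helpers extract the LBA, transfer length, FUA bit and
 * protection information from the 6/10/12/16-byte READ/WRITE CDBs into a
 * struct nvme_trans_io_cdb.
 */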
1981static inline void nvme_trans_get_io_cdb6(u8 *cmd,
1982 struct nvme_trans_io_cdb *cdb_info)
1983{
1984 cdb_info->fua = 0;
1985 cdb_info->prot_info = 0;
1986 cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
1987 IO_6_CDB_LBA_MASK;
1988 cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);
1989
1990 /* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */
1991 if (cdb_info->xfer_len == 0)
1992 cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
1993}
1994
1995static inline void nvme_trans_get_io_cdb10(u8 *cmd,
1996 struct nvme_trans_io_cdb *cdb_info)
1997{
1998 cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
1999 IO_CDB_FUA_MASK;
2000	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
2001	IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
2002 cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
2003 cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
2004}
2005
2006static inline void nvme_trans_get_io_cdb12(u8 *cmd,
2007 struct nvme_trans_io_cdb *cdb_info)
2008{
2009 cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
2010 IO_CDB_FUA_MASK;
2011	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
2012	IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
2013 cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
2014 cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
2015}
2016
2017static inline void nvme_trans_get_io_cdb16(u8 *cmd,
2018 struct nvme_trans_io_cdb *cdb_info)
2019{
2020 cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
2021 IO_CDB_FUA_MASK;
2022	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
2023	IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
2024 cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
2025 cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
2026}
2027
2028static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
2029 struct nvme_trans_io_cdb *cdb_info,
2030 u32 max_blocks)
2031{
2032 /* If using iovecs, send one nvme command per vector */
2033 if (hdr->iovec_count > 0)
2034 return hdr->iovec_count;
2035 else if (cdb_info->xfer_len > max_blocks)
2036 return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
2037 else
2038 return 1;
2039}
2040
2041static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
2042 struct nvme_trans_io_cdb *cdb_info)
2043{
2044 u16 control = 0;
2045
2046 /* When Protection information support is added, implement here */
2047
2048 if (cdb_info->fua > 0)
2049 control |= NVME_RW_FUA;
2050
2051 return control;
2052}
2053
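/*
 * Build and submit the NVMe read/write command(s) for a translated SCSI
 * I/O: for each iovec (or each max_blocks-sized chunk of a flat buffer),
 * map the user pages, set up the PRPs, submit the command synchronously,
 * then unmap and free the iod.
 */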
2054static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2055 struct nvme_trans_io_cdb *cdb_info, u8 is_write)
2056{
2057 int res = SNTI_TRANSLATION_SUCCESS;
2058 int nvme_sc;
2059 struct nvme_dev *dev = ns->dev;
5d0f6131
VV
2060 u32 num_cmds;
2061 struct nvme_iod *iod;
2062 u64 unit_len;
2063 u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */
2064 u32 retcode;
2065 u32 i = 0;
2066 u64 nvme_offset = 0;
8741ee4c 2067 void __user *next_mapping_addr;
5d0f6131
VV
2068 struct nvme_command c;
2069 u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
2070 u16 control;
ddcb7762 2071 u32 max_blocks = queue_max_hw_sectors(ns->queue);
5d0f6131
VV
2072
2073 num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
2074
2075 /*
2076 * This loop handles two cases.
2077 * First, when an SGL is used in the form of an iovec list:
2078 * - Use iov_base as the next mapping address for the nvme command_id
2079 * - Use iov_len as the data transfer length for the command.
2080	 * Second, when we have a single buffer:
2081	 * - If it is larger than max_blocks, split it into chunks and offset
2082	 * each nvme command accordingly.
2083 */
2084 for (i = 0; i < num_cmds; i++) {
2085 memset(&c, 0, sizeof(c));
2086 if (hdr->iovec_count > 0) {
8741ee4c
VV
2087 struct sg_iovec sgl;
2088
2089 retcode = copy_from_user(&sgl, hdr->dxferp +
2090 i * sizeof(struct sg_iovec),
2091 sizeof(struct sg_iovec));
2092 if (retcode)
2093 return -EFAULT;
2094 unit_len = sgl.iov_len;
5d0f6131 2095 unit_num_blocks = unit_len >> ns->lba_shift;
8741ee4c 2096 next_mapping_addr = sgl.iov_base;
5d0f6131
VV
2097 } else {
2098 unit_num_blocks = min((u64)max_blocks,
2099 (cdb_info->xfer_len - nvme_offset));
2100 unit_len = unit_num_blocks << ns->lba_shift;
2101 next_mapping_addr = hdr->dxferp +
2102 ((1 << ns->lba_shift) * nvme_offset);
2103 }
2104
2105 c.rw.opcode = opcode;
2106 c.rw.nsid = cpu_to_le32(ns->ns_id);
2107 c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
2108 c.rw.length = cpu_to_le16(unit_num_blocks - 1);
2109 control = nvme_trans_io_get_control(ns, cdb_info);
2110 c.rw.control = cpu_to_le16(control);
2111
2112 iod = nvme_map_user_pages(dev,
2113 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2114 (unsigned long)next_mapping_addr, unit_len);
2115 if (IS_ERR(iod)) {
2116 res = PTR_ERR(iod);
2117 goto out;
2118 }
edd10d33 2119 retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
5d0f6131
VV
2120 if (retcode != unit_len) {
2121 nvme_unmap_user_pages(dev,
2122 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2123 iod);
2124 nvme_free_iod(dev, iod);
2125 res = -ENOMEM;
2126 goto out;
2127 }
edd10d33
KB
2128 c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
2129 c.rw.prp2 = cpu_to_le64(iod->first_dma);
5d0f6131
VV
2130
2131 nvme_offset += unit_num_blocks;
2132
f705f837 2133 nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
5d0f6131
VV
2134 if (nvme_sc != NVME_SC_SUCCESS) {
2135 nvme_unmap_user_pages(dev,
2136 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2137 iod);
2138 nvme_free_iod(dev, iod);
2139 res = nvme_trans_status_code(hdr, nvme_sc);
2140 goto out;
2141 }
2142 nvme_unmap_user_pages(dev,
2143 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2144 iod);
2145 nvme_free_iod(dev, iod);
2146 }
2147 res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
2148
2149 out:
2150 return res;
2151}
2152
2153
2154/* SCSI Command Translation Functions */
2155
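/*
 * Translate SCSI READ/WRITE (6/10/12/16): parse the CDB, check that the
 * data buffer (or the sum of the iovec lengths) matches the requested
 * block count, then hand off to nvme_trans_do_nvme_io().
 */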
2156static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
2157 u8 *cmd)
2158{
2159 int res = SNTI_TRANSLATION_SUCCESS;
2160 struct nvme_trans_io_cdb cdb_info;
2161 u8 opcode = cmd[0];
2162 u64 xfer_bytes;
2163 u64 sum_iov_len = 0;
8741ee4c 2164 struct sg_iovec sgl;
5d0f6131 2165 int i;
8741ee4c 2166 size_t not_copied;
5d0f6131
VV
2167
2168 /* Extract Fields from CDB */
2169 switch (opcode) {
2170 case WRITE_6:
2171 case READ_6:
2172 nvme_trans_get_io_cdb6(cmd, &cdb_info);
2173 break;
2174 case WRITE_10:
2175 case READ_10:
2176 nvme_trans_get_io_cdb10(cmd, &cdb_info);
2177 break;
2178 case WRITE_12:
2179 case READ_12:
2180 nvme_trans_get_io_cdb12(cmd, &cdb_info);
2181 break;
2182 case WRITE_16:
2183 case READ_16:
2184 nvme_trans_get_io_cdb16(cmd, &cdb_info);
2185 break;
2186 default:
2187 /* Will never really reach here */
2188 res = SNTI_INTERNAL_ERROR;
2189 goto out;
2190 }
2191
2192 /* Calculate total length of transfer (in bytes) */
2193 if (hdr->iovec_count > 0) {
5d0f6131 2194 for (i = 0; i < hdr->iovec_count; i++) {
8741ee4c
VV
2195 not_copied = copy_from_user(&sgl, hdr->dxferp +
2196 i * sizeof(struct sg_iovec),
2197 sizeof(struct sg_iovec));
2198 if (not_copied)
2199 return -EFAULT;
2200 sum_iov_len += sgl.iov_len;
5d0f6131 2201 /* IO vector sizes should be multiples of block size */
8741ee4c 2202 if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
5d0f6131
VV
2203 res = nvme_trans_completion(hdr,
2204 SAM_STAT_CHECK_CONDITION,
2205 ILLEGAL_REQUEST,
2206 SCSI_ASC_INVALID_PARAMETER,
2207 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2208 goto out;
2209 }
2210 }
2211 } else {
2212 sum_iov_len = hdr->dxfer_len;
2213 }
2214
2215	/* As per the sg ioctl HOWTO, if the lengths differ, use the lower one */
2216 xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
2217
2218	/* If the block count and actual data buffer size don't match, error out */
2219 if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
2220 res = -EINVAL;
2221 goto out;
2222 }
2223
2224 /* Check for 0 length transfer - it is not illegal */
2225 if (cdb_info.xfer_len == 0)
2226 goto out;
2227
2228 /* Send NVMe IO Command(s) */
2229 res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
2230 if (res != SNTI_TRANSLATION_SUCCESS)
2231 goto out;
2232
2233 out:
2234 return res;
2235}
2236
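/*
 * Translate INQUIRY: return standard inquiry data when EVPD is clear,
 * otherwise build the requested VPD page (supported pages, serial number,
 * device identifiers, extended inquiry, block limits or block device
 * characteristics).
 */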
2237static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2238 u8 *cmd)
2239{
2240 int res = SNTI_TRANSLATION_SUCCESS;
2241 u8 evpd;
2242 u8 page_code;
2243 int alloc_len;
2244 u8 *inq_response;
2245
2246 evpd = GET_INQ_EVPD_BIT(cmd);
2247 page_code = GET_INQ_PAGE_CODE(cmd);
2248 alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
2249
4f1982b4 2250 inq_response = kmalloc(alloc_len, GFP_KERNEL);
5d0f6131
VV
2251 if (inq_response == NULL) {
2252 res = -ENOMEM;
2253 goto out_mem;
2254 }
2255
2256 if (evpd == 0) {
2257 if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
2258 res = nvme_trans_standard_inquiry_page(ns, hdr,
2259 inq_response, alloc_len);
2260 } else {
2261 res = nvme_trans_completion(hdr,
2262 SAM_STAT_CHECK_CONDITION,
2263 ILLEGAL_REQUEST,
2264 SCSI_ASC_INVALID_CDB,
2265 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2266 }
2267 } else {
2268 switch (page_code) {
2269 case VPD_SUPPORTED_PAGES:
2270 res = nvme_trans_supported_vpd_pages(ns, hdr,
2271 inq_response, alloc_len);
2272 break;
2273 case VPD_SERIAL_NUMBER:
2274 res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
2275 alloc_len);
2276 break;
2277 case VPD_DEVICE_IDENTIFIERS:
2278 res = nvme_trans_device_id_page(ns, hdr, inq_response,
2279 alloc_len);
2280 break;
2281 case VPD_EXTENDED_INQUIRY:
2282 res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
2283 break;
7f749d9c
KB
2284 case VPD_BLOCK_LIMITS:
2285 res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
2286 alloc_len);
2287 break;
5d0f6131
VV
2288 case VPD_BLOCK_DEV_CHARACTERISTICS:
2289 res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
2290 break;
2291 default:
2292 res = nvme_trans_completion(hdr,
2293 SAM_STAT_CHECK_CONDITION,
2294 ILLEGAL_REQUEST,
2295 SCSI_ASC_INVALID_CDB,
2296 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2297 break;
2298 }
2299 }
2300 kfree(inq_response);
2301 out_mem:
2302 return res;
2303}
2304
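/*
 * Translate LOG SENSE for the supported-pages, informational-exceptions
 * and temperature log pages.  Only cumulative values with saved
 * parameters disabled are supported.
 */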
2305static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2306 u8 *cmd)
2307{
2308 int res = SNTI_TRANSLATION_SUCCESS;
2309 u16 alloc_len;
2310 u8 sp;
2311 u8 pc;
2312 u8 page_code;
2313
2314 sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
2315 if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
2316 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2317 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2318 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2319 goto out;
2320 }
2321 pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
2322 page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
2323 pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
2324 if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
2325 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2326 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2327 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2328 goto out;
2329 }
2330 alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
2331 switch (page_code) {
2332 case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
2333 res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
2334 break;
2335 case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
2336 res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
2337 break;
2338 case LOG_PAGE_TEMPERATURE_PAGE:
2339 res = nvme_trans_log_temperature(ns, hdr, alloc_len);
2340 break;
2341 default:
2342 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2343 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2344 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2345 break;
2346 }
2347
2348 out:
2349 return res;
2350}
2351
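/*
 * Translate MODE SELECT (6/10): pull the parameter list length out of the
 * CDB and, if a parameter list was supplied, process it via
 * nvme_trans_modesel_data().
 */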
2352static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2353 u8 *cmd)
2354{
2355 int res = SNTI_TRANSLATION_SUCCESS;
2356 u8 cdb10 = 0;
2357 u16 parm_list_len;
2358 u8 page_format;
2359 u8 save_pages;
2360
2361 page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
2362 page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;
2363
2364 save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
2365 save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;
2366
2367 if (GET_OPCODE(cmd) == MODE_SELECT) {
2368 parm_list_len = GET_U8_FROM_CDB(cmd,
2369 MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
2370 } else {
2371 parm_list_len = GET_U16_FROM_CDB(cmd,
2372 MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
2373 cdb10 = 1;
2374 }
2375
2376 if (parm_list_len != 0) {
2377 /*
2378	 * According to SPC-4 r24, a parameter list length field of 0
2379	 * shall not be considered an error.
2380 */
2381 res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
2382 page_format, save_pages, cdb10);
2383 }
2384
2385 return res;
2386}
2387
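/*
 * Translate MODE SENSE (6/10) by building the requested mode page (or all
 * pages).  Only reporting of current values is supported.
 */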
2388static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2389 u8 *cmd)
2390{
2391 int res = SNTI_TRANSLATION_SUCCESS;
2392 u16 alloc_len;
2393 u8 cdb10 = 0;
2394 u8 page_code;
2395 u8 pc;
2396
2397 if (GET_OPCODE(cmd) == MODE_SENSE) {
2398 alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
2399 } else {
2400 alloc_len = GET_U16_FROM_CDB(cmd,
2401 MODE_SENSE10_ALLOC_LEN_OFFSET);
2402 cdb10 = 1;
2403 }
2404
2405 pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
2406 MODE_SENSE_PAGE_CONTROL_MASK;
2407 if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
2408 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2409 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2410 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2411 goto out;
2412 }
2413
2414 page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
2415 MODE_SENSE_PAGE_CODE_MASK;
2416 switch (page_code) {
2417 case MODE_PAGE_CACHING:
2418 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2419 cdb10,
2420 &nvme_trans_fill_caching_page,
2421 MODE_PAGE_CACHING_LEN);
2422 break;
2423 case MODE_PAGE_CONTROL:
2424 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2425 cdb10,
2426 &nvme_trans_fill_control_page,
2427 MODE_PAGE_CONTROL_LEN);
2428 break;
2429 case MODE_PAGE_POWER_CONDITION:
2430 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2431 cdb10,
2432 &nvme_trans_fill_pow_cnd_page,
2433 MODE_PAGE_POW_CND_LEN);
2434 break;
2435 case MODE_PAGE_INFO_EXCEP:
2436 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2437 cdb10,
2438 &nvme_trans_fill_inf_exc_page,
2439 MODE_PAGE_INF_EXC_LEN);
2440 break;
2441 case MODE_PAGE_RETURN_ALL:
2442 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2443 cdb10,
2444 &nvme_trans_fill_all_pages,
2445 MODE_PAGE_ALL_LEN);
2446 break;
2447 default:
2448 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2449 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2450 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2451 break;
2452 }
2453
2454 out:
2455 return res;
2456}
2457
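/*
 * Translate READ CAPACITY (10) and READ CAPACITY (16) using the data
 * returned by Identify Namespace.
 */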
2458static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2459 u8 *cmd)
2460{
2461 int res = SNTI_TRANSLATION_SUCCESS;
2462 int nvme_sc;
2463 u32 alloc_len = READ_CAP_10_RESP_SIZE;
2464 u32 resp_size = READ_CAP_10_RESP_SIZE;
2465 u32 xfer_len;
2466 u8 cdb16;
2467 struct nvme_dev *dev = ns->dev;
2468 dma_addr_t dma_addr;
2469 void *mem;
2470 struct nvme_id_ns *id_ns;
2471 u8 *response;
2472
2473 cdb16 = IS_READ_CAP_16(cmd);
2474 if (cdb16) {
2475 alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
2476 resp_size = READ_CAP_16_RESP_SIZE;
2477 }
2478
e75ec752 2479 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
5d0f6131
VV
2480 &dma_addr, GFP_KERNEL);
2481 if (mem == NULL) {
2482 res = -ENOMEM;
2483 goto out;
2484 }
2485 /* nvme ns identify */
2486 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
2487 res = nvme_trans_status_code(hdr, nvme_sc);
2488 if (res)
2489 goto out_dma;
2490 if (nvme_sc) {
2491 res = nvme_sc;
2492 goto out_dma;
2493 }
2494 id_ns = mem;
2495
03ea83e9 2496 response = kzalloc(resp_size, GFP_KERNEL);
5d0f6131
VV
2497 if (response == NULL) {
2498 res = -ENOMEM;
2499 goto out_dma;
2500 }
5d0f6131
VV
2501 nvme_trans_fill_read_cap(response, id_ns, cdb16);
2502
2503 xfer_len = min(alloc_len, resp_size);
2504 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2505
2506 kfree(response);
2507 out_dma:
e75ec752 2508 dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
5d0f6131
VV
2509 out:
2510 return res;
2511}
2512
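/*
 * Translate REPORT LUNS: read the namespace count from Identify Controller
 * and report one LUN entry per namespace, starting at LUN 0.
 */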
2513static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2514 u8 *cmd)
2515{
2516 int res = SNTI_TRANSLATION_SUCCESS;
2517 int nvme_sc;
2518 u32 alloc_len, xfer_len, resp_size;
2519 u8 select_report;
2520 u8 *response;
2521 struct nvme_dev *dev = ns->dev;
2522 dma_addr_t dma_addr;
2523 void *mem;
2524 struct nvme_id_ctrl *id_ctrl;
2525 u32 ll_length, lun_id;
2526 u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
8741ee4c 2527 __be32 tmp_len;
5d0f6131
VV
2528
2529 alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
2530 select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);
2531
2532 if ((select_report != ALL_LUNS_RETURNED) &&
2533 (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
2534 (select_report != RESTRICTED_LUNS_RETURNED)) {
2535 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2536 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2537 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2538 goto out;
2539 } else {
2540 /* NVMe Controller Identify */
e75ec752 2541 mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
5d0f6131
VV
2542 &dma_addr, GFP_KERNEL);
2543 if (mem == NULL) {
2544 res = -ENOMEM;
2545 goto out;
2546 }
2547 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
2548 res = nvme_trans_status_code(hdr, nvme_sc);
2549 if (res)
2550 goto out_dma;
2551 if (nvme_sc) {
2552 res = nvme_sc;
2553 goto out_dma;
2554 }
2555 id_ctrl = mem;
8741ee4c 2556 ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
5d0f6131
VV
2557 resp_size = ll_length + LUN_DATA_HEADER_SIZE;
2558
2559 if (alloc_len < resp_size) {
2560 res = nvme_trans_completion(hdr,
2561 SAM_STAT_CHECK_CONDITION,
2562 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2563 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2564 goto out_dma;
2565 }
2566
03ea83e9 2567 response = kzalloc(resp_size, GFP_KERNEL);
5d0f6131
VV
2568 if (response == NULL) {
2569 res = -ENOMEM;
2570 goto out_dma;
2571 }
5d0f6131
VV
2572
2573 /* The first LUN ID will always be 0 per the SAM spec */
8741ee4c 2574 for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
5d0f6131
VV
2575 /*
2576 * Set the LUN Id and then increment to the next LUN
2577 * location in the parameter data.
2578 */
8741ee4c 2579 __be64 tmp_id = cpu_to_be64(lun_id);
5d0f6131
VV
2580 memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
2581 lun_id_offset += LUN_ENTRY_SIZE;
2582 }
2583 tmp_len = cpu_to_be32(ll_length);
2584 memcpy(response, &tmp_len, sizeof(u32));
2585 }
2586
2587 xfer_len = min(alloc_len, resp_size);
2588 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2589
2590 kfree(response);
2591 out_dma:
e75ec752 2592 dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
5d0f6131
VV
2593 out:
2594 return res;
2595}
2596
2597static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2598 u8 *cmd)
2599{
2600 int res = SNTI_TRANSLATION_SUCCESS;
2601 u8 alloc_len, xfer_len, resp_size;
2602 u8 desc_format;
2603 u8 *response;
2604
2605 alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
2606 desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
2607 desc_format &= REQUEST_SENSE_DESC_MASK;
2608
2609 resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
2610 (FIXED_FMT_SENSE_DATA_SIZE));
03ea83e9 2611 response = kzalloc(resp_size, GFP_KERNEL);
5d0f6131
VV
2612 if (response == NULL) {
2613 res = -ENOMEM;
2614 goto out;
2615 }
5d0f6131
VV
2616
2617 if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
2618 /* Descriptor Format Sense Data */
2619 response[0] = DESC_FORMAT_SENSE_DATA;
2620 response[1] = NO_SENSE;
2621 /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
2622 response[2] = SCSI_ASC_NO_SENSE;
2623 response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2624 /* SDAT_OVFL = 0 | Additional Sense Length = 0 */
2625 } else {
2626 /* Fixed Format Sense Data */
2627 response[0] = FIXED_SENSE_DATA;
2628 /* Byte 1 = Obsolete */
2629 response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
2630 /* Bytes 3-6 - Information - set to zero */
2631 response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
2632 /* Bytes 8-11 - Cmd Specific Information - set to zero */
2633 response[12] = SCSI_ASC_NO_SENSE;
2634 response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2635 /* Byte 14 = Field Replaceable Unit Code = 0 */
2636 /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
2637 }
2638
2639 xfer_len = min(alloc_len, resp_size);
2640 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2641
2642 kfree(response);
2643 out:
2644 return res;
2645}
2646
2647static int nvme_trans_security_protocol(struct nvme_ns *ns,
2648 struct sg_io_hdr *hdr,
2649 u8 *cmd)
2650{
2651 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2652 ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
2653 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2654}
2655
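/*
 * Translate START STOP UNIT: IMMED=1 is rejected; otherwise optionally
 * flush the namespace first and then request the corresponding power
 * state transition.
 */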
2656static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2657 u8 *cmd)
2658{
2659 int res = SNTI_TRANSLATION_SUCCESS;
2660 int nvme_sc;
14385de1 2661 struct nvme_command c;
5d0f6131
VV
2662 u8 immed, pcmod, pc, no_flush, start;
2663
2664 immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
2665 pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
2666 pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
2667 no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
2668 start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);
2669
2670 immed &= START_STOP_UNIT_CDB_IMMED_MASK;
2671 pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
2672 pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
2673 no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
2674 start &= START_STOP_UNIT_CDB_START_MASK;
2675
2676 if (immed != 0) {
2677 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2678 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2679 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2680 } else {
2681 if (no_flush == 0) {
2682	/* Issue an NVMe FLUSH command prior to START STOP UNIT */
14385de1
KB
2683 memset(&c, 0, sizeof(c));
2684 c.common.opcode = nvme_cmd_flush;
2685 c.common.nsid = cpu_to_le32(ns->ns_id);
2686
f705f837 2687 nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
5d0f6131
VV
2688 res = nvme_trans_status_code(hdr, nvme_sc);
2689 if (res)
2690 goto out;
2691 if (nvme_sc) {
2692 res = nvme_sc;
2693 goto out;
2694 }
2695 }
2696	/* Set up the expected power state transition */
2697 res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
2698 }
2699
2700 out:
2701 return res;
2702}
2703
2704static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
2705 struct sg_io_hdr *hdr, u8 *cmd)
2706{
2707 int res = SNTI_TRANSLATION_SUCCESS;
2708 int nvme_sc;
14385de1 2709 struct nvme_command c;
14385de1
KB
2710
2711 memset(&c, 0, sizeof(c));
2712 c.common.opcode = nvme_cmd_flush;
2713 c.common.nsid = cpu_to_le32(ns->ns_id);
2714
f705f837 2715 nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
5d0f6131
VV
2716 res = nvme_trans_status_code(hdr, nvme_sc);
2717 if (res)
2718 goto out;
2719 if (nvme_sc)
2720 res = nvme_sc;
2721
2722 out:
2723 return res;
2724}
2725
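/*
 * Translate FORMAT UNIT: parse the parameter list header for protection
 * settings, activate any previously downloaded firmware, resolve the
 * block size and count, and finally send Format NVM.
 */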
2726static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2727 u8 *cmd)
2728{
2729 int res = SNTI_TRANSLATION_SUCCESS;
2730 u8 parm_hdr_len = 0;
2731 u8 nvme_pf_code = 0;
2732 u8 format_prot_info, long_list, format_data;
2733
2734 format_prot_info = GET_U8_FROM_CDB(cmd,
2735 FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
2736 long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
2737 format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);
2738
2739 format_prot_info = (format_prot_info &
2740 FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
2741 FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
2742 long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
2743 format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;
2744
2745 if (format_data != 0) {
2746 if (format_prot_info != 0) {
2747 if (long_list == 0)
2748 parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
2749 else
2750 parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
2751 }
2752 } else if (format_data == 0 && format_prot_info != 0) {
2753 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2754 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2755 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2756 goto out;
2757 }
2758
2759 /* Get parm header from data-in/out buffer */
2760 /*
2761 * According to the translation spec, the only fields in the parameter
2762 * list we are concerned with are in the header. So allocate only that.
2763 */
2764 if (parm_hdr_len > 0) {
2765 res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
2766 format_prot_info, &nvme_pf_code);
2767 if (res != SNTI_TRANSLATION_SUCCESS)
2768 goto out;
2769 }
2770
2771 /* Attempt to activate any previously downloaded firmware image */
2772 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);
2773
2774 /* Determine Block size and count and send format command */
2775 res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
2776 if (res != SNTI_TRANSLATION_SUCCESS)
2777 goto out;
2778
2779 res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
2780
2781 out:
2782 return res;
2783}
2784
2785static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
2786 struct sg_io_hdr *hdr,
2787 u8 *cmd)
2788{
2789 int res = SNTI_TRANSLATION_SUCCESS;
2790 struct nvme_dev *dev = ns->dev;
2791
2792 if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
2793 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2794 NOT_READY, SCSI_ASC_LUN_NOT_READY,
2795 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2796 else
2797 res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
2798
2799 return res;
2800}
2801
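/*
 * Translate WRITE BUFFER into NVMe Firmware Image Download and/or
 * Firmware Activate, depending on the download/activate mode in the CDB.
 */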
2802static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2803 u8 *cmd)
2804{
2805 int res = SNTI_TRANSLATION_SUCCESS;
2806 u32 buffer_offset, parm_list_length;
2807 u8 buffer_id, mode;
2808
2809 parm_list_length =
2810 GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
2811 if (parm_list_length % BYTES_TO_DWORDS != 0) {
2812	/* NVMe expects the firmware file to be a whole number of dwords */
2813 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2814 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2815 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2816 goto out;
2817 }
2818 buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
2819 if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
2820 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2821 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2822 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2823 goto out;
2824 }
2825 mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
2826 WRITE_BUFFER_CDB_MODE_MASK;
2827 buffer_offset =
2828 GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);
2829
2830 switch (mode) {
2831 case DOWNLOAD_SAVE_ACTIVATE:
2832 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
2833 parm_list_length, buffer_offset,
2834 buffer_id);
2835 if (res != SNTI_TRANSLATION_SUCCESS)
2836 goto out;
2837 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
2838 parm_list_length, buffer_offset,
2839 buffer_id);
2840 break;
2841 case DOWNLOAD_SAVE_DEFER_ACTIVATE:
2842 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
2843 parm_list_length, buffer_offset,
2844 buffer_id);
2845 break;
2846 case ACTIVATE_DEFERRED_MICROCODE:
2847 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
2848 parm_list_length, buffer_offset,
2849 buffer_id);
2850 break;
2851 default:
2852 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2853 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2854 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2855 break;
2856 }
2857
2858 out:
2859 return res;
2860}
2861
ec503733
KB
2862struct scsi_unmap_blk_desc {
2863 __be64 slba;
2864 __be32 nlb;
2865 u32 resv;
2866};
2867
2868struct scsi_unmap_parm_list {
2869 __be16 unmap_data_len;
2870 __be16 unmap_blk_desc_data_len;
2871 u32 resv;
2872 struct scsi_unmap_blk_desc desc[0];
2873};
2874
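/*
 * Translate UNMAP into an NVMe Dataset Management (deallocate) command,
 * converting each unmap block descriptor into one DSM range.
 */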
2875static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2876 u8 *cmd)
2877{
2878 struct nvme_dev *dev = ns->dev;
2879 struct scsi_unmap_parm_list *plist;
2880 struct nvme_dsm_range *range;
ec503733
KB
2881 struct nvme_command c;
2882 int i, nvme_sc, res = -ENOMEM;
2883 u16 ndesc, list_len;
2884 dma_addr_t dma_addr;
2885
2886 list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET);
2887 if (!list_len)
2888 return -EINVAL;
2889
2890 plist = kmalloc(list_len, GFP_KERNEL);
2891 if (!plist)
2892 return -ENOMEM;
2893
2894 res = nvme_trans_copy_from_user(hdr, plist, list_len);
2895 if (res != SNTI_TRANSLATION_SUCCESS)
2896 goto out;
2897
2898 ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
2899 if (!ndesc || ndesc > 256) {
2900 res = -EINVAL;
2901 goto out;
2902 }
2903
e75ec752 2904 range = dma_alloc_coherent(dev->dev, ndesc * sizeof(*range),
ec503733
KB
2905 &dma_addr, GFP_KERNEL);
2906 if (!range)
2907 goto out;
2908
2909 for (i = 0; i < ndesc; i++) {
2910 range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
2911 range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
2912 range[i].cattr = 0;
2913 }
2914
2915 memset(&c, 0, sizeof(c));
2916 c.dsm.opcode = nvme_cmd_dsm;
2917 c.dsm.nsid = cpu_to_le32(ns->ns_id);
2918 c.dsm.prp1 = cpu_to_le64(dma_addr);
2919 c.dsm.nr = cpu_to_le32(ndesc - 1);
2920 c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
2921
f705f837 2922 nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
ec503733
KB
2923 res = nvme_trans_status_code(hdr, nvme_sc);
2924
e75ec752 2925 dma_free_coherent(dev->dev, ndesc * sizeof(*range), range, dma_addr);
ec503733
KB
2926 out:
2927 kfree(plist);
2928 return res;
2929}
2930
5d0f6131
VV
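/*
 * Top-level SCSI-to-NVMe dispatcher: copy the CDB from userspace, prime
 * the header with good status, then dispatch on the opcode to the
 * matching translation routine.  Unsupported opcodes return ILLEGAL
 * COMMAND sense data.
 */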
2931static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
2932{
2933 u8 cmd[BLK_MAX_CDB];
2934 int retcode;
2935 unsigned int opcode;
2936
2937 if (hdr->cmdp == NULL)
2938 return -EMSGSIZE;
2939 if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
2940 return -EFAULT;
2941
695a4fe7
KB
2942 /*
2943 * Prime the hdr with good status for scsi commands that don't require
2944 * an nvme command for translation.
2945 */
2946 retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
2947 if (retcode)
2948 return retcode;
2949
5d0f6131
VV
2950 opcode = cmd[0];
2951
2952 switch (opcode) {
2953 case READ_6:
2954 case READ_10:
2955 case READ_12:
2956 case READ_16:
2957 retcode = nvme_trans_io(ns, hdr, 0, cmd);
2958 break;
2959 case WRITE_6:
2960 case WRITE_10:
2961 case WRITE_12:
2962 case WRITE_16:
2963 retcode = nvme_trans_io(ns, hdr, 1, cmd);
2964 break;
2965 case INQUIRY:
2966 retcode = nvme_trans_inquiry(ns, hdr, cmd);
2967 break;
2968 case LOG_SENSE:
2969 retcode = nvme_trans_log_sense(ns, hdr, cmd);
2970 break;
2971 case MODE_SELECT:
2972 case MODE_SELECT_10:
2973 retcode = nvme_trans_mode_select(ns, hdr, cmd);
2974 break;
2975 case MODE_SENSE:
2976 case MODE_SENSE_10:
2977 retcode = nvme_trans_mode_sense(ns, hdr, cmd);
2978 break;
2979 case READ_CAPACITY:
2980 retcode = nvme_trans_read_capacity(ns, hdr, cmd);
2981 break;
eb846d9f 2982 case SERVICE_ACTION_IN_16:
5d0f6131
VV
2983 if (IS_READ_CAP_16(cmd))
2984 retcode = nvme_trans_read_capacity(ns, hdr, cmd);
2985 else
2986 goto out;
2987 break;
2988 case REPORT_LUNS:
2989 retcode = nvme_trans_report_luns(ns, hdr, cmd);
2990 break;
2991 case REQUEST_SENSE:
2992 retcode = nvme_trans_request_sense(ns, hdr, cmd);
2993 break;
2994 case SECURITY_PROTOCOL_IN:
2995 case SECURITY_PROTOCOL_OUT:
2996 retcode = nvme_trans_security_protocol(ns, hdr, cmd);
2997 break;
2998 case START_STOP:
2999 retcode = nvme_trans_start_stop(ns, hdr, cmd);
3000 break;
3001 case SYNCHRONIZE_CACHE:
3002 retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
3003 break;
3004 case FORMAT_UNIT:
3005 retcode = nvme_trans_format_unit(ns, hdr, cmd);
3006 break;
3007 case TEST_UNIT_READY:
3008 retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
3009 break;
3010 case WRITE_BUFFER:
3011 retcode = nvme_trans_write_buffer(ns, hdr, cmd);
3012 break;
ec503733
KB
3013 case UNMAP:
3014 retcode = nvme_trans_unmap(ns, hdr, cmd);
3015 break;
5d0f6131
VV
3016 default:
3017 out:
3018 retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
3019 ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
3020 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
3021 break;
3022 }
3023 return retcode;
3024}
3025
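/*
 * Entry point for the SG_IO ioctl on an NVMe block device: validate the
 * user's sg_io_hdr, translate and execute the SCSI command, and copy the
 * updated header back to userspace.
 *
 * A minimal userspace sketch of issuing a TEST UNIT READY through this
 * path (illustrative only, not part of the driver; assumes /dev/nvme0n1
 * exists and the caller has CAP_SYS_ADMIN):
 *
 *	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	unsigned char sense[32];
 *	struct sg_io_hdr io = { 0 };
 *	int fd = open("/dev/nvme0n1", O_RDWR);
 *
 *	io.interface_id = 'S';
 *	io.cmd_len = sizeof(cdb);
 *	io.cmdp = cdb;
 *	io.dxfer_direction = SG_DXFER_NONE;
 *	io.mx_sb_len = sizeof(sense);
 *	io.sbp = sense;
 *	if (ioctl(fd, SG_IO, &io) < 0)
 *		perror("SG_IO");
 */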
3026int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
3027{
3028 struct sg_io_hdr hdr;
3029 int retcode;
3030
3031 if (!capable(CAP_SYS_ADMIN))
3032 return -EACCES;
3033 if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
3034 return -EFAULT;
3035 if (hdr.interface_id != 'S')
3036 return -EINVAL;
3037 if (hdr.cmd_len > BLK_MAX_CDB)
3038 return -EINVAL;
3039
3040 retcode = nvme_scsi_translate(ns, &hdr);
3041 if (retcode < 0)
3042 return retcode;
3043 if (retcode > 0)
3044 retcode = SNTI_TRANSLATION_SUCCESS;
8741ee4c 3045 if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
5d0f6131
VV
3046 return -EFAULT;
3047
3048 return retcode;
3049}
3050
3051int nvme_sg_get_version_num(int __user *ip)
3052{
3053 return put_user(sg_version_num, ip);
3054}