/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

/*
 * Refer to the SCSI-NVMe Translation spec for details on how
 * each command is translated.
 */
#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/scsi.h>
static int sg_version_num = 30534;	/* 2 digits for each component */
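/* i.e. two digits per component: 30534 decodes as sg interface version 3.5.34 */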
#define SNTI_TRANSLATION_SUCCESS	0
#define SNTI_INTERNAL_ERROR		1

#define VPD_SUPPORTED_PAGES		0x00
#define VPD_SERIAL_NUMBER		0x80
#define VPD_DEVICE_IDENTIFIERS		0x83
#define VPD_EXTENDED_INQUIRY		0x86
#define VPD_BLOCK_DEV_CHARACTERISTICS	0xB1

#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET		6
#define REPORT_LUNS_SR_OFFSET				2
#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET		10
#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET		4
#define REQUEST_SENSE_DESC_OFFSET			1
#define REQUEST_SENSE_DESC_MASK				0x01
#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE		1
#define INQUIRY_EVPD_BYTE_OFFSET			1
#define INQUIRY_PAGE_CODE_BYTE_OFFSET			2
#define INQUIRY_EVPD_BIT_MASK				1
#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET		3
#define START_STOP_UNIT_CDB_IMMED_OFFSET		1
#define START_STOP_UNIT_CDB_IMMED_MASK			0x1
#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET	3
#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK		0xF
#define START_STOP_UNIT_CDB_POWER_COND_OFFSET		4
#define START_STOP_UNIT_CDB_POWER_COND_MASK		0xF0
#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET		4
#define START_STOP_UNIT_CDB_NO_FLUSH_MASK		0x4
#define START_STOP_UNIT_CDB_START_OFFSET		4
#define START_STOP_UNIT_CDB_START_MASK			0x1
#define WRITE_BUFFER_CDB_MODE_OFFSET			1
#define WRITE_BUFFER_CDB_MODE_MASK			0x1F
#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET		2
#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET		3
#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET	6
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET		1
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK		0xC0
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT		6
#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET		1
#define FORMAT_UNIT_CDB_LONG_LIST_MASK			0x20
#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET		1
#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK		0x10
#define FORMAT_UNIT_SHORT_PARM_LIST_LEN			4
#define FORMAT_UNIT_LONG_PARM_LIST_LEN			8
#define FORMAT_UNIT_PROT_INT_OFFSET			3
#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET		0
#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK		0x07
#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET		7

#define NIBBLE_SHIFT				4
#define FIXED_SENSE_DATA			0x70
#define DESC_FORMAT_SENSE_DATA			0x72
#define FIXED_SENSE_DATA_ADD_LENGTH		10
#define LUN_ENTRY_SIZE				8
#define LUN_DATA_HEADER_SIZE			8
#define ALL_LUNS_RETURNED			0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED		0x01
#define RESTRICTED_LUNS_RETURNED		0x00
#define NVME_POWER_STATE_START_VALID		0x00
#define NVME_POWER_STATE_ACTIVE			0x01
#define NVME_POWER_STATE_IDLE			0x02
#define NVME_POWER_STATE_STANDBY		0x03
#define NVME_POWER_STATE_LU_CONTROL		0x07
#define POWER_STATE_0				0
#define POWER_STATE_1				1
#define POWER_STATE_2				2
#define POWER_STATE_3				3
#define DOWNLOAD_SAVE_ACTIVATE			0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE		0x0E
#define ACTIVATE_DEFERRED_MICROCODE		0x0F
#define FORMAT_UNIT_IMMED_MASK			0x2
#define FORMAT_UNIT_IMMED_OFFSET		1
#define KELVIN_TEMP_FACTOR			273
#define FIXED_FMT_SENSE_DATA_SIZE		18
#define DESC_FMT_SENSE_DATA_SIZE		8

/* SCSI/NVMe defines and bit masks */
#define INQ_STANDARD_INQUIRY_PAGE		0x00
#define INQ_SUPPORTED_VPD_PAGES_PAGE		0x00
#define INQ_UNIT_SERIAL_NUMBER_PAGE		0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE		0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE		0x86
#define INQ_BDEV_CHARACTERISTICS_PAGE		0xB1
#define INQ_SERIAL_NUMBER_LENGTH		0x14
#define INQ_NUM_SUPPORTED_VPD_PAGES		5
#define VERSION_SPC_4				0x06
#define ACA_UNSUPPORTED				0
#define STANDARD_INQUIRY_LENGTH			36
#define ADDITIONAL_STD_INQ_LENGTH		31
#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH	0x3C
#define RESERVED_FIELD				0

/* SCSI READ/WRITE Defines */
#define IO_CDB_WP_MASK				0xE0
#define IO_CDB_WP_SHIFT				5
#define IO_CDB_FUA_MASK				0x8
#define IO_6_CDB_LBA_OFFSET			0
#define IO_6_CDB_LBA_MASK			0x001FFFFF
#define IO_6_CDB_TX_LEN_OFFSET			4
#define IO_6_DEFAULT_TX_LEN			256
#define IO_10_CDB_LBA_OFFSET			2
#define IO_10_CDB_TX_LEN_OFFSET			7
#define IO_10_CDB_WP_OFFSET			1
#define IO_10_CDB_FUA_OFFSET			1
#define IO_12_CDB_LBA_OFFSET			2
#define IO_12_CDB_TX_LEN_OFFSET			6
#define IO_12_CDB_WP_OFFSET			1
#define IO_12_CDB_FUA_OFFSET			1
#define IO_16_CDB_FUA_OFFSET			1
#define IO_16_CDB_WP_OFFSET			1
#define IO_16_CDB_LBA_OFFSET			2
#define IO_16_CDB_TX_LEN_OFFSET			10

/* Mode Sense/Select defines */
#define MODE_PAGE_INFO_EXCEP			0x1C
#define MODE_PAGE_CACHING			0x08
#define MODE_PAGE_CONTROL			0x0A
#define MODE_PAGE_POWER_CONDITION		0x1A
#define MODE_PAGE_RETURN_ALL			0x3F
#define MODE_PAGE_BLK_DES_LEN			0x08
#define MODE_PAGE_LLBAA_BLK_DES_LEN		0x10
#define MODE_PAGE_CACHING_LEN			0x14
#define MODE_PAGE_CONTROL_LEN			0x0C
#define MODE_PAGE_POW_CND_LEN			0x28
#define MODE_PAGE_INF_EXC_LEN			0x0C
#define MODE_PAGE_ALL_LEN			0x54
#define MODE_SENSE6_MPH_SIZE			4
#define MODE_SENSE6_ALLOC_LEN_OFFSET		4
#define MODE_SENSE_PAGE_CONTROL_OFFSET		2
#define MODE_SENSE_PAGE_CONTROL_MASK		0xC0
#define MODE_SENSE_PAGE_CODE_OFFSET		2
#define MODE_SENSE_PAGE_CODE_MASK		0x3F
#define MODE_SENSE_LLBAA_OFFSET			1
#define MODE_SENSE_LLBAA_MASK			0x10
#define MODE_SENSE_LLBAA_SHIFT			4
#define MODE_SENSE_DBD_OFFSET			1
#define MODE_SENSE_DBD_MASK			8
#define MODE_SENSE_DBD_SHIFT			3
#define MODE_SENSE10_MPH_SIZE			8
#define MODE_SENSE10_ALLOC_LEN_OFFSET		7
#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET	1
#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET	1
#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET	4
#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET	7
#define MODE_SELECT_CDB_PAGE_FORMAT_MASK	0x10
#define MODE_SELECT_CDB_SAVE_PAGES_MASK		0x1
#define MODE_SELECT_6_BD_OFFSET			3
#define MODE_SELECT_10_BD_OFFSET		6
#define MODE_SELECT_10_LLBAA_OFFSET		4
#define MODE_SELECT_10_LLBAA_MASK		1
#define MODE_SELECT_6_MPH_SIZE			4
#define MODE_SELECT_10_MPH_SIZE			8
#define CACHING_MODE_PAGE_WCE_MASK		0x04
#define MODE_SENSE_BLK_DESC_ENABLED		0
#define MODE_SENSE_BLK_DESC_COUNT		1
#define MODE_SELECT_PAGE_CODE_MASK		0x3F
#define SHORT_DESC_BLOCK			8
#define LONG_DESC_BLOCK				16
#define MODE_PAGE_POW_CND_LEN_FIELD		0x26
#define MODE_PAGE_INF_EXC_LEN_FIELD		0x0A
#define MODE_PAGE_CACHING_LEN_FIELD		0x12
#define MODE_PAGE_CONTROL_LEN_FIELD		0x0A
#define MODE_SENSE_PC_CURRENT_VALUES		0

/* Log Sense defines */
#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE	0x00
#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH	0x07
#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE	0x2F
#define LOG_PAGE_TEMPERATURE_PAGE		0x0D
#define LOG_SENSE_CDB_SP_OFFSET			1
#define LOG_SENSE_CDB_SP_NOT_ENABLED		0
#define LOG_SENSE_CDB_PC_OFFSET			2
#define LOG_SENSE_CDB_PC_MASK			0xC0
#define LOG_SENSE_CDB_PC_SHIFT			6
#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES	1
#define LOG_SENSE_CDB_PAGE_CODE_MASK		0x3F
#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET	7
#define REMAINING_INFO_EXCP_PAGE_LENGTH		0x8
#define LOG_INFO_EXCP_PAGE_LENGTH		0xC
#define REMAINING_TEMP_PAGE_LENGTH		0xC
#define LOG_TEMP_PAGE_LENGTH			0x10
#define LOG_TEMP_UNKNOWN			0xFF
#define SUPPORTED_LOG_PAGES_PAGE_LENGTH		0x3

/* Read Capacity defines */
#define READ_CAP_10_RESP_SIZE			8
#define READ_CAP_16_RESP_SIZE			32

/* NVMe Namespace and Command Defines */
#define BYTES_TO_DWORDS				4
#define NVME_MAX_FIRMWARE_SLOT			7

/* Report LUNs defines */
#define REPORT_LUNS_FIRST_LUN_OFFSET		8

/* SCSI ADDITIONAL SENSE Codes */

#define SCSI_ASC_NO_SENSE			0x00
#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT	0x03
#define SCSI_ASC_LUN_NOT_READY			0x04
#define SCSI_ASC_WARNING			0x0B
#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED	0x10
#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED	0x10
#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED	0x10
#define SCSI_ASC_UNRECOVERED_READ_ERROR		0x11
#define SCSI_ASC_MISCOMPARE_DURING_VERIFY	0x1D
#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID	0x20
#define SCSI_ASC_ILLEGAL_COMMAND		0x20
#define SCSI_ASC_ILLEGAL_BLOCK			0x21
#define SCSI_ASC_INVALID_CDB			0x24
#define SCSI_ASC_INVALID_LUN			0x25
#define SCSI_ASC_INVALID_PARAMETER		0x26
#define SCSI_ASC_FORMAT_COMMAND_FAILED		0x31
#define SCSI_ASC_INTERNAL_TARGET_FAILURE	0x44

/* SCSI ADDITIONAL SENSE Code Qualifiers */

#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE		0x00
#define SCSI_ASCQ_FORMAT_COMMAND_FAILED		0x01
#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED	0x01
#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED	0x02
#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED	0x03
#define SCSI_ASCQ_FORMAT_IN_PROGRESS		0x04
#define SCSI_ASCQ_POWER_LOSS_EXPECTED		0x08
#define SCSI_ASCQ_INVALID_LUN_ID		0x09

/*
 * DEVICE_SPECIFIC_PARAMETER in mode parameter header (see sbc2r16) to
 * enable DPOFUA support type 0x10 value.
 */
#define DEVICE_SPECIFIC_PARAMETER	0
#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)
/* MACROs to extract information from CDBs */

#define GET_OPCODE(cdb)		cdb[0]

#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0)

#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0))

#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \
					(cdb[index + 1] << 8) | \
					(cdb[index + 2] << 0))

#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \
					(cdb[index + 1] << 16) | \
					(cdb[index + 2] << 8) | \
					(cdb[index + 3] << 0))

#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \
					(((u64)cdb[index + 1]) << 48) | \
					(((u64)cdb[index + 2]) << 40) | \
					(((u64)cdb[index + 3]) << 32) | \
					(((u64)cdb[index + 4]) << 24) | \
					(((u64)cdb[index + 5]) << 16) | \
					(((u64)cdb[index + 6]) << 8) | \
					(((u64)cdb[index + 7]) << 0))
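/*
 * Illustrative example: SCSI CDB multi-byte fields are big-endian, so for a
 * READ(10) CDB of 28 00 00 12 34 56 00 00 08 00, GET_U32_FROM_CDB(cdb, 2)
 * yields LBA 0x00123456 and GET_U16_FROM_CDB(cdb, 7) yields a transfer
 * length of 8 blocks.
 */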
/* Inquiry Helper Macros */
#define GET_INQ_EVPD_BIT(cdb) \
	((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \
	INQUIRY_EVPD_BIT_MASK) ? 1 : 0)

#define GET_INQ_PAGE_CODE(cdb) \
	(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))

#define GET_INQ_ALLOC_LENGTH(cdb) \
	(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))

/* Report LUNs Helper Macros */
#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \
	(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))

/* Read Capacity Helper Macros */
#define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \
	(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))

#define IS_READ_CAP_16(cdb) \
	((cdb[0] == SERVICE_ACTION_IN_16 && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)

/* Request Sense Helper Macros */
#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
	(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))

/* Mode Sense Helper Macros */
#define GET_MODE_SENSE_DBD(cdb) \
	((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \
	MODE_SENSE_DBD_SHIFT)

#define GET_MODE_SENSE_LLBAA(cdb) \
	((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \
	MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)

#define GET_MODE_SENSE_MPH_SIZE(cdb10) \
	(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)
/* Struct to gather data that needs to be extracted from a SCSI CDB.
   Not conforming to any particular CDB variant, but compatible with all. */

struct nvme_trans_io_cdb {
	u8 fua;
	u8 prot_info;
	u64 lba;
	u32 xfer_len;
};
/* Internal Helper Functions */

/* Copy data to userspace memory */

static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
								unsigned long n)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	unsigned long not_copied;
	int i;
	void *index = from;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			not_copied = copy_to_user(sgl.iov_base, index,
								xfer_len);
			if (not_copied) {
				res = -EFAULT;
				break;
			}
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return res;
	}
	not_copied = copy_to_user(hdr->dxferp, from, n);
	if (not_copied)
		res = -EFAULT;
	return res;
}
/* Copy data from userspace memory */

static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
								unsigned long n)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	unsigned long not_copied;
	int i;
	void *index = to;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			not_copied = copy_from_user(index, sgl.iov_base,
								xfer_len);
			if (not_copied) {
				res = -EFAULT;
				break;
			}
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return res;
	}
	not_copied = copy_from_user(to, hdr->dxferp, n);
	if (not_copied)
		res = -EFAULT;
	return res;
}
/* Status/Sense Buffer Writeback */

static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
				u8 asc, u8 ascq)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 xfer_len;
	u8 resp[DESC_FMT_SENSE_DATA_SIZE];

	if (scsi_status_is_good(status)) {
		hdr->status = SAM_STAT_GOOD;
		hdr->masked_status = GOOD;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;
		hdr->sb_len_wr = 0;
	} else {
		hdr->status = status;
		hdr->masked_status = status >> 1;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;

		memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
		resp[0] = DESC_FORMAT_SENSE_DATA;
		resp[1] = sense_key;
		resp[2] = asc;
		resp[3] = ascq;

		xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
		hdr->sb_len_wr = xfer_len;
		if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
			res = -EFAULT;
	}

	return res;
}
static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
{
	u8 status, sense_key, asc, ascq;
	int res = SNTI_TRANSLATION_SUCCESS;

	/* For non-nvme (Linux) errors, simply return the error code */
	if (nvme_sc < 0)
		return nvme_sc;

	/* Mask DNR, More, and reserved fields */
	nvme_sc &= 0x7FF;

	switch (nvme_sc) {
	/* Generic Command Status */
	case NVME_SC_SUCCESS:
		status = SAM_STAT_GOOD;
		sense_key = NO_SENSE;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_OPCODE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_COMMAND;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_FIELD:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_DATA_XFER_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_POWER_LOSS:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_WARNING;
		ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
		break;
	case NVME_SC_INTERNAL:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = HARDWARE_ERROR;
		asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_REQ:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_QUEUE:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_FAIL:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_MISSING:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_NS:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;
	case NVME_SC_LBA_RANGE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_BLOCK;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_CAP_EXCEEDED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_NS_NOT_READY:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = NOT_READY;
		asc = SCSI_ASC_LUN_NOT_READY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Command Specific Status */
	case NVME_SC_INVALID_FORMAT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
		ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
		break;
	case NVME_SC_BAD_ATTRIBUTES:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Media Errors */
	case NVME_SC_WRITE_FAULT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_READ_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_GUARD_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
		break;
	case NVME_SC_APPTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
		break;
	case NVME_SC_REFTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
		break;
	case NVME_SC_COMPARE_FAILED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MISCOMPARE;
		asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ACCESS_DENIED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;

	/* Unspecified/Default */
	case NVME_SC_CMDID_CONFLICT:
	case NVME_SC_CMD_SEQ_ERROR:
	case NVME_SC_CQ_INVALID:
	case NVME_SC_QID_INVALID:
	case NVME_SC_QUEUE_SIZE:
	case NVME_SC_ABORT_LIMIT:
	case NVME_SC_ABORT_MISSING:
	case NVME_SC_ASYNC_LIMIT:
	case NVME_SC_FIRMWARE_SLOT:
	case NVME_SC_FIRMWARE_IMAGE:
	case NVME_SC_INVALID_VECTOR:
	case NVME_SC_INVALID_LOG_PAGE:
	default:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	}

	res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
	return res;
}
/* INQUIRY Helper Functions */

static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	int xfer_len;
	u8 protect;
	u8 resp_data_format = 0x02;
	u8 cmdque = 0x01 << 1;
	u8 fw_offset = sizeof(dev->firmware_rev);

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
				&dma_addr, GFP_KERNEL);

	/* nvme ns identify - use DPS value for PROTECT field */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	/*
	 * If nvme_sc was -ve, res will be -ve here.
	 * If nvme_sc was +ve, the status will have been translated, and res
	 * can only be 0 or -ve.
	 *   - If 0 && nvme_sc > 0, then go into next if where res gets nvme_sc
	 *   - If -ve, return because it's a Linux error.
	 */
	id_ns = mem;
	(id_ns->dps) ? (protect = 0x01) : (protect = 0);

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[2] = VERSION_SPC_4;
	inq_response[3] = resp_data_format;	/*normaca=0 | hisup=0 */
	inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
	inq_response[5] = protect;	/* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
	inq_response[7] = cmdque;	/* wbus16=0 | sync=0 | vs=0 */
	strncpy(&inq_response[8], "NVMe    ", 8);
	strncpy(&inq_response[16], dev->model, 16);

	while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
		fw_offset--;
	strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
	return res;
}
static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;   /* Page Code */
	inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;    /* Page Length */
	inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
	inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	return res;
}
static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE;	/* Page Code */
	inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;	/* Page Length */
	strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	return res;
}
static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *inq_response, int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	int xfer_len;
	u8 ieee[4];
	__be32 tmp_id = cpu_to_be32(ns->ns_id);

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
					&dma_addr, GFP_KERNEL);

	/* nvme controller identify */
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	id_ctrl = mem;

	/* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
	ieee[0] = id_ctrl->ieee[0] << 4;
	ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
	ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
	ieee[3] = id_ctrl->ieee[2] >> 4;
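	/*
	 * Illustrative example: if the controller reports IEEE OUI bytes
	 * {0xEF, 0xCD, 0xAB} (OUI 0xABCDEF, least significant byte first),
	 * the nibble-shifted ieee[] array makes byte 8 below 0x6A (NAA = 6h
	 * in the high nibble), with the OUI nibbles A-B-C-D-E-F laid out
	 * across bytes 8-11 of the designator.
	 */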
	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
	inq_response[3] = 20;      /* Page Length */
	/* Designation Descriptor start */
	inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
	inq_response[5] = 0x03;    /* PIV=0b | Asso=00b | Designator Type=3h */
	inq_response[6] = 0x00;    /* Rsvd */
	inq_response[7] = 16;      /* Designator Length */
	/* Designator start */
	inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
	inq_response[9] = ieee[2];  /* IEEE ID */
	inq_response[10] = ieee[1]; /* IEEE ID */
	inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */
	inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
	inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
	inq_response[14] = dev->serial[0];
	inq_response[15] = dev->serial[1];
	inq_response[16] = dev->model[0];
	inq_response[17] = dev->model[1];
	memcpy(&inq_response[18], &tmp_id, sizeof(u32));
	/* Last 2 bytes are zero */

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
	return res;
}
static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	struct nvme_id_ns *id_ns;
	int xfer_len;
	u8 microcode = 0x80;
	u8 spt;
	u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
	u8 grd_chk, app_chk, ref_chk, protect;
	u8 uask_sup = 0x20;
	u8 v_sup;
	u8 luiclr = 0x01;

	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		return -ENOMEM;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	id_ns = mem;
	spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
	(id_ns->dps) ? (protect = 0x01) : (protect = 0);
	grd_chk = protect << 2;
	app_chk = protect << 1;
	ref_chk = protect;

	/* nvme controller identify */
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	id_ctrl = mem;
	v_sup = id_ctrl->vwc;

	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
	inq_response[5] = uask_sup;
	inq_response[6] = v_sup;
	inq_response[7] = luiclr;

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
	kfree(inq_response);
	return res;
}
static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		return -ENOMEM;
	}

	inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = 0x00;    /* Medium Rotation Rate MSB */
	inq_response[5] = 0x01;    /* Medium Rotation Rate LSB */
	inq_response[6] = 0x00;    /* Form Factor */

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	kfree(inq_response);
	return res;
}
/* LOG SENSE Helper Functions */

static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;

	log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		return -ENOMEM;
	}

	log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
	log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;

	xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	kfree(log_response);
	return res;
}
static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
		struct sg_io_hdr *hdr, int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u8 temp_c;
	u16 temp_k;

	log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		return -ENOMEM;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
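	/*
	 * Illustrative: the SMART/health log is 512 bytes (128 dwords), so
	 * the 0's-based NUMD field is 127 (0x7F), making
	 * cdw10 = (127 << 16) | NVME_LOG_SMART = 0x007F0002.
	 */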
	res = nvme_submit_admin_cmd(dev, &c, NULL);
	if (res != NVME_SC_SUCCESS) {
		temp_c = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c = temp_k - KELVIN_TEMP_FACTOR;
	}
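	/*
	 * Example (illustrative): the SMART log reports a 16-bit
	 * little-endian temperature in Kelvin; raw bytes 0x3A 0x01 give
	 * temp_k = 0x013A = 314, i.e. 314 - 273 = 41 degrees C.
	 */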
	log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
	/* Informational Exceptions Log Parameter 1 Start */
	/* Parameter Code=0x0000 bytes 4,5 */
	log_response[6] = 0x23;	/* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
	log_response[7] = 0x04;	/* PARAMETER LENGTH */
	/* Add sense Code and qualifier = 0x00 each */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[10] = temp_c;

	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
			  mem, dma_addr);
	kfree(log_response);
	return res;
}
static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u32 feature_resp;
	u8 temp_c_cur, temp_c_thresh;
	u16 temp_k;

	log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		return -ENOMEM;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
	res = nvme_submit_admin_cmd(dev, &c, NULL);
	if (res != NVME_SC_SUCCESS) {
		temp_c_cur = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
	}

	/* Get Features for Temp Threshold */
	res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
								&feature_resp);
	if (res != NVME_SC_SUCCESS)
		temp_c_thresh = LOG_TEMP_UNKNOWN;
	else
		temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;

	log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
	/* Temperature Log Parameter 1 (Temperature) Start */
	/* Parameter Code = 0x0000 */
	log_response[6] = 0x01;		/* Format and Linking = 01b */
	log_response[7] = 0x02;		/* Parameter Length */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[9] = temp_c_cur;
	/* Temperature Log Parameter 2 (Reference Temperature) Start */
	log_response[11] = 0x01;	/* Parameter Code = 0x0001 */
	log_response[12] = 0x01;	/* Format and Linking = 01b */
	log_response[13] = 0x02;	/* Parameter Length */
	/* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
	log_response[15] = temp_c_thresh;

	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
			  mem, dma_addr);
	kfree(log_response);
	return res;
}
/* MODE SENSE Helper Functions */

static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
					u16 mode_data_length, u16 blk_desc_len)
{
	/* Quick check to make sure I don't stomp on my own memory... */
	if ((cdb10 && len < 8) || (!cdb10 && len < 4))
		return SNTI_INTERNAL_ERROR;

	if (cdb10) {
		resp[0] = (mode_data_length & 0xFF00) >> 8;
		resp[1] = (mode_data_length & 0x00FF);
		/* resp[2] and [3] are zero */
		resp[5] = RESERVED_FIELD;
		resp[6] = (blk_desc_len & 0xFF00) >> 8;
		resp[7] = (blk_desc_len & 0x00FF);
	} else {
		resp[0] = (mode_data_length & 0x00FF);
		/* resp[1] and [2] are zero */
		resp[3] = (blk_desc_len & 0x00FF);
	}

	return SNTI_TRANSLATION_SUCCESS;
}
static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *resp, int len, u8 llbaa)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 flbas;
	u32 lba_length;

	if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
		return SNTI_INTERNAL_ERROR;
	else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
		return SNTI_INTERNAL_ERROR;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	id_ns = mem;
	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));

	if (llbaa == 0) {
		__be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
		/* Byte 4 is reserved */
		__be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);

		memcpy(resp, &tmp_cap, sizeof(u32));
		memcpy(&resp[4], &tmp_len, sizeof(u32));
	} else {
		__be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
		__be32 tmp_len = cpu_to_be32(lba_length);

		memcpy(resp, &tmp_cap, sizeof(u64));
		/* Bytes 8, 9, 10, 11 are reserved */
		memcpy(&resp[12], &tmp_len, sizeof(u32));
	}

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
	return res;
}
static int nvme_trans_fill_control_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	if (len < MODE_PAGE_CONTROL_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_CONTROL;
	resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
	resp[2] = 0x0E;		/* TST=000b, TMF_ONLY=0, DPICZ=1,
				 * D_SENSE=1, GLTSD=1, RLEC=0 */
	resp[3] = 0x12;		/* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
	/* Byte 4:  VS=0, RAC=0, UA_INT=0, SWP=0 */
	resp[5] = 0x40;		/* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
	/* resp[6] and [7] are obsolete, thus zero */
	resp[8] = 0xFF;		/* Busy timeout period = 0xffff */
	resp[9] = 0xFF;
	/* Bytes 10,11: Extended selftest completion time = 0x0000 */

	return SNTI_TRANSLATION_SUCCESS;
}
static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *resp, int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	u32 feature_resp;
	u8 vwc;

	if (len < MODE_PAGE_CACHING_LEN)
		return SNTI_INTERNAL_ERROR;

	nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
								&feature_resp);
	res = nvme_trans_status_code(hdr, nvme_sc);

	vwc = feature_resp & 0x00000001;

	resp[0] = MODE_PAGE_CACHING;
	resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
	resp[2] = vwc << 2;	/* WCE bit */

	return res;
}

static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;

	if (len < MODE_PAGE_POW_CND_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_POWER_CONDITION;
	resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
	/* All other bytes are zero */

	return res;
}
static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;

	if (len < MODE_PAGE_INF_EXC_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_INFO_EXCEP;
	resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
	/* All other bytes are zero */

	return res;
}
static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *resp, int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 mode_pages_offset_1 = 0;
	u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;

	mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
	mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
	mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;

	res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
					MODE_PAGE_CACHING_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
					MODE_PAGE_CONTROL_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
					MODE_PAGE_POW_CND_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
					MODE_PAGE_INF_EXC_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

 out:
	return res;
}
static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
{
	if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
		/* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
		return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
	}

	return 0;
}
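/*
 * Example (illustrative): with block descriptors enabled (dbd = 0),
 * nvme_trans_get_blk_desc_len() returns 8 bytes for the single short
 * descriptor (llbaa = 0) or 16 bytes for the long one (llbaa = 1).
 */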
static int nvme_trans_mode_page_create(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *cmd,
					u16 alloc_len, u8 cdb10,
					int (*mode_page_fill_func)
					(struct nvme_ns *,
					struct sg_io_hdr *hdr, u8 *, int),
					u16 mode_pages_tot_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *response;
	u8 dbd, llbaa;
	u16 resp_size;
	int mph_size;
	u16 mode_pages_offset_1;
	u16 blk_desc_len, blk_desc_offset, mode_data_length;

	dbd = GET_MODE_SENSE_DBD(cmd);
	llbaa = GET_MODE_SENSE_LLBAA(cmd);
	mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
	blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);

	resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
	/* Refer spc4r34 Table 440 for calculation of Mode data Length field */
	mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
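	/*
	 * Worked example (illustrative): MODE SENSE(10) (cdb10 = 1) returning
	 * all pages (0x54 = 84 bytes) with one short block descriptor
	 * (8 bytes) gives mode_data_length = 3 + 3 + 8 + 84 = 98.
	 */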
	blk_desc_offset = mph_size;
	mode_pages_offset_1 = blk_desc_offset + blk_desc_len;

	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		return -ENOMEM;
	}

	res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
					llbaa, mode_data_length, blk_desc_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_free;
	if (blk_desc_len > 0) {
		res = nvme_trans_fill_blk_desc(ns, hdr,
					&response[blk_desc_offset],
					blk_desc_len, llbaa);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out_free;
	}
	res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
					mode_pages_tot_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_free;

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

 out_free:
	kfree(response);
	return res;
}
/* Read Capacity Helper Functions */

static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
								u8 cdb16)
{
	u8 flbas;
	u32 lba_length;
	u64 rlba;
	u8 prot_en;
	u8 p_type_lut[4] = {0, 0, 1, 2};
	__be64 tmp_rlba;
	__be32 tmp_rlba_32;
	__be32 tmp_len;

	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));
	rlba = le64_to_cpup(&id_ns->nsze) - 1;
	(id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0);

	if (!cdb16) {
		if (rlba > 0xFFFFFFFF)
			rlba = 0xFFFFFFFF;
		tmp_rlba_32 = cpu_to_be32(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba_32, sizeof(u32));
		memcpy(&response[4], &tmp_len, sizeof(u32));
	} else {
		tmp_rlba = cpu_to_be64(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba, sizeof(u64));
		memcpy(&response[8], &tmp_len, sizeof(u32));
		response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
		/* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
		/* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
		/* Bytes 16-31 - Reserved */
	}
}
/* Start Stop Unit Helper Functions */

static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
						u8 pc, u8 pcmod, u8 start)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	int lowest_pow_st;	/* max npss = lowest power consumption */
	unsigned ps_desired = 0;

	/* NVMe Controller Identify */
	mem = dma_alloc_coherent(&dev->pci_dev->dev,
				sizeof(struct nvme_id_ctrl),
				&dma_addr, GFP_KERNEL);
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	id_ctrl = mem;
	lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));

	switch (pc) {
	case NVME_POWER_STATE_START_VALID:
		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
		if (pcmod == 0 && start == 0x1)
			ps_desired = POWER_STATE_0;
		if (pcmod == 0 && start == 0x0)
			ps_desired = lowest_pow_st;
		break;
	case NVME_POWER_STATE_ACTIVE:
		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
		if (pcmod == 0)
			ps_desired = POWER_STATE_0;
		break;
	case NVME_POWER_STATE_IDLE:
		/* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
		if (pcmod == 0x0)
			ps_desired = POWER_STATE_1;
		else if (pcmod == 0x1)
			ps_desired = POWER_STATE_2;
		else if (pcmod == 0x2)
			ps_desired = POWER_STATE_3;
		break;
	case NVME_POWER_STATE_STANDBY:
		/* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
		if (pcmod == 0x0)
			ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
		else if (pcmod == 0x1)
			ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
		break;
	case NVME_POWER_STATE_LU_CONTROL:
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}
	nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
				    NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
			  dma_addr);
	return res;
}
/* Write Buffer Helper Functions */
/* Also using this for Format Unit with hdr passed as NULL, and buffer_id, 0 */

static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 opcode, u32 tot_len, u32 offset,
					u8 buffer_id)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct nvme_iod *iod = NULL;
	int length;

	memset(&c, 0, sizeof(c));
	c.common.opcode = opcode;
	if (opcode == nvme_admin_download_fw) {
		if (hdr->iovec_count > 0) {
			/* Assuming SGL is not allowed for this command */
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out;
		}
		iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
				(unsigned long)hdr->dxferp, tot_len);
		if (IS_ERR(iod)) {
			res = PTR_ERR(iod);
			goto out;
		}
		length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
		if (length != tot_len) {
			res = -ENOMEM;
			goto out_unmap;
		}

		c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
		c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
		c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
		c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
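		/*
		 * Illustrative: NUMD and OFFSET are in dwords and NUMD is
		 * 0's based, so downloading 4096 bytes at byte offset 8192
		 * gives numd = 4096/4 - 1 = 1023 and offset = 8192/4 = 2048.
		 */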
	} else if (opcode == nvme_admin_activate_fw) {
		u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
		c.common.cdw10[0] = cpu_to_le32(cdw10);
	}

	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);

 out_unmap:
	if (opcode == nvme_admin_download_fw) {
		nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
		nvme_free_iod(dev, iod);
	}
 out:
	return res;
}
/* Mode Select Helper Functions */

static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
						u16 *bd_len, u8 *llbaa)
{
	if (cdb10) {
		/* 10 Byte CDB */
		*bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
			parm_list[MODE_SELECT_10_BD_OFFSET + 1];
		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
			MODE_SELECT_10_LLBAA_MASK;
	} else {
		/* 6 Byte CDB */
		*bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
	}
}

static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
					u16 idx, u16 bd_len, u8 llbaa)
{
	u16 bd_num;

	bd_num = bd_len / ((llbaa == 0) ?
			SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
	/* Store block descriptor info if a FORMAT UNIT comes later */
	/* TODO Saving 1st BD info; what to do if multiple BD received? */
	if (llbaa == 0) {
		/* Standard Block Descriptor - spc4r34 7.5.5.1 */
		ns->mode_select_num_blocks =
				(parm_list[idx + 1] << 16) +
				(parm_list[idx + 2] << 8) +
				(parm_list[idx + 3]);

		ns->mode_select_block_len =
				(parm_list[idx + 5] << 16) +
				(parm_list[idx + 6] << 8) +
				(parm_list[idx + 7]);
	} else {
		/* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
		ns->mode_select_num_blocks =
				(((u64)parm_list[idx + 0]) << 56) +
				(((u64)parm_list[idx + 1]) << 48) +
				(((u64)parm_list[idx + 2]) << 40) +
				(((u64)parm_list[idx + 3]) << 32) +
				(((u64)parm_list[idx + 4]) << 24) +
				(((u64)parm_list[idx + 5]) << 16) +
				(((u64)parm_list[idx + 6]) << 8) +
				((u64)parm_list[idx + 7]);

		ns->mode_select_block_len =
				(parm_list[idx + 12] << 24) +
				(parm_list[idx + 13] << 16) +
				(parm_list[idx + 14] << 8) +
				(parm_list[idx + 15]);
	}
}
static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *mode_page, u8 page_code)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	unsigned dword11;

	switch (page_code) {
	case MODE_PAGE_CACHING:
		dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
		nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
					    0, NULL);
		res = nvme_trans_status_code(hdr, nvme_sc);
		break;
	case MODE_PAGE_CONTROL:
		break;
	case MODE_PAGE_POWER_CONDITION:
		/* Verify the OS is not trying to set timers */
		if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_PARAMETER,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			if (!res)
				res = SNTI_INTERNAL_ERROR;
			break;
		}
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		if (!res)
			res = SNTI_INTERNAL_ERROR;
		break;
	}

	return res;
}
static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *cmd, u16 parm_list_len, u8 pf,
					u8 sp, u8 cdb10)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 *parm_list;
	u16 bd_len;
	u8 llbaa = 0;
	u16 index, saved_index;
	u8 page_code;
	u16 mp_size;

	/* Get parm list from data-in/out buffer */
	parm_list = kmalloc(parm_list_len, GFP_KERNEL);
	if (parm_list == NULL) {
		return -ENOMEM;
	}

	res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_mem;

	nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
	index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);

	if (bd_len != 0) {
		/* Block Descriptors present, parse */
		nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
		index += bd_len;
	}
	saved_index = index;

	/* Multiple mode pages may be present; iterate through all */
	/* In 1st Iteration, don't do NVME Command, only check for CDB errors */
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		if ((page_code != MODE_PAGE_CACHING) &&
		    (page_code != MODE_PAGE_CONTROL) &&
		    (page_code != MODE_PAGE_POWER_CONDITION)) {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_mem;
		}
		index += mp_size;
	} while (index < parm_list_len);

	/* In 2nd Iteration, do the NVME Commands */
	index = saved_index;
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
								page_code);
		if (res != SNTI_TRANSLATION_SUCCESS)
			break;
		index += mp_size;
	} while (index < parm_list_len);

 out_mem:
	kfree(parm_list);
	return res;
}
/* Format Unit Helper Functions */

static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
					struct sg_io_hdr *hdr)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 flbas;

	/*
	 * SCSI Expects a MODE SELECT would have been issued prior to
	 * a FORMAT UNIT, and the block size and number would be used
	 * from the block descriptor in it. If a MODE SELECT had not
	 * been issued, FORMAT shall use the current values for both.
	 */

	if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
		mem = dma_alloc_coherent(&dev->pci_dev->dev,
			sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);

		/* nvme ns identify */
		nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
		res = nvme_trans_status_code(hdr, nvme_sc);
		id_ns = mem;

		if (ns->mode_select_num_blocks == 0)
			ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
		if (ns->mode_select_block_len == 0) {
			flbas = (id_ns->flbas) & 0x0F;
			ns->mode_select_block_len =
						(1 << (id_ns->lbaf[flbas].ds));
		}

		dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
				  mem, dma_addr);
	}

	return res;
}
static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
					u8 format_prot_info, u8 *nvme_pf_code)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 *parm_list;
	u8 pf_usage, pf_code;

	parm_list = kmalloc(len, GFP_KERNEL);
	if (parm_list == NULL) {
		return -ENOMEM;
	}

	res = nvme_trans_copy_from_user(hdr, parm_list, len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

	if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
				FORMAT_UNIT_IMMED_MASK) != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
	    (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
			FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
	pf_code = (pf_usage << 2) | format_prot_info;
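	/*
	 * pf_code packs PROTECTION FIELD USAGE (3 bits) above FMTPINFO
	 * (2 bits); e.g. pf_usage = 0 with fmtpinfo = 0 (illustrative) gives
	 * pf_code 0, which the switch below maps to an NVMe format with
	 * protection information disabled.
	 */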
	switch (pf_code) {
	case 0:
		*nvme_pf_code = 0;
		break;
	case 2:
		*nvme_pf_code = 1;
		break;
	case 3:
		*nvme_pf_code = 2;
		break;
	case 7:
		*nvme_pf_code = 3;
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	kfree(parm_list);
	return res;
}
static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 prot_info)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 i;
	u8 flbas, nlbaf;
	u8 selected_lbaf = 0xFF;
	u32 cdw10 = 0;
	struct nvme_command c;

	/* Loop through the LBA formats in id_ns to find the requested one */
	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
					&dma_addr, GFP_KERNEL);

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	id_ns = mem;
	flbas = (id_ns->flbas) & 0x0F;
	nlbaf = id_ns->nlbaf;

	for (i = 0; i < nlbaf; i++) {
		if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
			selected_lbaf = i;
			break;
		}
	}
	if (selected_lbaf > 0x0F) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	}
	if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	}

	cdw10 |= prot_info << 5;
	cdw10 |= selected_lbaf & 0x0F;
	memset(&c, 0, sizeof(c));
	c.format.opcode = nvme_admin_format_nvm;
	c.format.nsid = cpu_to_le32(ns->ns_id);
	c.format.cdw10 = cpu_to_le32(cdw10);

	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
	return res;
}
/* Read/Write Helper Functions */

static inline void nvme_trans_get_io_cdb6(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = 0;
	cdb_info->prot_info = 0;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
					IO_6_CDB_LBA_MASK;
	cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);

	/* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */
	if (cdb_info->xfer_len == 0)
		cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
}

static inline void nvme_trans_get_io_cdb10(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
					IO_CDB_FUA_MASK;
	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
}

static inline void nvme_trans_get_io_cdb12(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
					IO_CDB_FUA_MASK;
	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
}

static inline void nvme_trans_get_io_cdb16(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
					IO_CDB_FUA_MASK;
	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
}
static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
					struct nvme_trans_io_cdb *cdb_info,
					u32 max_blocks)
{
	/* If using iovecs, send one nvme command per vector */
	if (hdr->iovec_count > 0)
		return hdr->iovec_count;
	else if (cdb_info->xfer_len > max_blocks)
		return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
	else
		return 1;
}
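/*
 * Worked example (illustrative): a single-buffer transfer of 300 blocks
 * with max_blocks = 128 splits into ((300 - 1) / 128) + 1 = 3 NVMe commands.
 */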
static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
					struct nvme_trans_io_cdb *cdb_info)
{
	u16 control = 0;

	/* When Protection information support is added, implement here */

	if (cdb_info->fua > 0)
		control |= NVME_RW_FUA;

	return control;
}
static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
			struct nvme_trans_io_cdb *cdb_info, u8 is_write)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	u32 num_cmds;
	struct nvme_iod *iod;
	u64 unit_len;
	u64 unit_num_blocks;	/* Number of blocks to xfer in each nvme cmd */
	u32 retcode;
	u16 control;
	u64 nvme_offset = 0;
	void __user *next_mapping_addr;
	struct nvme_command c;
	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
	u32 i;
	u32 max_blocks = queue_max_hw_sectors(ns->queue);

	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);

	/*
	 * This loop handles two cases.
	 * First, when an SGL is used in the form of an iovec list:
	 *   - Use iov_base as the next mapping address for the nvme command_id
	 *   - Use iov_len as the data transfer length for the command.
	 * Second, when we have a single buffer
	 *   - If larger than max_blocks, split into chunks, offset
	 *     each nvme command accordingly.
	 */
	for (i = 0; i < num_cmds; i++) {
		memset(&c, 0, sizeof(c));
		if (hdr->iovec_count > 0) {
			struct sg_iovec sgl;

			retcode = copy_from_user(&sgl, hdr->dxferp +
					i * sizeof(struct sg_iovec),
					sizeof(struct sg_iovec));
			if (retcode)
				return -EFAULT;
			unit_len = sgl.iov_len;
			unit_num_blocks = unit_len >> ns->lba_shift;
			next_mapping_addr = sgl.iov_base;
		} else {
			unit_num_blocks = min((u64)max_blocks,
					(cdb_info->xfer_len - nvme_offset));
			unit_len = unit_num_blocks << ns->lba_shift;
			next_mapping_addr = hdr->dxferp +
					((1 << ns->lba_shift) * nvme_offset);
		}

		c.rw.opcode = opcode;
		c.rw.nsid = cpu_to_le32(ns->ns_id);
		c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
		c.rw.length = cpu_to_le16(unit_num_blocks - 1);
		control = nvme_trans_io_get_control(ns, cdb_info);
		c.rw.control = cpu_to_le16(control);

		iod = nvme_map_user_pages(dev,
			(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
			(unsigned long)next_mapping_addr, unit_len);
		if (IS_ERR(iod)) {
			res = PTR_ERR(iod);
			goto out;
		}
		retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
		if (retcode != unit_len) {
			nvme_unmap_user_pages(dev,
				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
				iod);
			nvme_free_iod(dev, iod);
			res = -ENOMEM;
			goto out;
		}
		c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
		c.rw.prp2 = cpu_to_le64(iod->first_dma);

		nvme_offset += unit_num_blocks;

		nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
		if (nvme_sc != NVME_SC_SUCCESS) {
			nvme_unmap_user_pages(dev,
				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
				iod);
			nvme_free_iod(dev, iod);
			res = nvme_trans_status_code(hdr, nvme_sc);
			goto out;
		}
		nvme_unmap_user_pages(dev,
				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
				iod);
		nvme_free_iod(dev, iod);
	}
	res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);

 out:
	return res;
}
/* SCSI Command Translation Functions */

static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	struct nvme_trans_io_cdb cdb_info;
	u8 opcode = cmd[0];
	u64 xfer_bytes;
	u64 sum_iov_len = 0;
	struct sg_iovec sgl;
	int i;
	size_t not_copied;

	/* Extract Fields from CDB */
	switch (opcode) {
	case READ_6:
	case WRITE_6:
		nvme_trans_get_io_cdb6(cmd, &cdb_info);
		break;
	case READ_10:
	case WRITE_10:
		nvme_trans_get_io_cdb10(cmd, &cdb_info);
		break;
	case READ_12:
	case WRITE_12:
		nvme_trans_get_io_cdb12(cmd, &cdb_info);
		break;
	case READ_16:
	case WRITE_16:
		nvme_trans_get_io_cdb16(cmd, &cdb_info);
		break;
	default:
		/* Will never really reach here */
		res = SNTI_INTERNAL_ERROR;
		goto out;
	}

	/* Calculate total length of transfer (in bytes) */
	if (hdr->iovec_count > 0) {
		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			sum_iov_len += sgl.iov_len;
			/* IO vector sizes should be multiples of block size */
			if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
				res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST,
					SCSI_ASC_INVALID_PARAMETER,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
				goto out;
			}
		}
	} else {
		sum_iov_len = hdr->dxfer_len;
	}

	/* As per the sg ioctl howto, if the lengths differ, use the lower one */
	xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);

	/* If block count and actual data buffer size don't match, error out */
	if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
		res = -EINVAL;
		goto out;
	}

	/* Check for 0 length transfer - it is not illegal */
	if (cdb_info.xfer_len == 0)
		goto out;

	/* Send NVMe IO Command(s) */
	res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

 out:
	return res;
}
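/*
 * Illustrative sketch (compiled out) of what the nvme_trans_get_io_cdbXX
 * helpers used above extract, shown for the 10-byte CDB; field offsets
 * follow SBC. The real helpers are defined earlier in this file and use
 * the GET_*_FROM_CDB macros rather than open-coded offsets.
 */
#if 0
static void example_get_io_cdb10(u8 *cmd, struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = cmd[1] & 0x08;			/* FUA bit */
	cdb_info->prot_info = (cmd[1] & 0xe0) >> 5;	/* RD/WRPROTECT */
	cdb_info->lba = get_unaligned_be32(&cmd[2]);	/* LBA, bytes 2-5 */
	cdb_info->xfer_len = get_unaligned_be16(&cmd[7]); /* blocks, 7-8 */
}
#endif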
static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 *inq_response;
	u8 evpd;
	u8 page_code;
	int alloc_len;

	evpd = GET_INQ_EVPD_BIT(cmd);
	page_code = GET_INQ_PAGE_CODE(cmd);
	alloc_len = GET_INQ_ALLOC_LENGTH(cmd);

	inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	if (evpd == 0) {
		if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
			res = nvme_trans_standard_inquiry_page(ns, hdr,
						inq_response, alloc_len);
		} else {
			res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		}
	} else {
		switch (page_code) {
		case VPD_SUPPORTED_PAGES:
			res = nvme_trans_supported_vpd_pages(ns, hdr,
						inq_response, alloc_len);
			break;
		case VPD_SERIAL_NUMBER:
			res = nvme_trans_unit_serial_page(ns, hdr,
						inq_response, alloc_len);
			break;
		case VPD_DEVICE_IDENTIFIERS:
			res = nvme_trans_device_id_page(ns, hdr,
						inq_response, alloc_len);
			break;
		case VPD_EXTENDED_INQUIRY:
			res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
			break;
		case VPD_BLOCK_DEV_CHARACTERISTICS:
			res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
			break;
		default:
			res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			break;
		}
	}
	kfree(inq_response);
 out_mem:
	return res;
}
static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 alloc_len;
	u8 sp;
	u8 pc;
	u8 page_code;

	sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
	if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
	page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
	pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
	if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
	switch (page_code) {
	case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
		res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
		break;
	case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
		res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
		break;
	case LOG_PAGE_TEMPERATURE_PAGE:
		res = nvme_trans_log_temperature(ns, hdr, alloc_len);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}
static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 cdb10 = 0;
	u16 parm_list_len;
	u8 page_format;
	u8 save_pages;

	page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
	page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;

	save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
	save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;

	if (GET_OPCODE(cmd) == MODE_SELECT) {
		parm_list_len = GET_U8_FROM_CDB(cmd,
				MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
	} else {
		parm_list_len = GET_U16_FROM_CDB(cmd,
				MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
		cdb10 = 1;
	}

	if (parm_list_len != 0) {
		/*
		 * According to SPC-4 r24, a parameter list length field of 0
		 * shall not be considered an error
		 */
		res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
						page_format, save_pages, cdb10);
	}

	return res;
}
static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 alloc_len;
	u8 cdb10 = 0;
	u8 page_code;
	u8 pc;

	if (GET_OPCODE(cmd) == MODE_SENSE) {
		alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
	} else {
		alloc_len = GET_U16_FROM_CDB(cmd,
						MODE_SENSE10_ALLOC_LEN_OFFSET);
		cdb10 = 1;
	}

	pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
						MODE_SENSE_PAGE_CONTROL_MASK;
	if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
					MODE_SENSE_PAGE_CODE_MASK;
	switch (page_code) {
	case MODE_PAGE_CACHING:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
					cdb10, &nvme_trans_fill_caching_page,
					MODE_PAGE_CACHING_LEN);
		break;
	case MODE_PAGE_CONTROL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
					cdb10, &nvme_trans_fill_control_page,
					MODE_PAGE_CONTROL_LEN);
		break;
	case MODE_PAGE_POWER_CONDITION:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
					cdb10, &nvme_trans_fill_pow_cnd_page,
					MODE_PAGE_POW_CND_LEN);
		break;
	case MODE_PAGE_INFO_EXCEP:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
					cdb10, &nvme_trans_fill_inf_exc_page,
					MODE_PAGE_INF_EXC_LEN);
		break;
	case MODE_PAGE_RETURN_ALL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
					cdb10, &nvme_trans_fill_all_pages,
					MODE_PAGE_ALL_LEN);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}
static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	u32 alloc_len = READ_CAP_10_RESP_SIZE;
	u32 resp_size = READ_CAP_10_RESP_SIZE;
	u32 xfer_len;
	u8 cdb16;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 *response;

	cdb16 = IS_READ_CAP_16(cmd);
	if (cdb16) {
		alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
		resp_size = READ_CAP_16_RESP_SIZE;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}
	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ns = mem;

	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}
	nvme_trans_fill_read_cap(response, id_ns, cdb16);

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
								dma_addr);
 out:
	return res;
}
static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	u32 alloc_len, xfer_len, resp_size;
	u8 select_report;
	u8 *response;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	u32 ll_length, lun_id;
	u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
	__be32 tmp_len;

	alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
	select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);

	if ((select_report != ALL_LUNS_RETURNED) &&
	    (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
	    (select_report != RESTRICTED_LUNS_RETURNED)) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	} else {
		/* NVMe Controller Identify */
		mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_id_ctrl),
					&dma_addr, GFP_KERNEL);
		if (mem == NULL) {
			res = -ENOMEM;
			goto out;
		}
		nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			goto out_dma;
		if (nvme_sc) {
			res = nvme_sc;
			goto out_dma;
		}
		id_ctrl = mem;
		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
		resp_size = ll_length + LUN_DATA_HEADER_SIZE;

		if (alloc_len < resp_size) {
			res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_dma;
		}

		response = kzalloc(resp_size, GFP_KERNEL);
		if (response == NULL) {
			res = -ENOMEM;
			goto out_dma;
		}

		/* The first LUN ID will always be 0 per the SAM spec */
		for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
			/*
			 * Set the LUN Id and then increment to the next LUN
			 * location in the parameter data.
			 */
			__be64 tmp_id = cpu_to_be64(lun_id);
			memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
			lun_id_offset += LUN_ENTRY_SIZE;
		}
		tmp_len = cpu_to_be32(ll_length);
		memcpy(response, &tmp_len, sizeof(u32));
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
								dma_addr);
 out:
	return res;
}
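/*
 * Data-in layout produced by nvme_trans_report_luns() above
 * (illustrative, for a controller reporting nn == 2 namespaces):
 * bytes 0-3   LUN LIST LENGTH = 16 (2 entries * LUN_ENTRY_SIZE, big-endian)
 * bytes 4-7   reserved
 * bytes 8-15  LUN 0 (big-endian 64-bit)
 * bytes 16-23 LUN 1
 */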
static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 alloc_len, xfer_len, resp_size;
	u8 desc_format;
	u8 *response;

	alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
	desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
	desc_format &= REQUEST_SENSE_DESC_MASK;

	resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
					(FIXED_FMT_SENSE_DATA_SIZE));
	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out;
	}

	if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
		/* Descriptor Format Sense Data */
		response[0] = DESC_FORMAT_SENSE_DATA;
		response[1] = NO_SENSE;
		/* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
		response[2] = SCSI_ASC_NO_SENSE;
		response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* SDAT_OVFL = 0 | Additional Sense Length = 0 */
	} else {
		/* Fixed Format Sense Data */
		response[0] = FIXED_SENSE_DATA;
		/* Byte 1 = Obsolete */
		response[2] = NO_SENSE;	/* FM, EOM, ILI, SDAT_OVFL = 0 */
		/* Bytes 3-6 - Information - set to zero */
		response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
		/* Bytes 8-11 - Cmd Specific Information - set to zero */
		response[12] = SCSI_ASC_NO_SENSE;
		response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* Byte 14 = Field Replaceable Unit Code = 0 */
		/* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out:
	return res;
}
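/*
 * Example fixed-format buffer produced by nvme_trans_request_sense()
 * above (18 bytes, hex): 70 00 00 00 00 00 00 0a followed by ten zero
 * bytes, i.e. response code 0x70, sense key NO SENSE, ASC/ASCQ 00/00.
 */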
static int nvme_trans_security_protocol(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
}
static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_command c;
	u8 immed, pcmod, pc, no_flush, start;

	immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
	pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
	pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
	no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
	start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);

	immed &= START_STOP_UNIT_CDB_IMMED_MASK;
	pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
	pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
	no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
	start &= START_STOP_UNIT_CDB_START_MASK;

	if (immed != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	} else {
		if (no_flush == 0) {
			/* Issue NVME FLUSH command prior to START STOP UNIT */
			memset(&c, 0, sizeof(c));
			c.common.opcode = nvme_cmd_flush;
			c.common.nsid = cpu_to_le32(ns->ns_id);

			nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
			res = nvme_trans_status_code(hdr, nvme_sc);
			if (res)
				goto out;
			if (nvme_sc) {
				res = nvme_sc;
				goto out;
			}
		}
		/* Setup the expected power state transition */
		res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
	}

 out:
	return res;
}
static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_cmd_flush;
	c.common.nsid = cpu_to_le32(ns->ns_id);

	nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out;
	if (nvme_sc)
		res = nvme_sc;

 out:
	return res;
}
static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 parm_hdr_len = 0;
	u8 nvme_pf_code = 0;
	u8 format_prot_info, long_list, format_data;

	format_prot_info = GET_U8_FROM_CDB(cmd,
				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
	long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
	format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);

	format_prot_info = (format_prot_info &
				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
	long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
	format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;

	if (format_data != 0) {
		if (format_prot_info != 0) {
			if (long_list == 0)
				parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
			else
				parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
		}
	} else if (format_data == 0 && format_prot_info != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	/* Get parm header from data-in/out buffer */
	/*
	 * According to the translation spec, the only fields in the parameter
	 * list we are concerned with are in the header. So allocate only that.
	 */
	if (parm_hdr_len > 0) {
		res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
					format_prot_info, &nvme_pf_code);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out;
	}

	/* Attempt to activate any previously downloaded firmware image */
	res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);

	/* Determine Block size and count and send format command */
	res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

	res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);

 out:
	return res;
}
static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	struct nvme_dev *dev = ns->dev;

	if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					    NOT_READY, SCSI_ASC_LUN_NOT_READY,
					    SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	else
		res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);

	return res;
}
static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u32 buffer_offset, parm_list_length;
	u8 buffer_id, mode;

	parm_list_length =
		GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
	if (parm_list_length % BYTES_TO_DWORDS != 0) {
		/* NVMe expects Firmware file to be a whole number of DWORDS */
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
	if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
						WRITE_BUFFER_CDB_MODE_MASK;
	buffer_offset =
		GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);

	switch (mode) {
	case DOWNLOAD_SAVE_ACTIVATE:
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out;
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	case DOWNLOAD_SAVE_DEFER_ACTIVATE:
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	case ACTIVATE_DEFERRED_MICROCODE:
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}
struct scsi_unmap_blk_desc {
	__be64	slba;
	__be32	nlb;
	u32	resv;
};

struct scsi_unmap_parm_list {
	__be16	unmap_data_len;
	__be16	unmap_blk_desc_data_len;
	u32	resv;
	struct scsi_unmap_blk_desc desc[0];
};
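/*
 * Each block descriptor is 16 bytes, which is why the descriptor count
 * below is unmap_blk_desc_data_len >> 4. Example parameter list for a
 * single descriptor (illustrative): unmap_data_len = 22,
 * unmap_blk_desc_data_len = 16, followed by one {slba, nlb} pair naming
 * the LBA range to discard.
 */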
static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	struct nvme_dev *dev = ns->dev;
	struct scsi_unmap_parm_list *plist;
	struct nvme_dsm_range *range;
	struct nvme_command c;
	int i, nvme_sc, res = -ENOMEM;
	u16 ndesc, list_len;
	dma_addr_t dma_addr;

	list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET);
	if (!list_len)
		return -EINVAL;

	plist = kmalloc(list_len, GFP_KERNEL);
	if (!plist)
		return -ENOMEM;

	res = nvme_trans_copy_from_user(hdr, plist, list_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

	ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
	if (!ndesc || ndesc > 256) {
		res = -EINVAL;
		goto out;
	}

	range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
							&dma_addr, GFP_KERNEL);
	if (!range) {
		res = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ndesc; i++) {
		range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
		range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
		range[i].cattr = 0;
	}

	memset(&c, 0, sizeof(c));
	c.dsm.opcode = nvme_cmd_dsm;
	c.dsm.nsid = cpu_to_le32(ns->ns_id);
	c.dsm.prp1 = cpu_to_le64(dma_addr);
	c.dsm.nr = cpu_to_le32(ndesc - 1);
	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);

	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
							range, dma_addr);
 out:
	kfree(plist);
	return res;
}
static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
{
	u8 cmd[BLK_MAX_CDB];
	int retcode;
	unsigned int opcode;

	if (hdr->cmdp == NULL)
		return -EMSGSIZE;
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;

	/*
	 * Prime the hdr with good status for scsi commands that don't require
	 * an nvme command for translation.
	 */
	retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
	if (retcode)
		return retcode;

	opcode = cmd[0];
	switch (opcode) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
		retcode = nvme_trans_io(ns, hdr, 0, cmd);
		break;
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		retcode = nvme_trans_io(ns, hdr, 1, cmd);
		break;
	case INQUIRY:
		retcode = nvme_trans_inquiry(ns, hdr, cmd);
		break;
	case LOG_SENSE:
		retcode = nvme_trans_log_sense(ns, hdr, cmd);
		break;
	case MODE_SELECT:
	case MODE_SELECT_10:
		retcode = nvme_trans_mode_select(ns, hdr, cmd);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		retcode = nvme_trans_mode_sense(ns, hdr, cmd);
		break;
	case READ_CAPACITY:
		retcode = nvme_trans_read_capacity(ns, hdr, cmd);
		break;
	case SERVICE_ACTION_IN_16:
		if (IS_READ_CAP_16(cmd))
			retcode = nvme_trans_read_capacity(ns, hdr, cmd);
		else
			goto out;
		break;
	case REPORT_LUNS:
		retcode = nvme_trans_report_luns(ns, hdr, cmd);
		break;
	case REQUEST_SENSE:
		retcode = nvme_trans_request_sense(ns, hdr, cmd);
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		retcode = nvme_trans_security_protocol(ns, hdr, cmd);
		break;
	case START_STOP:
		retcode = nvme_trans_start_stop(ns, hdr, cmd);
		break;
	case SYNCHRONIZE_CACHE:
		retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
		break;
	case FORMAT_UNIT:
		retcode = nvme_trans_format_unit(ns, hdr, cmd);
		break;
	case TEST_UNIT_READY:
		retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
		break;
	case WRITE_BUFFER:
		retcode = nvme_trans_write_buffer(ns, hdr, cmd);
		break;
	case UNMAP:
		retcode = nvme_trans_unmap(ns, hdr, cmd);
		break;
	default:
 out:
		retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}
	return retcode;
}
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
{
	struct sg_io_hdr hdr;
	int retcode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
		return -EFAULT;
	if (hdr.interface_id != 'S')
		return -EINVAL;
	if (hdr.cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	retcode = nvme_scsi_translate(ns, &hdr);
	if (retcode < 0)
		return retcode;
	if (retcode > 0)
		retcode = SNTI_TRANSLATION_SUCCESS;
	if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
		return -EFAULT;

	return retcode;
}
3027 int nvme_sg_get_version_num(int __user *ip)
3029 return put_user(sg_version_num, ip);