/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <linux/bits.h>
#include <linux/types.h>
#include <linux/uuid.h>

/* NQN names in command fields are specified with one size */
#define NVMF_NQN_FIELD_LEN	256

/* However, the maximum length of a qualified name is another size */
#define NVMF_NQN_SIZE		223

#define NVMF_TRSVCID_SIZE	32
#define NVMF_TRADDR_SIZE	256
#define NVMF_TSAS_SIZE		256

#define NVME_DISC_SUBSYS_NAME	"nqn.2014-08.org.nvmexpress.discovery"

#define NVME_NSID_ALL		0xffffffff

/* Special NSSR value, 'NVMe' */
#define NVME_SUBSYS_RESET	0x4E564D65

enum nvme_subsys_type {
	/* Referral to another discovery type target subsystem */
	NVME_NQN_DISC = 1,

	/* NVME type target subsystem */
	NVME_NQN_NVME = 2,

	/* Current discovery type target subsystem */
	NVME_NQN_CURR = 3,
};

enum nvme_ctrl_type {
	NVME_CTRL_IO = 1,	/* I/O controller */
	NVME_CTRL_DISC = 2,	/* Discovery controller */
	NVME_CTRL_ADMIN = 3,	/* Administrative controller */
};

enum nvme_dctype {
	NVME_DCTYPE_NOT_REPORTED = 0,
	NVME_DCTYPE_DDC = 1,	/* Direct Discovery Controller */
	NVME_DCTYPE_CDC = 2,	/* Central Discovery Controller */
};

/* Address Family codes for Discovery Log Page entry ADRFAM field */
enum {
	NVMF_ADDR_FAMILY_PCI = 0,	/* PCIe */
	NVMF_ADDR_FAMILY_IP4 = 1,	/* IP4 */
	NVMF_ADDR_FAMILY_IP6 = 2,	/* IP6 */
	NVMF_ADDR_FAMILY_IB = 3,	/* InfiniBand */
	NVMF_ADDR_FAMILY_FC = 4,	/* Fibre Channel */
	NVMF_ADDR_FAMILY_LOOP = 254,	/* Reserved for host usage */
	NVMF_ADDR_FAMILY_MAX,
};

/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
	NVMF_TRTYPE_RDMA = 1,		/* RDMA */
	NVMF_TRTYPE_FC = 2,		/* Fibre Channel */
	NVMF_TRTYPE_TCP = 3,		/* TCP/IP */
	NVMF_TRTYPE_LOOP = 254,		/* Reserved for host usage */
	NVMF_TRTYPE_MAX,
};

/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
	NVMF_TREQ_NOT_SPECIFIED = 0,	/* Not specified */
	NVMF_TREQ_REQUIRED = 1,		/* Required */
	NVMF_TREQ_NOT_REQUIRED = 2,	/* Not Required */
#define NVME_TREQ_SECURE_CHANNEL_MASK \
	(NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)

	NVMF_TREQ_DISABLE_SQFLOW = (1 << 2),	/* Supports SQ flow control disable */
};

/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
 * RDMA_QPTYPE field
 */
enum {
	NVMF_RDMA_QPTYPE_CONNECTED = 1,	/* Reliable Connected */
	NVMF_RDMA_QPTYPE_DATAGRAM = 2,	/* Reliable Datagram */
	NVMF_RDMA_QPTYPE_INVALID = 0xff,
};

/* RDMA Provider Type codes for Discovery Log Page entry TSAS
 * RDMA_PRTYPE field
 */
enum {
	NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1,	/* No Provider Specified */
	NVMF_RDMA_PRTYPE_IB = 2,		/* InfiniBand */
	NVMF_RDMA_PRTYPE_ROCE = 3,		/* InfiniBand RoCE */
	NVMF_RDMA_PRTYPE_ROCEV2 = 4,		/* InfiniBand RoCEV2 */
	NVMF_RDMA_PRTYPE_IWARP = 5,		/* iWARP */
};

/* RDMA Connection Management Service Type codes for Discovery Log Page
 * entry TSAS RDMA_CMS field
 */
enum {
	NVMF_RDMA_CMS_RDMA_CM = 1,	/* Sockets based endpoint addressing */
};

/* TSAS SECTYPE for TCP transport */
enum {
	NVMF_TCP_SECTYPE_NONE = 0,	/* No Security */
	NVMF_TCP_SECTYPE_TLS12 = 1,	/* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
	NVMF_TCP_SECTYPE_TLS13 = 2,	/* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
	NVMF_TCP_SECTYPE_INVALID = 0xff,
};

#define NVME_AQ_DEPTH		32
#define NVME_NR_AEN_COMMANDS	1
#define NVME_AQ_BLK_MQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)

/*
 * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
 * NVM-Express 1.2 specification, section 4.1.2.
 */
#define NVME_AQ_MQ_TAG_DEPTH	(NVME_AQ_BLK_MQ_DEPTH - 1)

enum {
	NVME_REG_CAP = 0x0000,		/* Controller Capabilities */
	NVME_REG_VS = 0x0008,		/* Version */
	NVME_REG_INTMS = 0x000c,	/* Interrupt Mask Set */
	NVME_REG_INTMC = 0x0010,	/* Interrupt Mask Clear */
	NVME_REG_CC = 0x0014,		/* Controller Configuration */
	NVME_REG_CSTS = 0x001c,		/* Controller Status */
	NVME_REG_NSSR = 0x0020,		/* NVM Subsystem Reset */
	NVME_REG_AQA = 0x0024,		/* Admin Queue Attributes */
	NVME_REG_ASQ = 0x0028,		/* Admin SQ Base Address */
	NVME_REG_ACQ = 0x0030,		/* Admin CQ Base Address */
	NVME_REG_CMBLOC = 0x0038,	/* Controller Memory Buffer Location */
	NVME_REG_CMBSZ = 0x003c,	/* Controller Memory Buffer Size */
	NVME_REG_BPINFO = 0x0040,	/* Boot Partition Information */
	NVME_REG_BPRSEL = 0x0044,	/* Boot Partition Read Select */
	NVME_REG_BPMBL = 0x0048,	/* Boot Partition Memory Buffer Location */
	NVME_REG_CMBMSC = 0x0050,	/* Controller Memory Buffer Memory Space Control */
	NVME_REG_CRTO = 0x0068,		/* Controller Ready Timeouts */
	NVME_REG_PMRCAP = 0x0e00,	/* Persistent Memory Capabilities */
	NVME_REG_PMRCTL = 0x0e04,	/* Persistent Memory Region Control */
	NVME_REG_PMRSTS = 0x0e08,	/* Persistent Memory Region Status */
	NVME_REG_PMREBS = 0x0e0c,	/* Persistent Memory Region Elasticity Buffer Size */
	NVME_REG_PMRSWTP = 0x0e10,	/* Persistent Memory Region Sustained Write Throughput */
	NVME_REG_DBS = 0x1000,		/* SQ 0 Tail Doorbell */
};

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)
#define NVME_CAP_CSS(cap)	(((cap) >> 37) & 0xff)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)
#define NVME_CAP_CMBS(cap)	(((cap) >> 57) & 0x1)

#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)

#define NVME_CRTO_CRIMT(crto)	((crto) >> 16)
#define NVME_CRTO_CRWMT(crto)	((crto) & 0xffff)

enum {
	NVME_CMBSZ_SQS = 1 << 0,
	NVME_CMBSZ_CQS = 1 << 1,
	NVME_CMBSZ_LISTS = 1 << 2,
	NVME_CMBSZ_RDS = 1 << 3,
	NVME_CMBSZ_WDS = 1 << 4,

	NVME_CMBSZ_SZ_SHIFT = 12,
	NVME_CMBSZ_SZ_MASK = 0xfffff,

	NVME_CMBSZ_SZU_SHIFT = 8,
	NVME_CMBSZ_SZU_MASK = 0xf,
};
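/*
 * Illustrative sketch (not part of the ABI defined by this header): decoding
 * a few Controller Capabilities fields with the accessors above.  The
 * function name is hypothetical; per the NVMe specification CAP.MQES is
 * zero-based, CAP.TO is in units of 500 ms, and the doorbell stride is
 * 2^(2 + DSTRD) bytes.
 */
static inline void nvme_cap_decode_example(u64 cap, u32 *queue_depth,
					   u32 *timeout_ms, u32 *db_stride)
{
	*queue_depth = NVME_CAP_MQES(cap) + 1;		/* deepest supported queue */
	*timeout_ms = NVME_CAP_TIMEOUT(cap) * 500;	/* CSTS.RDY timeout */
	*db_stride = 4 << NVME_CAP_STRIDE(cap);		/* doorbell register spacing */
}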
/*
 * Submission and Completion Queue Entry Sizes for the NVM command set.
 * (In bytes and specified as a power of two (2^n)).
 */
#define NVME_ADM_SQES		6
#define NVME_NVM_IOSQES		6
#define NVME_NVM_IOCQES		4

enum {
	NVME_CC_ENABLE = 1 << 0,
	NVME_CC_EN_SHIFT = 0,
	NVME_CC_CSS_SHIFT = 4,
	NVME_CC_MPS_SHIFT = 7,
	NVME_CC_AMS_SHIFT = 11,
	NVME_CC_SHN_SHIFT = 14,
	NVME_CC_IOSQES_SHIFT = 16,
	NVME_CC_IOCQES_SHIFT = 20,
	NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT,
	NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT,
	NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
	NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
	NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
	NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
	NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
	NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
	NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
	NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
	NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
	NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
	NVME_CC_CRIME = 1 << 24,
};

enum {
	NVME_CSTS_RDY = 1 << 0,
	NVME_CSTS_CFS = 1 << 1,
	NVME_CSTS_NSSRO = 1 << 4,
	NVME_CSTS_PP = 1 << 5,
	NVME_CSTS_SHST_NORMAL = 0 << 2,
	NVME_CSTS_SHST_OCCUR = 1 << 2,
	NVME_CSTS_SHST_CMPLT = 2 << 2,
	NVME_CSTS_SHST_MASK = 3 << 2,
};

enum {
	NVME_CMBMSC_CRE = 1 << 0,
	NVME_CMBMSC_CMSE = 1 << 1,
};

enum {
	NVME_CAP_CSS_NVM = 1 << 0,
	NVME_CAP_CSS_CSI = 1 << 6,
};

enum {
	NVME_CAP_CRMS_CRWMS = 1ULL << 59,
	NVME_CAP_CRMS_CRIMS = 1ULL << 60,
};

struct nvme_id_power_state {
	__le16 max_power;	/* centiwatts */
	__u8 rsvd2;
	__u8 flags;
	__le32 entry_lat;	/* microseconds */
	__le32 exit_lat;	/* microseconds */
	__u8 read_tput;
	__u8 read_lat;
	__u8 write_tput;
	__u8 write_lat;
	__le16 idle_power;
	__u8 idle_scale;
	__u8 rsvd19;
	__le16 active_power;
	__u8 active_work_scale;
	__u8 rsvd23[9];
};

enum {
	NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0,
	NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
};

enum nvme_ctrl_attr {
	NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
	NVME_CTRL_ATTR_TBKAS = (1 << 6),
	NVME_CTRL_ATTR_ELBAS = (1 << 15),
};

struct nvme_id_ctrl {
	__le16 vid;
	__le16 ssvid;
	char sn[20];
	char mn[40];
	char fr[8];
	__u8 rab;
	__u8 ieee[3];
	__u8 cmic;
	__u8 mdts;
	__le16 cntlid;
	__le32 ver;
	__le32 rtd3r;
	__le32 rtd3e;
	__le32 oaes;
	__le32 ctratt;
	__u8 rsvd100[11];
	__u8 cntrltype;
	__u8 fguid[16];
	__le16 crdt1;
	__le16 crdt2;
	__le16 crdt3;
	__u8 rsvd134[122];
	__le16 oacs;
	__u8 acl;
	__u8 aerl;
	__u8 frmw;
	__u8 lpa;
	__u8 elpe;
	__u8 npss;
	__u8 avscc;
	__u8 apsta;
	__le16 wctemp;
	__le16 cctemp;
	__le16 mtfa;
	__le32 hmpre;
	__le32 hmmin;
	__u8 tnvmcap[16];
	__u8 unvmcap[16];
	__le32 rpmbs;
	__le16 edstt;
	__u8 dsto;
	__u8 fwug;
	__le16 kas;
	__le16 hctma;
	__le16 mntmt;
	__le16 mxtmt;
	__le32 sanicap;
	__le32 hmminds;
	__le16 hmmaxd;
	__le16 nvmsetidmax;
	__le16 endgidmax;
	__u8 anatt;
	__u8 anacap;
	__le32 anagrpmax;
	__le32 nanagrpid;
	__u8 rsvd352[160];
	__u8 sqes;
	__u8 cqes;
	__le16 maxcmd;
	__le32 nn;
	__le16 oncs;
	__le16 fuses;
	__u8 fna;
	__u8 vwc;
	__le16 awun;
	__le16 awupf;
	__u8 nvscc;
	__u8 nwpc;
	__le16 acwu;
	__u8 rsvd534[2];
	__le32 sgls;
	__le32 mnan;
	__u8 rsvd544[224];
	char subnqn[256];
	__u8 rsvd1024[768];
	__le32 ioccsz;
	__le32 iorcsz;
	__le16 icdoff;
	__u8 ctrattr;
	__u8 msdbd;
	__u8 rsvd1804[2];
	__u8 dctype;
	__u8 rsvd1807[241];
	struct nvme_id_power_state psd[32];
	__u8 vs[1024];
};

enum {
	NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
	NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
	NVME_CTRL_CMIC_ANA = 1 << 3,
	NVME_CTRL_ONCS_COMPARE = 1 << 0,
	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
	NVME_CTRL_ONCS_DSM = 1 << 2,
	NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
	NVME_CTRL_ONCS_RESERVATIONS = 1 << 5,
	NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
	NVME_CTRL_VWC_PRESENT = 1 << 0,
	NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
	NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
	NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
	NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
	NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
	NVME_CTRL_CTRATT_128_ID = 1 << 0,
	NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
	NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
	NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
	NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
	NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
	NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
	NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
};

struct nvme_lbaf {
	__le16 ms;
	__u8 ds;
	__u8 rp;
};

struct nvme_id_ns {
	__le64 nsze;
	__le64 ncap;
	__le64 nuse;
	__u8 nsfeat;
	__u8 nlbaf;
	__u8 flbas;
	__u8 mc;
	__u8 dpc;
	__u8 dps;
	__u8 nmic;
	__u8 rescap;
	__u8 fpi;
	__u8 dlfeat;
	__le16 nawun;
	__le16 nawupf;
	__le16 nacwu;
	__le16 nabsn;
	__le16 nabo;
	__le16 nabspf;
	__le16 noiob;
	__u8 nvmcap[16];
	__le16 npwg;
	__le16 npwa;
	__le16 npdg;
	__le16 npda;
	__le16 nows;
	__u8 rsvd74[18];
	__le32 anagrpid;
	__u8 rsvd96[3];
	__u8 nsattr;
	__le16 nvmsetid;
	__le16 endgid;
	__u8 nguid[16];
	__u8 eui64[8];
	struct nvme_lbaf lbaf[64];
	__u8 vs[3712];
};

/* I/O Command Set Independent Identify Namespace Data Structure */
struct nvme_id_ns_cs_indep {
	__u8 nsfeat;
	__u8 nmic;
	__u8 rescap;
	__u8 fpi;
	__le32 anagrpid;
	__u8 nsattr;
	__u8 rsvd9;
	__le16 nvmsetid;
	__le16 endgid;
	__u8 nstat;
	__u8 rsvd15[4081];
};

struct nvme_zns_lbafe {
	__le64 zsze;
	__u8 zdes;
	__u8 rsvd9[7];
};

struct nvme_id_ns_zns {
	__le16 zoc;
	__le16 ozcs;
	__le32 mar;
	__le32 mor;
	__le32 rrl;
	__le32 frl;
	__u8 rsvd20[2796];
	struct nvme_zns_lbafe lbafe[64];
	__u8 vs[256];
};

struct nvme_id_ctrl_zns {
	__u8 zasl;
	__u8 rsvd1[4095];
};

struct nvme_id_ns_nvm {
	__le64 lbstm;
	__u8 pic;
	__u8 rsvd9[3];
	__le32 elbaf[64];
	__u8 rsvd268[3828];
};

enum {
	NVME_ID_NS_NVM_STS_MASK = 0x7f,
	NVME_ID_NS_NVM_GUARD_SHIFT = 7,
	NVME_ID_NS_NVM_GUARD_MASK = 0x3,
	NVME_ID_NS_NVM_QPIF_SHIFT = 9,
	NVME_ID_NS_NVM_QPIF_MASK = 0xf,
	NVME_ID_NS_NVM_QPIFS = 1 << 3,
};

static inline __u8 nvme_elbaf_sts(__u32 elbaf)
{
	return elbaf & NVME_ID_NS_NVM_STS_MASK;
}

static inline __u8 nvme_elbaf_guard_type(__u32 elbaf)
{
	return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK;
}

static inline __u8 nvme_elbaf_qualified_guard_type(__u32 elbaf)
{
	return (elbaf >> NVME_ID_NS_NVM_QPIF_SHIFT) & NVME_ID_NS_NVM_QPIF_MASK;
}

struct nvme_id_ctrl_nvm {
	__u8 vsl;
	__u8 wzsl;
	__u8 wusl;
	__u8 dmrl;
	__le32 dmrsl;
	__le64 dmsl;
	__u8 rsvd16[4080];
};
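/*
 * Illustrative sketch (not part of this header): splitting one Extended LBA
 * Format descriptor from the NVM command set Identify Namespace data into
 * its storage tag size and protection information guard type fields.  The
 * function name is hypothetical; le32_to_cpu() is the usual kernel
 * byte-order helper.
 */
static inline void nvme_elbaf_decode_example(const struct nvme_id_ns_nvm *nvm,
					     unsigned int lbaf, __u8 *sts,
					     __u8 *guard_type)
{
	__u32 elbaf = le32_to_cpu(nvm->elbaf[lbaf]);

	*sts = nvme_elbaf_sts(elbaf);			/* storage tag size */
	*guard_type = nvme_elbaf_guard_type(elbaf);	/* 16/32/64-bit guard */
}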
enum {
	NVME_ID_CNS_NS = 0x00,
	NVME_ID_CNS_CTRL = 0x01,
	NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
	NVME_ID_CNS_NS_DESC_LIST = 0x03,
	NVME_ID_CNS_CS_NS = 0x05,
	NVME_ID_CNS_CS_CTRL = 0x06,
	NVME_ID_CNS_NS_ACTIVE_LIST_CS = 0x07,
	NVME_ID_CNS_NS_CS_INDEP = 0x08,
	NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
	NVME_ID_CNS_NS_PRESENT = 0x11,
	NVME_ID_CNS_CTRL_NS_LIST = 0x12,
	NVME_ID_CNS_CTRL_LIST = 0x13,
	NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
	NVME_ID_CNS_NS_GRANULARITY = 0x16,
	NVME_ID_CNS_UUID_LIST = 0x17,
	NVME_ID_CNS_ENDGRP_LIST = 0x19,
};

enum {
	NVME_CSI_NVM = 0,
	NVME_CSI_ZNS = 2,
};

enum {
	NVME_DIR_IDENTIFY = 0x00,
	NVME_DIR_STREAMS = 0x01,
	NVME_DIR_SND_ID_OP_ENABLE = 0x01,
	NVME_DIR_SND_ST_OP_REL_ID = 0x01,
	NVME_DIR_SND_ST_OP_REL_RSC = 0x02,
	NVME_DIR_RCV_ID_OP_PARAM = 0x01,
	NVME_DIR_RCV_ST_OP_PARAM = 0x01,
	NVME_DIR_RCV_ST_OP_STATUS = 0x02,
	NVME_DIR_RCV_ST_OP_RESOURCE = 0x03,
	NVME_DIR_ENDIR = 0x01,
};

enum {
	NVME_NS_FEAT_THIN = 1 << 0,
	NVME_NS_FEAT_ATOMICS = 1 << 1,
	NVME_NS_FEAT_IO_OPT = 1 << 4,
	NVME_NS_ATTR_RO = 1 << 0,
	NVME_NS_FLBAS_LBA_MASK = 0xf,
	NVME_NS_FLBAS_LBA_UMASK = 0x60,
	NVME_NS_FLBAS_LBA_SHIFT = 1,
	NVME_NS_FLBAS_META_EXT = 0x10,
	NVME_NS_NMIC_SHARED = 1 << 0,
	NVME_NS_ROTATIONAL = 1 << 4,
	NVME_NS_VWC_NOT_PRESENT = 1 << 5,
	NVME_LBAF_RP_BEST = 0,
	NVME_LBAF_RP_BETTER = 1,
	NVME_LBAF_RP_GOOD = 2,
	NVME_LBAF_RP_DEGRADED = 3,
	NVME_NS_DPC_PI_LAST = 1 << 4,
	NVME_NS_DPC_PI_FIRST = 1 << 3,
	NVME_NS_DPC_PI_TYPE3 = 1 << 2,
	NVME_NS_DPC_PI_TYPE2 = 1 << 1,
	NVME_NS_DPC_PI_TYPE1 = 1 << 0,
	NVME_NS_DPS_PI_FIRST = 1 << 3,
	NVME_NS_DPS_PI_MASK = 0x7,
	NVME_NS_DPS_PI_TYPE1 = 1,
	NVME_NS_DPS_PI_TYPE2 = 2,
	NVME_NS_DPS_PI_TYPE3 = 3,
};

enum {
	NVME_NSTAT_NRDY = 1 << 0,
};

enum {
	NVME_NVM_NS_16B_GUARD = 0,
	NVME_NVM_NS_32B_GUARD = 1,
	NVME_NVM_NS_64B_GUARD = 2,
	NVME_NVM_NS_QTYPE_GUARD = 3,
};

static inline __u8 nvme_lbaf_index(__u8 flbas)
{
	return (flbas & NVME_NS_FLBAS_LBA_MASK) |
		((flbas & NVME_NS_FLBAS_LBA_UMASK) >> NVME_NS_FLBAS_LBA_SHIFT);
}

/* Identify Namespace Metadata Capabilities (MC): */
enum {
	NVME_MC_EXTENDED_LBA = (1 << 0),
	NVME_MC_METADATA_PTR = (1 << 1),
};

struct nvme_ns_id_desc {
	__u8 nidt;
	__u8 nidl;
	__le16 reserved;
};

#define NVME_NIDT_EUI64_LEN	8
#define NVME_NIDT_NGUID_LEN	16
#define NVME_NIDT_UUID_LEN	16
#define NVME_NIDT_CSI_LEN	1

enum {
	NVME_NIDT_EUI64 = 0x01,
	NVME_NIDT_NGUID = 0x02,
	NVME_NIDT_UUID = 0x03,
	NVME_NIDT_CSI = 0x04,
};

struct nvme_endurance_group_log {
	__u8 egcw;
	__u8 egfeat;
	__u8 rsvd2;
	__u8 avsp;
	__u8 avspt;
	__u8 pused;
	__le16 did;
	__u8 rsvd8[24];
	__u8 ee[16];
	__u8 dur[16];
	__u8 duw[16];
	__u8 muw[16];
	__u8 hrc[16];
	__u8 hwc[16];
	__u8 mdie[16];
	__u8 neile[16];
	__u8 tegcap[16];
	__u8 uegcap[16];
	__u8 rsvd192[320];
};

struct nvme_rotational_media_log {
	__le16 endgid;
	__le16 numa;
	__le16 nrs;
	__u8 rsvd6[2];
	__le32 spinc;
	__le32 fspinc;
	__le32 ldc;
	__le32 fldc;
	__u8 rsvd24[488];
};

struct nvme_smart_log {
	__u8 critical_warning;
	__u8 temperature[2];
	__u8 avail_spare;
	__u8 spare_thresh;
	__u8 percent_used;
	__u8 endu_grp_crit_warn_sumry;
	__u8 rsvd7[25];
	__u8 data_units_read[16];
	__u8 data_units_written[16];
	__u8 host_reads[16];
	__u8 host_writes[16];
	__u8 ctrl_busy_time[16];
	__u8 power_cycles[16];
	__u8 power_on_hours[16];
	__u8 unsafe_shutdowns[16];
	__u8 media_errors[16];
	__u8 num_err_log_entries[16];
	__le32 warning_temp_time;
	__le32 critical_comp_time;
	__le16 temp_sensor[8];
	__le32 thm_temp1_trans_count;
	__le32 thm_temp2_trans_count;
	__le32 thm_temp1_total_time;
	__le32 thm_temp2_total_time;
	__u8 rsvd232[280];
};

struct nvme_fw_slot_info_log {
	__u8 afi;
	__u8 rsvd1[7];
	__le64 frs[7];
	__u8 rsvd64[448];
};

enum {
	NVME_CMD_EFFECTS_CSUPP = 1 << 0,
	NVME_CMD_EFFECTS_LBCC = 1 << 1,
	NVME_CMD_EFFECTS_NCC = 1 << 2,
	NVME_CMD_EFFECTS_NIC = 1 << 3,
	NVME_CMD_EFFECTS_CCC = 1 << 4,
	NVME_CMD_EFFECTS_CSER_MASK = GENMASK(15, 14),
	NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
	NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
	NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
};

struct nvme_effects_log {
	__le32 acs[256];
	__le32 iocs[256];
	__u8 resv[2048];
};

enum nvme_ana_state {
	NVME_ANA_OPTIMIZED = 0x01,
	NVME_ANA_NONOPTIMIZED = 0x02,
	NVME_ANA_INACCESSIBLE = 0x03,
	NVME_ANA_PERSISTENT_LOSS = 0x04,
	NVME_ANA_CHANGE = 0x0f,
};

struct nvme_ana_group_desc {
	__le32 grpid;
	__le32 nnsids;
	__le64 chgcnt;
	__u8 state;
	__u8 rsvd17[15];
	__le32 nsids[];
};

/* flag for the log specific field of the ANA log */
#define NVME_ANA_LOG_RGO	(1 << 0)

struct nvme_ana_rsp_hdr {
	__le64 chgcnt;
	__le16 ngrps;
	__le16 rsvd10[3];
};

struct nvme_zone_descriptor {
	__u8 zt;
	__u8 zs;
	__u8 za;
	__u8 rsvd3[5];
	__le64 zcap;
	__le64 zslba;
	__le64 wp;
	__u8 rsvd32[32];
};

enum {
	NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2,
};

struct nvme_zone_report {
	__le64 nr_zones;
	__u8 resv8[56];
	struct nvme_zone_descriptor entries[];
};

enum {
	NVME_SMART_CRIT_SPARE = 1 << 0,
	NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
	NVME_SMART_CRIT_RELIABILITY = 1 << 2,
	NVME_SMART_CRIT_MEDIA = 1 << 3,
	NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
};

enum {
	NVME_AER_ERROR = 0,
	NVME_AER_SMART = 1,
	NVME_AER_NOTICE = 2,
	NVME_AER_CSS = 6,
	NVME_AER_VS = 7,
};

enum {
	NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
};

enum {
	NVME_AER_NOTICE_NS_CHANGED = 0x00,
	NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
	NVME_AER_NOTICE_ANA = 0x03,
	NVME_AER_NOTICE_DISC_CHANGED = 0xf0,
};

enum {
	NVME_AEN_BIT_NS_ATTR = 8,
	NVME_AEN_BIT_FW_ACT = 9,
	NVME_AEN_BIT_ANA_CHANGE = 11,
	NVME_AEN_BIT_DISC_CHANGE = 31,
};

enum {
	NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR,
	NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT,
	NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE,
	NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE,
};

struct nvme_lba_range_type {
	__u8 type;
	__u8 attributes;
	__u8 rsvd2[14];
	__le64 slba;
	__le64 nlb;
	__u8 guid[16];
	__u8 rsvd48[16];
};

enum {
	NVME_LBART_TYPE_FS = 0x01,
	NVME_LBART_TYPE_RAID = 0x02,
	NVME_LBART_TYPE_CACHE = 0x03,
	NVME_LBART_TYPE_SWAP = 0x04,

	NVME_LBART_ATTRIB_TEMP = 1 << 0,
	NVME_LBART_ATTRIB_HIDE = 1 << 1,
};
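/*
 * Illustrative sketch (not part of this header): checking whether an I/O
 * command opcode is advertised as supported in a Commands Supported and
 * Effects log page the host has already fetched (see struct nvme_effects_log
 * above).  The function name is hypothetical.
 */
static inline bool nvme_cmd_supported_example(const struct nvme_effects_log *log,
					      __u8 opcode)
{
	/* iocs[] is indexed by I/O opcode, acs[] by admin opcode */
	return le32_to_cpu(log->iocs[opcode]) & NVME_CMD_EFFECTS_CSUPP;
}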
enum nvme_pr_type {
	NVME_PR_WRITE_EXCLUSIVE = 1,
	NVME_PR_EXCLUSIVE_ACCESS = 2,
	NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3,
	NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4,
	NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5,
	NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6,
};

enum nvme_eds {
	NVME_EXTENDED_DATA_STRUCT = 0x1,
};

struct nvme_registered_ctrl {
	__le16 cntlid;
	__u8 rcsts;
	__u8 rsvd3[5];
	__le64 hostid;
	__le64 rkey;
};

struct nvme_reservation_status {
	__le32 gen;
	__u8 rtype;
	__u8 regctl[2];
	__u8 resv5[2];
	__u8 ptpls;
	__u8 resv10[14];
	struct nvme_registered_ctrl regctl_ds[];
};

struct nvme_registered_ctrl_ext {
	__le16 cntlid;
	__u8 rcsts;
	__u8 rsvd3[5];
	__le64 rkey;
	__u8 hostid[16];
	__u8 rsvd32[32];
};

struct nvme_reservation_status_ext {
	__le32 gen;
	__u8 rtype;
	__u8 regctl[2];
	__u8 resv5[2];
	__u8 ptpls;
	__u8 resv10[14];
	__u8 rsvd24[40];
	struct nvme_registered_ctrl_ext regctl_eds[];
};

/* I/O commands */

enum nvme_opcode {
	nvme_cmd_flush = 0x00,
	nvme_cmd_write = 0x01,
	nvme_cmd_read = 0x02,
	nvme_cmd_write_uncor = 0x04,
	nvme_cmd_compare = 0x05,
	nvme_cmd_write_zeroes = 0x08,
	nvme_cmd_dsm = 0x09,
	nvme_cmd_verify = 0x0c,
	nvme_cmd_resv_register = 0x0d,
	nvme_cmd_resv_report = 0x0e,
	nvme_cmd_resv_acquire = 0x11,
	nvme_cmd_resv_release = 0x15,
	nvme_cmd_zone_mgmt_send = 0x79,
	nvme_cmd_zone_mgmt_recv = 0x7a,
	nvme_cmd_zone_append = 0x7d,
	nvme_cmd_vendor_start = 0x80,
};

#define nvme_opcode_name(opcode) { opcode, #opcode }
#define show_nvm_opcode_name(val)				\
	__print_symbolic(val,					\
		nvme_opcode_name(nvme_cmd_flush),		\
		nvme_opcode_name(nvme_cmd_write),		\
		nvme_opcode_name(nvme_cmd_read),		\
		nvme_opcode_name(nvme_cmd_write_uncor),		\
		nvme_opcode_name(nvme_cmd_compare),		\
		nvme_opcode_name(nvme_cmd_write_zeroes),	\
		nvme_opcode_name(nvme_cmd_dsm),			\
		nvme_opcode_name(nvme_cmd_verify),		\
		nvme_opcode_name(nvme_cmd_resv_register),	\
		nvme_opcode_name(nvme_cmd_resv_report),		\
		nvme_opcode_name(nvme_cmd_resv_acquire),	\
		nvme_opcode_name(nvme_cmd_resv_release),	\
		nvme_opcode_name(nvme_cmd_zone_mgmt_send),	\
		nvme_opcode_name(nvme_cmd_zone_mgmt_recv),	\
		nvme_opcode_name(nvme_cmd_zone_append))

/*
 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * @NVME_SGL_FMT_ADDRESS:     absolute address of the data block
 * @NVME_SGL_FMT_OFFSET:      relative offset of the in-capsule data block
 * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
 * @NVME_SGL_FMT_INVALIDATE:  RDMA transport specific remote invalidation
 *                            request subtype
 */
enum {
	NVME_SGL_FMT_ADDRESS = 0x00,
	NVME_SGL_FMT_OFFSET = 0x01,
	NVME_SGL_FMT_TRANSPORT_A = 0x0A,
	NVME_SGL_FMT_INVALIDATE = 0x0f,
};

/*
 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * For struct nvme_sgl_desc:
 *   @NVME_SGL_FMT_DATA_DESC:     data block descriptor
 *   @NVME_SGL_FMT_SEG_DESC:      sgl segment descriptor
 *   @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor
 *
 * For struct nvme_keyed_sgl_desc:
 *   @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
 *
 * Transport-specific SGL types:
 *   @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data block descriptor
 */
enum {
	NVME_SGL_FMT_DATA_DESC = 0x00,
	NVME_SGL_FMT_SEG_DESC = 0x02,
	NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
	NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
	NVME_TRANSPORT_SGL_DATA_DESC = 0x05,
};

struct nvme_sgl_desc {
	__le64 addr;
	__le32 length;
	__u8 rsvd[3];
	__u8 type;
};

struct nvme_keyed_sgl_desc {
	__le64 addr;
	__u8 length[3];
	__u8 key[4];
	__u8 type;
};

union nvme_data_ptr {
	struct {
		__le64 prp1;
		__le64 prp2;
	};
	struct nvme_sgl_desc sgl;
	struct nvme_keyed_sgl_desc ksgl;
};

/*
 * Lowest two bits of our flags field (FUSE field in the spec):
 *
 * @NVME_CMD_FUSE_FIRST:  Fused Operation, first command
 * @NVME_CMD_FUSE_SECOND: Fused Operation, second command
 *
 * Highest two bits in our flags field (PSDT field in the spec):
 *
 * @NVME_CMD_PSDT_SGL_METABUF: Use SGLs for this transfer.
 *	If used, MPTR contains the address of a single physical buffer
 *	(byte aligned).
 * @NVME_CMD_PSDT_SGL_METASEG: Use SGLs for this transfer.
 *	If used, MPTR contains the address of an SGL segment containing
 *	exactly 1 SGL descriptor (qword aligned).
 */
enum {
	NVME_CMD_FUSE_FIRST = (1 << 0),
	NVME_CMD_FUSE_SECOND = (1 << 1),

	NVME_CMD_SGL_METABUF = (1 << 6),
	NVME_CMD_SGL_METASEG = (1 << 7),
	NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
};

struct nvme_common_command {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__le32 cdw2[2];
	__le64 metadata;
	union nvme_data_ptr dptr;
	struct_group(cdws,
		__le32 cdw10;
		__le32 cdw11;
		__le32 cdw12;
		__le32 cdw13;
		__le32 cdw14;
		__le32 cdw15;
	);
};

struct nvme_rw_command {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__le32 cdw2;
	__le32 cdw3;
	__le64 metadata;
	union nvme_data_ptr dptr;
	__le64 slba;
	__le16 length;
	__le16 control;
	__le32 dsmgmt;
	__le32 reftag;
	__le16 lbat;
	__le16 lbatm;
};

enum {
	NVME_RW_LR = 1 << 15,
	NVME_RW_FUA = 1 << 14,
	NVME_RW_APPEND_PIREMAP = 1 << 9,
	NVME_RW_DSM_FREQ_UNSPEC = 0,
	NVME_RW_DSM_FREQ_TYPICAL = 1,
	NVME_RW_DSM_FREQ_RARE = 2,
	NVME_RW_DSM_FREQ_READS = 3,
	NVME_RW_DSM_FREQ_WRITES = 4,
	NVME_RW_DSM_FREQ_RW = 5,
	NVME_RW_DSM_FREQ_ONCE = 6,
	NVME_RW_DSM_FREQ_PREFETCH = 7,
	NVME_RW_DSM_FREQ_TEMP = 8,
	NVME_RW_DSM_LATENCY_NONE = 0 << 4,
	NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
	NVME_RW_DSM_LATENCY_NORM = 2 << 4,
	NVME_RW_DSM_LATENCY_LOW = 3 << 4,
	NVME_RW_DSM_SEQ_REQ = 1 << 6,
	NVME_RW_DSM_COMPRESSED = 1 << 7,
	NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
	NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
	NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
	NVME_RW_PRINFO_PRACT = 1 << 13,
	NVME_RW_DTYPE_STREAMS = 1 << 4,
	NVME_WZ_DEAC = 1 << 9,
};

struct nvme_dsm_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[2];
	union nvme_data_ptr dptr;
	__le32 nr;
	__le32 attributes;
	__u32 rsvd12[4];
};

enum {
	NVME_DSMGMT_IDR = 1 << 0,
	NVME_DSMGMT_IDW = 1 << 1,
	NVME_DSMGMT_AD = 1 << 2,
};

#define NVME_DSM_MAX_RANGES	256

struct nvme_dsm_range {
	__le32 cattr;
	__le32 nlb;
	__le64 slba;
};

struct nvme_write_zeroes_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2;
	__le64 metadata;
	union nvme_data_ptr dptr;
	__le64 slba;
	__le16 length;
	__le16 control;
	__le32 dsmgmt;
	__le32 reftag;
	__le16 lbat;
	__le16 lbatm;
};
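/*
 * Illustrative sketch (not part of this header): filling in a single-range
 * Dataset Management command that asks the controller to deallocate (trim)
 * 'nlb' blocks starting at 'slba'.  The function name is hypothetical and
 * the range descriptor would normally live in a DMA-able buffer referenced
 * by the command's data pointer.
 */
static inline void nvme_setup_discard_example(struct nvme_dsm_cmd *cmd,
					      struct nvme_dsm_range *range,
					      __u32 nsid, __u64 slba, __u32 nlb)
{
	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nlb);
	range->slba = cpu_to_le64(slba);

	cmd->opcode = nvme_cmd_dsm;
	cmd->nsid = cpu_to_le32(nsid);
	cmd->nr = cpu_to_le32(0);			/* zero-based range count */
	cmd->attributes = cpu_to_le32(NVME_DSMGMT_AD);	/* deallocate */
}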
enum nvme_zone_mgmt_action {
	NVME_ZONE_CLOSE = 0x1,
	NVME_ZONE_FINISH = 0x2,
	NVME_ZONE_OPEN = 0x3,
	NVME_ZONE_RESET = 0x4,
	NVME_ZONE_OFFLINE = 0x5,
	NVME_ZONE_SET_DESC_EXT = 0x10,
};

struct nvme_zone_mgmt_send_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__le32 cdw2[2];
	__le64 metadata;
	union nvme_data_ptr dptr;
	__le64 slba;
	__le32 cdw12;
	__u8 zsa;
	__u8 select_all;
	__u8 rsvd13[2];
	__le32 cdw14[2];
};

struct nvme_zone_mgmt_recv_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__le64 rsvd2[2];
	union nvme_data_ptr dptr;
	__le64 slba;
	__le32 numd;
	__u8 zra;
	__u8 zrasf;
	__u8 pr;
	__u8 rsvd13;
	__le32 cdw14[2];
};

enum {
	NVME_ZRA_ZONE_REPORT = 0,
	NVME_ZRASF_ZONE_REPORT_ALL = 0,
	NVME_ZRASF_ZONE_STATE_EMPTY = 0x01,
	NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02,
	NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03,
	NVME_ZRASF_ZONE_STATE_CLOSED = 0x04,
	NVME_ZRASF_ZONE_STATE_READONLY = 0x05,
	NVME_ZRASF_ZONE_STATE_FULL = 0x06,
	NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07,
	NVME_REPORT_ZONE_PARTIAL = 1,
};

/* Features */

enum {
	NVME_TEMP_THRESH_MASK = 0xffff,
	NVME_TEMP_THRESH_SELECT_SHIFT = 16,
	NVME_TEMP_THRESH_TYPE_UNDER = 0x100000,
};

struct nvme_feat_auto_pst {
	__le64 entries[32];
};

enum {
	NVME_HOST_MEM_ENABLE = (1 << 0),
	NVME_HOST_MEM_RETURN = (1 << 1),
};

struct nvme_feat_host_behavior {
	__u8 acre;
	__u8 etdas;
	__u8 lbafee;
	__u8 resv1[509];
};

enum {
	NVME_ENABLE_ACRE = 1,
	NVME_ENABLE_LBAFEE = 1,
};

/* Admin commands */

enum nvme_admin_opcode {
	nvme_admin_delete_sq = 0x00,
	nvme_admin_create_sq = 0x01,
	nvme_admin_get_log_page = 0x02,
	nvme_admin_delete_cq = 0x04,
	nvme_admin_create_cq = 0x05,
	nvme_admin_identify = 0x06,
	nvme_admin_abort_cmd = 0x08,
	nvme_admin_set_features = 0x09,
	nvme_admin_get_features = 0x0a,
	nvme_admin_async_event = 0x0c,
	nvme_admin_ns_mgmt = 0x0d,
	nvme_admin_activate_fw = 0x10,
	nvme_admin_download_fw = 0x11,
	nvme_admin_dev_self_test = 0x14,
	nvme_admin_ns_attach = 0x15,
	nvme_admin_keep_alive = 0x18,
	nvme_admin_directive_send = 0x19,
	nvme_admin_directive_recv = 0x1a,
	nvme_admin_virtual_mgmt = 0x1c,
	nvme_admin_nvme_mi_send = 0x1d,
	nvme_admin_nvme_mi_recv = 0x1e,
	nvme_admin_dbbuf = 0x7C,
	nvme_admin_format_nvm = 0x80,
	nvme_admin_security_send = 0x81,
	nvme_admin_security_recv = 0x82,
	nvme_admin_sanitize_nvm = 0x84,
	nvme_admin_get_lba_status = 0x86,
	nvme_admin_vendor_start = 0xC0,
};

#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
#define show_admin_opcode_name(val)					\
	__print_symbolic(val,						\
		nvme_admin_opcode_name(nvme_admin_delete_sq),		\
		nvme_admin_opcode_name(nvme_admin_create_sq),		\
		nvme_admin_opcode_name(nvme_admin_get_log_page),	\
		nvme_admin_opcode_name(nvme_admin_delete_cq),		\
		nvme_admin_opcode_name(nvme_admin_create_cq),		\
		nvme_admin_opcode_name(nvme_admin_identify),		\
		nvme_admin_opcode_name(nvme_admin_abort_cmd),		\
		nvme_admin_opcode_name(nvme_admin_set_features),	\
		nvme_admin_opcode_name(nvme_admin_get_features),	\
		nvme_admin_opcode_name(nvme_admin_async_event),		\
		nvme_admin_opcode_name(nvme_admin_ns_mgmt),		\
		nvme_admin_opcode_name(nvme_admin_activate_fw),		\
		nvme_admin_opcode_name(nvme_admin_download_fw),		\
		nvme_admin_opcode_name(nvme_admin_dev_self_test),	\
		nvme_admin_opcode_name(nvme_admin_ns_attach),		\
		nvme_admin_opcode_name(nvme_admin_keep_alive),		\
		nvme_admin_opcode_name(nvme_admin_directive_send),	\
		nvme_admin_opcode_name(nvme_admin_directive_recv),	\
		nvme_admin_opcode_name(nvme_admin_virtual_mgmt),	\
		nvme_admin_opcode_name(nvme_admin_nvme_mi_send),	\
		nvme_admin_opcode_name(nvme_admin_nvme_mi_recv),	\
		nvme_admin_opcode_name(nvme_admin_dbbuf),		\
		nvme_admin_opcode_name(nvme_admin_format_nvm),		\
		nvme_admin_opcode_name(nvme_admin_security_send),	\
		nvme_admin_opcode_name(nvme_admin_security_recv),	\
		nvme_admin_opcode_name(nvme_admin_sanitize_nvm),	\
		nvme_admin_opcode_name(nvme_admin_get_lba_status))

enum {
	NVME_QUEUE_PHYS_CONTIG = (1 << 0),
	NVME_CQ_IRQ_ENABLED = (1 << 1),
	NVME_SQ_PRIO_URGENT = (0 << 1),
	NVME_SQ_PRIO_HIGH = (1 << 1),
	NVME_SQ_PRIO_MEDIUM = (2 << 1),
	NVME_SQ_PRIO_LOW = (3 << 1),
	NVME_FEAT_ARBITRATION = 0x01,
	NVME_FEAT_POWER_MGMT = 0x02,
	NVME_FEAT_LBA_RANGE = 0x03,
	NVME_FEAT_TEMP_THRESH = 0x04,
	NVME_FEAT_ERR_RECOVERY = 0x05,
	NVME_FEAT_VOLATILE_WC = 0x06,
	NVME_FEAT_NUM_QUEUES = 0x07,
	NVME_FEAT_IRQ_COALESCE = 0x08,
	NVME_FEAT_IRQ_CONFIG = 0x09,
	NVME_FEAT_WRITE_ATOMIC = 0x0a,
	NVME_FEAT_ASYNC_EVENT = 0x0b,
	NVME_FEAT_AUTO_PST = 0x0c,
	NVME_FEAT_HOST_MEM_BUF = 0x0d,
	NVME_FEAT_TIMESTAMP = 0x0e,
	NVME_FEAT_KATO = 0x0f,
	NVME_FEAT_HCTM = 0x10,
	NVME_FEAT_NOPSC = 0x11,
	NVME_FEAT_RRL = 0x12,
	NVME_FEAT_PLM_CONFIG = 0x13,
	NVME_FEAT_PLM_WINDOW = 0x14,
	NVME_FEAT_HOST_BEHAVIOR = 0x16,
	NVME_FEAT_SANITIZE = 0x17,
	NVME_FEAT_SW_PROGRESS = 0x80,
	NVME_FEAT_HOST_ID = 0x81,
	NVME_FEAT_RESV_MASK = 0x82,
	NVME_FEAT_RESV_PERSIST = 0x83,
	NVME_FEAT_WRITE_PROTECT = 0x84,
	NVME_FEAT_VENDOR_START = 0xC0,
	NVME_FEAT_VENDOR_END = 0xFF,
	NVME_LOG_SUPPORTED = 0x00,
	NVME_LOG_ERROR = 0x01,
	NVME_LOG_SMART = 0x02,
	NVME_LOG_FW_SLOT = 0x03,
	NVME_LOG_CHANGED_NS = 0x04,
	NVME_LOG_CMD_EFFECTS = 0x05,
	NVME_LOG_DEVICE_SELF_TEST = 0x06,
	NVME_LOG_TELEMETRY_HOST = 0x07,
	NVME_LOG_TELEMETRY_CTRL = 0x08,
	NVME_LOG_ENDURANCE_GROUP = 0x09,
	NVME_LOG_ANA = 0x0c,
	NVME_LOG_FEATURES = 0x12,
	NVME_LOG_RMI = 0x16,
	NVME_LOG_DISC = 0x70,
	NVME_LOG_RESERVATION = 0x80,
	NVME_FWACT_REPL = (0 << 3),
	NVME_FWACT_REPL_ACTV = (1 << 3),
	NVME_FWACT_ACTV = (2 << 3),
};

struct nvme_supported_log {
	__le32 lids[256];
};

enum {
	NVME_LIDS_LSUPP = 1 << 0,
};

struct nvme_supported_features_log {
	__le32 fis[256];
};

enum {
	NVME_FIS_FSUPP = 1 << 0,
	NVME_FIS_NSCPE = 1 << 20,
	NVME_FIS_CSCPE = 1 << 21,
};

/* NVMe Namespace Write Protect State */
enum {
	NVME_NS_NO_WRITE_PROTECT = 0,
	NVME_NS_WRITE_PROTECT,
	NVME_NS_WRITE_PROTECT_POWER_CYCLE,
	NVME_NS_WRITE_PROTECT_PERMANENT,
};

#define NVME_MAX_CHANGED_NAMESPACES	1024

struct nvme_identify {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[2];
	union nvme_data_ptr dptr;
	__u8 cns;
	__u8 rsvd3;
	__le16 ctrlid;
	__le16 cnssid;
	__u8 rsvd11;
	__u8 csi;
	__u32 rsvd12[4];
};

#define NVME_IDENTIFY_DATA_SIZE	4096

struct nvme_features {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[2];
	union nvme_data_ptr dptr;
	__le32 fid;
	__le32 dword11;
	__le32 dword12;
	__le32 dword13;
	__le32 dword14;
	__le32 dword15;
};

struct nvme_host_mem_buf_desc {
	__le64 addr;
	__le32 size;
	__u32 rsvd;
};

struct nvme_create_cq {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[5];
	__le64 prp1;
	__u64 rsvd8;
	__le16 cqid;
	__le16 qsize;
	__le16 cq_flags;
	__le16 irq_vector;
	__u32 rsvd12[4];
};

struct nvme_create_sq {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[5];
	__le64 prp1;
	__u64 rsvd8;
	__le16 sqid;
	__le16 qsize;
	__le16 sq_flags;
	__le16 cqid;
	__u32 rsvd12[4];
};

struct nvme_delete_queue {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[9];
	__le16 qid;
	__u16 rsvd10;
	__u32 rsvd11[5];
};

struct nvme_abort_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[9];
	__le16 sqid;
	__u16 cid;
	__u32 rsvd11[5];
};

struct nvme_download_firmware {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[5];
	union nvme_data_ptr dptr;
	__le32 numd;
	__le32 offset;
	__u32 rsvd12[4];
};

struct nvme_format_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[4];
	__le32 cdw10;
	__u32 rsvd11[5];
};

struct nvme_get_log_page_command {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[2];
	union nvme_data_ptr dptr;
	__u8 lid;
	__u8 lsp;	/* upper 4 bits reserved */
	__le16 numdl;
	__le16 numdu;
	__le16 lsi;
	union {
		struct {
			__le32 lpol;
			__le32 lpou;
		};
		__le64 lpo;
	};
	__u8 rsvd14[3];
	__u8 csi;
	__u32 rsvd15;
};

struct nvme_directive_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[2];
	union nvme_data_ptr dptr;
	__le32 numd;
	__u8 doper;
	__u8 dtype;
	__le16 dspec;
	__u8 endir;
	__u8 tdtype;
	__u16 rsvd15;

	__u32 rsvd16[3];
};
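/*
 * Illustrative sketch (not part of this header): preparing an Identify
 * Controller admin command.  The function name is hypothetical; the
 * NVME_IDENTIFY_DATA_SIZE byte destination buffer is described separately
 * through the data pointer by the transport.
 */
static inline struct nvme_identify nvme_identify_ctrl_example(void)
{
	struct nvme_identify cmd = {
		.opcode = nvme_admin_identify,
		.cns = NVME_ID_CNS_CTRL,	/* returns struct nvme_id_ctrl */
	};

	return cmd;
}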
/*
 * Fabrics subcommands.
 */
enum nvmf_fabrics_opcode {
	nvme_fabrics_command = 0x7f,
};

enum nvmf_capsule_command {
	nvme_fabrics_type_property_set = 0x00,
	nvme_fabrics_type_connect = 0x01,
	nvme_fabrics_type_property_get = 0x04,
	nvme_fabrics_type_auth_send = 0x05,
	nvme_fabrics_type_auth_receive = 0x06,
};

#define nvme_fabrics_type_name(type) { type, #type }
#define show_fabrics_type_name(type)					\
	__print_symbolic(type,						\
		nvme_fabrics_type_name(nvme_fabrics_type_property_set),	\
		nvme_fabrics_type_name(nvme_fabrics_type_connect),	\
		nvme_fabrics_type_name(nvme_fabrics_type_property_get),	\
		nvme_fabrics_type_name(nvme_fabrics_type_auth_send),	\
		nvme_fabrics_type_name(nvme_fabrics_type_auth_receive))

/*
 * If it is not a fabrics command, fctype will be ignored.
 */
#define show_opcode_name(qid, opcode, fctype)			\
	((opcode) == nvme_fabrics_command ?			\
	 show_fabrics_type_name(fctype) :			\
	 ((qid) ?						\
	  show_nvm_opcode_name(opcode) :			\
	  show_admin_opcode_name(opcode)))

struct nvmf_common_command {
	__u8 opcode;
	__u8 resv1;
	__u16 command_id;
	__u8 fctype;
	__u8 resv2[35];
	__u8 ts[24];
};

/*
 * The legal cntlid range an NVMe Target will provide.
 * Note that a cntlid of value 0 is considered illegal in the fabrics world.
 * Devices based on earlier specs did not have the subsystem concept;
 * therefore, those devices had their cntlid value set to 0 as a result.
 */
#define NVME_CNTLID_MIN		1
#define NVME_CNTLID_MAX		0xffef
#define NVME_CNTLID_DYNAMIC	0xffff

#define MAX_DISC_LOGS	255

/* Discovery log page entry flags (EFLAGS): */
enum {
	NVME_DISC_EFLAGS_EPCSD = (1 << 1),
	NVME_DISC_EFLAGS_DUPRETINFO = (1 << 0),
};

/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
	__u8 trtype;
	__u8 adrfam;
	__u8 subtype;
	__u8 treq;
	__le16 portid;
	__le16 cntlid;
	__le16 asqsz;
	__le16 eflags;
	__u8 resv10[20];
	char trsvcid[NVMF_TRSVCID_SIZE];
	__u8 resv64[192];
	char subnqn[NVMF_NQN_FIELD_LEN];
	char traddr[NVMF_TRADDR_SIZE];
	union tsas {
		char common[NVMF_TSAS_SIZE];
		struct rdma {
			__u8 qptype;
			__u8 prtype;
			__u8 cms;
			__u8 resv3[5];
			__u16 pkey;
			__u8 resv10[246];
		} rdma;
		struct tcp {
			__u8 sectype;
		} tcp;
	} tsas;
};

/* Discovery log page header */
struct nvmf_disc_rsp_page_hdr {
	__le64 genctr;
	__le64 numrec;
	__le16 recfmt;
	__u8 resv14[1006];
	struct nvmf_disc_rsp_page_entry entries[];
};

enum {
	NVME_CONNECT_DISABLE_SQFLOW = (1 << 2),
};

struct nvmf_connect_command {
	__u8 opcode;
	__u8 resv1;
	__u16 command_id;
	__u8 fctype;
	__u8 resv2[19];
	union nvme_data_ptr dptr;
	__le16 recfmt;
	__le16 qid;
	__le16 sqsize;
	__u8 cattr;
	__u8 resv3;
	__le32 kato;
	__u8 resv4[12];
};

enum {
	NVME_CONNECT_AUTHREQ_ASCR = (1U << 18),
	NVME_CONNECT_AUTHREQ_ATR = (1U << 17),
};

struct nvmf_connect_data {
	uuid_t hostid;
	__le16 cntlid;
	char resv4[238];
	char subsysnqn[NVMF_NQN_FIELD_LEN];
	char hostnqn[NVMF_NQN_FIELD_LEN];
	char resv5[256];
};

struct nvmf_property_set_command {
	__u8 opcode;
	__u8 resv1;
	__u16 command_id;
	__u8 fctype;
	__u8 resv2[35];
	__u8 attrib;
	__u8 resv3[3];
	__le32 offset;
	__le64 value;
	__u8 resv4[8];
};

struct nvmf_property_get_command {
	__u8 opcode;
	__u8 resv1;
	__u16 command_id;
	__u8 fctype;
	__u8 resv2[35];
	__u8 attrib;
	__u8 resv3[3];
	__le32 offset;
	__u8 resv4[16];
};

struct nvmf_auth_common_command {
	__u8 opcode;
	__u8 resv1;
	__u16 command_id;
	__u8 fctype;
	__u8 resv2[19];
	union nvme_data_ptr dptr;
	__u8 resv3;
	__u8 spsp0;
	__u8 spsp1;
	__u8 secp;
	__le32 al_tl;
	__u8 resv4[16];
};

struct nvmf_auth_send_command {
	__u8 opcode;
	__u8 resv1;
	__u16 command_id;
	__u8 fctype;
	__u8 resv2[19];
	union nvme_data_ptr dptr;
	__u8 resv3;
	__u8 spsp0;
	__u8 spsp1;
	__u8 secp;
	__le32 tl;
	__u8 resv4[16];
};

struct nvmf_auth_receive_command {
	__u8 opcode;
	__u8 resv1;
	__u16 command_id;
	__u8 fctype;
	__u8 resv2[19];
	union nvme_data_ptr dptr;
	__u8 resv3;
	__u8 spsp0;
	__u8 spsp1;
	__u8 secp;
	__le32 al;
	__u8 resv4[16];
};

/* Value for secp */
enum {
	NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER = 0xe9,
};

/* Defined value for auth_type */
enum {
	NVME_AUTH_COMMON_MESSAGES = 0x00,
	NVME_AUTH_DHCHAP_MESSAGES = 0x01,
};

/* Defined messages for auth_id */
enum {
	NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE = 0x00,
	NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE = 0x01,
	NVME_AUTH_DHCHAP_MESSAGE_REPLY = 0x02,
	NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1 = 0x03,
	NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 = 0x04,
	NVME_AUTH_DHCHAP_MESSAGE_FAILURE2 = 0xf0,
	NVME_AUTH_DHCHAP_MESSAGE_FAILURE1 = 0xf1,
};

struct nvmf_auth_dhchap_protocol_descriptor {
	__u8 authid;
	__u8 rsvd;
	__u8 halen;
	__u8 dhlen;
	__u8 idlist[60];
};

enum {
	NVME_AUTH_DHCHAP_AUTH_ID = 0x01,
};

/* Defined hash functions for DH-HMAC-CHAP authentication */
enum {
	NVME_AUTH_HASH_SHA256 = 0x01,
	NVME_AUTH_HASH_SHA384 = 0x02,
	NVME_AUTH_HASH_SHA512 = 0x03,
	NVME_AUTH_HASH_INVALID = 0xff,
};

/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
enum {
	NVME_AUTH_DHGROUP_NULL = 0x00,
	NVME_AUTH_DHGROUP_2048 = 0x01,
	NVME_AUTH_DHGROUP_3072 = 0x02,
	NVME_AUTH_DHGROUP_4096 = 0x03,
	NVME_AUTH_DHGROUP_6144 = 0x04,
	NVME_AUTH_DHGROUP_8192 = 0x05,
	NVME_AUTH_DHGROUP_INVALID = 0xff,
};

union nvmf_auth_protocol {
	struct nvmf_auth_dhchap_protocol_descriptor dhchap;
};

struct nvmf_auth_dhchap_negotiate_data {
	__u8 auth_type;
	__u8 auth_id;
	__le16 rsvd;
	__le16 t_id;
	__u8 sc_c;
	__u8 napd;
	union nvmf_auth_protocol auth_protocol[];
};

struct nvmf_auth_dhchap_challenge_data {
	__u8 auth_type;
	__u8 auth_id;
	__u16 rsvd1;
	__le16 t_id;
	__u8 hl;
	__u8 rsvd2;
	__u8 hashid;
	__u8 dhgid;
	__le16 dhvlen;
	__le32 seqnum;
	/* 'hl' bytes of challenge value */
	__u8 cval[];
	/* followed by 'dhvlen' bytes of DH value */
};

struct nvmf_auth_dhchap_reply_data {
	__u8 auth_type;
	__u8 auth_id;
	__le16 rsvd1;
	__le16 t_id;
	__u8 hl;
	__u8 rsvd2;
	__u8 cvalid;
	__u8 rsvd3;
	__le16 dhvlen;
	__le32 seqnum;
	/* 'hl' bytes of response data */
	__u8 rval[];
	/* followed by 'hl' bytes of Challenge value */
	/* followed by 'dhvlen' bytes of DH value */
};

enum {
	NVME_AUTH_DHCHAP_RESPONSE_VALID = (1 << 0),
};

struct nvmf_auth_dhchap_success1_data {
	__u8 auth_type;
	__u8 auth_id;
	__le16 rsvd1;
	__le16 t_id;
	__u8 hl;
	__u8 rsvd2;
	__u8 rvalid;
	__u8 rsvd3[7];
	/* 'hl' bytes of response value */
	__u8 rval[];
};

struct nvmf_auth_dhchap_success2_data {
	__u8 auth_type;
	__u8 auth_id;
	__le16 rsvd1;
	__le16 t_id;
	__u8 rsvd2[10];
};

struct nvmf_auth_dhchap_failure_data {
	__u8 auth_type;
	__u8 auth_id;
	__le16 rsvd1;
	__le16 t_id;
	__u8 rescode;
	__u8 rescode_exp;
};

enum {
	NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED = 0x01,
};

enum {
	NVME_AUTH_DHCHAP_FAILURE_FAILED = 0x01,
	NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE = 0x02,
	NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH = 0x03,
	NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE = 0x04,
	NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE = 0x05,
	NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD = 0x06,
	NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE = 0x07,
};

struct nvme_dbbuf {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[5];
	__le64 prp1;
	__le64 prp2;
	__u32 rsvd12[6];
};

struct streams_directive_params {
	__le16 msl;
	__le16 nssa;
	__le16 nsso;
	__u8 rsvd[10];
	__le32 sws;
	__le16 sgs;
	__le16 nsa;
	__le16 nso;
	__u8 rsvd2[6];
};

struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
		struct nvme_write_zeroes_cmd write_zeroes;
		struct nvme_zone_mgmt_send_cmd zms;
		struct nvme_zone_mgmt_recv_cmd zmr;
		struct nvme_abort_cmd abort;
		struct nvme_get_log_page_command get_log_page;
		struct nvmf_common_command fabrics;
		struct nvmf_connect_command connect;
		struct nvmf_property_set_command prop_set;
		struct nvmf_property_get_command prop_get;
		struct nvmf_auth_common_command auth_common;
		struct nvmf_auth_send_command auth_send;
		struct nvmf_auth_receive_command auth_receive;
		struct nvme_dbbuf dbbuf;
		struct nvme_directive_cmd directive;
	};
};

static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
{
	return cmd->common.opcode == nvme_fabrics_command;
}

struct nvme_error_slot {
	__le64 error_count;
	__le16 sqid;
	__le16 cmdid;
	__le16 status_field;
	__le16 param_error_location;
	__le64 lba;
	__le32 nsid;
	__u8 vs;
	__u8 resv[3];
	__le64 cs;
	__u8 resv2[24];
};

static inline bool nvme_is_write(const struct nvme_command *cmd)
{
	/*
	 * What a mess...
	 *
	 * Why can't we simply have a Fabrics In and Fabrics Out command?
	 */
	if (unlikely(nvme_is_fabrics(cmd)))
		return cmd->fabrics.fctype & 1;
	return cmd->common.opcode & 1;
}

enum {
	/*
	 * Generic Command Status:
	 */
	NVME_SCT_GENERIC = 0x0,
	NVME_SC_SUCCESS = 0x0,
	NVME_SC_INVALID_OPCODE = 0x1,
	NVME_SC_INVALID_FIELD = 0x2,
	NVME_SC_CMDID_CONFLICT = 0x3,
	NVME_SC_DATA_XFER_ERROR = 0x4,
	NVME_SC_POWER_LOSS = 0x5,
	NVME_SC_INTERNAL = 0x6,
	NVME_SC_ABORT_REQ = 0x7,
	NVME_SC_ABORT_QUEUE = 0x8,
	NVME_SC_FUSED_FAIL = 0x9,
	NVME_SC_FUSED_MISSING = 0xa,
	NVME_SC_INVALID_NS = 0xb,
	NVME_SC_CMD_SEQ_ERROR = 0xc,
	NVME_SC_SGL_INVALID_LAST = 0xd,
	NVME_SC_SGL_INVALID_COUNT = 0xe,
	NVME_SC_SGL_INVALID_DATA = 0xf,
	NVME_SC_SGL_INVALID_METADATA = 0x10,
	NVME_SC_SGL_INVALID_TYPE = 0x11,
	NVME_SC_CMB_INVALID_USE = 0x12,
	NVME_SC_PRP_INVALID_OFFSET = 0x13,
	NVME_SC_ATOMIC_WU_EXCEEDED = 0x14,
	NVME_SC_OP_DENIED = 0x15,
	NVME_SC_SGL_INVALID_OFFSET = 0x16,
	NVME_SC_RESERVED = 0x17,
	NVME_SC_HOST_ID_INCONSIST = 0x18,
	NVME_SC_KA_TIMEOUT_EXPIRED = 0x19,
	NVME_SC_KA_TIMEOUT_INVALID = 0x1A,
	NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B,
	NVME_SC_SANITIZE_FAILED = 0x1C,
	NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
	NVME_SC_SGL_INVALID_GRANULARITY = 0x1E,
	NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F,
	NVME_SC_NS_WRITE_PROTECTED = 0x20,
	NVME_SC_CMD_INTERRUPTED = 0x21,
	NVME_SC_TRANSIENT_TR_ERR = 0x22,
	NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 0x24,
	NVME_SC_INVALID_IO_CMD_SET = 0x2C,

	NVME_SC_LBA_RANGE = 0x80,
	NVME_SC_CAP_EXCEEDED = 0x81,
	NVME_SC_NS_NOT_READY = 0x82,
	NVME_SC_RESERVATION_CONFLICT = 0x83,
	NVME_SC_FORMAT_IN_PROGRESS = 0x84,

	/*
	 * Command Specific Status:
	 */
	NVME_SCT_COMMAND_SPECIFIC = 0x100,
	NVME_SC_CQ_INVALID = 0x100,
	NVME_SC_QID_INVALID = 0x101,
	NVME_SC_QUEUE_SIZE = 0x102,
	NVME_SC_ABORT_LIMIT = 0x103,
	NVME_SC_ABORT_MISSING = 0x104,
	NVME_SC_ASYNC_LIMIT = 0x105,
	NVME_SC_FIRMWARE_SLOT = 0x106,
	NVME_SC_FIRMWARE_IMAGE = 0x107,
	NVME_SC_INVALID_VECTOR = 0x108,
	NVME_SC_INVALID_LOG_PAGE = 0x109,
	NVME_SC_INVALID_FORMAT = 0x10a,
	NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
	NVME_SC_INVALID_QUEUE = 0x10c,
	NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
	NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
	NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
	NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
	NVME_SC_FW_NEEDS_RESET = 0x111,
	NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
	NVME_SC_FW_ACTIVATE_PROHIBITED = 0x113,
	NVME_SC_OVERLAPPING_RANGE = 0x114,
	NVME_SC_NS_INSUFFICIENT_CAP = 0x115,
	NVME_SC_NS_ID_UNAVAILABLE = 0x116,
	NVME_SC_NS_ALREADY_ATTACHED = 0x118,
	NVME_SC_NS_IS_PRIVATE = 0x119,
	NVME_SC_NS_NOT_ATTACHED = 0x11a,
	NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
	NVME_SC_CTRL_LIST_INVALID = 0x11c,
	NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
	NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
	NVME_SC_CTRL_ID_INVALID = 0x11f,
	NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
	NVME_SC_CTRL_RES_NUM_INVALID = 0x121,
	NVME_SC_RES_ID_INVALID = 0x122,
	NVME_SC_PMR_SAN_PROHIBITED = 0x123,
	NVME_SC_ANA_GROUP_ID_INVALID = 0x124,
	NVME_SC_ANA_ATTACH_FAILED = 0x125,

	/*
	 * I/O Command Set Specific - NVM commands:
	 */
	NVME_SC_BAD_ATTRIBUTES = 0x180,
	NVME_SC_INVALID_PI = 0x181,
	NVME_SC_READ_ONLY = 0x182,
	NVME_SC_ONCS_NOT_SUPPORTED = 0x183,

	/*
	 * I/O Command Set Specific - Fabrics commands:
	 */
	NVME_SC_CONNECT_FORMAT = 0x180,
	NVME_SC_CONNECT_CTRL_BUSY = 0x181,
	NVME_SC_CONNECT_INVALID_PARAM = 0x182,
	NVME_SC_CONNECT_RESTART_DISC = 0x183,
	NVME_SC_CONNECT_INVALID_HOST = 0x184,

	NVME_SC_DISCOVERY_RESTART = 0x190,
	NVME_SC_AUTH_REQUIRED = 0x191,

	/*
	 * I/O Command Set Specific - Zoned commands:
	 */
	NVME_SC_ZONE_BOUNDARY_ERROR = 0x1b8,
	NVME_SC_ZONE_FULL = 0x1b9,
	NVME_SC_ZONE_READ_ONLY = 0x1ba,
	NVME_SC_ZONE_OFFLINE = 0x1bb,
	NVME_SC_ZONE_INVALID_WRITE = 0x1bc,
	NVME_SC_ZONE_TOO_MANY_ACTIVE = 0x1bd,
	NVME_SC_ZONE_TOO_MANY_OPEN = 0x1be,
	NVME_SC_ZONE_INVALID_TRANSITION = 0x1bf,

	/*
	 * Media and Data Integrity Errors:
	 */
	NVME_SCT_MEDIA_ERROR = 0x200,
	NVME_SC_WRITE_FAULT = 0x280,
	NVME_SC_READ_ERROR = 0x281,
	NVME_SC_GUARD_CHECK = 0x282,
	NVME_SC_APPTAG_CHECK = 0x283,
	NVME_SC_REFTAG_CHECK = 0x284,
	NVME_SC_COMPARE_FAILED = 0x285,
	NVME_SC_ACCESS_DENIED = 0x286,
	NVME_SC_UNWRITTEN_BLOCK = 0x287,

	/*
	 * Path-related Errors:
	 */
	NVME_SCT_PATH = 0x300,
	NVME_SC_INTERNAL_PATH_ERROR = 0x300,
	NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
	NVME_SC_ANA_INACCESSIBLE = 0x302,
	NVME_SC_ANA_TRANSITION = 0x303,
	NVME_SC_CTRL_PATH_ERROR = 0x360,
	NVME_SC_HOST_PATH_ERROR = 0x370,
	NVME_SC_HOST_ABORTED_CMD = 0x371,

	NVME_SC_MASK = 0x00ff,		/* Status Code */
	NVME_SCT_MASK = 0x0700,		/* Status Code Type */
	NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK,

	NVME_STATUS_CRD = 0x1800,	/* Command Retry Delayed */
	NVME_STATUS_MORE = 0x2000,
	NVME_STATUS_DNR = 0x4000,	/* Do Not Retry */
};

#define NVME_SCT(status)	((status) >> 8 & 7)

struct nvme_completion {
	/*
	 * Used by Admin and Fabrics commands to return data:
	 */
	union nvme_result {
		__le16 u16;
		__le32 u32;
		__le64 u64;
	} result;
	__le16 sq_head;		/* how much of this queue may be reclaimed */
	__le16 sq_id;		/* submission queue that generated this entry */
	__u16 command_id;	/* of the command which completed */
	__le16 status;		/* did the command fail, and if so, why? */
};
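/*
 * Illustrative sketch (not part of this header): pulling the Status Code
 * Type and Status Code out of a completion queue entry.  Bit 0 of the
 * status field is the phase tag and is shifted away first; the remaining
 * bits line up with the NVME_SCT, NVME_SC and NVME_STATUS values above.
 * The function name is hypothetical.
 */
static inline bool nvme_cqe_failed_example(const struct nvme_completion *cqe,
					   __u16 *sct, __u16 *sc, bool *retryable)
{
	__u16 status = le16_to_cpu(cqe->status) >> 1;	/* drop the phase tag */

	*sct = NVME_SCT(status);
	*sc = status & NVME_SC_MASK;
	*retryable = !(status & NVME_STATUS_DNR);
	return (status & NVME_SCT_SC_MASK) != NVME_SC_SUCCESS;
}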
#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

#define NVME_MAJOR(ver)		((ver) >> 16)
#define NVME_MINOR(ver)		(((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver)	((ver) & 0xff)

enum {
	NVME_AEN_RESV_LOG_PAGE_AVALIABLE = 0x00,
};

enum {
	NVME_PR_LOG_EMPTY_LOG_PAGE = 0x00,
	NVME_PR_LOG_REGISTRATION_PREEMPTED = 0x01,
	NVME_PR_LOG_RESERVATION_RELEASED = 0x02,
	NVME_PR_LOG_RESERVATOIN_PREEMPTED = 0x03,
};

enum {
	NVME_PR_NOTIFY_BIT_REG_PREEMPTED = 1,
	NVME_PR_NOTIFY_BIT_RESV_RELEASED = 2,
	NVME_PR_NOTIFY_BIT_RESV_PREEMPTED = 3,
};

struct nvme_pr_log {
	__le64 count;
	__u8 type;
	__u8 nr_pages;
	__u8 rsvd1[2];
	__le32 nsid;
	__u8 rsvd2[48];
};

struct nvmet_pr_register_data {
	__le64 crkey;
	__le64 nrkey;
};

struct nvmet_pr_acquire_data {
	__le64 crkey;
	__le64 prkey;
};

struct nvmet_pr_release_data {
	__le64 crkey;
};

enum nvme_pr_capabilities {
	NVME_PR_SUPPORT_PTPL = 1,
	NVME_PR_SUPPORT_WRITE_EXCLUSIVE = 1 << 1,
	NVME_PR_SUPPORT_EXCLUSIVE_ACCESS = 1 << 2,
	NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY = 1 << 3,
	NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY = 1 << 4,
	NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS = 1 << 5,
	NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS = 1 << 6,
	NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF = 1 << 7,
};

enum nvme_pr_register_action {
	NVME_PR_REGISTER_ACT_REG = 0,
	NVME_PR_REGISTER_ACT_UNREG = 1,
	NVME_PR_REGISTER_ACT_REPLACE = 1 << 1,
};

enum nvme_pr_acquire_action {
	NVME_PR_ACQUIRE_ACT_ACQUIRE = 0,
	NVME_PR_ACQUIRE_ACT_PREEMPT = 1,
	NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 1 << 1,
};

enum nvme_pr_release_action {
	NVME_PR_RELEASE_ACT_RELEASE = 0,
	NVME_PR_RELEASE_ACT_CLEAR = 1,
};

#endif /* _LINUX_NVME_H */