/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation.
 * All rights reserved.
 */

#ifndef SPDK_NVMF_SPEC_H
#define SPDK_NVMF_SPEC_H

#include "spdk/stdinc.h"

#include "spdk/assert.h"
#include "spdk/nvme_spec.h"

/**
 * \file
 * NVMe over Fabrics specification definitions
 */

#pragma pack(push, 1)

struct spdk_nvmf_capsule_cmd {
	uint8_t opcode;
	uint8_t reserved1;
	uint16_t cid;
	uint8_t fctype;
	uint8_t reserved2[35];
	uint8_t fabric_specific[24];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_capsule_cmd) == 64, "Incorrect size");

/* Fabric Command Set */
#define SPDK_NVME_OPC_FABRIC 0x7f

enum spdk_nvmf_fabric_cmd_types {
	SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET = 0x00,
	SPDK_NVMF_FABRIC_COMMAND_CONNECT = 0x01,
	SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET = 0x04,
	SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND = 0x05,
	SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV = 0x06,
	SPDK_NVMF_FABRIC_COMMAND_START_VENDOR_SPECIFIC = 0xC0,
};

enum spdk_nvmf_fabric_cmd_status_code {
	SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT = 0x80,
	SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY = 0x81,
	SPDK_NVMF_FABRIC_SC_INVALID_PARAM = 0x82,
	SPDK_NVMF_FABRIC_SC_RESTART_DISCOVERY = 0x83,
	SPDK_NVMF_FABRIC_SC_INVALID_HOST = 0x84,
	SPDK_NVMF_FABRIC_SC_LOG_RESTART_DISCOVERY = 0x90,
	SPDK_NVMF_FABRIC_SC_AUTH_REQUIRED = 0x91,
};

/**
 * RDMA Queue Pair service types
 */
enum spdk_nvmf_rdma_qptype {
	/** Reliable connected */
	SPDK_NVMF_RDMA_QPTYPE_RELIABLE_CONNECTED = 0x1,

	/** Reliable datagram */
	SPDK_NVMF_RDMA_QPTYPE_RELIABLE_DATAGRAM = 0x2,
};

/**
 * RDMA provider types
 */
enum spdk_nvmf_rdma_prtype {
	/** No provider specified */
	SPDK_NVMF_RDMA_PRTYPE_NONE = 0x1,

	/** InfiniBand */
	SPDK_NVMF_RDMA_PRTYPE_IB = 0x2,

	/** RoCE v1 */
	SPDK_NVMF_RDMA_PRTYPE_ROCE = 0x3,

	/** RoCE v2 */
	SPDK_NVMF_RDMA_PRTYPE_ROCE2 = 0x4,

	/** iWARP */
	SPDK_NVMF_RDMA_PRTYPE_IWARP = 0x5,
};

/**
 * RDMA connection management service types
 */
enum spdk_nvmf_rdma_cms {
	/** Sockets based endpoint addressing */
	SPDK_NVMF_RDMA_CMS_RDMA_CM = 0x1,
};

/**
 * NVMe over Fabrics transport types
 */
enum spdk_nvmf_trtype {
	/** RDMA */
	SPDK_NVMF_TRTYPE_RDMA = 0x1,

	/** Fibre Channel */
	SPDK_NVMF_TRTYPE_FC = 0x2,

	/** TCP */
	SPDK_NVMF_TRTYPE_TCP = 0x3,

	/** Intra-host transport (loopback) */
	SPDK_NVMF_TRTYPE_INTRA_HOST = 0xfe,
};

/**
 * Address family types
 */
enum spdk_nvmf_adrfam {
	/** IPv4 (AF_INET) */
	SPDK_NVMF_ADRFAM_IPV4 = 0x1,

	/** IPv6 (AF_INET6) */
	SPDK_NVMF_ADRFAM_IPV6 = 0x2,

	/** InfiniBand (AF_IB) */
	SPDK_NVMF_ADRFAM_IB = 0x3,

	/** Fibre Channel address family */
	SPDK_NVMF_ADRFAM_FC = 0x4,

	/** Intra-host transport (loopback) */
	SPDK_NVMF_ADRFAM_INTRA_HOST = 0xfe,
};

/**
 * NVM subsystem types
 */
enum spdk_nvmf_subtype {
	/** Discovery type for NVM subsystem */
	SPDK_NVMF_SUBTYPE_DISCOVERY = 0x1,

	/** NVMe type for NVM subsystem */
	SPDK_NVMF_SUBTYPE_NVME = 0x2,
};

/**
 * Connections shall be made over a fabric secure channel
 */
enum spdk_nvmf_treq_secure_channel {
	/** Not specified */
	SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED = 0x0,

	/** Required */
	SPDK_NVMF_TREQ_SECURE_CHANNEL_REQUIRED = 0x1,

	/** Not required */
	SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED = 0x2,
};
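/*
 * Illustrative sketch, not part of the NVMe-oF definitions above: a minimal
 * check a target might use to recognize a Fabrics Connect capsule. The helper
 * name is hypothetical; only SPDK_NVME_OPC_FABRIC, struct spdk_nvmf_capsule_cmd
 * and enum spdk_nvmf_fabric_cmd_types come from this header. All Fabrics
 * commands share the 0x7f opcode, and the fctype byte selects the operation.
 */
static inline bool
example_nvmf_is_connect_capsule(const struct spdk_nvmf_capsule_cmd *cap)
{
	return cap->opcode == SPDK_NVME_OPC_FABRIC &&
	       cap->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}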
struct spdk_nvmf_fabric_auth_recv_cmd {
	uint8_t opcode;
	uint8_t reserved1;
	uint16_t cid;
	uint8_t fctype; /* NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV (0x06) */
	uint8_t reserved2[19];
	struct spdk_nvme_sgl_descriptor sgl1;
	uint8_t reserved3;
	uint8_t spsp0;
	uint8_t spsp1;
	uint8_t secp;
	uint32_t al;
	uint8_t reserved4[16];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_fabric_auth_recv_cmd) == 64, "Incorrect size");

struct spdk_nvmf_fabric_auth_send_cmd {
	uint8_t opcode;
	uint8_t reserved1;
	uint16_t cid;
	uint8_t fctype; /* NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND (0x05) */
	uint8_t reserved2[19];
	struct spdk_nvme_sgl_descriptor sgl1;
	uint8_t reserved3;
	uint8_t spsp0;
	uint8_t spsp1;
	uint8_t secp;
	uint32_t tl;
	uint8_t reserved4[16];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_fabric_auth_send_cmd) == 64, "Incorrect size");

struct spdk_nvmf_fabric_connect_data {
	uint8_t hostid[16];
	uint16_t cntlid;
	uint8_t reserved5[238];
	uint8_t subnqn[SPDK_NVME_NQN_FIELD_SIZE];
	uint8_t hostnqn[SPDK_NVME_NQN_FIELD_SIZE];
	uint8_t reserved6[256];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_fabric_connect_data) == 1024, "Incorrect size");

struct spdk_nvmf_fabric_connect_cmd {
	uint8_t opcode;
	uint8_t reserved1;
	uint16_t cid;
	uint8_t fctype;
	uint8_t reserved2[19];
	struct spdk_nvme_sgl_descriptor sgl1;
	uint16_t recfmt; /* Connect Record Format */
	uint16_t qid; /* Queue Identifier */
	uint16_t sqsize; /* Submission Queue Size */
	uint8_t cattr; /* queue attributes */
	uint8_t reserved3;
	uint32_t kato; /* keep alive timeout */
	uint8_t reserved4[12];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_fabric_connect_cmd) == 64, "Incorrect size");

struct spdk_nvmf_fabric_connect_rsp {
	union {
		struct {
			uint16_t cntlid;
			uint16_t authreq;
		} success;

		struct {
			uint16_t ipo;
			uint8_t iattr;
			uint8_t reserved;
		} invalid;

		uint32_t raw;
	} status_code_specific;

	uint32_t reserved0;
	uint16_t sqhd;
	uint16_t reserved1;
	uint16_t cid;
	struct spdk_nvme_status status;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_fabric_connect_rsp) == 16, "Incorrect size");

#define SPDK_NVMF_PROP_SIZE_4 0
#define SPDK_NVMF_PROP_SIZE_8 1

struct spdk_nvmf_fabric_prop_get_cmd {
	uint8_t opcode;
	uint8_t reserved1;
	uint16_t cid;
	uint8_t fctype;
	uint8_t reserved2[35];
	struct {
		uint8_t size : 3;
		uint8_t reserved : 5;
	} attrib;
	uint8_t reserved3[3];
	uint32_t ofst;
	uint8_t reserved4[16];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_fabric_prop_get_cmd) == 64, "Incorrect size");

struct spdk_nvmf_fabric_prop_get_rsp {
	union {
		uint64_t u64;
		struct {
			uint32_t low;
			uint32_t high;
		} u32;
	} value;

	uint16_t sqhd;
	uint16_t reserved0;
	uint16_t cid;
	struct spdk_nvme_status status;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_fabric_prop_get_rsp) == 16, "Incorrect size");

struct spdk_nvmf_fabric_prop_set_cmd {
	uint8_t opcode;
	uint8_t reserved0;
	uint16_t cid;
	uint8_t fctype;
	uint8_t reserved1[35];
	struct {
		uint8_t size : 3;
		uint8_t reserved : 5;
	} attrib;
	uint8_t reserved2[3];
	uint32_t ofst;

	union {
		uint64_t u64;
		struct {
			uint32_t low;
			uint32_t high;
		} u32;
	} value;

	uint8_t reserved4[8];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_fabric_prop_set_cmd) == 64, "Incorrect size");
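/*
 * Illustrative sketch, not part of the definitions above: one way a host might
 * build a Property Get capsule for an 8-byte controller property. The function
 * name and the offset parameter are hypothetical; the structure layout,
 * SPDK_NVME_OPC_FABRIC, SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET and
 * SPDK_NVMF_PROP_SIZE_8 come from this header. Property offsets follow the
 * controller properties layout of the NVMe over Fabrics specification (for
 * example, offset 0x0 holds the 8-byte CAP property).
 */
static inline void
example_nvmf_build_prop_get(struct spdk_nvmf_fabric_prop_get_cmd *cmd,
			    uint16_t cid, uint32_t offset)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = SPDK_NVME_OPC_FABRIC;
	cmd->fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	cmd->cid = cid;
	cmd->attrib.size = SPDK_NVMF_PROP_SIZE_8;	/* request an 8-byte property */
	cmd->ofst = offset;				/* e.g. 0x0 for CAP */
}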
#define SPDK_NVMF_NQN_MIN_LEN 11 /* The prefix in the spec is 11 characters */
#define SPDK_NVMF_NQN_MAX_LEN 223
#define SPDK_NVMF_NQN_UUID_PRE_LEN 32
#define SPDK_NVMF_UUID_STRING_LEN 36
#define SPDK_NVMF_NQN_UUID_PRE "nqn.2014-08.org.nvmexpress:uuid:"
#define SPDK_NVMF_DISCOVERY_NQN "nqn.2014-08.org.nvmexpress.discovery"

#define SPDK_DOMAIN_LABEL_MAX_LEN 63 /* RFC 1034 max domain label length */

#define SPDK_NVMF_TRSTRING_MAX_LEN 32
#define SPDK_NVMF_TRADDR_MAX_LEN 256
#define SPDK_NVMF_TRSVCID_MAX_LEN 32

/** RDMA transport-specific address subtype */
struct spdk_nvmf_rdma_transport_specific_address_subtype {
	/** RDMA QP service type (\ref spdk_nvmf_rdma_qptype) */
	uint8_t rdma_qptype;

	/** RDMA provider type (\ref spdk_nvmf_rdma_prtype) */
	uint8_t rdma_prtype;

	/** RDMA connection management service (\ref spdk_nvmf_rdma_cms) */
	uint8_t rdma_cms;

	uint8_t reserved0[5];

	/** RDMA partition key for AF_IB */
	uint16_t rdma_pkey;

	uint8_t reserved2[246];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_rdma_transport_specific_address_subtype) == 256,
		   "Incorrect size");

/** TCP Secure Socket Type */
enum spdk_nvme_tcp_secure_socket_type {
	/** No security */
	SPDK_NVME_TCP_SECURITY_NONE = 0,

	/** TLS (Secure Sockets) */
	SPDK_NVME_TCP_SECURITY_TLS = 1,
};

/** TCP transport-specific address subtype */
struct spdk_nvme_tcp_transport_specific_address_subtype {
	/** Security type (\ref spdk_nvme_tcp_secure_socket_type) */
	uint8_t sectype;

	uint8_t reserved0[255];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_transport_specific_address_subtype) == 256,
		   "Incorrect size");

/** Transport-specific address subtype */
union spdk_nvmf_transport_specific_address_subtype {
	uint8_t raw[256];

	/** RDMA */
	struct spdk_nvmf_rdma_transport_specific_address_subtype rdma;

	/** TCP */
	struct spdk_nvme_tcp_transport_specific_address_subtype tcp;
};
SPDK_STATIC_ASSERT(sizeof(union spdk_nvmf_transport_specific_address_subtype) == 256,
		   "Incorrect size");

#define SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE 32

/**
 * Discovery Log Page entry
 */
struct spdk_nvmf_discovery_log_page_entry {
	/** Transport type (\ref spdk_nvmf_trtype) */
	uint8_t trtype;

	/** Address family (\ref spdk_nvmf_adrfam) */
	uint8_t adrfam;

	/** Subsystem type (\ref spdk_nvmf_subtype) */
	uint8_t subtype;

	/** Transport requirements */
	struct {
		/** Secure channel requirements (\ref spdk_nvmf_treq_secure_channel) */
		uint8_t secure_channel : 2;

		uint8_t reserved : 6;
	} treq;

	/** NVM subsystem port ID */
	uint16_t portid;

	/** Controller ID */
	uint16_t cntlid;

	/** Admin max SQ size */
	uint16_t asqsz;

	uint8_t reserved0[22];

	/** Transport service identifier */
	uint8_t trsvcid[SPDK_NVMF_TRSVCID_MAX_LEN];

	uint8_t reserved1[192];

	/** NVM subsystem qualified name */
	uint8_t subnqn[256];

	/** Transport address */
	uint8_t traddr[SPDK_NVMF_TRADDR_MAX_LEN];

	/** Transport-specific address subtype */
	union spdk_nvmf_transport_specific_address_subtype tsas;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_discovery_log_page_entry) == 1024, "Incorrect size");

struct spdk_nvmf_discovery_log_page {
	uint64_t genctr;
	uint64_t numrec;
	uint16_t recfmt;
	uint8_t reserved0[1006];
	struct spdk_nvmf_discovery_log_page_entry entries[0];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_discovery_log_page) == 1024, "Incorrect size");
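/*
 * Illustrative sketch, not part of the definitions above: walking the entries
 * of a discovery log page that has already been read into a contiguous buffer.
 * The function name and callback are hypothetical. The log page header is
 * 1024 bytes and is followed by numrec 1024-byte entries, so entries[] can be
 * indexed directly; in a real host, genctr would also be re-checked after the
 * read to detect a log page that changed mid-transfer.
 */
static inline void
example_nvmf_foreach_discovery_entry(const struct spdk_nvmf_discovery_log_page *log,
				     void (*fn)(const struct spdk_nvmf_discovery_log_page_entry *))
{
	uint64_t i;

	for (i = 0; i < log->numrec; i++) {
		fn(&log->entries[i]);
	}
}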
/* RDMA Fabric specific definitions below */

#define SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY 0xF

struct spdk_nvmf_rdma_request_private_data {
	uint16_t recfmt; /* record format */
	uint16_t qid; /* queue id */
	uint16_t hrqsize; /* host receive queue size */
	uint16_t hsqsize; /* host send queue size */
	uint16_t cntlid; /* controller id */
	uint8_t reserved[22];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_rdma_request_private_data) == 32, "Incorrect size");

struct spdk_nvmf_rdma_accept_private_data {
	uint16_t recfmt; /* record format */
	uint16_t crqsize; /* controller receive queue size */
	uint8_t reserved[28];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_rdma_accept_private_data) == 32, "Incorrect size");

struct spdk_nvmf_rdma_reject_private_data {
	uint16_t recfmt; /* record format */
	uint16_t sts; /* status */
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_rdma_reject_private_data) == 4, "Incorrect size");

union spdk_nvmf_rdma_private_data {
	struct spdk_nvmf_rdma_request_private_data pd_request;
	struct spdk_nvmf_rdma_accept_private_data pd_accept;
	struct spdk_nvmf_rdma_reject_private_data pd_reject;
};
SPDK_STATIC_ASSERT(sizeof(union spdk_nvmf_rdma_private_data) == 32, "Incorrect size");

enum spdk_nvmf_rdma_transport_error {
	SPDK_NVMF_RDMA_ERROR_INVALID_PRIVATE_DATA_LENGTH = 0x1,
	SPDK_NVMF_RDMA_ERROR_INVALID_RECFMT = 0x2,
	SPDK_NVMF_RDMA_ERROR_INVALID_QID = 0x3,
	SPDK_NVMF_RDMA_ERROR_INVALID_HSQSIZE = 0x4,
	SPDK_NVMF_RDMA_ERROR_INVALID_HRQSIZE = 0x5,
	SPDK_NVMF_RDMA_ERROR_NO_RESOURCES = 0x6,
	SPDK_NVMF_RDMA_ERROR_INVALID_IRD = 0x7,
	SPDK_NVMF_RDMA_ERROR_INVALID_ORD = 0x8,
};

/* TCP transport specific definitions below */

/** NVMe/TCP PDU type */
enum spdk_nvme_tcp_pdu_type {
	/** Initialize Connection Request (ICReq) */
	SPDK_NVME_TCP_PDU_TYPE_IC_REQ = 0x00,

	/** Initialize Connection Response (ICResp) */
	SPDK_NVME_TCP_PDU_TYPE_IC_RESP = 0x01,

	/** Host to Controller Terminate Connection Request (H2CTermReq) */
	SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ = 0x02,

	/** Controller to Host Terminate Connection Request (C2HTermReq) */
	SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ = 0x03,

	/** Command Capsule (CapsuleCmd) */
	SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD = 0x04,

	/** Response Capsule (CapsuleRsp) */
	SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP = 0x05,

	/** Host To Controller Data (H2CData) */
	SPDK_NVME_TCP_PDU_TYPE_H2C_DATA = 0x06,

	/** Controller To Host Data (C2HData) */
	SPDK_NVME_TCP_PDU_TYPE_C2H_DATA = 0x07,

	/** Ready to Transfer (R2T) */
	SPDK_NVME_TCP_PDU_TYPE_R2T = 0x09,
};

/** Common NVMe/TCP PDU header */
struct spdk_nvme_tcp_common_pdu_hdr {
	/** PDU type (\ref spdk_nvme_tcp_pdu_type) */
	uint8_t pdu_type;

	/** pdu_type-specific flags */
	uint8_t flags;

	/** Length of PDU header (not including the Header Digest) */
	uint8_t hlen;

	/** PDU Data Offset from the start of the PDU */
	uint8_t pdo;

	/** Total number of bytes in PDU, including pdu_hdr */
	uint32_t plen;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_common_pdu_hdr) == 8, "Incorrect size");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type) == 0,
		   "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_common_pdu_hdr, flags) == 1, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen) == 2, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo) == 3, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen) == 4, "Incorrect offset");

#define SPDK_NVME_TCP_CH_FLAGS_HDGSTF (1u << 0)
#define SPDK_NVME_TCP_CH_FLAGS_DDGSTF (1u << 1)
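/*
 * Illustrative sketch, not part of the definitions above: a minimal sanity
 * check a receiver might apply to the common header before trusting plen and
 * reading the rest of the PDU. The function name is hypothetical; the fields
 * and the header-digest flag come from this header. A header digest, when
 * negotiated, follows the hlen bytes of PDU header and adds 4 bytes.
 */
static inline bool
example_nvme_tcp_common_hdr_sane(const struct spdk_nvme_tcp_common_pdu_hdr *hdr)
{
	uint32_t hdgst_len = (hdr->flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) ? 4 : 0;

	/* plen covers the whole PDU, so it can never be smaller than the
	 * header plus its optional header digest.
	 */
	return hdr->plen >= (uint32_t)hdr->hlen + hdgst_len;
}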
/**
 * ICReq
 *
 * common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ
 */
struct spdk_nvme_tcp_ic_req {
	struct spdk_nvme_tcp_common_pdu_hdr common;
	uint16_t pfv;
	/** Specifies the data alignment for all PDUs transferred from the controller to the host that contain data */
	uint8_t hpda;
	union {
		uint8_t raw;
		struct {
			uint8_t hdgst_enable : 1;
			uint8_t ddgst_enable : 1;
			uint8_t reserved : 6;
		} bits;
	} dgst;
	uint32_t maxr2t;
	uint8_t reserved16[112];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_ic_req) == 128, "Incorrect size");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_ic_req, pfv) == 8, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_ic_req, hpda) == 10, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_ic_req, maxr2t) == 12, "Incorrect offset");

#define SPDK_NVME_TCP_HPDA_MAX 31
#define SPDK_NVME_TCP_CPDA_MAX 31
#define SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET ((SPDK_NVME_TCP_CPDA_MAX + 1) << 2)

/**
 * ICResp
 *
 * common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP
 */
struct spdk_nvme_tcp_ic_resp {
	struct spdk_nvme_tcp_common_pdu_hdr common;
	uint16_t pfv;
	/** Specifies the data alignment for all PDUs transferred from the host to the controller that contain data */
	uint8_t cpda;
	union {
		uint8_t raw;
		struct {
			uint8_t hdgst_enable : 1;
			uint8_t ddgst_enable : 1;
			uint8_t reserved : 6;
		} bits;
	} dgst;
	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
	uint32_t maxh2cdata;
	uint8_t reserved16[112];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_ic_resp) == 128, "Incorrect size");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_ic_resp, pfv) == 8, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_ic_resp, cpda) == 10, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_ic_resp, maxh2cdata) == 12, "Incorrect offset");

/**
 * TermReq
 *
 * common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ or
 * common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ
 */
struct spdk_nvme_tcp_term_req_hdr {
	struct spdk_nvme_tcp_common_pdu_hdr common;
	uint16_t fes;
	uint8_t fei[4];
	uint8_t reserved14[10];
};

SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_term_req_hdr) == 24, "Incorrect size");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_term_req_hdr, fes) == 8, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_term_req_hdr, fei) == 10, "Incorrect offset");

enum spdk_nvme_tcp_term_req_fes {
	SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD = 0x01,
	SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR = 0x02,
	SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR = 0x03,
	SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE = 0x04,
	SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED = 0x05,
	SPDK_NVME_TCP_TERM_REQ_FES_R2T_LIMIT_EXCEEDED = 0x05,
	SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER = 0x06,
};

/* Total length of term req PDU (including PDU header and DATA) in bytes shall not exceed a limit of 152 bytes. */
#define SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE 128
#define SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE (SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE + sizeof(struct spdk_nvme_tcp_term_req_hdr))
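/*
 * Illustrative sketch, not part of the definitions above: one way a host might
 * fill in an ICReq PDU when opening an NVMe/TCP connection. The function name
 * and parameter choices are hypothetical; the field layout comes from this
 * header. PFV 0 identifies the initial PDU format version, and maxr2t is a
 * 0's based value (0 means one outstanding R2T).
 */
static inline void
example_nvme_tcp_build_ic_req(struct spdk_nvme_tcp_ic_req *ic_req,
			      bool hdgst_enable, bool ddgst_enable)
{
	memset(ic_req, 0, sizeof(*ic_req));
	ic_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	ic_req->common.hlen = sizeof(*ic_req);
	ic_req->common.plen = sizeof(*ic_req);	/* ICReq carries no PDU data */
	ic_req->pfv = 0;			/* PDU format version 0 */
	ic_req->hpda = 0;			/* no extra C2H data alignment requested */
	ic_req->dgst.bits.hdgst_enable = hdgst_enable;
	ic_req->dgst.bits.ddgst_enable = ddgst_enable;
	ic_req->maxr2t = 0;			/* 0's based: at most one outstanding R2T */
}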
/**
 * CapsuleCmd
 *
 * common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD
 */
struct spdk_nvme_tcp_cmd {
	struct spdk_nvme_tcp_common_pdu_hdr common;
	struct spdk_nvme_cmd ccsqe;
	/**< icdoff hdgst padding + in-capsule data + ddgst (if enabled) */
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_cmd) == 72, "Incorrect size");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_cmd, ccsqe) == 8, "Incorrect offset");

/**
 * CapsuleResp
 *
 * common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP
 */
struct spdk_nvme_tcp_rsp {
	struct spdk_nvme_tcp_common_pdu_hdr common;
	struct spdk_nvme_cpl rccqe;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_rsp) == 24, "Incorrect size");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_rsp, rccqe) == 8, "Incorrect offset");


/**
 * H2CData
 *
 * hdr.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_DATA
 */
struct spdk_nvme_tcp_h2c_data_hdr {
	struct spdk_nvme_tcp_common_pdu_hdr common;
	uint16_t cccid;
	uint16_t ttag;
	uint32_t datao;
	uint32_t datal;
	uint8_t reserved20[4];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_h2c_data_hdr) == 24, "Incorrect size");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_h2c_data_hdr, cccid) == 8, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_h2c_data_hdr, ttag) == 10, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_h2c_data_hdr, datao) == 12, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_h2c_data_hdr, datal) == 16, "Incorrect offset");

#define SPDK_NVME_TCP_H2C_DATA_FLAGS_LAST_PDU (1u << 2)
#define SPDK_NVME_TCP_H2C_DATA_FLAGS_SUCCESS (1u << 3)
#define SPDK_NVME_TCP_H2C_DATA_PDO_MULT 8u

/**
 * C2HData
 *
 * hdr.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA
 */
struct spdk_nvme_tcp_c2h_data_hdr {
	struct spdk_nvme_tcp_common_pdu_hdr common;
	uint16_t cccid;
	uint8_t reserved10[2];
	uint32_t datao;
	uint32_t datal;
	uint8_t reserved20[4];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_c2h_data_hdr) == 24, "Incorrect size");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_c2h_data_hdr, cccid) == 8, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datao) == 12, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datal) == 16, "Incorrect offset");

#define SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS (1u << 3)
#define SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU (1u << 2)
#define SPDK_NVME_TCP_C2H_DATA_PDO_MULT 8u
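/*
 * Illustrative sketch, not part of the definitions above: the bounds check a
 * host might apply to a received C2HData PDU before copying its PDU data into
 * the command's data buffer. The function name and the expected-length
 * parameter are hypothetical; datao and datal come from this header.
 */
static inline bool
example_nvme_tcp_c2h_data_in_bounds(const struct spdk_nvme_tcp_c2h_data_hdr *c2h,
				    uint32_t expected_data_len)
{
	/* datao is the offset of this PDU's data within the command's data
	 * buffer and datal is its length; together they must stay inside the
	 * host's expected transfer.
	 */
	return c2h->datao <= expected_data_len &&
	       c2h->datal <= expected_data_len - c2h->datao;
}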
/**
 * R2T
 *
 * common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_R2T
 */
struct spdk_nvme_tcp_r2t_hdr {
	struct spdk_nvme_tcp_common_pdu_hdr common;
	uint16_t cccid;
	uint16_t ttag;
	uint32_t r2to;
	uint32_t r2tl;
	uint8_t reserved20[4];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_tcp_r2t_hdr) == 24, "Incorrect size");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_r2t_hdr, cccid) == 8, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_r2t_hdr, ttag) == 10, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_r2t_hdr, r2to) == 12, "Incorrect offset");
SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_tcp_r2t_hdr, r2tl) == 16, "Incorrect offset");

#pragma pack(pop)

#endif /* SPDK_NVMF_SPEC_H */