/* SPDX-License-Identifier: MIT
 * Google Virtual Ethernet (gve) driver
 * Copyright (C) 2015-2022 Google, Inc.
 */

/* GVE DQO Descriptor formats.
 *
 * Every struct in this file describes a fixed hardware (DMA) layout:
 * field order, bit-field widths, and the __packed attribute together
 * define the exact format exchanged with the device, and each layout is
 * pinned by a GVE_CHECK_STRUCT_LEN() compile-time size assertion. Do not
 * reorder, resize, or insert fields.
 *
 * NOTE(review): bit-field allocation order within a storage unit is
 * compiler/ABI-dependent; this layout presumably assumes the conventional
 * little-endian LSB-first allocation used by supported toolchains — confirm
 * before building with a new compiler or on a big-endian target.
 */

#ifndef _GVE_DESC_DQO_H_
#define _GVE_DESC_DQO_H_

#include "gve_osdep.h"

/* Largest TX packet header the driver may describe; matches the range of
 * the u8 header_len field in struct gve_tx_tso_context_desc_dqo below.
 */
#define GVE_TX_MAX_HDR_SIZE_DQO 255

/* Smallest MSS accepted for a TSO request. */
#define GVE_TX_MIN_TSO_MSS_DQO 88

/* Basic TX descriptor (DTYPE 0x0C): points the device at one TX data
 * buffer. A packet may span several of these; the last one carries
 * end_of_packet = 1.
 */
struct gve_tx_pkt_desc_dqo {
	/* DMA address of the TX data buffer. */
	__le64 buf_addr;

	/* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */
	u8 dtype: 5;

	/* Denotes the last descriptor of a packet. */
	u8 end_of_packet: 1;
	u8 checksum_offload_enable: 1;

	/* If set, will generate a descriptor completion for this descriptor. */
	u8 report_event: 1;
	u8 reserved0;
	__le16 reserved1;

	/* The TX completion associated with this packet will contain this tag.
	 */
	__le16 compl_tag;
	/* Buffer length in bytes; 14 bits wide, so at most
	 * GVE_TX_MAX_BUF_SIZE_DQO (2^14 - 1).
	 */
	u16 buf_size: 14;
	u16 reserved2: 2;
} __packed;
GVE_CHECK_STRUCT_LEN(16, gve_tx_pkt_desc_dqo);

#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc

/* Upper bound of the 14-bit buf_size field above. */
#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)

/* Maximum number of data descriptors allowed per packet, or per-TSO segment. */
#define GVE_TX_MAX_DATA_DESCS 10

/* Min gap between tail and head to avoid cacheline overlap */
#define GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP 4

/* "report_event" on TX packet descriptors may only be reported on the last
 * descriptor of a TX packet, and they must be spaced apart with at least this
 * value.
 */
#define GVE_TX_MIN_RE_INTERVAL 32

/* Common 2-byte command/dtype word embedded in the TX context descriptors
 * below (TSO and general context).
 */
struct gve_tx_context_cmd_dtype {
	u8 dtype: 5;
	u8 tso: 1;
	u8 reserved1: 2;

	u8 reserved2;
};

GVE_CHECK_STRUCT_LEN(2, gve_tx_context_cmd_dtype);

/* TX Native TSO Context DTYPE (0x05)
 *
 * "flex" fields allow the driver to send additional packet context to HW.
 */
struct gve_tx_tso_context_desc_dqo {
	/* The L4 payload bytes that should be segmented.
	 */
	u32 tso_total_len: 24;
	u32 flex10: 8;

	/* Max segment size in TSO excluding headers. */
	u16 mss: 14;
	u16 reserved: 2;

	/* Header length to use for TSO offload; being a u8, it is bounded by
	 * GVE_TX_MAX_HDR_SIZE_DQO (255).
	 */
	u8 header_len;
	u8 flex11;
	struct gve_tx_context_cmd_dtype cmd_dtype;
	u8 flex0;
	u8 flex5;
	u8 flex6;
	u8 flex7;
	u8 flex8;
	u8 flex9;
} __packed;
GVE_CHECK_STRUCT_LEN(16, gve_tx_tso_context_desc_dqo);

#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5

/* General context descriptor for sending metadata. The flex bytes carry the
 * packed form of struct gve_tx_metadata_dqo (defined below).
 */
struct gve_tx_general_context_desc_dqo {
	u8 flex4;
	u8 flex5;
	u8 flex6;
	u8 flex7;
	u8 flex8;
	u8 flex9;
	u8 flex10;
	u8 flex11;
	struct gve_tx_context_cmd_dtype cmd_dtype;
	u16 reserved;
	u8 flex0;
	u8 flex1;
	u8 flex2;
	u8 flex3;
} __packed;
GVE_CHECK_STRUCT_LEN(16, gve_tx_general_context_desc_dqo);

#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4

/* Logical structure of metadata which is packed into context descriptor flex
 * fields.
 */
struct gve_tx_metadata_dqo {
	union {
		struct {
			/* Metadata layout version; see
			 * GVE_TX_METADATA_VERSION_DQO below.
			 */
			u8 version;

			/* If `skb->l4_hash` is set, this value should be
			 * derived from `skb->hash`.
			 *
			 * A zero value means no l4_hash was associated with the
			 * skb.
			 */
			u16 path_hash: 15;

			/* Should be set to 1 if the flow associated with the
			 * skb had a rehash from the TCP stack.
			 */
			u16 rehash_event: 1;
		} __packed;
		/* Raw view used when copying into the descriptor flex bytes. */
		u8 bytes[12];
	};
} __packed;
GVE_CHECK_STRUCT_LEN(12, gve_tx_metadata_dqo);

#define GVE_TX_METADATA_VERSION_DQO 0

/* TX completion descriptor, written by HW onto the TX completion ring. */
struct gve_tx_compl_desc {
	/* For types 0-4 this is the TX queue ID associated with this
	 * completion.
	 */
	u16 id: 11;

	/* See: GVE_COMPL_TYPE_DQO* */
	u16 type: 3;
	u16 reserved0: 1;

	/* Flipped by HW to notify the descriptor is populated.
	 */
	u16 generation: 1;
	union {
		/* For descriptor completions, this is the last index fetched
		 * by HW + 1.
		 */
		__le16 tx_head;

		/* For packet completions, this is the completion tag set on the
		 * TX packet descriptors.
		 */
		__le16 completion_tag;
	};
	__le32 reserved1;
} __packed;
GVE_CHECK_STRUCT_LEN(8, gve_tx_compl_desc);

#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */
#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */
#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */
#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */

/* Descriptor to post buffers to HW on buffer queue. */
struct gve_rx_desc_dqo {
	__le16 buf_id; /* ID returned in Rx completion descriptor */
	__le16 reserved0;
	__le32 reserved1;
	__le64 buf_addr; /* DMA address of the buffer */
	/* DMA address of the header-split buffer. */
	__le64 header_buf_addr;
	__le64 reserved2;
} __packed;
GVE_CHECK_STRUCT_LEN(32, gve_rx_desc_dqo);

/* Descriptor for HW to notify SW of new packets received on RX queue. */
struct gve_rx_compl_desc_dqo {
	/* Must be 1 */
	u8 rxdid: 4;
	u8 reserved0: 4;

	/* Packet originated from this system rather than the network. */
	u8 loopback: 1;
	/* Set when IPv6 packet contains a destination options header or routing
	 * header.
	 */
	u8 ipv6_ex_add: 1;
	/* Invalid packet was received. */
	u8 rx_error: 1;
	u8 reserved1: 5;

	u16 packet_type: 10;
	u16 ip_hdr_err: 1;
	u16 udp_len_err: 1;
	u16 raw_cs_invalid: 1;
	u16 reserved2: 3;

	u16 packet_len: 14;
	/* Flipped by HW to notify the descriptor is populated. */
	u16 generation: 1;
	/* Should be zero.
	 */
	u16 buffer_queue_id: 1;

	u16 header_len: 10;
	u16 rsc: 1;
	u16 split_header: 1;
	u16 reserved3: 4;

	u8 descriptor_done: 1;
	u8 end_of_packet: 1;
	u8 header_buffer_overflow: 1;
	u8 l3_l4_processed: 1;
	u8 csum_ip_err: 1;
	u8 csum_l4_err: 1;
	u8 csum_external_ip_err: 1;
	u8 csum_external_udp_err: 1;

	u8 status_error1;

	__le16 reserved5;
	__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */

	union {
		/* Packet checksum. */
		__le16 raw_cs;
		/* Segment length for RSC packets. */
		__le16 rsc_seg_len;
	};
	__le32 hash;
	__le32 reserved6;
	__le64 reserved7;
} __packed;

GVE_CHECK_STRUCT_LEN(32, gve_rx_compl_desc_dqo);

/* Ringing the doorbell too often can hurt performance.
 *
 * HW requires this value to be at least 8.
 */
#define GVE_RX_BUF_THRESH_DQO 32

#endif /* _GVE_DESC_DQO_H_ */