xref: /dpdk/drivers/net/gve/base/gve_desc.h (revision c9ba2caf63025f57d70d8816a54688e384537a3b)
/* SPDX-License-Identifier: MIT
 * Google Virtual Ethernet (gve) driver
 * Copyright (C) 2015-2022 Google, Inc.
 */
5 
/* GVE Transmit Descriptor formats */
7 
8 #ifndef _GVE_DESC_H_
9 #define _GVE_DESC_H_
10 
11 #include "gve_osdep.h"
12 
/* A note on seg_addrs
 *
 * Base addresses encoded in seg_addr are not assumed to be physical
 * addresses. The ring format assumes these come from some linear address
 * space. This could be physical memory, kernel virtual memory, or user
 * virtual memory.
 * If raw DMA addressing is not supported then gVNIC uses lists of registered
 * pages. Each queue is assumed to be associated with a single such linear
 * address space to ensure a consistent meaning for seg_addrs posted to its
 * rings.
 */
24 
/* TX packet descriptor. All multi-byte fields are big-endian (__be*). */
struct gve_tx_pkt_desc {
	u8	type_flags;  /* desc type is lower 4 bits, flags upper */
	u8	l4_csum_offset;  /* relative offset of L4 csum word */
	u8	l4_hdr_offset;  /* Offset of start of L4 headers in packet */
	u8	desc_cnt;  /* Total descriptors for this packet */
	__be16	len;  /* Total length of this packet (in bytes) */
	__be16	seg_len;  /* Length of this descriptor's segment */
	__be64	seg_addr;  /* Base address (see note) of this segment */
} __packed;
34 
/* TX metadata descriptor (type GVE_TXD_MTD); carries a path hash. */
struct gve_tx_mtd_desc {
	u8      type_flags;     /* type is lower 4 bits, subtype upper  */
	u8      path_state;     /* state is lower 4 bits, hash type upper */
	__be16  reserved0;
	__be32  path_hash;
	__be64  reserved1;
} __packed;
42 
/* TX segment descriptor; follows a TSO packet descriptor. */
struct gve_tx_seg_desc {
	u8	type_flags;	/* type is lower 4 bits, flags upper	*/
	u8	l3_offset;	/* TSO: 2 byte units to start of IPH	*/
	__be16	reserved;
	__be16	mss;		/* TSO MSS				*/
	__be16	seg_len;	/* Length of this segment (in bytes)	*/
	__be64	seg_addr;	/* Base address (see note) of segment	*/
} __packed;
51 
/* GVE Transmit Descriptor Types
 * NOTE(review): the values are shifted into the upper nibble, which appears
 * to conflict with the "type is lower 4 bits" comments on type_flags —
 * confirm against the device spec before relying on either reading.
 */
#define	GVE_TXD_STD		(0x0 << 4) /* Std with Host Address	*/
#define	GVE_TXD_TSO		(0x1 << 4) /* TSO with Host Address	*/
#define	GVE_TXD_SEG		(0x2 << 4) /* Seg with Host Address	*/
#define	GVE_TXD_MTD		(0x3 << 4) /* Metadata			*/
57 
/* GVE Transmit Descriptor Flags for Std Pkts */
#define	GVE_TXF_L4CSUM	BIT(0)	/* Need csum offload */
#define	GVE_TXF_TSTAMP	BIT(2)	/* Timestamp required */

/* GVE Transmit Descriptor Flags for TSO Segs */
#define	GVE_TXSF_IPV6	BIT(1)	/* IPv6 TSO */
64 
/* GVE Transmit Descriptor Options for MTD Segs */
#define GVE_MTD_SUBTYPE_PATH		0

/* path_state values (lower 4 bits of gve_tx_mtd_desc.path_state) */
#define GVE_MTD_PATH_STATE_DEFAULT	0
#define GVE_MTD_PATH_STATE_TIMEOUT	1
#define GVE_MTD_PATH_STATE_CONGESTION	2
#define GVE_MTD_PATH_STATE_RETRANSMIT	3

/* hash type values (upper 4 bits of gve_tx_mtd_desc.path_state) */
#define GVE_MTD_PATH_HASH_NONE         (0x0 << 4)
#define GVE_MTD_PATH_HASH_L4           (0x1 << 4)
75 
/* GVE Receive Packet Descriptor */
/* The start of an ethernet packet comes 2 bytes into the rx buffer.
 * gVNIC adds this padding so that both the DMA and the L3/4 protocol header
 * access is aligned.
 */
#define GVE_RX_PAD 2
82 
/* RX completion descriptor. All multi-byte fields are big-endian (__be*). */
struct gve_rx_desc {
	u8	padding[48];
	__be32	rss_hash;  /* Receive-side scaling hash (Toeplitz for gVNIC) */
	__be16	mss;
	__be16	reserved;  /* Reserved to zero */
	u8	hdr_len;  /* Header length (L2-L4) including padding */
	u8	hdr_off;  /* 64-byte-scaled offset into RX_DATA entry */
	__sum16	csum;  /* 1's-complement partial checksum of L3+ bytes */
	__be16	len;  /* Length of the received packet */
	__be16	flags_seq;  /* Flags [15:3] and sequence number [2:0] (1-7) */
} __packed;
/* Compile-time guard: the descriptor must be exactly 64 bytes. */
GVE_CHECK_STRUCT_LEN(64, gve_rx_desc);
95 
/* If the device supports raw dma addressing then the addr in data slot is
 * the dma address of the buffer.
 * If the device only supports registered segments then the addr is a byte
 * offset into the registered segment (an ordered list of pages) where the
 * buffer is.
 */
union gve_rx_data_slot {
	__be64 qpl_offset;	/* registered-segment (queue page list) mode */
	__be64 addr;		/* raw DMA addressing mode */
};
106 
/* GVE Receive Packet Descriptor Seq No: low 3 bits of flags_seq */
#define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7)

/* GVE Receive Packet Descriptor Flags: flag x lives at bit (3 + x) */
#define GVE_RXFLG(x)	cpu_to_be16(1 << (3 + (x)))
#define	GVE_RXF_FRAG		GVE_RXFLG(3)	/* IP Fragment			*/
#define	GVE_RXF_IPV4		GVE_RXFLG(4)	/* IPv4				*/
#define	GVE_RXF_IPV6		GVE_RXFLG(5)	/* IPv6				*/
#define	GVE_RXF_TCP		GVE_RXFLG(6)	/* TCP Packet			*/
#define	GVE_RXF_UDP		GVE_RXFLG(7)	/* UDP Packet			*/
#define	GVE_RXF_ERR		GVE_RXFLG(8)	/* Packet Error Detected	*/
#define	GVE_RXF_PKT_CONT	GVE_RXFLG(10)	/* Multi Fragment RX packet	*/
119 
/* GVE IRQ doorbell bits */
#define GVE_IRQ_ACK	BIT(31)
#define GVE_IRQ_MASK	BIT(30)
#define GVE_IRQ_EVENT	BIT(29)
124 
gve_needs_rss(__be16 flag)125 static inline bool gve_needs_rss(__be16 flag)
126 {
127 	if (flag & GVE_RXF_FRAG)
128 		return false;
129 	if (flag & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
130 		return true;
131 	return false;
132 }
133 
/* Advance a 3-bit RX sequence number; valid values cycle 1..7 (0 is
 * skipped, per the flags_seq comment on struct gve_rx_desc).
 */
static inline u8 gve_next_seqno(u8 seq)
{
	u8 next = seq + 1;

	if (next == 8)
		next = 1;
	return next;
}
138 #endif /* _GVE_DESC_H_ */
139