xref: /dpdk/drivers/net/gve/base/gve_osdep.h (revision e77506397fc8005c5129e22e9e2d15d5876790fd)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022 Intel Corporation
3  */
4 
5 #ifndef _GVE_OSDEP_H_
6 #define _GVE_OSDEP_H_
7 
8 #include <string.h>
9 #include <stdint.h>
10 #include <stdio.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <stdbool.h>
14 
15 #include <rte_bitops.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_ether.h>
19 #include <rte_io.h>
20 #include <rte_log.h>
21 #include <rte_malloc.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_version.h>
25 
26 #include "../gve_logs.h"
27 
28 #ifdef RTE_EXEC_ENV_LINUX
29 #include <sys/utsname.h>
30 #endif
31 
32 #ifndef u8
33 #define u8 uint8_t
34 #endif
35 #ifndef u16
36 #define u16 uint16_t
37 #endif
38 #ifndef u32
39 #define u32 uint32_t
40 #endif
41 #ifndef u64
42 #define u64 uint64_t
43 #endif
44 
45 #ifndef __sum16
46 #define __sum16 rte_be16_t
47 #endif
48 
49 #ifndef __be16
50 #define __be16 rte_be16_t
51 #endif
52 #ifndef __be32
53 #define __be32 rte_be32_t
54 #endif
55 #ifndef __be64
56 #define __be64 rte_be64_t
57 #endif
58 
59 #ifndef __le16
60 #define __le16 rte_le16_t
61 #endif
62 #ifndef __le32
63 #define __le32 rte_le32_t
64 #endif
65 #ifndef __le64
66 #define __le64 rte_le64_t
67 #endif
68 
69 #ifndef dma_addr_t
70 #define dma_addr_t rte_iova_t
71 #endif
72 
73 #define ETH_MIN_MTU	RTE_ETHER_MIN_MTU
74 #define ETH_ALEN	RTE_ETHER_ADDR_LEN
75 
76 #ifndef PAGE_SHIFT
77 #define PAGE_SHIFT	12
78 #endif
79 #ifndef PAGE_SIZE
80 #define PAGE_SIZE	(1UL << PAGE_SHIFT)
81 #endif
82 
83 #define BIT(nr)		RTE_BIT32(nr)
84 
85 #define be16_to_cpu(x) rte_be_to_cpu_16(x)
86 #define be32_to_cpu(x) rte_be_to_cpu_32(x)
87 #define be64_to_cpu(x) rte_be_to_cpu_64(x)
88 
89 #define cpu_to_be16(x) rte_cpu_to_be_16(x)
90 #define cpu_to_be32(x) rte_cpu_to_be_32(x)
91 #define cpu_to_be64(x) rte_cpu_to_be_64(x)
92 
93 #define READ_ONCE32(x) rte_read32(&(x))
94 
95 #ifndef ____cacheline_aligned
96 #define ____cacheline_aligned	__rte_cache_aligned
97 #endif
98 #ifndef __packed
99 #define __packed		__attribute__((__packed__))
100 #endif
101 #define __iomem
102 
103 #define msleep(ms)		rte_delay_ms(ms)
104 
/* Max length (including NUL) of each OS-version string reported to the device. */
#define OS_VERSION_STRLEN 128
/* Pair of OS identification strings (kernel release / version on Linux)
 * passed to the gVNIC device as part of the driver version handshake.
 */
struct os_version_string {
	char os_version_str1[OS_VERSION_STRLEN];	/* uname release, e.g. "5.15.0" */
	char os_version_str2[OS_VERSION_STRLEN];	/* uname version (build info) */
};
110 
111 /* These macros are used to generate compilation errors if a struct/union
112  * is not exactly the correct length. It gives a divide by zero error if
113  * the struct/union is not of the correct size, otherwise it creates an
114  * enum that is never used.
115  */
/* Divide-by-zero when sizeof(struct X) != n; otherwise defines an unused enum. */
#define GVE_CHECK_STRUCT_LEN(n, X) enum gve_static_assert_enum_##X \
	{ gve_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
/* Same check for unions. Fixed enum-tag typo: "asset" -> "assert",
 * matching the struct variant; the tag itself is never referenced.
 */
#define GVE_CHECK_UNION_LEN(n, X) enum gve_static_assert_enum_##X \
	{ gve_static_assert_##X = (n) / ((sizeof(union X) == (n)) ? 1 : 0) }
120 
121 static __rte_always_inline u8
122 readb(volatile void *addr)
123 {
124 	return rte_read8(addr);
125 }
126 
127 static __rte_always_inline void
128 writeb(u8 value, volatile void *addr)
129 {
130 	rte_write8(value, addr);
131 }
132 
133 static __rte_always_inline void
134 writel(u32 value, volatile void *addr)
135 {
136 	rte_write32(value, addr);
137 }
138 
139 static __rte_always_inline u32
140 ioread32be(const volatile void *addr)
141 {
142 	return rte_be_to_cpu_32(rte_read32(addr));
143 }
144 
145 static __rte_always_inline void
146 iowrite32be(u32 value, volatile void *addr)
147 {
148 	writel(rte_cpu_to_be_32(value), addr);
149 }
150 
/* DMA memory allocation tracking */
struct gve_dma_mem {
	void *va;		/* virtual address of the reserved region */
	rte_iova_t pa;		/* IOVA (bus/physical) address for the device */
	uint32_t size;		/* region size in bytes */
	const void *zone;	/* backing rte_memzone, kept opaque for freeing */
};
158 
159 static inline void *
160 gve_alloc_dma_mem(struct gve_dma_mem *mem, u64 size)
161 {
162 	static RTE_ATOMIC(uint16_t) gve_dma_memzone_id;
163 	const struct rte_memzone *mz = NULL;
164 	char z_name[RTE_MEMZONE_NAMESIZE];
165 
166 	if (!mem)
167 		return NULL;
168 
169 	snprintf(z_name, sizeof(z_name), "gve_dma_%u",
170 		 rte_atomic_fetch_add_explicit(&gve_dma_memzone_id, 1, rte_memory_order_relaxed));
171 	mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
172 					 RTE_MEMZONE_IOVA_CONTIG,
173 					 PAGE_SIZE);
174 	if (!mz)
175 		return NULL;
176 
177 	mem->size = size;
178 	mem->va = mz->addr;
179 	mem->pa = mz->iova;
180 	mem->zone = mz;
181 	PMD_DRV_LOG(DEBUG, "memzone %s is allocated", mz->name);
182 
183 	return mem->va;
184 }
185 
186 static inline void
187 gve_free_dma_mem(struct gve_dma_mem *mem)
188 {
189 	PMD_DRV_LOG(DEBUG, "memzone %s to be freed",
190 		    ((const struct rte_memzone *)mem->zone)->name);
191 
192 	rte_memzone_free(mem->zone);
193 	mem->zone = NULL;
194 	mem->va = NULL;
195 	mem->pa = 0;
196 }
197 
/*
 * Fill @str1/@str2 with OS identification strings (kernel release and
 * version from uname(2)) for the driver-version handshake. Both buffers
 * must hold at least OS_VERSION_STRLEN bytes.
 *
 * <sys/utsname.h> is only included under RTE_EXEC_ENV_LINUX (see top of
 * this header), so the uname() path must be guarded the same way; on
 * other OSes the strings are reported empty.
 */
static inline void
populate_driver_version_strings(char *str1, char *str2)
{
#ifdef RTE_EXEC_ENV_LINUX
	struct utsname uts;

	if (uname(&uts) >= 0) {
		/* release */
		rte_strscpy(str1, uts.release,
			OS_VERSION_STRLEN);
		/* version */
		rte_strscpy(str2, uts.version,
			OS_VERSION_STRLEN);
	}
#else
	/* No uname() available: report empty strings. */
	str1[0] = '\0';
	str2[0] = '\0';
#endif
}
211 #endif /* _GVE_OSDEP_H_ */
212