xref: /dpdk/drivers/net/ntnic/include/ntnic_virt_queue.h (revision f0fe222ea9cfe3c8a6972318d02a81a637aefd47)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #ifndef __NTOSS_VIRT_QUEUE_H__
7 #define __NTOSS_VIRT_QUEUE_H__
8 
9 #include <stdint.h>
10 #include <stdalign.h>
11 
12 #include <rte_memory.h>
13 
14 struct nthw_virt_queue;
15 
16 #define SPLIT_RING        0
17 #define PACKED_RING       1
18 #define IN_ORDER          1
19 
20 /*
21  * SPLIT : This marks a buffer as continuing via the next field.
22  * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
23  * contiguous) In Used descriptors it must be ignored
24  */
25 #define VIRTQ_DESC_F_NEXT 1
26 /*
27  * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
28  * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
29  * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
30  * the device into any parts of the buffer.
31  */
32 #define VIRTQ_DESC_F_WRITE 2
33 
/*
 * Split Ring virtq Descriptor.
 * Field layout matches the VIRTIO 1.x split-ring descriptor table entry
 * (one 16-byte entry per buffer); do not reorder or resize fields.
 * NOTE(review): the virtio spec defines these fields as little-endian —
 * confirm byte-order handling at the usage sites.
 */
struct __rte_aligned(8) virtq_desc {
	/* Address (guest-physical). */
	uint64_t addr;
	/* Length. */
	uint32_t len;
	/* The flags as indicated above (VIRTQ_DESC_F_NEXT / VIRTQ_DESC_F_WRITE). */
	uint16_t flags;
	/* Next field if flags & NEXT */
	uint16_t next;
};
47 
48 
49 /*
50  * Packed Ring special structures and defines
51  */
52 
53 /* additional packed ring flags */
54 #define VIRTQ_DESC_F_AVAIL     (1 << 7)
55 #define VIRTQ_DESC_F_USED      (1 << 15)
56 
/* descr phys address must be 16 byte aligned */
struct __rte_aligned(16) pvirtq_desc {
	/* Buffer Address (guest-physical). */
	uint64_t addr;
	/* Buffer Length. */
	uint32_t len;
	/* Buffer ID (packed rings identify buffers by ID rather than index). */
	uint16_t id;
	/* The flags depending on descriptor type (VIRTQ_DESC_F_* incl. AVAIL/USED). */
	uint16_t flags;
};
68 
69 /* Disable events */
70 #define RING_EVENT_FLAGS_DISABLE 0x1
71 
/*
 * Packed-ring event suppression area.
 * Controls when the other side (driver or device) generates
 * notifications/interrupts. Bitfield layout is fixed — do not reorder.
 */
struct __rte_aligned(16) pvirtq_event_suppress {
	union {
		struct {
			/* Descriptor Ring Change Event Offset */
			uint16_t desc_event_off : 15;
			/* Descriptor Ring Change Event Wrap Counter */
			uint16_t desc_event_wrap : 1;
		};
		/* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
		uint16_t desc;
	};

	union {
		struct {
			uint16_t desc_event_flags : 2;	/* Descriptor Ring Change Event Flags */
			uint16_t reserved : 14;	/* Reserved, set to 0 */
		};
		uint16_t flags;	/* Raw access to the whole event-flags word */
	};
};
92 
93 /*
94  * Common virtq descr
95  */
/*
 * Set the 'next' chaining index of descriptor 'index'.
 * Split rings only: packed-ring descriptors have no next field (chained
 * buffers must be contiguous), so this is a no-op for PACKED_RING.
 * Arguments are cached in locals once, consistent with vq_add_flags /
 * vq_set_flags, so side-effecting argument expressions are evaluated
 * exactly once.
 */
#define vq_set_next(vq, index, nxt) \
do { \
	struct nthw_cvirtq_desc *temp_vq = (vq); \
	uint16_t tmp_index = (index); \
	uint16_t tmp_nxt = (nxt); \
	if (temp_vq->vq_type == SPLIT_RING) \
		temp_vq->s[tmp_index].next = tmp_nxt; \
} while (0)
102 
/*
 * OR additional flag bits into descriptor 'index' flags word.
 * Works for both ring layouts; the flags field is uint16_t in both the
 * split and packed descriptor, so the temporary uses uint16_t explicitly
 * instead of the non-standard GNU 'typeof' extension (keeps the header
 * buildable under strict -std=c11 and matches vq_set_flags).
 */
#define vq_add_flags(vq, index, flgs) \
do { \
	struct nthw_cvirtq_desc *temp_vq = (vq); \
	uint16_t tmp_index = (index); \
	uint16_t tmp_flgs = (flgs); \
	if (temp_vq->vq_type == SPLIT_RING) \
		temp_vq->s[tmp_index].flags |= tmp_flgs; \
	else if (temp_vq->vq_type == PACKED_RING) \
		temp_vq->p[tmp_index].flags |= tmp_flgs; \
} while (0)
113 
/*
 * Overwrite the flags word of descriptor 'index'.
 * Works for both ring layouts. The temporaries are uint16_t to match the
 * width of the target flags fields (the previous uint32_t temporaries were
 * silently truncated on store and triggered -Wconversion warnings), and
 * naming/structure now mirrors vq_add_flags.
 */
#define vq_set_flags(vq, index, flgs) \
do { \
	struct nthw_cvirtq_desc *temp_vq = (vq); \
	uint16_t tmp_index = (index); \
	uint16_t tmp_flgs = (flgs); \
	if (temp_vq->vq_type == SPLIT_RING) \
		temp_vq->s[tmp_index].flags = tmp_flgs; \
	else if (temp_vq->vq_type == PACKED_RING) \
		temp_vq->p[tmp_index].flags = tmp_flgs; \
} while (0)
124 
/*
 * Buffer prefix common to both descriptor layouts: split (virtq_desc) and
 * packed (pvirtq_desc) descriptors both begin with a 64-bit guest-physical
 * address followed by a 32-bit length, so this view can alias either one
 * through the union in nthw_cvirtq_desc below.
 */
struct nthw_virtq_desc_buf {
	/* Address (guest-physical). */
	alignas(16) uint64_t addr;
	/* Length. */
	uint32_t len;
};
131 
/*
 * Common virtq descriptor-table handle: a single descriptor-array pointer
 * viewed as split or packed entries depending on vq_type. The accessor
 * macros above (vq_set_next / vq_add_flags / vq_set_flags) dispatch on
 * vq_type to pick the matching union member.
 */
struct nthw_cvirtq_desc {
	union {
		struct nthw_virtq_desc_buf *b;  /* buffer part as is common */
		struct virtq_desc     *s;  /* SPLIT */
		struct pvirtq_desc    *p;  /* PACKED */
	};
	uint16_t vq_type;	/* SPLIT_RING or PACKED_RING */
};
140 
/*
 * One received packet handed back to the caller.
 * NOTE(review): ownership/lifetime of 'addr' is not defined here —
 * presumably it points into queue-owned buffer memory; verify at the
 * usage sites before holding references past the next queue operation.
 */
struct nthw_received_packets {
	void *addr;	/* packet data (virtual address) */
	uint32_t len;	/* packet length in bytes */
};
145 
146 #endif /* __NTOSS_VIRT_QUEUE_H__ */
147