/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef RTE_EXEC_ENV_WINDOWS

#include "test_table_ports.h"
#include "test_table.h"

port_test port_tests[] = {
	test_port_ring_reader,
	test_port_ring_writer,
};

unsigned n_port_tests = RTE_DIM(port_tests);

/* Port tests */
int
test_port_ring_reader(void)
{
	int status, i;
	struct rte_port_ring_reader_params port_ring_reader_params;
	void *port;

	/* Invalid params */
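	/* f_create(NULL) and f_free(NULL) must both report failure */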
	port = rte_port_ring_reader_ops.f_create(NULL, 0);
	if (port != NULL)
		return -1;

	status = rte_port_ring_reader_ops.f_free(port);
	if (status >= 0)
		return -2;

	/* Create and free */
	port_ring_reader_params.ring = RING_RX;
	port = rte_port_ring_reader_ops.f_create(&port_ring_reader_params, 0);
	if (port == NULL)
		return -3;

	status = rte_port_ring_reader_ops.f_free(port);
	if (status != 0)
		return -4;

	/* -- Traffic RX -- */
	int expected_pkts, received_pkts;
	struct rte_mbuf *res_mbuf[RTE_PORT_IN_BURST_SIZE_MAX];
	void *mbuf[RTE_PORT_IN_BURST_SIZE_MAX];

	port_ring_reader_params.ring = RING_RX;
	port = rte_port_ring_reader_ops.f_create(&port_ring_reader_params, 0);

	/* Single packet */
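	/*
	 * Enqueue one mbuf straight onto RING_RX and expect the reader's
	 * f_rx() to hand that same mbuf back.
	 */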
	mbuf[0] = (void *)rte_pktmbuf_alloc(pool);

	expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
		mbuf, 1, NULL);
	received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf, 1);

	if (received_pkts < expected_pkts)
		return -5;

	rte_pktmbuf_free(res_mbuf[0]);

	/* Multiple packets */
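	/*
	 * Enqueue a full burst of RTE_PORT_IN_BURST_SIZE_MAX mbufs and expect
	 * a single f_rx() call to drain all of them.
	 */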
	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		mbuf[i] = rte_pktmbuf_alloc(pool);

	expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
		(void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX, NULL);
	received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf,
		RTE_PORT_IN_BURST_SIZE_MAX);

	if (received_pkts < expected_pkts)
		return -6;

	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		rte_pktmbuf_free(res_mbuf[i]);

	/* Release the port used for the traffic tests */
	rte_port_ring_reader_ops.f_free(port);

	return 0;
}

int
test_port_ring_writer(void)
{
	int status, i;
	struct rte_port_ring_writer_params port_ring_writer_params;
	void *port;

	/* Invalid params */
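	/*
	 * f_create() must reject NULL params, a NULL ring and a tx_burst_sz
	 * above RTE_PORT_IN_BURST_SIZE_MAX; f_free() must reject a NULL port.
	 */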
	port = rte_port_ring_writer_ops.f_create(NULL, 0);
	if (port != NULL)
		return -1;

	status = rte_port_ring_writer_ops.f_free(port);
	if (status >= 0)
		return -2;

	port_ring_writer_params.ring = NULL;

	port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);
	if (port != NULL)
		return -3;

	port_ring_writer_params.ring = RING_TX;
	port_ring_writer_params.tx_burst_sz = RTE_PORT_IN_BURST_SIZE_MAX + 1;

	port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);
	if (port != NULL)
		return -4;

	/* Create and free */
	port_ring_writer_params.ring = RING_TX;
	port_ring_writer_params.tx_burst_sz = RTE_PORT_IN_BURST_SIZE_MAX;

	port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);
	if (port == NULL)
		return -5;

	status = rte_port_ring_writer_ops.f_free(port);
	if (status != 0)
		return -6;

	/* -- Traffic TX -- */
	int expected_pkts, received_pkts;
	struct rte_mbuf *mbuf[RTE_PORT_IN_BURST_SIZE_MAX];
	struct rte_mbuf *res_mbuf[RTE_PORT_IN_BURST_SIZE_MAX];

	port_ring_writer_params.ring = RING_TX;
	port_ring_writer_params.tx_burst_sz = RTE_PORT_IN_BURST_SIZE_MAX;
	port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);

	/* Single packet */
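	/*
	 * f_tx() only buffers the packet inside the port; f_flush() forces the
	 * buffered packet out onto RING_TX so it can be dequeued below.
	 */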
	mbuf[0] = rte_pktmbuf_alloc(pool);

	rte_port_ring_writer_ops.f_tx(port, mbuf[0]);
	rte_port_ring_writer_ops.f_flush(port);
	expected_pkts = 1;
	received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
		(void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

	if (received_pkts < expected_pkts)
		return -7;

	rte_pktmbuf_free(res_mbuf[0]);

	/* Multiple packets */
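	/*
	 * Send a full burst one packet at a time; once the internal buffer
	 * reaches tx_burst_sz the port flushes to RING_TX on its own, so no
	 * explicit f_flush() is needed here.
	 */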
	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
		mbuf[i] = rte_pktmbuf_alloc(pool);
		rte_port_ring_writer_ops.f_tx(port, mbuf[i]);
	}

	expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
	received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
		(void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

	if (received_pkts < expected_pkts)
		return -8;

	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		rte_pktmbuf_free(res_mbuf[i]);

	/* TX Bulk */
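	/*
	 * f_tx_bulk() takes a 64-bit mask where bit i marks mbuf[i] as valid;
	 * the all-ones mask sends the complete burst in one call.
	 */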
	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		mbuf[i] = rte_pktmbuf_alloc(pool);
	rte_port_ring_writer_ops.f_tx_bulk(port, mbuf, (uint64_t)-1);

	expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
	received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
		(void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

	if (received_pkts < expected_pkts)
		return -9;

	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		rte_pktmbuf_free(res_mbuf[i]);

	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		mbuf[i] = rte_pktmbuf_alloc(pool);
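	/*
	 * Split the burst across two bulk calls: mask (uint64_t)-3 covers every
	 * slot except mbuf[1], mask 2 covers mbuf[1] alone. Together they fill
	 * the tx_burst_sz buffer, which flushes the full burst to RING_TX.
	 */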
	rte_port_ring_writer_ops.f_tx_bulk(port, mbuf, (uint64_t)-3);
	rte_port_ring_writer_ops.f_tx_bulk(port, mbuf, (uint64_t)2);

	expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
	received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
		(void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

	if (received_pkts < expected_pkts)
		return -10;

	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		rte_pktmbuf_free(res_mbuf[i]);

	/* Release the port used for the traffic tests */
	rte_port_ring_writer_ops.f_free(port);

	return 0;
}

#endif /* !RTE_EXEC_ENV_WINDOWS */