/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>

#include <rte_malloc.h>

#include "rte_port_sched.h"

#include "port_log.h"

/*
 * Reader
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SCHED_READER_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_SCHED_READER_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_SCHED_READER_PKTS_IN_ADD(port, val)
#define RTE_PORT_SCHED_READER_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_sched_reader {
	struct rte_port_in_stats stats;

	struct rte_sched_port *sched;
};

static void *
rte_port_sched_reader_create(void *params, int socket_id)
{
	struct rte_port_sched_reader_params *conf = params;
	struct rte_port_sched_reader *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->sched == NULL)) {
		PORT_LOG(ERR, "%s: Invalid params", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		PORT_LOG(ERR, "%s: Failed to allocate port", __func__);
		return NULL;
	}

	/* Initialization */
	port->sched = conf->sched;

	return port;
}

static int
rte_port_sched_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_sched_reader *p = port;
	uint32_t nb_rx;

	nb_rx = rte_sched_port_dequeue(p->sched, pkts, n_pkts);
	RTE_PORT_SCHED_READER_PKTS_IN_ADD(p, nb_rx);

	return nb_rx;
}

static int
rte_port_sched_reader_free(void *port)
{
	if (port == NULL) {
		PORT_LOG(ERR, "%s: port is NULL", __func__);
		return -EINVAL;
	}

	rte_free(port);

	return 0;
}

static int
rte_port_sched_reader_stats_read(void *port,
		struct rte_port_in_stats *stats, int clear)
{
	struct rte_port_sched_reader *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Writer
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_sched_writer {
	struct rte_port_out_stats stats;

	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	struct rte_sched_port *sched;
	uint32_t tx_burst_sz;
	uint32_t tx_buf_count;
	uint64_t bsz_mask;
};

static void *
rte_port_sched_writer_create(void *params, int socket_id)
{
	struct rte_port_sched_writer_params *conf = params;
	struct rte_port_sched_writer *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->sched == NULL) ||
		(conf->tx_burst_sz == 0) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->tx_burst_sz))) {
		PORT_LOG(ERR, "%s: Invalid params", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		PORT_LOG(ERR, "%s: Failed to allocate port", __func__);
		return NULL;
	}

	/* Initialization */
	port->sched = conf->sched;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;
	/* Bit (tx_burst_sz - 1): used by tx_bulk() to detect a full burst */
	port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);

	return port;
}

static int
rte_port_sched_writer_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(p, 1);
	if (p->tx_buf_count >= p->tx_burst_sz) {
		__rte_unused uint32_t nb_tx;

		nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
		RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
		p->tx_buf_count = 0;
	}

	return 0;
}

static int
rte_port_sched_writer_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	/*
	 * expr == 0 when pkts_mask is a contiguous run of ones starting at
	 * bit 0 that covers at least tx_burst_sz packets: such a burst is
	 * enqueued directly (after draining any buffered packets).
	 * Otherwise, packets are buffered one by one and flushed once the
	 * buffer holds a full burst.
	 */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		__rte_unused uint32_t nb_tx;
		uint64_t n_pkts = rte_popcount64(pkts_mask);

		if (tx_buf_count) {
			nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf,
				tx_buf_count);
			RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, tx_buf_count - nb_tx);
			p->tx_buf_count = 0;
		}

		nb_tx = rte_sched_port_enqueue(p->sched, pkts, n_pkts);
		RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - nb_tx);
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = rte_ctz64(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}
		p->tx_buf_count = tx_buf_count;

		if (tx_buf_count >= p->tx_burst_sz) {
			__rte_unused uint32_t nb_tx;

			nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf,
				tx_buf_count);
			RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, tx_buf_count - nb_tx);
			p->tx_buf_count = 0;
		}
	}

	return 0;
}

static int
rte_port_sched_writer_flush(void *port)
{
	struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;

	if (p->tx_buf_count) {
		__rte_unused uint32_t nb_tx;

		nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
		RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
		p->tx_buf_count = 0;
	}

	return 0;
}

static int
rte_port_sched_writer_free(void *port)
{
	if (port == NULL) {
		PORT_LOG(ERR, "%s: port is NULL", __func__);
		return -EINVAL;
	}

	rte_port_sched_writer_flush(port);
	rte_free(port);

	return 0;
}

static int
rte_port_sched_writer_stats_read(void *port,
		struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_sched_writer *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Summary of port operations
 */
struct rte_port_in_ops rte_port_sched_reader_ops = {
	.f_create = rte_port_sched_reader_create,
	.f_free = rte_port_sched_reader_free,
	.f_rx = rte_port_sched_reader_rx,
	.f_stats = rte_port_sched_reader_stats_read,
};

struct rte_port_out_ops rte_port_sched_writer_ops = {
	.f_create = rte_port_sched_writer_create,
	.f_free = rte_port_sched_writer_free,
	.f_tx = rte_port_sched_writer_tx,
	.f_tx_bulk = rte_port_sched_writer_tx_bulk,
	.f_flush = rte_port_sched_writer_flush,
	.f_stats = rte_port_sched_writer_stats_read,
};
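
/*
 * Usage sketch (illustrative only, not part of the library): the ops tables
 * above are normally plugged into the packet framework, but they can also be
 * exercised directly. In the snippet below, "sched" is assumed to be an
 * rte_sched_port configured elsewhere (e.g. via rte_sched_port_config()) and
 * "pkt" an mbuf owned by the caller; tx_burst_sz must be a power of 2 no
 * larger than RTE_PORT_IN_BURST_SIZE_MAX.
 *
 *	struct rte_port_sched_writer_params wp = {
 *		.sched = sched,
 *		.tx_burst_sz = 32,
 *	};
 *	void *wport = rte_port_sched_writer_ops.f_create(&wp, rte_socket_id());
 *
 *	rte_port_sched_writer_ops.f_tx(wport, pkt);	(buffered enqueue)
 *	rte_port_sched_writer_ops.f_flush(wport);	(push any partial burst)
 *	rte_port_sched_writer_ops.f_free(wport);
 */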