182092c87SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause
25b4c63bdSAdrien Mazarguil * Copyright 2017 6WIND S.A.
35feecc57SShahaf Shuler * Copyright 2017 Mellanox Technologies, Ltd
45b4c63bdSAdrien Mazarguil */
55b4c63bdSAdrien Mazarguil
65b4c63bdSAdrien Mazarguil /**
75b4c63bdSAdrien Mazarguil * @file
85b4c63bdSAdrien Mazarguil * Rx queues configuration for mlx4 driver.
95b4c63bdSAdrien Mazarguil */
105b4c63bdSAdrien Mazarguil
115b4c63bdSAdrien Mazarguil #include <errno.h>
125b4c63bdSAdrien Mazarguil #include <stddef.h>
135b4c63bdSAdrien Mazarguil #include <stdint.h>
145b4c63bdSAdrien Mazarguil #include <string.h>
155b4c63bdSAdrien Mazarguil
165b4c63bdSAdrien Mazarguil /* Verbs headers do not support -pedantic. */
175b4c63bdSAdrien Mazarguil #ifdef PEDANTIC
185b4c63bdSAdrien Mazarguil #pragma GCC diagnostic ignored "-Wpedantic"
195b4c63bdSAdrien Mazarguil #endif
205697a414SAdrien Mazarguil #include <infiniband/mlx4dv.h>
215b4c63bdSAdrien Mazarguil #include <infiniband/verbs.h>
225b4c63bdSAdrien Mazarguil #ifdef PEDANTIC
235b4c63bdSAdrien Mazarguil #pragma GCC diagnostic error "-Wpedantic"
245b4c63bdSAdrien Mazarguil #endif
255b4c63bdSAdrien Mazarguil
266681b845SMoti Haimovsky #include <rte_byteorder.h>
275b4c63bdSAdrien Mazarguil #include <rte_common.h>
285b4c63bdSAdrien Mazarguil #include <rte_errno.h>
29df96fd0dSBruce Richardson #include <ethdev_driver.h>
30fee75e14SAdrien Mazarguil #include <rte_flow.h>
315b4c63bdSAdrien Mazarguil #include <rte_malloc.h>
325b4c63bdSAdrien Mazarguil #include <rte_mbuf.h>
335b4c63bdSAdrien Mazarguil #include <rte_mempool.h>
345b4c63bdSAdrien Mazarguil
355b4c63bdSAdrien Mazarguil #include "mlx4.h"
364eba244bSAdrien Mazarguil #include "mlx4_glue.h"
37bdcad2f4SAdrien Mazarguil #include "mlx4_flow.h"
385b4c63bdSAdrien Mazarguil #include "mlx4_rxtx.h"
395b4c63bdSAdrien Mazarguil #include "mlx4_utils.h"
405b4c63bdSAdrien Mazarguil
415b4c63bdSAdrien Mazarguil /**
42078b8b45SAdrien Mazarguil * Historical RSS hash key.
43078b8b45SAdrien Mazarguil *
44078b8b45SAdrien Mazarguil * This used to be the default for mlx4 in Linux before v3.19 switched to
45078b8b45SAdrien Mazarguil * generating random hash keys through netdev_rss_key_fill().
46078b8b45SAdrien Mazarguil *
47078b8b45SAdrien Mazarguil * It is used in this PMD for consistency with past DPDK releases but can
48078b8b45SAdrien Mazarguil * now be overridden through user configuration.
49078b8b45SAdrien Mazarguil *
50078b8b45SAdrien Mazarguil * Note: this is not const to work around API quirks.
51078b8b45SAdrien Mazarguil */
52078b8b45SAdrien Mazarguil uint8_t
53078b8b45SAdrien Mazarguil mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
54078b8b45SAdrien Mazarguil 0x2c, 0xc6, 0x81, 0xd1,
55078b8b45SAdrien Mazarguil 0x5b, 0xdb, 0xf4, 0xf7,
56078b8b45SAdrien Mazarguil 0xfc, 0xa2, 0x83, 0x19,
57078b8b45SAdrien Mazarguil 0xdb, 0x1a, 0x3e, 0x94,
58078b8b45SAdrien Mazarguil 0x6b, 0x9e, 0x38, 0xd9,
59078b8b45SAdrien Mazarguil 0x2c, 0x9c, 0x03, 0xd1,
60078b8b45SAdrien Mazarguil 0xad, 0x99, 0x44, 0xa7,
61078b8b45SAdrien Mazarguil 0xd9, 0x56, 0x3d, 0x59,
62078b8b45SAdrien Mazarguil 0x06, 0x3c, 0x25, 0xf3,
63078b8b45SAdrien Mazarguil 0xfc, 0x1f, 0xdc, 0x2a,
64078b8b45SAdrien Mazarguil };
65078b8b45SAdrien Mazarguil
66078b8b45SAdrien Mazarguil /**
67078b8b45SAdrien Mazarguil * Obtain a RSS context with specified properties.
68078b8b45SAdrien Mazarguil *
69078b8b45SAdrien Mazarguil * Used when creating a flow rule targeting one or several Rx queues.
70078b8b45SAdrien Mazarguil *
71078b8b45SAdrien Mazarguil * If a matching RSS context already exists, it is returned with its
72078b8b45SAdrien Mazarguil * reference count incremented.
73078b8b45SAdrien Mazarguil *
74078b8b45SAdrien Mazarguil * @param priv
75078b8b45SAdrien Mazarguil * Pointer to private structure.
76078b8b45SAdrien Mazarguil * @param fields
77078b8b45SAdrien Mazarguil * Fields for RSS processing (Verbs format).
78078b8b45SAdrien Mazarguil * @param[in] key
79078b8b45SAdrien Mazarguil * Hash key to use (whose size is exactly MLX4_RSS_HASH_KEY_SIZE).
80078b8b45SAdrien Mazarguil * @param queues
81078b8b45SAdrien Mazarguil * Number of target queues.
82078b8b45SAdrien Mazarguil * @param[in] queue_id
83078b8b45SAdrien Mazarguil * Target queues.
84078b8b45SAdrien Mazarguil *
85078b8b45SAdrien Mazarguil * @return
86078b8b45SAdrien Mazarguil * Pointer to RSS context on success, NULL otherwise and rte_errno is set.
87078b8b45SAdrien Mazarguil */
88078b8b45SAdrien Mazarguil struct mlx4_rss *
mlx4_rss_get(struct mlx4_priv * priv,uint64_t fields,const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],uint16_t queues,const uint16_t queue_id[])89dbeba4cfSThomas Monjalon mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields,
90ac8d22deSAdrien Mazarguil const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
91078b8b45SAdrien Mazarguil uint16_t queues, const uint16_t queue_id[])
92078b8b45SAdrien Mazarguil {
93078b8b45SAdrien Mazarguil struct mlx4_rss *rss;
94078b8b45SAdrien Mazarguil size_t queue_id_size = sizeof(queue_id[0]) * queues;
95078b8b45SAdrien Mazarguil
96078b8b45SAdrien Mazarguil LIST_FOREACH(rss, &priv->rss, next)
97078b8b45SAdrien Mazarguil if (fields == rss->fields &&
98078b8b45SAdrien Mazarguil queues == rss->queues &&
99078b8b45SAdrien Mazarguil !memcmp(key, rss->key, MLX4_RSS_HASH_KEY_SIZE) &&
100078b8b45SAdrien Mazarguil !memcmp(queue_id, rss->queue_id, queue_id_size)) {
101078b8b45SAdrien Mazarguil ++rss->refcnt;
102078b8b45SAdrien Mazarguil return rss;
103078b8b45SAdrien Mazarguil }
104078b8b45SAdrien Mazarguil rss = rte_malloc(__func__, offsetof(struct mlx4_rss, queue_id) +
105078b8b45SAdrien Mazarguil queue_id_size, 0);
106078b8b45SAdrien Mazarguil if (!rss)
107078b8b45SAdrien Mazarguil goto error;
108078b8b45SAdrien Mazarguil *rss = (struct mlx4_rss){
109078b8b45SAdrien Mazarguil .priv = priv,
110078b8b45SAdrien Mazarguil .refcnt = 1,
111078b8b45SAdrien Mazarguil .usecnt = 0,
112078b8b45SAdrien Mazarguil .qp = NULL,
113078b8b45SAdrien Mazarguil .ind = NULL,
114078b8b45SAdrien Mazarguil .fields = fields,
115078b8b45SAdrien Mazarguil .queues = queues,
116078b8b45SAdrien Mazarguil };
117078b8b45SAdrien Mazarguil memcpy(rss->key, key, MLX4_RSS_HASH_KEY_SIZE);
118078b8b45SAdrien Mazarguil memcpy(rss->queue_id, queue_id, queue_id_size);
119078b8b45SAdrien Mazarguil LIST_INSERT_HEAD(&priv->rss, rss, next);
120078b8b45SAdrien Mazarguil return rss;
121078b8b45SAdrien Mazarguil error:
122078b8b45SAdrien Mazarguil rte_errno = ENOMEM;
123078b8b45SAdrien Mazarguil return NULL;
124078b8b45SAdrien Mazarguil }
125078b8b45SAdrien Mazarguil
126078b8b45SAdrien Mazarguil /**
127078b8b45SAdrien Mazarguil * Release a RSS context instance.
128078b8b45SAdrien Mazarguil *
129078b8b45SAdrien Mazarguil * Used when destroying a flow rule targeting one or several Rx queues.
130078b8b45SAdrien Mazarguil *
131078b8b45SAdrien Mazarguil * This function decrements the reference count of the context and destroys
132078b8b45SAdrien Mazarguil * it after reaching 0. The context must have no users at this point; all
133078b8b45SAdrien Mazarguil * prior calls to mlx4_rss_attach() must have been followed by matching
134078b8b45SAdrien Mazarguil * calls to mlx4_rss_detach().
135078b8b45SAdrien Mazarguil *
136078b8b45SAdrien Mazarguil * @param rss
137078b8b45SAdrien Mazarguil * RSS context to release.
138078b8b45SAdrien Mazarguil */
139b3d197b4SAdrien Mazarguil void
mlx4_rss_put(struct mlx4_rss * rss)140b3d197b4SAdrien Mazarguil mlx4_rss_put(struct mlx4_rss *rss)
141078b8b45SAdrien Mazarguil {
1428e08df22SAlexander Kozyrev MLX4_ASSERT(rss->refcnt);
143078b8b45SAdrien Mazarguil if (--rss->refcnt)
144078b8b45SAdrien Mazarguil return;
1458e08df22SAlexander Kozyrev MLX4_ASSERT(!rss->usecnt);
1468e08df22SAlexander Kozyrev MLX4_ASSERT(!rss->qp);
1478e08df22SAlexander Kozyrev MLX4_ASSERT(!rss->ind);
148078b8b45SAdrien Mazarguil LIST_REMOVE(rss, next);
149078b8b45SAdrien Mazarguil rte_free(rss);
150078b8b45SAdrien Mazarguil }
151078b8b45SAdrien Mazarguil
152078b8b45SAdrien Mazarguil /**
153078b8b45SAdrien Mazarguil * Attach a user to a RSS context instance.
154078b8b45SAdrien Mazarguil *
155078b8b45SAdrien Mazarguil * Used when the RSS QP and indirection table objects must be instantiated,
156078b8b45SAdrien Mazarguil * that is, when a flow rule must be enabled.
157078b8b45SAdrien Mazarguil *
158078b8b45SAdrien Mazarguil * This function increments the usage count of the context.
159078b8b45SAdrien Mazarguil *
160078b8b45SAdrien Mazarguil * @param rss
161078b8b45SAdrien Mazarguil * RSS context to attach to.
1620ef007c9SAdrien Mazarguil *
1630ef007c9SAdrien Mazarguil * @return
1640ef007c9SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set.
165078b8b45SAdrien Mazarguil */
166b3d197b4SAdrien Mazarguil int
mlx4_rss_attach(struct mlx4_rss * rss)167b3d197b4SAdrien Mazarguil mlx4_rss_attach(struct mlx4_rss *rss)
168078b8b45SAdrien Mazarguil {
1698e08df22SAlexander Kozyrev MLX4_ASSERT(rss->refcnt);
170078b8b45SAdrien Mazarguil if (rss->usecnt++) {
1718e08df22SAlexander Kozyrev MLX4_ASSERT(rss->qp);
1728e08df22SAlexander Kozyrev MLX4_ASSERT(rss->ind);
173078b8b45SAdrien Mazarguil return 0;
174078b8b45SAdrien Mazarguil }
175078b8b45SAdrien Mazarguil
176078b8b45SAdrien Mazarguil struct ibv_wq *ind_tbl[rss->queues];
177dbeba4cfSThomas Monjalon struct mlx4_priv *priv = rss->priv;
178099c2c53SYongseok Koh struct rte_eth_dev *dev = ETH_DEV(priv);
179078b8b45SAdrien Mazarguil const char *msg;
1805697a414SAdrien Mazarguil unsigned int i = 0;
181078b8b45SAdrien Mazarguil int ret;
182078b8b45SAdrien Mazarguil
183078b8b45SAdrien Mazarguil if (!rte_is_power_of_2(RTE_DIM(ind_tbl))) {
1840ef007c9SAdrien Mazarguil ret = EINVAL;
185078b8b45SAdrien Mazarguil msg = "number of RSS queues must be a power of two";
186078b8b45SAdrien Mazarguil goto error;
187078b8b45SAdrien Mazarguil }
188078b8b45SAdrien Mazarguil for (i = 0; i != RTE_DIM(ind_tbl); ++i) {
189078b8b45SAdrien Mazarguil uint16_t id = rss->queue_id[i];
190078b8b45SAdrien Mazarguil struct rxq *rxq = NULL;
191078b8b45SAdrien Mazarguil
192099c2c53SYongseok Koh if (id < dev->data->nb_rx_queues)
193099c2c53SYongseok Koh rxq = dev->data->rx_queues[id];
194078b8b45SAdrien Mazarguil if (!rxq) {
1950ef007c9SAdrien Mazarguil ret = EINVAL;
196078b8b45SAdrien Mazarguil msg = "RSS target queue is not configured";
197078b8b45SAdrien Mazarguil goto error;
198078b8b45SAdrien Mazarguil }
1995697a414SAdrien Mazarguil ret = mlx4_rxq_attach(rxq);
2005697a414SAdrien Mazarguil if (ret) {
2015697a414SAdrien Mazarguil ret = -ret;
2025697a414SAdrien Mazarguil msg = "unable to attach RSS target queue";
2035697a414SAdrien Mazarguil goto error;
2045697a414SAdrien Mazarguil }
205078b8b45SAdrien Mazarguil ind_tbl[i] = rxq->wq;
206078b8b45SAdrien Mazarguil }
2074eba244bSAdrien Mazarguil rss->ind = mlx4_glue->create_rwq_ind_table
208078b8b45SAdrien Mazarguil (priv->ctx,
209078b8b45SAdrien Mazarguil &(struct ibv_rwq_ind_table_init_attr){
210078b8b45SAdrien Mazarguil .log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
211078b8b45SAdrien Mazarguil .ind_tbl = ind_tbl,
212078b8b45SAdrien Mazarguil .comp_mask = 0,
213078b8b45SAdrien Mazarguil });
214078b8b45SAdrien Mazarguil if (!rss->ind) {
2150ef007c9SAdrien Mazarguil ret = errno ? errno : EINVAL;
216078b8b45SAdrien Mazarguil msg = "RSS indirection table creation failure";
217078b8b45SAdrien Mazarguil goto error;
218078b8b45SAdrien Mazarguil }
2194eba244bSAdrien Mazarguil rss->qp = mlx4_glue->create_qp_ex
220078b8b45SAdrien Mazarguil (priv->ctx,
221078b8b45SAdrien Mazarguil &(struct ibv_qp_init_attr_ex){
222078b8b45SAdrien Mazarguil .comp_mask = (IBV_QP_INIT_ATTR_PD |
223078b8b45SAdrien Mazarguil IBV_QP_INIT_ATTR_RX_HASH |
224078b8b45SAdrien Mazarguil IBV_QP_INIT_ATTR_IND_TABLE),
225078b8b45SAdrien Mazarguil .qp_type = IBV_QPT_RAW_PACKET,
226078b8b45SAdrien Mazarguil .pd = priv->pd,
227078b8b45SAdrien Mazarguil .rwq_ind_tbl = rss->ind,
228078b8b45SAdrien Mazarguil .rx_hash_conf = {
229078b8b45SAdrien Mazarguil .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
230078b8b45SAdrien Mazarguil .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
231078b8b45SAdrien Mazarguil .rx_hash_key = rss->key,
232078b8b45SAdrien Mazarguil .rx_hash_fields_mask = rss->fields,
233078b8b45SAdrien Mazarguil },
234078b8b45SAdrien Mazarguil });
235078b8b45SAdrien Mazarguil if (!rss->qp) {
2360ef007c9SAdrien Mazarguil ret = errno ? errno : EINVAL;
237078b8b45SAdrien Mazarguil msg = "RSS hash QP creation failure";
238078b8b45SAdrien Mazarguil goto error;
239078b8b45SAdrien Mazarguil }
2404eba244bSAdrien Mazarguil ret = mlx4_glue->modify_qp
241078b8b45SAdrien Mazarguil (rss->qp,
242078b8b45SAdrien Mazarguil &(struct ibv_qp_attr){
243078b8b45SAdrien Mazarguil .qp_state = IBV_QPS_INIT,
244078b8b45SAdrien Mazarguil .port_num = priv->port,
245078b8b45SAdrien Mazarguil },
246078b8b45SAdrien Mazarguil IBV_QP_STATE | IBV_QP_PORT);
247078b8b45SAdrien Mazarguil if (ret) {
248078b8b45SAdrien Mazarguil msg = "failed to switch RSS hash QP to INIT state";
249078b8b45SAdrien Mazarguil goto error;
250078b8b45SAdrien Mazarguil }
2514eba244bSAdrien Mazarguil ret = mlx4_glue->modify_qp
252078b8b45SAdrien Mazarguil (rss->qp,
253078b8b45SAdrien Mazarguil &(struct ibv_qp_attr){
254078b8b45SAdrien Mazarguil .qp_state = IBV_QPS_RTR,
255078b8b45SAdrien Mazarguil },
256078b8b45SAdrien Mazarguil IBV_QP_STATE);
257078b8b45SAdrien Mazarguil if (ret) {
258078b8b45SAdrien Mazarguil msg = "failed to switch RSS hash QP to RTR state";
259078b8b45SAdrien Mazarguil goto error;
260078b8b45SAdrien Mazarguil }
261078b8b45SAdrien Mazarguil return 0;
262078b8b45SAdrien Mazarguil error:
2630ef007c9SAdrien Mazarguil if (rss->qp) {
2644eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_qp(rss->qp));
2650ef007c9SAdrien Mazarguil rss->qp = NULL;
2660ef007c9SAdrien Mazarguil }
2670ef007c9SAdrien Mazarguil if (rss->ind) {
2684eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
2690ef007c9SAdrien Mazarguil rss->ind = NULL;
2700ef007c9SAdrien Mazarguil }
2715697a414SAdrien Mazarguil while (i--)
272099c2c53SYongseok Koh mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
273078b8b45SAdrien Mazarguil ERROR("mlx4: %s", msg);
274078b8b45SAdrien Mazarguil --rss->usecnt;
2750ef007c9SAdrien Mazarguil rte_errno = ret;
2760ef007c9SAdrien Mazarguil return -ret;
277078b8b45SAdrien Mazarguil }
278078b8b45SAdrien Mazarguil
279078b8b45SAdrien Mazarguil /**
280078b8b45SAdrien Mazarguil * Detach a user from a RSS context instance.
281078b8b45SAdrien Mazarguil *
282078b8b45SAdrien Mazarguil * Used when disabling (not destroying) a flow rule.
283078b8b45SAdrien Mazarguil *
284078b8b45SAdrien Mazarguil * This function decrements the usage count of the context and destroys
285078b8b45SAdrien Mazarguil * usage resources after reaching 0.
286078b8b45SAdrien Mazarguil *
287078b8b45SAdrien Mazarguil * @param rss
288078b8b45SAdrien Mazarguil * RSS context to detach from.
289078b8b45SAdrien Mazarguil */
290b3d197b4SAdrien Mazarguil void
mlx4_rss_detach(struct mlx4_rss * rss)291b3d197b4SAdrien Mazarguil mlx4_rss_detach(struct mlx4_rss *rss)
292078b8b45SAdrien Mazarguil {
293dbeba4cfSThomas Monjalon struct mlx4_priv *priv = rss->priv;
294099c2c53SYongseok Koh struct rte_eth_dev *dev = ETH_DEV(priv);
2955697a414SAdrien Mazarguil unsigned int i;
2965697a414SAdrien Mazarguil
2978e08df22SAlexander Kozyrev MLX4_ASSERT(rss->refcnt);
2988e08df22SAlexander Kozyrev MLX4_ASSERT(rss->qp);
2998e08df22SAlexander Kozyrev MLX4_ASSERT(rss->ind);
300078b8b45SAdrien Mazarguil if (--rss->usecnt)
301078b8b45SAdrien Mazarguil return;
3024eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_qp(rss->qp));
303078b8b45SAdrien Mazarguil rss->qp = NULL;
3044eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
305078b8b45SAdrien Mazarguil rss->ind = NULL;
3065697a414SAdrien Mazarguil for (i = 0; i != rss->queues; ++i)
307099c2c53SYongseok Koh mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
308078b8b45SAdrien Mazarguil }
309078b8b45SAdrien Mazarguil
310078b8b45SAdrien Mazarguil /**
3115697a414SAdrien Mazarguil * Initialize common RSS context resources.
3125697a414SAdrien Mazarguil *
3135697a414SAdrien Mazarguil * Because ConnectX-3 hardware limitations require a fixed order in the
3145697a414SAdrien Mazarguil * indirection table, WQs must be allocated sequentially to be part of a
3155697a414SAdrien Mazarguil * common RSS context.
3165697a414SAdrien Mazarguil *
3175697a414SAdrien Mazarguil * Since a newly created WQ cannot be moved to a different context, this
3185697a414SAdrien Mazarguil * function allocates them all at once, one for each configured Rx queue,
3195697a414SAdrien Mazarguil * as well as all related resources (CQs and mbufs).
3205697a414SAdrien Mazarguil *
3215697a414SAdrien Mazarguil * This must therefore be done before creating any Rx flow rules relying on
3225697a414SAdrien Mazarguil * indirection tables.
3235697a414SAdrien Mazarguil *
3245697a414SAdrien Mazarguil * @param priv
3255697a414SAdrien Mazarguil * Pointer to private structure.
3265697a414SAdrien Mazarguil *
3275697a414SAdrien Mazarguil * @return
3285697a414SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set.
3295697a414SAdrien Mazarguil */
3305697a414SAdrien Mazarguil int
mlx4_rss_init(struct mlx4_priv * priv)331dbeba4cfSThomas Monjalon mlx4_rss_init(struct mlx4_priv *priv)
3325697a414SAdrien Mazarguil {
333099c2c53SYongseok Koh struct rte_eth_dev *dev = ETH_DEV(priv);
3345697a414SAdrien Mazarguil uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
3355697a414SAdrien Mazarguil uint32_t wq_num_prev = 0;
3365697a414SAdrien Mazarguil const char *msg;
3375697a414SAdrien Mazarguil unsigned int i;
3385697a414SAdrien Mazarguil int ret;
3395697a414SAdrien Mazarguil
34084a68486SAdrien Mazarguil if (priv->rss_init)
34184a68486SAdrien Mazarguil return 0;
342099c2c53SYongseok Koh if (ETH_DEV(priv)->data->nb_rx_queues > priv->hw_rss_max_qps) {
3432b4e423fSMoti Haimovsky ERROR("RSS does not support more than %d queues",
3442b4e423fSMoti Haimovsky priv->hw_rss_max_qps);
3452b4e423fSMoti Haimovsky rte_errno = EINVAL;
3462b4e423fSMoti Haimovsky return -rte_errno;
3472b4e423fSMoti Haimovsky }
3485697a414SAdrien Mazarguil /* Prepare range for RSS contexts before creating the first WQ. */
3494eba244bSAdrien Mazarguil ret = mlx4_glue->dv_set_context_attr
3504eba244bSAdrien Mazarguil (priv->ctx,
3515697a414SAdrien Mazarguil MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
3525697a414SAdrien Mazarguil &log2_range);
3535697a414SAdrien Mazarguil if (ret) {
3545697a414SAdrien Mazarguil ERROR("cannot set up range size for RSS context to %u"
3555697a414SAdrien Mazarguil " (for %u Rx queues), error: %s",
3565697a414SAdrien Mazarguil 1 << log2_range, dev->data->nb_rx_queues, strerror(ret));
3575697a414SAdrien Mazarguil rte_errno = ret;
3585697a414SAdrien Mazarguil return -ret;
3595697a414SAdrien Mazarguil }
360099c2c53SYongseok Koh for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
361099c2c53SYongseok Koh struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
3625697a414SAdrien Mazarguil struct ibv_cq *cq;
3635697a414SAdrien Mazarguil struct ibv_wq *wq;
3645697a414SAdrien Mazarguil uint32_t wq_num;
3655697a414SAdrien Mazarguil
3665697a414SAdrien Mazarguil /* Attach the configured Rx queues. */
3675697a414SAdrien Mazarguil if (rxq) {
3688e08df22SAlexander Kozyrev MLX4_ASSERT(!rxq->usecnt);
3695697a414SAdrien Mazarguil ret = mlx4_rxq_attach(rxq);
3705697a414SAdrien Mazarguil if (!ret) {
3715697a414SAdrien Mazarguil wq_num = rxq->wq->wq_num;
3725697a414SAdrien Mazarguil goto wq_num_check;
3735697a414SAdrien Mazarguil }
3745697a414SAdrien Mazarguil ret = -ret;
3755697a414SAdrien Mazarguil msg = "unable to create Rx queue resources";
3765697a414SAdrien Mazarguil goto error;
3775697a414SAdrien Mazarguil }
3785697a414SAdrien Mazarguil /*
3795697a414SAdrien Mazarguil * WQs are temporarily allocated for unconfigured Rx queues
3805697a414SAdrien Mazarguil * to maintain proper index alignment in indirection table
3815697a414SAdrien Mazarguil * by skipping unused WQ numbers.
3825697a414SAdrien Mazarguil *
3835697a414SAdrien Mazarguil * The reason this works at all even though these WQs are
3845697a414SAdrien Mazarguil * immediately destroyed is that WQNs are allocated
3855697a414SAdrien Mazarguil * sequentially and are guaranteed to never be reused in the
3865697a414SAdrien Mazarguil * same context by the underlying implementation.
3875697a414SAdrien Mazarguil */
3884eba244bSAdrien Mazarguil cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
3895697a414SAdrien Mazarguil if (!cq) {
3905697a414SAdrien Mazarguil ret = ENOMEM;
3915697a414SAdrien Mazarguil msg = "placeholder CQ creation failure";
3925697a414SAdrien Mazarguil goto error;
3935697a414SAdrien Mazarguil }
3944eba244bSAdrien Mazarguil wq = mlx4_glue->create_wq
3955697a414SAdrien Mazarguil (priv->ctx,
3965697a414SAdrien Mazarguil &(struct ibv_wq_init_attr){
3975697a414SAdrien Mazarguil .wq_type = IBV_WQT_RQ,
3985697a414SAdrien Mazarguil .max_wr = 1,
3995697a414SAdrien Mazarguil .max_sge = 1,
4005697a414SAdrien Mazarguil .pd = priv->pd,
4015697a414SAdrien Mazarguil .cq = cq,
4025697a414SAdrien Mazarguil });
4035697a414SAdrien Mazarguil if (wq) {
4045697a414SAdrien Mazarguil wq_num = wq->wq_num;
4054eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_wq(wq));
4065697a414SAdrien Mazarguil } else {
4075697a414SAdrien Mazarguil wq_num = 0; /* Shut up GCC 4.8 warnings. */
4085697a414SAdrien Mazarguil }
4094eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_cq(cq));
4105697a414SAdrien Mazarguil if (!wq) {
4115697a414SAdrien Mazarguil ret = ENOMEM;
4125697a414SAdrien Mazarguil msg = "placeholder WQ creation failure";
4135697a414SAdrien Mazarguil goto error;
4145697a414SAdrien Mazarguil }
4155697a414SAdrien Mazarguil wq_num_check:
4165697a414SAdrien Mazarguil /*
4175697a414SAdrien Mazarguil * While guaranteed by the implementation, make sure WQ
4185697a414SAdrien Mazarguil * numbers are really sequential (as the saying goes,
4195697a414SAdrien Mazarguil * trust, but verify).
4205697a414SAdrien Mazarguil */
4215697a414SAdrien Mazarguil if (i && wq_num - wq_num_prev != 1) {
4225697a414SAdrien Mazarguil if (rxq)
4235697a414SAdrien Mazarguil mlx4_rxq_detach(rxq);
4245697a414SAdrien Mazarguil ret = ERANGE;
4255697a414SAdrien Mazarguil msg = "WQ numbers are not sequential";
4265697a414SAdrien Mazarguil goto error;
4275697a414SAdrien Mazarguil }
4285697a414SAdrien Mazarguil wq_num_prev = wq_num;
4295697a414SAdrien Mazarguil }
43084a68486SAdrien Mazarguil priv->rss_init = 1;
4315697a414SAdrien Mazarguil return 0;
4325697a414SAdrien Mazarguil error:
4335697a414SAdrien Mazarguil ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
4345697a414SAdrien Mazarguil i, msg, strerror(ret));
4355697a414SAdrien Mazarguil while (i--) {
436099c2c53SYongseok Koh struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
4375697a414SAdrien Mazarguil
4385697a414SAdrien Mazarguil if (rxq)
4395697a414SAdrien Mazarguil mlx4_rxq_detach(rxq);
4405697a414SAdrien Mazarguil }
4415697a414SAdrien Mazarguil rte_errno = ret;
4425697a414SAdrien Mazarguil return -ret;
4435697a414SAdrien Mazarguil }
4445697a414SAdrien Mazarguil
4455697a414SAdrien Mazarguil /**
4465697a414SAdrien Mazarguil * Release common RSS context resources.
4475697a414SAdrien Mazarguil *
4485697a414SAdrien Mazarguil * As the reverse of mlx4_rss_init(), this must be done after removing all
4495697a414SAdrien Mazarguil * flow rules relying on indirection tables.
4505697a414SAdrien Mazarguil *
4515697a414SAdrien Mazarguil * @param priv
4525697a414SAdrien Mazarguil * Pointer to private structure.
4535697a414SAdrien Mazarguil */
4545697a414SAdrien Mazarguil void
mlx4_rss_deinit(struct mlx4_priv * priv)455dbeba4cfSThomas Monjalon mlx4_rss_deinit(struct mlx4_priv *priv)
4565697a414SAdrien Mazarguil {
4575697a414SAdrien Mazarguil unsigned int i;
4585697a414SAdrien Mazarguil
45984a68486SAdrien Mazarguil if (!priv->rss_init)
46084a68486SAdrien Mazarguil return;
461099c2c53SYongseok Koh for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
462099c2c53SYongseok Koh struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
4635697a414SAdrien Mazarguil
4645697a414SAdrien Mazarguil if (rxq) {
4658e08df22SAlexander Kozyrev MLX4_ASSERT(rxq->usecnt == 1);
4665697a414SAdrien Mazarguil mlx4_rxq_detach(rxq);
4675697a414SAdrien Mazarguil }
4685697a414SAdrien Mazarguil }
46984a68486SAdrien Mazarguil priv->rss_init = 0;
4705697a414SAdrien Mazarguil }
4715697a414SAdrien Mazarguil
4725697a414SAdrien Mazarguil /**
4735697a414SAdrien Mazarguil * Attach a user to a Rx queue.
4745697a414SAdrien Mazarguil *
4755697a414SAdrien Mazarguil * Used when the resources of an Rx queue must be instantiated for it to
4765697a414SAdrien Mazarguil * become in a usable state.
4775697a414SAdrien Mazarguil *
4785697a414SAdrien Mazarguil * This function increments the usage count of the Rx queue.
4795b4c63bdSAdrien Mazarguil *
4805b4c63bdSAdrien Mazarguil * @param rxq
4815b4c63bdSAdrien Mazarguil * Pointer to Rx queue structure.
4825b4c63bdSAdrien Mazarguil *
4835b4c63bdSAdrien Mazarguil * @return
4845b4c63bdSAdrien Mazarguil * 0 on success, negative errno value otherwise and rte_errno is set.
4855b4c63bdSAdrien Mazarguil */
4865697a414SAdrien Mazarguil int
mlx4_rxq_attach(struct rxq * rxq)4875697a414SAdrien Mazarguil mlx4_rxq_attach(struct rxq *rxq)
4885b4c63bdSAdrien Mazarguil {
4895697a414SAdrien Mazarguil if (rxq->usecnt++) {
4908e08df22SAlexander Kozyrev MLX4_ASSERT(rxq->cq);
4918e08df22SAlexander Kozyrev MLX4_ASSERT(rxq->wq);
4928e08df22SAlexander Kozyrev MLX4_ASSERT(rxq->wqes);
4938e08df22SAlexander Kozyrev MLX4_ASSERT(rxq->rq_db);
4945697a414SAdrien Mazarguil return 0;
4955697a414SAdrien Mazarguil }
4965697a414SAdrien Mazarguil
497dbeba4cfSThomas Monjalon struct mlx4_priv *priv = rxq->priv;
498099c2c53SYongseok Koh struct rte_eth_dev *dev = ETH_DEV(priv);
4996681b845SMoti Haimovsky const uint32_t elts_n = 1 << rxq->elts_n;
5006681b845SMoti Haimovsky const uint32_t sges_n = 1 << rxq->sges_n;
5016681b845SMoti Haimovsky struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
5025697a414SAdrien Mazarguil struct mlx4dv_obj mlxdv;
5035697a414SAdrien Mazarguil struct mlx4dv_rwq dv_rwq;
50462e96ffbSMoti Haimovsky struct mlx4dv_cq dv_cq = { .comp_mask = MLX4DV_CQ_MASK_UAR, };
5055697a414SAdrien Mazarguil const char *msg;
5065697a414SAdrien Mazarguil struct ibv_cq *cq = NULL;
5075697a414SAdrien Mazarguil struct ibv_wq *wq = NULL;
508de1df14eSOphir Munk uint32_t create_flags = 0;
509de1df14eSOphir Munk uint32_t comp_mask = 0;
5105697a414SAdrien Mazarguil volatile struct mlx4_wqe_data_seg (*wqes)[];
5115b4c63bdSAdrien Mazarguil unsigned int i;
5125697a414SAdrien Mazarguil int ret;
5135b4c63bdSAdrien Mazarguil
5148e08df22SAlexander Kozyrev MLX4_ASSERT(rte_is_power_of_2(elts_n));
5158e493764SYongseok Koh priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_RX_QUEUE;
5168e493764SYongseok Koh priv->verbs_alloc_ctx.obj = rxq;
5174eba244bSAdrien Mazarguil cq = mlx4_glue->create_cq(priv->ctx, elts_n / sges_n, NULL,
5184eba244bSAdrien Mazarguil rxq->channel, 0);
5195697a414SAdrien Mazarguil if (!cq) {
5205697a414SAdrien Mazarguil ret = ENOMEM;
5215697a414SAdrien Mazarguil msg = "CQ creation failure";
5225697a414SAdrien Mazarguil goto error;
5235697a414SAdrien Mazarguil }
524de1df14eSOphir Munk /* By default, FCS (CRC) is stripped by hardware. */
525de1df14eSOphir Munk if (rxq->crc_present) {
526de1df14eSOphir Munk create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
527de1df14eSOphir Munk comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
528de1df14eSOphir Munk }
5294eba244bSAdrien Mazarguil wq = mlx4_glue->create_wq
5305697a414SAdrien Mazarguil (priv->ctx,
5315697a414SAdrien Mazarguil &(struct ibv_wq_init_attr){
5325697a414SAdrien Mazarguil .wq_type = IBV_WQT_RQ,
5335697a414SAdrien Mazarguil .max_wr = elts_n / sges_n,
5345697a414SAdrien Mazarguil .max_sge = sges_n,
5355697a414SAdrien Mazarguil .pd = priv->pd,
5365697a414SAdrien Mazarguil .cq = cq,
537de1df14eSOphir Munk .comp_mask = comp_mask,
538de1df14eSOphir Munk .create_flags = create_flags,
5395697a414SAdrien Mazarguil });
5405697a414SAdrien Mazarguil if (!wq) {
5415697a414SAdrien Mazarguil ret = errno ? errno : EINVAL;
5425697a414SAdrien Mazarguil msg = "WQ creation failure";
5435697a414SAdrien Mazarguil goto error;
5445697a414SAdrien Mazarguil }
5454eba244bSAdrien Mazarguil ret = mlx4_glue->modify_wq
5465697a414SAdrien Mazarguil (wq,
5475697a414SAdrien Mazarguil &(struct ibv_wq_attr){
5485697a414SAdrien Mazarguil .attr_mask = IBV_WQ_ATTR_STATE,
5495697a414SAdrien Mazarguil .wq_state = IBV_WQS_RDY,
5505697a414SAdrien Mazarguil });
5515697a414SAdrien Mazarguil if (ret) {
5525697a414SAdrien Mazarguil msg = "WQ state change to IBV_WQS_RDY failed";
5535697a414SAdrien Mazarguil goto error;
5545697a414SAdrien Mazarguil }
5555697a414SAdrien Mazarguil /* Retrieve device queue information. */
5565697a414SAdrien Mazarguil mlxdv.cq.in = cq;
5575697a414SAdrien Mazarguil mlxdv.cq.out = &dv_cq;
5585697a414SAdrien Mazarguil mlxdv.rwq.in = wq;
5595697a414SAdrien Mazarguil mlxdv.rwq.out = &dv_rwq;
5604eba244bSAdrien Mazarguil ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
5615697a414SAdrien Mazarguil if (ret) {
5625697a414SAdrien Mazarguil msg = "failed to obtain device information from WQ/CQ objects";
5635697a414SAdrien Mazarguil goto error;
5645697a414SAdrien Mazarguil }
5659797bfccSYongseok Koh /* Pre-register Rx mempool. */
5669797bfccSYongseok Koh DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
567099c2c53SYongseok Koh ETH_DEV(priv)->data->port_id, rxq->stats.idx,
5689797bfccSYongseok Koh rxq->mp->name, rxq->mp->nb_mem_chunks);
5699797bfccSYongseok Koh mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
5705697a414SAdrien Mazarguil wqes = (volatile struct mlx4_wqe_data_seg (*)[])
5715697a414SAdrien Mazarguil ((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset);
572c64c58adSAdrien Mazarguil for (i = 0; i != RTE_DIM(*elts); ++i) {
5735697a414SAdrien Mazarguil volatile struct mlx4_wqe_data_seg *scat = &(*wqes)[i];
5745b4c63bdSAdrien Mazarguil struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
5755b4c63bdSAdrien Mazarguil
5765b4c63bdSAdrien Mazarguil if (buf == NULL) {
577c64c58adSAdrien Mazarguil while (i--) {
5786681b845SMoti Haimovsky rte_pktmbuf_free_seg((*elts)[i]);
5796681b845SMoti Haimovsky (*elts)[i] = NULL;
580c64c58adSAdrien Mazarguil }
5815697a414SAdrien Mazarguil ret = ENOMEM;
5825697a414SAdrien Mazarguil msg = "cannot allocate mbuf";
5835697a414SAdrien Mazarguil goto error;
5845b4c63bdSAdrien Mazarguil }
5855b4c63bdSAdrien Mazarguil /* Headroom is reserved by rte_pktmbuf_alloc(). */
5868e08df22SAlexander Kozyrev MLX4_ASSERT(buf->data_off == RTE_PKTMBUF_HEADROOM);
5875b4c63bdSAdrien Mazarguil /* Buffer is supposed to be empty. */
5888e08df22SAlexander Kozyrev MLX4_ASSERT(rte_pktmbuf_data_len(buf) == 0);
5898e08df22SAlexander Kozyrev MLX4_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
5906681b845SMoti Haimovsky /* Only the first segment keeps headroom. */
5916681b845SMoti Haimovsky if (i % sges_n)
5926681b845SMoti Haimovsky buf->data_off = 0;
5936681b845SMoti Haimovsky buf->port = rxq->port_id;
5946681b845SMoti Haimovsky buf->data_len = rte_pktmbuf_tailroom(buf);
5956681b845SMoti Haimovsky buf->pkt_len = rte_pktmbuf_tailroom(buf);
5966681b845SMoti Haimovsky buf->nb_segs = 1;
5976681b845SMoti Haimovsky *scat = (struct mlx4_wqe_data_seg){
5986681b845SMoti Haimovsky .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
5996681b845SMoti Haimovsky uintptr_t)),
6006681b845SMoti Haimovsky .byte_count = rte_cpu_to_be_32(buf->data_len),
6019797bfccSYongseok Koh .lkey = mlx4_rx_mb2mr(rxq, buf),
6026681b845SMoti Haimovsky };
6036681b845SMoti Haimovsky (*elts)[i] = buf;
6045b4c63bdSAdrien Mazarguil }
6056681b845SMoti Haimovsky DEBUG("%p: allocated and configured %u segments (max %u packets)",
6066681b845SMoti Haimovsky (void *)rxq, elts_n, elts_n / sges_n);
6075697a414SAdrien Mazarguil rxq->cq = cq;
6085697a414SAdrien Mazarguil rxq->wq = wq;
6095697a414SAdrien Mazarguil rxq->wqes = wqes;
6105697a414SAdrien Mazarguil rxq->rq_db = dv_rwq.rdb;
6115697a414SAdrien Mazarguil rxq->mcq.buf = dv_cq.buf.buf;
6125697a414SAdrien Mazarguil rxq->mcq.cqe_cnt = dv_cq.cqe_cnt;
6135697a414SAdrien Mazarguil rxq->mcq.set_ci_db = dv_cq.set_ci_db;
6145697a414SAdrien Mazarguil rxq->mcq.cqe_64 = (dv_cq.cqe_size & 64) ? 1 : 0;
61562e96ffbSMoti Haimovsky rxq->mcq.arm_db = dv_cq.arm_db;
61662e96ffbSMoti Haimovsky rxq->mcq.arm_sn = dv_cq.arm_sn;
61762e96ffbSMoti Haimovsky rxq->mcq.cqn = dv_cq.cqn;
61862e96ffbSMoti Haimovsky rxq->mcq.cq_uar = dv_cq.cq_uar;
61962e96ffbSMoti Haimovsky rxq->mcq.cq_db_reg = (uint8_t *)dv_cq.cq_uar + MLX4_CQ_DOORBELL;
6205697a414SAdrien Mazarguil /* Update doorbell counter. */
6215697a414SAdrien Mazarguil rxq->rq_ci = elts_n / sges_n;
6225697a414SAdrien Mazarguil rte_wmb();
6235697a414SAdrien Mazarguil *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
6248e493764SYongseok Koh priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
6255b4c63bdSAdrien Mazarguil return 0;
6265697a414SAdrien Mazarguil error:
6275697a414SAdrien Mazarguil if (wq)
6284eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_wq(wq));
6295697a414SAdrien Mazarguil if (cq)
6304eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_cq(cq));
63184a68486SAdrien Mazarguil --rxq->usecnt;
6325697a414SAdrien Mazarguil rte_errno = ret;
6335697a414SAdrien Mazarguil ERROR("error while attaching Rx queue %p: %s: %s",
6345697a414SAdrien Mazarguil (void *)rxq, msg, strerror(ret));
6358e493764SYongseok Koh priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
6365697a414SAdrien Mazarguil return -ret;
6375b4c63bdSAdrien Mazarguil }
6385b4c63bdSAdrien Mazarguil
6395b4c63bdSAdrien Mazarguil /**
6405697a414SAdrien Mazarguil * Detach a user from a Rx queue.
6415697a414SAdrien Mazarguil *
6425697a414SAdrien Mazarguil * This function decrements the usage count of the Rx queue and destroys
6435697a414SAdrien Mazarguil * usage resources after reaching 0.
6445b4c63bdSAdrien Mazarguil *
6455b4c63bdSAdrien Mazarguil * @param rxq
6465b4c63bdSAdrien Mazarguil * Pointer to Rx queue structure.
6475b4c63bdSAdrien Mazarguil */
6485697a414SAdrien Mazarguil void
mlx4_rxq_detach(struct rxq * rxq)6495697a414SAdrien Mazarguil mlx4_rxq_detach(struct rxq *rxq)
6505b4c63bdSAdrien Mazarguil {
6515b4c63bdSAdrien Mazarguil unsigned int i;
6526681b845SMoti Haimovsky struct rte_mbuf *(*elts)[1 << rxq->elts_n] = rxq->elts;
6535b4c63bdSAdrien Mazarguil
6545697a414SAdrien Mazarguil if (--rxq->usecnt)
6555697a414SAdrien Mazarguil return;
6565697a414SAdrien Mazarguil rxq->rq_ci = 0;
6575697a414SAdrien Mazarguil memset(&rxq->mcq, 0, sizeof(rxq->mcq));
6585697a414SAdrien Mazarguil rxq->rq_db = NULL;
6595697a414SAdrien Mazarguil rxq->wqes = NULL;
6604eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_wq(rxq->wq));
6615697a414SAdrien Mazarguil rxq->wq = NULL;
6624eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_cq(rxq->cq));
6635697a414SAdrien Mazarguil rxq->cq = NULL;
6646681b845SMoti Haimovsky DEBUG("%p: freeing Rx queue elements", (void *)rxq);
665c64c58adSAdrien Mazarguil for (i = 0; (i != RTE_DIM(*elts)); ++i) {
6666681b845SMoti Haimovsky if (!(*elts)[i])
667c64c58adSAdrien Mazarguil continue;
6686681b845SMoti Haimovsky rte_pktmbuf_free_seg((*elts)[i]);
6696681b845SMoti Haimovsky (*elts)[i] = NULL;
670c64c58adSAdrien Mazarguil }
6715b4c63bdSAdrien Mazarguil }
6725b4c63bdSAdrien Mazarguil
6735b4c63bdSAdrien Mazarguil /**
674597d2ce5SShahaf Shuler * Returns the per-queue supported offloads.
675597d2ce5SShahaf Shuler *
676597d2ce5SShahaf Shuler * @param priv
677597d2ce5SShahaf Shuler * Pointer to private structure.
678597d2ce5SShahaf Shuler *
679597d2ce5SShahaf Shuler * @return
 * Supported Rx offloads.
681597d2ce5SShahaf Shuler */
682597d2ce5SShahaf Shuler uint64_t
mlx4_get_rx_queue_offloads(struct mlx4_priv * priv)683dbeba4cfSThomas Monjalon mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
684597d2ce5SShahaf Shuler {
685*295968d1SFerruh Yigit uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
686*295968d1SFerruh Yigit RTE_ETH_RX_OFFLOAD_KEEP_CRC |
687*295968d1SFerruh Yigit RTE_ETH_RX_OFFLOAD_RSS_HASH;
688597d2ce5SShahaf Shuler
689597d2ce5SShahaf Shuler if (priv->hw_csum)
690*295968d1SFerruh Yigit offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
691597d2ce5SShahaf Shuler return offloads;
692597d2ce5SShahaf Shuler }
693597d2ce5SShahaf Shuler
694597d2ce5SShahaf Shuler /**
695597d2ce5SShahaf Shuler * Returns the per-port supported offloads.
696597d2ce5SShahaf Shuler *
697597d2ce5SShahaf Shuler * @param priv
698597d2ce5SShahaf Shuler * Pointer to private structure.
699597d2ce5SShahaf Shuler *
700597d2ce5SShahaf Shuler * @return
701597d2ce5SShahaf Shuler * Supported Rx offloads.
702597d2ce5SShahaf Shuler */
703597d2ce5SShahaf Shuler uint64_t
mlx4_get_rx_port_offloads(struct mlx4_priv * priv)704dbeba4cfSThomas Monjalon mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
705597d2ce5SShahaf Shuler {
706*295968d1SFerruh Yigit uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
707597d2ce5SShahaf Shuler
708597d2ce5SShahaf Shuler (void)priv;
709597d2ce5SShahaf Shuler return offloads;
710597d2ce5SShahaf Shuler }
711597d2ce5SShahaf Shuler
712597d2ce5SShahaf Shuler /**
7135b4c63bdSAdrien Mazarguil * DPDK callback to configure a Rx queue.
7145b4c63bdSAdrien Mazarguil *
7155b4c63bdSAdrien Mazarguil * @param dev
7165b4c63bdSAdrien Mazarguil * Pointer to Ethernet device structure.
7175b4c63bdSAdrien Mazarguil * @param idx
7185b4c63bdSAdrien Mazarguil * Rx queue index.
7195b4c63bdSAdrien Mazarguil * @param desc
7205b4c63bdSAdrien Mazarguil * Number of descriptors to configure in queue.
7215b4c63bdSAdrien Mazarguil * @param socket
7225b4c63bdSAdrien Mazarguil * NUMA socket on which memory must be allocated.
7235b4c63bdSAdrien Mazarguil * @param[in] conf
7245b4c63bdSAdrien Mazarguil * Thresholds parameters.
7255b4c63bdSAdrien Mazarguil * @param mp
7265b4c63bdSAdrien Mazarguil * Memory pool for buffer allocations.
7275b4c63bdSAdrien Mazarguil *
7285b4c63bdSAdrien Mazarguil * @return
7295b4c63bdSAdrien Mazarguil * 0 on success, negative errno value otherwise and rte_errno is set.
7305b4c63bdSAdrien Mazarguil */
int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
	/* Element array is dimensioned to the next power of two of desc. */
	struct rte_mbuf *(*elts)[rte_align32pow2(desc)];
	struct rxq *rxq;
	/* One allocation request covering both the rxq and its elts array. */
	struct mlx4_malloc_vec vec[] = {
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*rxq),
			.addr = (void **)&rxq,
		},
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*elts),
			.addr = (void **)&elts,
		},
	};
	int ret;
	uint32_t crc_present;
	uint64_t offloads;
	uint32_t max_rx_pktlen;

	/* Effective offloads combine per-queue and port-wide requests. */
	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;

	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq) {
		rte_errno = EEXIST;
		ERROR("%p: Rx queue %u already configured, release it first",
		      (void *)dev, idx);
		return -rte_errno;
	}
	if (!desc) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
		return -rte_errno;
	}
	/* Round desc up so it matches the elts[] dimension chosen above. */
	if (desc != RTE_DIM(*elts)) {
		desc = RTE_DIM(*elts);
		WARN("%p: increased number of descriptors in Rx queue %u"
		     " to the next power of two (%u)",
		     (void *)dev, idx, desc);
	}
	/* By default, FCS (CRC) is stripped by hardware. */
	crc_present = 0;
	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
		if (priv->hw_fcs_strip) {
			crc_present = 1;
		} else {
			WARN("%p: CRC stripping has been disabled but will still"
			     " be performed by hardware, make sure MLNX_OFED and"
			     " firmware are up to date",
			     (void *)dev);
		}
	}
	DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
	      " incoming frames to hide it",
	      (void *)dev,
	      crc_present ? "disabled" : "enabled",
	      crc_present << 2);
	/* Allocate and initialize Rx queue. */
	mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
	if (!rxq) {
		/* rte_errno is presumably set by mlx4_zmallocv_socket(). */
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		return -rte_errno;
	}
	*rxq = (struct rxq){
		.priv = priv,
		.mp = mp,
		.port_id = dev->data->port_id,
		.sges_n = 0,
		.elts_n = rte_log2_u32(desc),
		.elts = elts,
		/* Toggle Rx checksum offload if hardware supports it. */
		.csum = priv->hw_csum &&
			(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
		.csum_l2tun = priv->hw_csum_l2tun &&
			      (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
		.crc_present = crc_present,
		.l2tun_offload = priv->hw_csum_l2tun,
		.stats = {
			.idx = idx,
		},
		.socket = socket,
	};
	/*
	 * Register the queue before anything can fail below so the
	 * error path can release it through mlx4_rx_queue_release().
	 */
	dev->data->rx_queues[idx] = rxq;
	/* Enable scattered packets support for this queue if necessary. */
	MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
		/* A single mbuf holds the largest frame: nothing to do. */
		;
	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
		uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
		uint32_t sges_n;

		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = rte_log2_u32((size / mb_len) + !!(size % mb_len));
		rxq->sges_n = sges_n;
		/* Make sure sges_n did not overflow. */
		size = mb_len * (1 << rxq->sges_n);
		size -= RTE_PKTMBUF_HEADROOM;
		if (size < max_rx_pktlen) {
			rte_errno = EOVERFLOW;
			ERROR("%p: too many SGEs (%u) needed to handle"
			      " requested maximum packet size %u",
			      (void *)dev,
			      1 << sges_n, max_rx_pktlen);
			goto error;
		}
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev, max_rx_pktlen,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
	DEBUG("%p: maximum number of segments per packet: %u",
	      (void *)dev, 1 << rxq->sges_n);
	/* elts[] must hold a whole number of sges_n-segment packets. */
	if (desc % (1 << rxq->sges_n)) {
		rte_errno = EINVAL;
		ERROR("%p: number of Rx queue descriptors (%u) is not a"
		      " multiple of maximum segments per packet (%u)",
		      (void *)dev,
		      desc,
		      1 << rxq->sges_n);
		goto error;
	}
	/* Initialize the per-queue memory region lookup cache. */
	if (mlx4_mr_btree_init(&rxq->mr_ctrl.cache_bh,
			       MLX4_MR_BTREE_CACHE_N, socket)) {
		/* rte_errno is already set. */
		goto error;
	}
	/* Set up the Rx interrupt completion channel when requested. */
	if (dev->data->dev_conf.intr_conf.rxq) {
		rxq->channel = mlx4_glue->create_comp_channel(priv->ctx);
		if (rxq->channel == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
		if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
	}
	DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
	return 0;
error:
	/* Preserve the error code across the release call. */
	ret = rte_errno;
	mlx4_rx_queue_release(dev, idx);
	rte_errno = ret;
	MLX4_ASSERT(rte_errno > 0);
	return -rte_errno;
}
9045b4c63bdSAdrien Mazarguil
9055b4c63bdSAdrien Mazarguil /**
9065b4c63bdSAdrien Mazarguil * DPDK callback to release a Rx queue.
9075b4c63bdSAdrien Mazarguil *
9087483341aSXueming Li * @param dev
9097483341aSXueming Li * Pointer to Ethernet device structure.
9107483341aSXueming Li * @param idx
9117483341aSXueming Li * Receive queue index.
9125b4c63bdSAdrien Mazarguil */
9135b4c63bdSAdrien Mazarguil void
mlx4_rx_queue_release(struct rte_eth_dev * dev,uint16_t idx)9147483341aSXueming Li mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx)
9155b4c63bdSAdrien Mazarguil {
9167483341aSXueming Li struct rxq *rxq = dev->data->rx_queues[idx];
9175b4c63bdSAdrien Mazarguil
9185b4c63bdSAdrien Mazarguil if (rxq == NULL)
9195b4c63bdSAdrien Mazarguil return;
9207483341aSXueming Li dev->data->rx_queues[idx] = NULL;
9217483341aSXueming Li DEBUG("%p: removing Rx queue %hu from list", (void *)dev, idx);
9228e08df22SAlexander Kozyrev MLX4_ASSERT(!rxq->cq);
9238e08df22SAlexander Kozyrev MLX4_ASSERT(!rxq->wq);
9248e08df22SAlexander Kozyrev MLX4_ASSERT(!rxq->wqes);
9258e08df22SAlexander Kozyrev MLX4_ASSERT(!rxq->rq_db);
92679770826SAdrien Mazarguil if (rxq->channel)
9274eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
9289797bfccSYongseok Koh mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh);
9295b4c63bdSAdrien Mazarguil rte_free(rxq);
9305b4c63bdSAdrien Mazarguil }
931