Lines Matching +full:burst +full:- +full:read
1 /*-
2 * Copyright (c) 2016-2020 Mellanox Technologies. All rights reserved.
57 void *sqc = param->sqc; in mlx5e_rl_build_sq_param()
59 uint8_t log_sq_size = order_base_2(rl->param.tx_queue_size); in mlx5e_rl_build_sq_param()
63 MLX5_SET(wq, wq, pd, rl->priv->pdn); in mlx5e_rl_build_sq_param()
65 param->wq.linear = 1; in mlx5e_rl_build_sq_param()
72 void *cqc = param->cqc; in mlx5e_rl_build_cq_param()
73 uint8_t log_sq_size = order_base_2(rl->param.tx_queue_size); in mlx5e_rl_build_cq_param()
76 MLX5_SET(cqc, cqc, cq_period, rl->param.tx_coalesce_usecs); in mlx5e_rl_build_cq_param()
77 MLX5_SET(cqc, cqc, cq_max_count, rl->param.tx_coalesce_pkts); in mlx5e_rl_build_cq_param()
78 MLX5_SET(cqc, cqc, uar_page, rl->priv->mdev->priv.uar->index); in mlx5e_rl_build_cq_param()
80 switch (rl->param.tx_coalesce_mode) { in mlx5e_rl_build_cq_param()
85 if (MLX5_CAP_GEN(rl->priv->mdev, cq_period_start_from_cqe)) in mlx5e_rl_build_cq_param()
99 mlx5e_rl_build_sq_param(rl, &cparam->sq); in mlx5e_rl_build_channel_param()
100 mlx5e_rl_build_cq_param(rl, &cparam->cq); in mlx5e_rl_build_channel_param()
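Both builders derive log_sq_size from the configured queue size via order_base_2(), i.e. the ceiling of log2. A minimal user-space sketch of that helper, with semantics assumed from its Linux namesake (smallest e such that (1 << e) >= n); the main() values are only illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Ceiling log2: smallest e such that (1ULL << e) >= n. */
    static unsigned
    order_base_2(uint64_t n)
    {
        unsigned e = 0;

        while ((1ULL << e) < n)
            e++;
        return (e);
    }

    int
    main(void)
    {
        printf("order_base_2(1024) = %u\n", order_base_2(1024)); /* 10 */
        printf("order_base_2(1000) = %u\n", order_base_2(1000)); /* 10 */
        return (0);
    }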
107 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_rl_create_sq()
108 void *sqc = param->sqc; in mlx5e_rl_create_sq()
113 if ((err = -bus_dma_tag_create( in mlx5e_rl_create_sq()
114 bus_get_dma_tag(mdev->pdev->dev.bsddev), in mlx5e_rl_create_sq()
125 &sq->dma_tag))) in mlx5e_rl_create_sq()
128 sq->mkey_be = cpu_to_be32(priv->mr.key); in mlx5e_rl_create_sq()
129 sq->ifp = priv->ifp; in mlx5e_rl_create_sq()
130 sq->priv = priv; in mlx5e_rl_create_sq()
132 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, in mlx5e_rl_create_sq()
133 &sq->wq_ctrl); in mlx5e_rl_create_sq()
137 sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; in mlx5e_rl_create_sq()
148 mlx5_wq_destroy(&sq->wq_ctrl); in mlx5e_rl_create_sq()
150 bus_dma_tag_destroy(sq->dma_tag); in mlx5e_rl_create_sq()
160 mlx5_wq_destroy(&sq->wq_ctrl); in mlx5e_rl_destroy_sq()
161 bus_dma_tag_destroy(sq->dma_tag); in mlx5e_rl_destroy_sq()
174 return -ENOMEM; in mlx5e_rl_query_sq()
176 err = mlx5_core_query_sq(sq->priv->mdev, sq->sqn, out); in mlx5e_rl_query_sq()
180 sq->queue_handle = MLX5_GET(query_sq_out, out, sq_context.queue_handle); in mlx5e_rl_query_sq()
197 err = mlx5e_enable_sq(sq, param, &priv->channel[ix].bfreg, priv->rl.tisn); in mlx5e_rl_open_sq()
205 if (MLX5_CAP_QOS(priv->mdev, qos_remap_pp)) { in mlx5e_rl_open_sq()
208 			mlx5_en_err(priv->ifp, "Failed retrieving send queue handle for " in mlx5e_rl_open_sq()
209 			    "SQ remap - sqn=%u, err=(%d)\n", sq->sqn, err); in mlx5e_rl_open_sq()
210 sq->queue_handle = MLX5_INVALID_QUEUE_HANDLE; in mlx5e_rl_open_sq()
213 sq->queue_handle = MLX5_INVALID_QUEUE_HANDLE; in mlx5e_rl_open_sq()
215 WRITE_ONCE(sq->running, 1); in mlx5e_rl_open_sq()
230 mtx_init(&sq->lock, "mlx5tx-rl", NULL, MTX_DEF); in mlx5e_rl_chan_mtx_init()
231 mtx_init(&sq->comp_lock, "mlx5comp-rl", NULL, MTX_DEF); in mlx5e_rl_chan_mtx_init()
233 callout_init_mtx(&sq->cev_callout, &sq->lock, 0); in mlx5e_rl_chan_mtx_init()
235 sq->cev_factor = priv->rl.param.tx_completion_fact; in mlx5e_rl_chan_mtx_init()
238 if (sq->cev_factor == 0) in mlx5e_rl_chan_mtx_init()
239 sq->cev_factor = 1; in mlx5e_rl_chan_mtx_init()
247 struct mlx5e_priv *priv = rlw->priv; in mlx5e_rl_open_channel()
257 err = mlx5e_open_cq(priv, &cparam->cq, &sq->cq, in mlx5e_rl_open_channel()
262 err = mlx5e_rl_open_sq(priv, sq, &cparam->sq, eq_ix); in mlx5e_rl_open_channel()
270 sq->cq.mcq.comp(&sq->cq.mcq, NULL); in mlx5e_rl_open_channel()
275 mlx5e_close_cq(&sq->cq); in mlx5e_rl_open_channel()
279 mtx_destroy(&sq->lock); in mlx5e_rl_open_channel()
280 mtx_destroy(&sq->comp_lock); in mlx5e_rl_open_channel()
282 atomic_add_64(&priv->rl.stats.tx_allocate_resource_failure, 1ULL); in mlx5e_rl_open_channel()
303 mlx5e_close_cq(&sq->cq); in mlx5e_rl_close_channel()
306 mtx_destroy(&sq->lock); in mlx5e_rl_close_channel()
307 mtx_destroy(&sq->comp_lock); in mlx5e_rl_close_channel()
324 uint64_t max = rl->param.tx_queue_size / in mlx5e_rl_sync_tx_completion_fact()
330 * 16-bits. in mlx5e_rl_sync_tx_completion_fact()
336 rl->param.tx_completion_fact_max = max; in mlx5e_rl_sync_tx_completion_fact()
342 if (rl->param.tx_completion_fact < 1) in mlx5e_rl_sync_tx_completion_fact()
343 rl->param.tx_completion_fact = 1; in mlx5e_rl_sync_tx_completion_fact()
344 else if (rl->param.tx_completion_fact > max) in mlx5e_rl_sync_tx_completion_fact()
345 rl->param.tx_completion_fact = max; in mlx5e_rl_sync_tx_completion_fact()
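The sync routine clamps tx_completion_fact into [1, max], where max is derived from the queue size so the factor fits the 16-bit budget mentioned in the comment above. A hedged sketch of the clamp; the divisor that produces "max" is elided by the search results, so it is passed in directly here:

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp a completion-event factor into [1, max]; mirrors the
     * range check in mlx5e_rl_sync_tx_completion_fact(). */
    static uint64_t
    clamp_completion_fact(uint64_t fact, uint64_t max)
    {
        if (fact < 1)
            return (1);
        if (fact > max)
            return (max);
        return (fact);
    }

    int
    main(void)
    {
        printf("%ju\n", (uintmax_t)clamp_completion_fact(0, 32));   /* 1 */
        printf("%ju\n", (uintmax_t)clamp_completion_fact(100, 32)); /* 32 */
        return (0);
    }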
351 struct mlx5e_priv *priv = sq->priv; in mlx5e_rl_modify_sq()
352 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_rl_modify_sq()
362 return (-ENOMEM); in mlx5e_rl_modify_sq()
366 MLX5_SET(modify_sq_in, in, sqn, sq->sqn); in mlx5e_rl_modify_sq()
389 uint64_t distance = -1ULL; in mlx5e_rl_find_best_rate_locked()
395 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_find_best_rate_locked()
396 uint64_t rate = rl->rate_limit_table[x]; in mlx5e_rl_find_best_rate_locked()
401 diff = rate - user_rate; in mlx5e_rl_find_best_rate_locked()
403 diff = user_rate - rate; in mlx5e_rl_find_best_rate_locked()
413 if (user_rate > rl->param.tx_limit_max) in mlx5e_rl_find_best_rate_locked()
414 user_rate = rl->param.tx_limit_max; in mlx5e_rl_find_best_rate_locked()
418 rl->param.tx_allowed_deviation, 1000ULL)) in mlx5e_rl_find_best_rate_locked()
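mlx5e_rl_find_best_rate_locked() scans the rate table for the entry with the smallest absolute distance from the user rate (after clamping it to tx_limit_max), then accepts the winner only if that distance stays within tx_allowed_deviation, expressed in units of 1/1000. A user-space sketch of that nearest-match-with-tolerance loop, assuming a plain array for the table; the driver's howmany() rounding is approximated by round-up division:

    #include <stdint.h>
    #include <stdio.h>

    /* Return the table rate closest to "user_rate", or 0 when the
     * best match deviates by more than "deviation" (units of 1/1000). */
    static uint64_t
    find_best_rate(const uint64_t *table, unsigned num,
        uint64_t user_rate, uint64_t deviation)
    {
        uint64_t distance = UINT64_MAX;
        uint64_t best = 0;
        unsigned x;

        for (x = 0; x != num; x++) {
            uint64_t rate = table[x];
            uint64_t diff;

            if (rate == 0)
                continue;
            diff = (rate > user_rate) ?
                rate - user_rate : user_rate - rate;
            if (diff < distance) {
                distance = diff;
                best = rate;
            }
        }
        /* accept only when within the allowed deviation */
        if (best != 0 &&
            distance > (user_rate * deviation + 999) / 1000)
            return (0);
        return (best);
    }

    int
    main(void)
    {
        uint64_t table[] = { 1000000, 5000000, 10000000, 0 };

        /* 50 == 5.0%, the default tx_allowed_deviation */
        printf("%ju\n", (uintmax_t)find_best_rate(table, 4,
            4900000, 50)); /* 5000000 */
        return (0);
    }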
433 mtx_lock(&iq->lock); in mlx5e_rl_post_sq_remap_wqe()
436 mtx_unlock(&iq->lock); in mlx5e_rl_post_sq_remap_wqe()
437 return (-ENOMEM); in mlx5e_rl_post_sq_remap_wqe()
439 wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi); in mlx5e_rl_post_sq_remap_wqe()
443 wqe->qos_remap.qos_handle = cpu_to_be32(scq_handle); in mlx5e_rl_post_sq_remap_wqe()
444 wqe->qos_remap.queue_handle = cpu_to_be32(sq_handle); in mlx5e_rl_post_sq_remap_wqe()
446 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) | in mlx5e_rl_post_sq_remap_wqe()
448 wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt); in mlx5e_rl_post_sq_remap_wqe()
449 wqe->ctrl.imm = cpu_to_be32(iq->priv->tisn[0] << 8); in mlx5e_rl_post_sq_remap_wqe()
450 wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL; in mlx5e_rl_post_sq_remap_wqe()
453 memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32)); in mlx5e_rl_post_sq_remap_wqe()
455 iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); in mlx5e_rl_post_sq_remap_wqe()
456 iq->data[pi].p_refcount = &sq_channel->refcount; in mlx5e_rl_post_sq_remap_wqe()
457 atomic_add_int(iq->data[pi].p_refcount, 1); in mlx5e_rl_post_sq_remap_wqe()
458 iq->pc += iq->data[pi].num_wqebbs; in mlx5e_rl_post_sq_remap_wqe()
462 mtx_unlock(&iq->lock); in mlx5e_rl_post_sq_remap_wqe()
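The remap WQE is posted with the usual producer-counter discipline: check for room (the ENOMEM path above), derive the slot from iq->pc, fill the control segment with the producer counter folded into opmod_idx_opcode, account the WQEBBs, advance pc, and ring the doorbell by copying the control segment out. A simplified, hardware-free sketch of that pattern; all names below are illustrative, not the driver's:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define RING_SIZE 8 /* must be a power of two */

    struct entry {
        uint32_t opmod_idx_opcode;
        uint32_t payload;
    };

    struct ring {
        struct entry wqe[RING_SIZE];
        uint16_t pc;           /* producer counter, free running */
        uint16_t cc;           /* consumer counter, bumped at completion */
        struct entry doorbell; /* stand-in for the doorbell record */
    };

    /* Post one entry, or fail like the ENOMEM path when the ring is
     * full; the slot index comes from the free-running producer counter. */
    static int
    post_entry(struct ring *r, uint32_t payload)
    {
        uint16_t pi;

        if ((uint16_t)(r->pc - r->cc) >= RING_SIZE)
            return (-1); /* no room, analogous to ENOMEM */
        pi = r->pc & (RING_SIZE - 1);
        r->wqe[pi].opmod_idx_opcode = ((uint32_t)r->pc << 8) | 0x01;
        r->wqe[pi].payload = payload;
        r->pc++;
        /* "ring" the doorbell by copying the control segment out */
        memcpy(&r->doorbell, &r->wqe[pi], sizeof(r->doorbell));
        return (0);
    }

    int
    main(void)
    {
        struct ring r = { .pc = 0, .cc = 0 };

        (void)post_entry(&r, 0xabcd);
        printf("pc=%u ctrl=0x%x\n", (unsigned)r.pc,
            (unsigned)r.doorbell.opmod_idx_opcode);
        return (0);
    }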
477 iq_channel = &sq->priv->channel[sq->sqn % sq->priv->params.num_channels]; in mlx5e_rl_remap_sq()
479 sq_handle = sq->queue_handle; in mlx5e_rl_remap_sq()
480 scq_handle = mlx5_rl_get_scq_handle(sq->priv->mdev, index); in mlx5e_rl_remap_sq()
484 error = -1; in mlx5e_rl_remap_sq()
486 error = mlx5e_rl_post_sq_remap_wqe(&iq_channel->iq, scq_handle, in mlx5e_rl_remap_sq()
501 struct mlx5e_rl_priv_data *rl = &rlw->priv->rl; in mlx5e_rlw_channel_set_rate_locked()
505 uint16_t burst; in mlx5e_rlw_channel_set_rate_locked() local
514 /* get current burst size in bytes */ in mlx5e_rlw_channel_set_rate_locked()
515 temp = rl->param.tx_burst_size * in mlx5e_rlw_channel_set_rate_locked()
516 MLX5E_SW2HW_MTU(if_getmtu(rlw->priv->ifp)); in mlx5e_rlw_channel_set_rate_locked()
518 /* limit burst size to 64K currently */ in mlx5e_rlw_channel_set_rate_locked()
521 burst = temp; in mlx5e_rlw_channel_set_rate_locked()
532 atomic_add_64(&rlw->priv->rl.stats.tx_modify_rate_failure, 1ULL); in mlx5e_rlw_channel_set_rate_locked()
535 error = -mlx5_rl_add_rate(rlw->priv->mdev, in mlx5e_rlw_channel_set_rate_locked()
536 howmany(rate, 1000), burst, &index); in mlx5e_rlw_channel_set_rate_locked()
542 atomic_add_64(&rlw->priv->rl.stats.tx_add_new_rate_failure, 1ULL); in mlx5e_rlw_channel_set_rate_locked()
548 burst = 0; /* default */ in mlx5e_rlw_channel_set_rate_locked()
551 /* paced <--> non-paced transitions must go via FW */ in mlx5e_rlw_channel_set_rate_locked()
552 use_sq_remap = MLX5_CAP_QOS(rlw->priv->mdev, qos_remap_pp) && in mlx5e_rlw_channel_set_rate_locked()
553 channel->last_rate != 0 && rate != 0; in mlx5e_rlw_channel_set_rate_locked()
556 temp = channel->last_rate; in mlx5e_rlw_channel_set_rate_locked()
557 channel->last_rate = rate; in mlx5e_rlw_channel_set_rate_locked()
560 /* atomically swap burst size */ in mlx5e_rlw_channel_set_rate_locked()
561 temp = channel->last_burst; in mlx5e_rlw_channel_set_rate_locked()
562 channel->last_burst = burst; in mlx5e_rlw_channel_set_rate_locked()
563 burst = temp; in mlx5e_rlw_channel_set_rate_locked()
568 mlx5_rl_remove_rate(rlw->priv->mdev, in mlx5e_rlw_channel_set_rate_locked()
569 howmany(rate, 1000), burst); in mlx5e_rlw_channel_set_rate_locked()
573 sq = channel->sq; in mlx5e_rlw_channel_set_rate_locked()
574 if (sq != NULL && READ_ONCE(sq->running) != 0) { in mlx5e_rlw_channel_set_rate_locked()
576 while (atomic_load_int(&channel->refcount) != 0 && in mlx5e_rlw_channel_set_rate_locked()
577 rlw->priv->mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR && in mlx5e_rlw_channel_set_rate_locked()
578 pci_channel_offline(rlw->priv->mdev->pdev) == 0) in mlx5e_rlw_channel_set_rate_locked()
582 atomic_add_64(&rlw->priv->rl.stats.tx_modify_rate_failure, 1ULL); in mlx5e_rlw_channel_set_rate_locked()
589 return (-error); in mlx5e_rlw_channel_set_rate_locked()
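Setting a channel rate converts tx_burst_size from MTUs to bytes via MLX5E_SW2HW_MTU(), caps the result at 64K (per the "limit burst size to 64K currently" comment) so it fits the 16-bit burst variable, and hands the rate to firmware in kbit/s via howmany(rate, 1000). A sketch of the byte conversion and clamp; the hardware-MTU conversion is folded into the hw_mtu argument here as an assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Convert a burst size in MTUs to bytes and clamp it so it fits
     * the 16-bit burst field (64K limit). */
    static uint16_t
    burst_bytes(uint64_t burst_mtus, uint64_t hw_mtu)
    {
        uint64_t temp = burst_mtus * hw_mtu;

        if (temp > 65535)
            temp = 65535;
        return ((uint16_t)temp);
    }

    int
    main(void)
    {
        /* default tx_burst_size = 4 MTUs, at a 9000-byte jumbo MTU */
        printf("%u\n", burst_bytes(4, 9000));  /* 36000 */
        printf("%u\n", burst_bytes(16, 9000)); /* clamped to 65535 */
        return (0);
    }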
610 priv = rlw->priv; in mlx5e_rl_worker()
613 ix = (rlw - priv->rl.workers) % in mlx5e_rl_worker()
614 priv->mdev->priv.eq_table.num_comp_vectors; in mlx5e_rl_worker()
620 for (x = 0; x < priv->rl.param.tx_channels_per_worker_def; x++) { in mlx5e_rl_worker()
621 struct mlx5e_rl_channel *channel = rlw->channels + x; in mlx5e_rl_worker()
624 if (channel->state == MLX5E_RL_ST_FREE) in mlx5e_rl_worker()
629 MLX5E_RL_RLOCK(&priv->rl); in mlx5e_rl_worker()
631 &priv->rl.chan_param, &channel->sq); in mlx5e_rl_worker()
632 MLX5E_RL_RUNLOCK(&priv->rl); in mlx5e_rl_worker()
636 mlx5_en_err(priv->ifp, in mlx5e_rl_worker()
640 mlx5e_rlw_channel_set_rate_locked(rlw, channel, channel->init_rate); in mlx5e_rl_worker()
643 if (STAILQ_FIRST(&rlw->process_head) == NULL) { in mlx5e_rl_worker()
645 if (rlw->worker_done != 0) in mlx5e_rl_worker()
647 cv_wait(&rlw->cv, &rlw->mtx); in mlx5e_rl_worker()
650 if (rlw->worker_done != 0) in mlx5e_rl_worker()
652 channel = STAILQ_FIRST(&rlw->process_head); in mlx5e_rl_worker()
654 STAILQ_REMOVE_HEAD(&rlw->process_head, entry); in mlx5e_rl_worker()
656 switch (channel->state) { in mlx5e_rl_worker()
658 channel->state = MLX5E_RL_ST_USED; in mlx5e_rl_worker()
662 if (channel->sq == NULL) { in mlx5e_rl_worker()
663 MLX5E_RL_RLOCK(&priv->rl); in mlx5e_rl_worker()
665 &priv->rl.chan_param, &channel->sq); in mlx5e_rl_worker()
666 MLX5E_RL_RUNLOCK(&priv->rl); in mlx5e_rl_worker()
669 mlx5_en_err(priv->ifp, in mlx5e_rl_worker()
672 atomic_add_64(&rlw->priv->rl.stats.tx_open_queues, 1ULL); in mlx5e_rl_worker()
675 mlx5e_resume_sq(channel->sq); in mlx5e_rl_worker()
681 channel->new_rate * 8ULL); in mlx5e_rl_worker()
683 mlx5_en_err(priv->ifp, in mlx5e_rl_worker()
692 mlx5_en_err(priv->ifp, in mlx5e_rl_worker()
696 if (channel->sq != NULL) { in mlx5e_rl_worker()
703 mlx5e_drain_sq(channel->sq); in mlx5e_rl_worker()
707 STAILQ_INSERT_HEAD(&rlw->index_list_head, channel, entry); in mlx5e_rl_worker()
708 channel->state = MLX5E_RL_ST_FREE; in mlx5e_rl_worker()
709 atomic_add_64(&priv->rl.stats.tx_active_connections, -1ULL); in mlx5e_rl_worker()
719 for (x = 0; x < priv->rl.param.tx_channels_per_worker_def; x++) { in mlx5e_rl_worker()
720 struct mlx5e_rl_channel *channel = rlw->channels + x; in mlx5e_rl_worker()
723 channel->init_rate = channel->last_rate; in mlx5e_rl_worker()
728 if (channel->sq != NULL) { in mlx5e_rl_worker()
730 mlx5e_rl_close_channel(&channel->sq); in mlx5e_rl_worker()
731 atomic_add_64(&rlw->priv->rl.stats.tx_open_queues, -1ULL); in mlx5e_rl_worker()
736 rlw->worker_done = 0; in mlx5e_rl_worker()
737 cv_broadcast(&rlw->cv); in mlx5e_rl_worker()
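Each worker thread blocks on its condition variable until a channel is queued on process_head or worker_done is raised, services one channel per wakeup, and on shutdown clears worker_done and broadcasts so mlx5e_rl_close_workers() can stop waiting. A portable pthread sketch of that wait/dequeue/handshake shape; the single "pending" counter stands in for the STAILQ of channels:

    #include <pthread.h>
    #include <stdio.h>

    struct worker {
        pthread_mutex_t mtx;
        pthread_cond_t cv;
        int worker_done;
        int pending; /* stand-in for the STAILQ of queued channels */
    };

    static void *
    worker_thread(void *arg)
    {
        struct worker *w = arg;

        pthread_mutex_lock(&w->mtx);
        for (;;) {
            /* sleep until there is work or shutdown is requested */
            while (w->pending == 0 && w->worker_done == 0)
                pthread_cond_wait(&w->cv, &w->mtx);
            if (w->worker_done != 0)
                break;
            w->pending--;
            /* ... service one channel here ... */
        }
        w->worker_done = 0; /* signal shutdown completion */
        pthread_cond_broadcast(&w->cv);
        pthread_mutex_unlock(&w->mtx);
        return (NULL);
    }

    int
    main(void)
    {
        static struct worker w = { PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_COND_INITIALIZER, 0, 0 };
        pthread_t td;

        pthread_create(&td, NULL, worker_thread, &w);
        pthread_mutex_lock(&w.mtx);
        w.worker_done = 1;
        pthread_cond_broadcast(&w.cv);
        pthread_mutex_unlock(&w.mtx);
        pthread_join(td, NULL);
        printf("worker exited\n");
        return (0);
    }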
746 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_rl_open_tis()
753 MLX5_SET(tisc, tisc, transport_domain, priv->tdn); in mlx5e_rl_open_tis()
755 return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->rl.tisn)); in mlx5e_rl_open_tis()
761 mlx5_core_destroy_tis(priv->mdev, priv->rl.tisn, 0); in mlx5e_rl_close_tis()
769 param->tx_worker_threads_def = mdev->priv.eq_table.num_comp_vectors; in mlx5e_rl_set_default_params()
770 param->tx_worker_threads_max = MLX5E_RL_MAX_WORKERS; in mlx5e_rl_set_default_params()
773 if (param->tx_worker_threads_def == 0 || in mlx5e_rl_set_default_params()
774 param->tx_worker_threads_def > param->tx_worker_threads_max) in mlx5e_rl_set_default_params()
775 param->tx_worker_threads_def = param->tx_worker_threads_max; in mlx5e_rl_set_default_params()
778 param->tx_channels_per_worker_def = MLX5E_RL_MAX_SQS / in mlx5e_rl_set_default_params()
779 param->tx_worker_threads_def; in mlx5e_rl_set_default_params()
780 param->tx_channels_per_worker_max = MLX5E_RL_MAX_SQS; in mlx5e_rl_set_default_params()
783 if (param->tx_channels_per_worker_def > MLX5E_RL_DEF_SQ_PER_WORKER) in mlx5e_rl_set_default_params()
784 param->tx_channels_per_worker_def = MLX5E_RL_DEF_SQ_PER_WORKER; in mlx5e_rl_set_default_params()
786 /* set default burst size */ in mlx5e_rl_set_default_params()
787 param->tx_burst_size = 4; /* MTUs */ in mlx5e_rl_set_default_params()
790 * Set maximum burst size in mlx5e_rl_set_default_params()
792 * The burst size is multiplied by the MTU and clamped to the in mlx5e_rl_set_default_params()
796 	 * NOTE: If the burst size or MTU is changed, only ratelimit in mlx5e_rl_set_default_params()
797 * connections made after the change will use the new burst in mlx5e_rl_set_default_params()
800 param->tx_burst_size_max = 255; in mlx5e_rl_set_default_params()
803 param->tx_limit_min = mdev->priv.rl_table.min_rate * 1000ULL; in mlx5e_rl_set_default_params()
804 param->tx_limit_max = mdev->priv.rl_table.max_rate * 1000ULL; in mlx5e_rl_set_default_params()
807 param->tx_rates_max = mdev->priv.rl_table.max_size; in mlx5e_rl_set_default_params()
810 if (param->tx_rates_max > MLX5E_RL_MAX_TX_RATES) in mlx5e_rl_set_default_params()
811 param->tx_rates_max = MLX5E_RL_MAX_TX_RATES; in mlx5e_rl_set_default_params()
814 param->tx_rates_def = param->tx_rates_max; in mlx5e_rl_set_default_params()
817 if (param->tx_limit_max != 0) { in mlx5e_rl_set_default_params()
820 * overflow unsigned 64-bit: in mlx5e_rl_set_default_params()
822 param->tx_allowed_deviation_max = -1ULL / in mlx5e_rl_set_default_params()
823 param->tx_limit_max; in mlx5e_rl_set_default_params()
826 param->tx_allowed_deviation = 50; /* 5.0% */ in mlx5e_rl_set_default_params()
829 param->tx_queue_size = (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); in mlx5e_rl_set_default_params()
830 param->tx_coalesce_usecs = MLX5E_RL_TX_COAL_USEC_DEFAULT; in mlx5e_rl_set_default_params()
831 param->tx_coalesce_pkts = MLX5E_RL_TX_COAL_PKTS_DEFAULT; in mlx5e_rl_set_default_params()
832 param->tx_coalesce_mode = MLX5E_RL_TX_COAL_MODE_DEFAULT; in mlx5e_rl_set_default_params()
833 param->tx_completion_fact = MLX5E_RL_TX_COMP_FACT_DEFAULT; in mlx5e_rl_set_default_params()
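The deviation bound guards the later multiplication user_rate * tx_allowed_deviation against 64-bit overflow: the largest safe factor is UINT64_MAX / tx_limit_max, which is exactly what -1ULL / param->tx_limit_max evaluates to. A short check of that identity; the 100 Gbit/s limit is an example value:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t tx_limit_max = 100000000000ULL; /* 100 Gbit/s, example */
        uint64_t dev_max = -1ULL / tx_limit_max; /* largest safe factor */

        /* the product at the bound fits in 64 bits ... */
        printf("max product = %ju\n",
            (uintmax_t)(dev_max * tx_limit_max));
        /* ... while (dev_max + 1) * tx_limit_max would exceed it */
        printf("dev_max = %ju, UINT64_MAX = %ju\n",
            (uintmax_t)dev_max, (uintmax_t)UINT64_MAX);
        return (0);
    }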
851 struct mlx5e_rl_priv_data *rl = &priv->rl; in mlx5e_rl_init()
860 if (!MLX5_CAP_GEN(priv->mdev, qos) || !MLX5_CAP_QOS(priv->mdev, packet_pacing)) in mlx5e_rl_init()
863 rl->priv = priv; in mlx5e_rl_init()
865 sysctl_ctx_init(&rl->ctx); in mlx5e_rl_init()
867 sx_init(&rl->rl_sxlock, "ratelimit-sxlock"); in mlx5e_rl_init()
875 mlx5e_rl_set_default_params(&rl->param, priv->mdev); in mlx5e_rl_init()
881 node = SYSCTL_ADD_NODE(&rl->ctx, in mlx5e_rl_init()
882 SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, in mlx5e_rl_init()
894 stats = SYSCTL_ADD_NODE(&rl->ctx, SYSCTL_CHILDREN(node), in mlx5e_rl_init()
908 rl->workers = malloc(sizeof(rl->workers[0]) * in mlx5e_rl_init()
909 rl->param.tx_worker_threads_def, M_MLX5EN, M_WAITOK | M_ZERO); in mlx5e_rl_init()
912 rl->rate_limit_table = malloc(sizeof(rl->rate_limit_table[0]) * in mlx5e_rl_init()
913 rl->param.tx_rates_def, M_MLX5EN, M_WAITOK | M_ZERO); in mlx5e_rl_init()
917 SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, in mlx5e_rl_init()
923 for (i = 0; i != rl->param.tx_rates_def; i++) { in mlx5e_rl_init()
926 device_get_unit(priv->mdev->pdev->dev.bsddev), (int)i); in mlx5e_rl_init()
940 for (j = 0; j < rl->param.tx_worker_threads_def; j++) { in mlx5e_rl_init()
941 struct mlx5e_rl_worker *rlw = rl->workers + j; in mlx5e_rl_init()
943 rlw->priv = priv; in mlx5e_rl_init()
945 cv_init(&rlw->cv, "mlx5-worker-cv"); in mlx5e_rl_init()
946 mtx_init(&rlw->mtx, "mlx5-worker-mtx", NULL, MTX_DEF); in mlx5e_rl_init()
947 STAILQ_INIT(&rlw->index_list_head); in mlx5e_rl_init()
948 STAILQ_INIT(&rlw->process_head); in mlx5e_rl_init()
950 rlw->channels = malloc(sizeof(rlw->channels[0]) * in mlx5e_rl_init()
951 rl->param.tx_channels_per_worker_def, M_MLX5EN, M_WAITOK | M_ZERO); in mlx5e_rl_init()
954 for (i = 0; i < rl->param.tx_channels_per_worker_def; i++) { in mlx5e_rl_init()
955 struct mlx5e_rl_channel *channel = rlw->channels + i; in mlx5e_rl_init()
956 channel->worker = rlw; in mlx5e_rl_init()
957 STAILQ_INSERT_TAIL(&rlw->index_list_head, channel, entry); in mlx5e_rl_init()
967 mlx5_en_err(priv->ifp, in mlx5e_rl_init()
974 sysctl_ctx_free(&rl->ctx); in mlx5e_rl_init()
975 sx_destroy(&rl->rl_sxlock); in mlx5e_rl_init()
982 struct mlx5e_rl_priv_data *rl = &priv->rl; in mlx5e_rl_open_workers()
988 if (priv->gone || rl->opened) in mlx5e_rl_open_workers()
989 return (-EINVAL); in mlx5e_rl_open_workers()
993 mlx5e_rl_build_channel_param(rl, &rl->chan_param); in mlx5e_rl_open_workers()
996 for (j = 0; j < rl->param.tx_worker_threads_def; j++) { in mlx5e_rl_open_workers()
997 struct mlx5e_rl_worker *rlw = rl->workers + j; in mlx5e_rl_open_workers()
1001 RFHIGHPID, 0, "mlx5-ratelimit", "mlx5-rl-worker-thread-%d", (int)j); in mlx5e_rl_open_workers()
1003 mlx5_en_err(rl->priv->ifp, in mlx5e_rl_open_workers()
1005 rlw->worker_done = 1; in mlx5e_rl_open_workers()
1009 rl->opened = 1; in mlx5e_rl_open_workers()
1017 struct mlx5e_rl_priv_data *rl = &priv->rl; in mlx5e_rl_close_workers()
1020 if (rl->opened == 0) in mlx5e_rl_close_workers()
1024 for (y = 0; y < rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_close_workers()
1025 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_close_workers()
1029 if (rlw->worker_done == 0) { in mlx5e_rl_close_workers()
1030 rlw->worker_done = 1; in mlx5e_rl_close_workers()
1031 cv_broadcast(&rlw->cv); in mlx5e_rl_close_workers()
1034 rlw->worker_done = 0; in mlx5e_rl_close_workers()
1040 for (y = 0; y < rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_close_workers()
1041 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_close_workers()
1045 while (rlw->worker_done != 0) in mlx5e_rl_close_workers()
1046 cv_wait(&rlw->cv, &rlw->mtx); in mlx5e_rl_close_workers()
1050 rl->opened = 0; in mlx5e_rl_close_workers()
1059 for (x = 0; x != rl->param.tx_rates_def; x++) in mlx5e_rl_reset_rates()
1060 rl->rate_limit_table[x] = 0; in mlx5e_rl_reset_rates()
1067 struct mlx5e_rl_priv_data *rl = &priv->rl; in mlx5e_rl_cleanup()
1071 if (!MLX5_CAP_GEN(priv->mdev, qos) || !MLX5_CAP_QOS(priv->mdev, packet_pacing)) in mlx5e_rl_cleanup()
1076 sysctl_ctx_free(&rl->ctx); in mlx5e_rl_cleanup()
1087 for (y = 0; y < rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_cleanup()
1088 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_cleanup()
1090 cv_destroy(&rlw->cv); in mlx5e_rl_cleanup()
1091 mtx_destroy(&rlw->mtx); in mlx5e_rl_cleanup()
1092 free(rlw->channels, M_MLX5EN); in mlx5e_rl_cleanup()
1094 free(rl->rate_limit_table, M_MLX5EN); in mlx5e_rl_cleanup()
1095 free(rl->workers, M_MLX5EN); in mlx5e_rl_cleanup()
1096 sx_destroy(&rl->rl_sxlock); in mlx5e_rl_cleanup()
1103 STAILQ_INSERT_TAIL(&rlw->process_head, channel, entry); in mlx5e_rlw_queue_channel_locked()
1104 cv_broadcast(&rlw->cv); in mlx5e_rlw_queue_channel_locked()
1114 switch (channel->state) { in mlx5e_rl_free()
1116 channel->state = MLX5E_RL_ST_DESTROY; in mlx5e_rl_free()
1119 channel->state = MLX5E_RL_ST_DESTROY; in mlx5e_rl_free()
1133 channel->new_rate = rate; in mlx5e_rl_modify()
1134 switch (channel->state) { in mlx5e_rl_modify()
1136 channel->state = MLX5E_RL_ST_MODIFY; in mlx5e_rl_modify()
1154 switch (channel->state) { in mlx5e_rl_query()
1156 params->rate_limit.max_rate = channel->last_rate; in mlx5e_rl_query()
1157 params->rate_limit.queue_level = mlx5e_sq_queue_level(channel->sq); in mlx5e_rl_query()
1161 params->rate_limit.max_rate = channel->last_rate; in mlx5e_rl_query()
1162 params->rate_limit.queue_level = mlx5e_sq_queue_level(channel->sq); in mlx5e_rl_query()
1183 if ((channel = STAILQ_FIRST(&rlw->index_list_head)) != NULL) { in mlx5e_find_available_tx_ring_index()
1186 STAILQ_REMOVE_HEAD(&rlw->index_list_head, entry); in mlx5e_find_available_tx_ring_index()
1187 channel->state = MLX5E_RL_ST_USED; in mlx5e_find_available_tx_ring_index()
1188 atomic_add_64(&rlw->priv->rl.stats.tx_active_connections, 1ULL); in mlx5e_find_available_tx_ring_index()
1190 atomic_add_64(&rlw->priv->rl.stats.tx_available_resource_failure, 1ULL); in mlx5e_find_available_tx_ring_index()
1196 mlx5_en_info(rlw->priv->ifp, in mlx5e_find_available_tx_ring_index()
1215 if (!MLX5_CAP_GEN(priv->mdev, qos) || in mlx5e_rl_snd_tag_alloc()
1216 !MLX5_CAP_QOS(priv->mdev, packet_pacing) || priv->gone || in mlx5e_rl_snd_tag_alloc()
1217 params->rate_limit.hdr.type != IF_SND_TAG_TYPE_RATE_LIMIT) in mlx5e_rl_snd_tag_alloc()
1221 rlw = priv->rl.workers + ((params->rate_limit.hdr.flowid % 128) % in mlx5e_rl_snd_tag_alloc()
1222 priv->rl.param.tx_worker_threads_def); in mlx5e_rl_snd_tag_alloc()
1228 error = mlx5e_rl_modify(rlw, channel, params->rate_limit.max_rate); in mlx5e_rl_snd_tag_alloc()
1235 MPASS(channel->tag.refcount == 0); in mlx5e_rl_snd_tag_alloc()
1236 m_snd_tag_init(&channel->tag, ifp, &mlx5e_rl_snd_tag_sw); in mlx5e_rl_snd_tag_alloc()
1237 *ppmt = &channel->tag; in mlx5e_rl_snd_tag_alloc()
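Tag allocation spreads flows over workers with a double modulo, (flowid % 128) % tx_worker_threads_def: first fold into at most 128 buckets, then onto the configured worker count. A tiny sketch of that selection:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a flow ID onto a worker index the way
     * mlx5e_rl_snd_tag_alloc() does. */
    static unsigned
    pick_worker(uint32_t flowid, unsigned nworkers)
    {
        return ((flowid % 128) % nworkers);
    }

    int
    main(void)
    {
        printf("%u\n", pick_worker(0xdeadbeef, 8)); /* 7 */
        return (0);
    }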
1249 return (mlx5e_rl_modify(channel->worker, channel, params->rate_limit.max_rate)); in mlx5e_rl_snd_tag_modify()
1258 return (mlx5e_rl_query(channel->worker, channel, params)); in mlx5e_rl_snd_tag_query()
1267 mlx5e_rl_free(channel->worker, channel); in mlx5e_rl_snd_tag_free()
1274 struct mlx5e_priv *priv = rl->priv; in mlx5e_rl_sysctl_show_rate_table()
1285 sbuf_new_for_sysctl(&sbuf, NULL, 128 * rl->param.tx_rates_def, req); in mlx5e_rl_sysctl_show_rate_table()
1288 "\n\n" "\t" "ENTRY" "\t" "BURST" "\t" "RATE [bit/s]\n" in mlx5e_rl_sysctl_show_rate_table()
1289 "\t" "--------------------------------------------\n"); in mlx5e_rl_sysctl_show_rate_table()
1292 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_sysctl_show_rate_table()
1293 if (rl->rate_limit_table[x] == 0) in mlx5e_rl_sysctl_show_rate_table()
1297 x, (unsigned)rl->param.tx_burst_size, in mlx5e_rl_sysctl_show_rate_table()
1298 (long long)rl->rate_limit_table[x]); in mlx5e_rl_sysctl_show_rate_table()
1318 mlx5e_rl_build_channel_param(rl, &rl->chan_param); in mlx5e_rl_refresh_channel_params()
1321 for (y = 0; y != rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_refresh_channel_params()
1322 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_refresh_channel_params()
1324 for (x = 0; x != rl->param.tx_channels_per_worker_def; x++) { in mlx5e_rl_refresh_channel_params()
1328 channel = rlw->channels + x; in mlx5e_rl_refresh_channel_params()
1329 sq = channel->sq; in mlx5e_rl_refresh_channel_params()
1334 if (MLX5_CAP_GEN(rl->priv->mdev, cq_period_mode_modify)) { in mlx5e_rl_refresh_channel_params()
1335 mlx5_core_modify_cq_moderation_mode(rl->priv->mdev, &sq->cq.mcq, in mlx5e_rl_refresh_channel_params()
1336 rl->param.tx_coalesce_usecs, in mlx5e_rl_refresh_channel_params()
1337 rl->param.tx_coalesce_pkts, in mlx5e_rl_refresh_channel_params()
1338 rl->param.tx_coalesce_mode); in mlx5e_rl_refresh_channel_params()
1340 mlx5_core_modify_cq_moderation(rl->priv->mdev, &sq->cq.mcq, in mlx5e_rl_refresh_channel_params()
1341 rl->param.tx_coalesce_usecs, in mlx5e_rl_refresh_channel_params()
1342 rl->param.tx_coalesce_pkts); in mlx5e_rl_refresh_channel_params()
1355 for (y = 0; y != rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_refresh_sq_inline()
1356 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_refresh_sq_inline()
1358 for (x = 0; x != rl->param.tx_channels_per_worker_def; x++) { in mlx5e_rl_refresh_sq_inline()
1362 channel = rlw->channels + x; in mlx5e_rl_refresh_sq_inline()
1363 sq = channel->sq; in mlx5e_rl_refresh_sq_inline()
1368 mtx_lock(&sq->lock); in mlx5e_rl_refresh_sq_inline()
1370 mtx_unlock(&sq->lock); in mlx5e_rl_refresh_sq_inline()
1382 mlx5_rl_is_in_range(rl->priv->mdev, howmany(value, 1000), 0) == 0) in mlx5e_rl_tx_limit_add()
1389 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_tx_limit_add()
1390 if (rl->rate_limit_table[x] != value) in mlx5e_rl_tx_limit_add()
1397 if (x == rl->param.tx_rates_def) { in mlx5e_rl_tx_limit_add()
1398 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_tx_limit_add()
1399 if (rl->rate_limit_table[x] != 0) in mlx5e_rl_tx_limit_add()
1401 rl->rate_limit_table[x] = value; in mlx5e_rl_tx_limit_add()
1423 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_tx_limit_clr()
1424 if (rl->rate_limit_table[x] != value) in mlx5e_rl_tx_limit_clr()
1427 rl->rate_limit_table[x] = 0; in mlx5e_rl_tx_limit_clr()
1432 if (x == rl->param.tx_rates_def) in mlx5e_rl_tx_limit_clr()
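mlx5e_rl_tx_limit_add() first scans the table for a duplicate of the requested rate and claims the first zero slot only when the full scan ran off the end (x == tx_rates_def); mlx5e_rl_tx_limit_clr() zeroes the matching entry. A user-space sketch of the add-if-absent scan; the return convention here is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Insert "value" into "table" unless already present; return the
     * slot index, or -1 when the table is full. Mirrors the two scans
     * in mlx5e_rl_tx_limit_add(). */
    static int
    rate_table_add(uint64_t *table, unsigned num, uint64_t value)
    {
        unsigned x;

        for (x = 0; x != num; x++)
            if (table[x] == value)
                return ((int)x); /* already present */
        for (x = 0; x != num; x++) {
            if (table[x] != 0)
                continue;
            table[x] = value;
            return ((int)x);     /* claimed first free slot */
        }
        return (-1);             /* table full */
    }

    int
    main(void)
    {
        uint64_t table[4] = { 1000000, 0, 0, 0 };

        printf("%d\n", rate_table_add(table, 4, 2000000)); /* 1 */
        printf("%d\n", rate_table_add(table, 4, 1000000)); /* 0 */
        return (0);
    }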
1445 struct mlx5e_priv *priv = rl->priv; in mlx5e_rl_sysctl_handler()
1454 value = rl->param.arg[arg2]; in mlx5e_rl_sysctl_handler()
1459 if (error || req->newptr == NULL || in mlx5e_rl_sysctl_handler()
1460 value == rl->param.arg[arg2]) in mlx5e_rl_sysctl_handler()
1467 if (priv->gone) { in mlx5e_rl_sysctl_handler()
1471 was_opened = rl->opened; in mlx5e_rl_sysctl_handler()
1472 mode_modify = MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify); in mlx5e_rl_sysctl_handler()
1476 if (value > rl->param.tx_worker_threads_max) in mlx5e_rl_sysctl_handler()
1477 value = rl->param.tx_worker_threads_max; in mlx5e_rl_sysctl_handler()
1482 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1486 if (value > rl->param.tx_channels_per_worker_max) in mlx5e_rl_sysctl_handler()
1487 value = rl->param.tx_channels_per_worker_max; in mlx5e_rl_sysctl_handler()
1492 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1496 if (value > rl->param.tx_rates_max) in mlx5e_rl_sysctl_handler()
1497 value = rl->param.tx_rates_max; in mlx5e_rl_sysctl_handler()
1502 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1513 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1528 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1545 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1564 else if (value > priv->params_ethtool.tx_queue_size_max) in mlx5e_rl_sysctl_handler()
1565 value = priv->params_ethtool.tx_queue_size_max; in mlx5e_rl_sysctl_handler()
1571 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1587 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1607 if (value > rl->param.tx_allowed_deviation_max) in mlx5e_rl_sysctl_handler()
1608 value = rl->param.tx_allowed_deviation_max; in mlx5e_rl_sysctl_handler()
1609 else if (value < rl->param.tx_allowed_deviation_min) in mlx5e_rl_sysctl_handler()
1610 value = rl->param.tx_allowed_deviation_min; in mlx5e_rl_sysctl_handler()
1613 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1619 if (value > rl->param.tx_burst_size_max) in mlx5e_rl_sysctl_handler()
1620 value = rl->param.tx_burst_size_max; in mlx5e_rl_sysctl_handler()
1621 else if (value < rl->param.tx_burst_size_min) in mlx5e_rl_sysctl_handler()
1622 value = rl->param.tx_burst_size_min; in mlx5e_rl_sysctl_handler()
1625 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1642 * NOTE: In FreeBSD-11 and newer the CTLFLAG_RWTUN flag will in mlx5e_rl_sysctl_add_u64_oid()
1647 /* read-only SYSCTLs */ in mlx5e_rl_sysctl_add_u64_oid()
1648 SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, in mlx5e_rl_sysctl_add_u64_oid()
1654 /* tunable read-only advanced SYSCTLs */ in mlx5e_rl_sysctl_add_u64_oid()
1655 SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, in mlx5e_rl_sysctl_add_u64_oid()
1660 /* read-write SYSCTLs */ in mlx5e_rl_sysctl_add_u64_oid()
1661 SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, in mlx5e_rl_sysctl_add_u64_oid()
1672 /* read-only SYSCTLs */ in mlx5e_rl_sysctl_add_stats_u64_oid()
1673 SYSCTL_ADD_U64(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, name, in mlx5e_rl_sysctl_add_stats_u64_oid()
1674 CTLFLAG_RD, &rl->stats.arg[x], 0, desc); in mlx5e_rl_sysctl_add_stats_u64_oid()