/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017,2018 HXT-semitech Corporation.
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * Copyright (c) 2021 Arm Limited
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_C11_PVT_H_
#define _RTE_RING_C11_PVT_H_

/**
 * @file rte_ring_c11_pvt.h
 * It is not recommended to include this file directly;
 * include <rte_ring.h> instead.
 * Contains internal helper functions for the MP/SP and MC/SC ring modes.
 * For more information please refer to <rte_ring.h>.
 */

/**
 * @internal This function updates the tail value.
 */
static __rte_always_inline void
__rte_ring_update_tail(struct rte_ring_headtail *ht, uint32_t old_val,
		uint32_t new_val, uint32_t single, uint32_t enqueue)
{
	RTE_SET_USED(enqueue);

	/*
	 * If there are other enqueues/dequeues in progress that preceded us,
	 * we need to wait for them to complete.
	 */
	if (!single)
		rte_wait_until_equal_32((uint32_t *)(uintptr_t)&ht->tail, old_val,
			rte_memory_order_relaxed);

	rte_atomic_store_explicit(&ht->tail, new_val, rte_memory_order_release);
}
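
/*
 * Illustrative timeline (a sketch, not part of the API): on an MP ring,
 * the wait in __rte_ring_update_tail() keeps tail updates in reservation
 * order. Suppose producers A and B reserved slots 0..3 and 4..7, and B
 * finishes copying first; B must not publish before A, or consumers
 * could read A's still-unwritten slots:
 *
 *	A: prod.head 0 -> 4	B: prod.head 4 -> 8
 *	A: copying objs 0..3	B: done copying objs 4..7
 *				B: waits until prod.tail == 4 (its old_val)
 *	A: prod.tail 0 -> 4 (store-release)
 *				B: prod.tail 4 -> 8 (store-release)
 */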

/**
 * @internal This is a helper function that moves the producer/consumer head.
 *
 * @param d
 *   A pointer to the headtail structure whose head value is to be moved.
 * @param s
 *   A pointer to the counterpart headtail structure. Note that this
 *   function only reads the tail value from it.
 * @param capacity
 *   Either the ring capacity value (for the producer) or zero (for the
 *   consumer).
 * @param is_st
 *   Non-zero if the single-thread path can be used; zero if the
 *   multi-thread safe path is needed.
 * @param n
 *   The number of elements to move the head value by.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Move by a fixed number of items.
 *   RTE_RING_QUEUE_VARIABLE: Move by as many items as possible.
 * @param old_head
 *   Returns the head value as it was before the move.
 * @param new_head
 *   Returns the new head value.
 * @param entries
 *   Returns the number of ring entries available BEFORE the head was moved.
 * @return
 *   Actual number of objects the head was moved by.
 *   If behavior == RTE_RING_QUEUE_FIXED, this will be either 0 or n.
 */
static __rte_always_inline unsigned int
__rte_ring_headtail_move_head(struct rte_ring_headtail *d,
		const struct rte_ring_headtail *s, uint32_t capacity,
		unsigned int is_st, unsigned int n,
		enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head, uint32_t *entries)
{
	uint32_t stail;
	int success;
	unsigned int max = n;

	*old_head = rte_atomic_load_explicit(&d->head,
			rte_memory_order_relaxed);
	do {
		/* Reset n to the initial burst count */
		n = max;

		/* Ensure the head is read before the tail */
		rte_atomic_thread_fence(rte_memory_order_acquire);

		/* This load-acquire synchronizes with the store-release of
		 * ht->tail in __rte_ring_update_tail.
		 */
		stail = rte_atomic_load_explicit(&s->tail,
					rte_memory_order_acquire);

		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * *old_head > s->tail). So 'entries' is always between 0
		 * and capacity (which is < size).
		 */
		*entries = (capacity + stail - *old_head);

		/* check that we have enough room in the ring */
		if (unlikely(n > *entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *entries;

		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_st) {
			d->head = *new_head;
			success = 1;
		} else
			/* on failure, *old_head is updated to the current value */
			success = rte_atomic_compare_exchange_strong_explicit(
					&d->head, old_head, *new_head,
					rte_memory_order_relaxed,
					rte_memory_order_relaxed);
	} while (unlikely(success == 0));
	return n;
}
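
/*
 * Usage sketch (illustration only, with assumed helper names): a typical
 * multi-producer enqueue built from the two helpers above. The copy step
 * and the exact wrappers live in rte_ring_elem_pvt.h; this merely shows
 * the reserve/copy/publish order:
 *
 *	uint32_t old_head, new_head, free_entries;
 *	unsigned int n;
 *
 *	// Reserve space: move prod.head forward under CAS.
 *	n = __rte_ring_headtail_move_head(&r->prod, &r->cons, r->capacity,
 *			0, num, RTE_RING_QUEUE_FIXED,
 *			&old_head, &new_head, &free_entries);
 *	if (n == 0)
 *		return 0;
 *
 *	// Copy the objects into the slots reserved at old_head.
 *	__rte_ring_enqueue_elems(r, old_head, obj_table, esize, n);
 *
 *	// Publish: wait for preceding producers, then release prod.tail.
 *	// This store-release pairs with the load-acquire of s->tail above.
 *	__rte_ring_update_tail(&r->prod, old_head, new_head, 0, 1);
 */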

#endif /* _RTE_RING_C11_PVT_H_ */