/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_atomic.h>
#include <rte_cycles.h>

#include "sw_evdev.h"
#include "event_ring.h"

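/* A single enqueue call accepts at most this many events; larger bursts are
 * truncated to this size, with the shortfall reflected in the return value
 * so the application can retry the remaining events.
 */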
#define PORT_ENQUEUE_MAX_BURST_SIZE 64

static inline void
sw_event_release(struct sw_port *p, uint8_t index)
{
	/*
	 * Drops the next outstanding event in our history. Used on dequeue
	 * to clear any history before dequeuing more events.
	 */
	RTE_SET_USED(index);

	/* create drop message */
	struct rte_event ev;
	ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];

	uint16_t free_count;
	qe_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);

	/* each release returns one credit */
	p->outstanding_releases--;
	p->inflight_credits++;
}

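/*
 * Enqueue a burst of events, with credit accounting against the device-wide
 * inflight count: NEW events consume a credit, RELEASE events return one,
 * and FORWARD events are credit-neutral on load-balanced ports (directed
 * ports pay a credit per forward instead). Credits are acquired from the
 * shared pool a credit_update_quanta at a time, so the atomic add on
 * sw->inflights is amortized over many enqueues.
 */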
uint16_t
sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
{
	int32_t i;
	uint8_t new_ops[PORT_ENQUEUE_MAX_BURST_SIZE];
	struct sw_port *p = port;
	struct sw_evdev *sw = (void *)p->sw;
	uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);

	if (unlikely(p->inflight_max < sw_inflights))
		return 0;

	if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
		num = PORT_ENQUEUE_MAX_BURST_SIZE;

	if (p->inflight_credits < num) {
		/* check if acquiring a quanta of credits would take the
		 * device over its inflight event limit
		 */
		uint32_t credit_update_quanta = sw->credit_update_quanta;
		if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
			return 0;

		rte_atomic32_add(&sw->inflights, credit_update_quanta);
		p->inflight_credits += credit_update_quanta;

		if (p->inflight_credits < num)
			return 0;
	}

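	/* Branchless credit accounting: the op comparisons below evaluate to
	 * 0 or 1, so a NEW event consumes a credit and a RELEASE with an
	 * outstanding event returns one, without data-dependent branches in
	 * the hot loop.
	 */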
	uint32_t forwards = 0;
	for (i = 0; i < num; i++) {
		int op = ev[i].op;
		int outstanding = p->outstanding_releases > 0;
		const uint8_t invalid_qid = (ev[i].queue_id >= sw->qid_count);

		p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
		p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
					outstanding;
		forwards += (op == RTE_EVENT_OP_FORWARD);

		new_ops[i] = sw_qe_flag_map[op];
		new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);

		/* FWD and RELEASE events both resolve to "taken" here
		 * (assuming correct usage of the API), so this branch
		 * predicts very well.
		 */
		if ((new_ops[i] & QE_FLAG_COMPLETE) && outstanding)
			p->outstanding_releases--;

		/* error case: take a branch so the common path never
		 * touches p->stats
		 */
		if (unlikely(invalid_qid)) {
			p->stats.rx_dropped++;
			p->inflight_credits++;
		}
	}

	/* handle directed port forward credits */
	p->inflight_credits -= forwards * p->is_directed;

	/* returns number of events actually enqueued */
	uint32_t enq = qe_ring_enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
					     new_ops);
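	/* If every previously dequeued event has now been released or
	 * forwarded, fold the cycles spent on the last dequeued burst into
	 * a per-event running average that decays over NUM_SAMPLES bursts.
	 */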
	if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
		uint64_t burst_ticks = rte_get_timer_cycles() -
				p->last_dequeue_ticks;
		uint64_t burst_pkt_ticks =
			burst_ticks / p->last_dequeue_burst_sz;
		p->avg_pkt_ticks -= p->avg_pkt_ticks / NUM_SAMPLES;
		p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
		p->last_dequeue_ticks = 0;
	}
	return enq;
}

uint16_t
sw_event_enqueue(void *port, const struct rte_event *ev)
{
	return sw_event_enqueue_burst(port, ev, 1);
}

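/*
 * Dequeue a burst of events. On a load-balanced port, any events still
 * outstanding from the previous dequeue are released first; a directed port
 * instead earns one credit per dequeued event. Surplus credits beyond a
 * hysteresis threshold are handed back to the shared pool.
 */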
uint16_t
sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
		uint64_t wait)
{
	RTE_SET_USED(wait);
	struct sw_port *p = (void *)port;
	struct sw_evdev *sw = (void *)p->sw;
	struct qe_ring *ring = p->cq_worker_ring;
	uint32_t credit_update_quanta = sw->credit_update_quanta;

	/* release any events still outstanding from the previous dequeue */
	if (!p->is_directed) {
		uint16_t out_rels = p->outstanding_releases;
		uint16_t i;
		for (i = 0; i < out_rels; i++)
			sw_event_release(p, i);
	}

	/* returns number of events actually dequeued */
	uint16_t ndeq = qe_ring_dequeue_burst(ring, ev, num);
	if (unlikely(ndeq == 0)) {
		p->outstanding_releases = 0;
		p->zero_polls++;
		p->total_polls++;
		goto end;
	}

	/* only add credits for directed ports - LB ports send RELEASEs */
	p->inflight_credits += ndeq * p->is_directed;
	p->outstanding_releases = ndeq;
	p->last_dequeue_burst_sz = ndeq;
	p->last_dequeue_ticks = rte_get_timer_cycles();
	p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
	p->total_polls++;

end:
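	/* Return a quanta of credits to the shared pool only when the port
	 * would still hold at least a full quanta and more than this burst's
	 * worth afterwards, avoiding ping-ponging the atomic inflight
	 * counter.
	 */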
	if (p->inflight_credits >= credit_update_quanta * 2 &&
			p->inflight_credits > credit_update_quanta + ndeq) {
		rte_atomic32_sub(&sw->inflights, credit_update_quanta);
		p->inflight_credits -= credit_update_quanta;
	}
	return ndeq;
}

uint16_t
sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait)
{
	return sw_event_dequeue_burst(port, ev, 1, wait);
}