/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include <octeontx_mbox.h>

#include "ssovf_evdev.h"
#include "octeontx_rxtx.h"

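/* Tag (synchronization) types reported by the SSO for a work-slot entry */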
enum {
	SSO_SYNC_ORDERED,
	SSO_SYNC_ATOMIC,
	SSO_SYNC_UNTAGGED,
	SSO_SYNC_EMPTY
};

#ifndef __hot
#define __hot	__attribute__((hot))
#endif

/* SSO Operations */
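/*
 * Convert a work-queue entry (WQE) delivered by the SSO into the rte_mbuf
 * that sits OCTTX_PACKET_WQE_SKIP bytes before it in the packet buffer.
 * The packet type is looked up from the WQE layer-type fields and the
 * mbuf port from rte_octeontx_pchan_map[] using the two nibbles of
 * port_info.
 */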
static __rte_always_inline struct rte_mbuf *
ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
{
	struct rte_mbuf *mbuf;
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;

	/* Get mbuf from wqe */
	mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
			OCTTX_PACKET_WQE_SKIP);
	rte_prefetch_non_temporal(mbuf);
	mbuf->packet_type =
		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
	mbuf->pkt_len = wqe->s.w1.len;
	mbuf->data_len = mbuf->pkt_len;
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
	rte_mbuf_refcnt_set(mbuf, 1);

	return mbuf;
}

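/*
 * Dequeue one unit of work from this work-slot's GETWORK address pair.
 * get_work0 carries the tag, tag type and group; these are folded into
 * ev->event so that the tag type and group line up with the rte_event
 * sched_type and queue_id fields. get_work1 carries the WQE (converted to
 * an mbuf for ethdev events) or the raw event pointer. Returns !!get_work1,
 * i.e. non-zero when work was available.
 *
 * Illustrative sketch only (not part of this driver): a polling dequeue
 * loop built on this helper could look like
 *
 *	struct rte_event ev;
 *
 *	while (!ssows_get_work(ws, &ev))
 *		rte_pause();
 */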
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev)
{
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;

	ssovf_load_pair(get_work0, get_work1, ws->getwork);

	sched_type_queue = (get_work0 >> 32) & 0xfff;
	ws->cur_tt = sched_type_queue & 0x3;
	ws->cur_grp = sched_type_queue >> 2;
	sched_type_queue = sched_type_queue << 38;
	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
				(ev->event >> 20) & 0x7F);
	} else {
		ev->u64 = get_work1;
	}

	return !!get_work1;
}

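/*
 * Inject a new event into group 'grp': the tag and tag type go in the
 * first word and the event/WQE pointer in the second word of a paired
 * store to the group's ADD_WORK address.
 */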
static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t add_work0;

	add_work0 = tag | ((uint64_t)(new_tt) << 32);
	ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}

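/*
 * SWTAG_FULL: switch the current work to a new tag, tag type and group
 * and update its work-queue pointer in one paired store.
 */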
static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t swtag_full0;

	swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
				((uint64_t)grp << 34);
	ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
				SSOW_VHWS_OP_SWTAG_FULL0));
}

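/* Switch the tag and tag type, then deschedule the work to group 'grp'. */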
static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}

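/* Normal tag switch: change tag and tag type within the current group. */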
static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}

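/* Release the current tag; the work-slot becomes untagged. */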
static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
}

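/* Update the work-queue pointer and group tracked for the current work. */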
static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
	ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
				SSOW_VHWS_OP_UPD_WQP_GRP0));
}

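/* Deschedule the work currently held by this work-slot. */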
static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}

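/*
 * Spin until the pending SWTAG/SWTAG_FULL operation completes, i.e. until
 * the SSOW_VHWS_SWTP register reads zero. Typically called before issuing
 * the next tag-dependent operation.
 */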
static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
	/* Wait for the SWTAG/SWTAG_FULL operation */
	while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
	;
}
139