/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ssovf_worker.h"

static force_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t grp = ev->queue_id;

	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

static force_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;
	const uint32_t tag = (uint32_t)ev->event;
	/*
	 * cur_tt/new_tt      SSO_SYNC_ORDERED  SSO_SYNC_ATOMIC  SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm              norm             untag
	 * SSO_SYNC_ATOMIC         norm              norm             untag
	 * SSO_SYNC_UNTAGGED       full              full             NOOP
	 */
	if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
		if (new_tt != SSO_SYNC_UNTAGGED) {
			ssows_swtag_full(ws, ev->u64, tag,
					new_tt, grp);
		}
	} else {
		if (likely(new_tt != SSO_SYNC_UNTAGGED))
			ssows_swtag_norm(ws, tag, new_tt);
		else
			ssows_swtag_untag(ws);
	}
	ws->swtag_req = 1;
}

#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)

static force_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;

	if (cur_tt == SSO_SYNC_ORDERED) {
		/* Create unique tag based on custom event type and new grp */
		uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

		newtag |= grp << 20;
		newtag |= tag;
		ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
		rte_smp_wmb();
		ssows_swtag_wait(ws);
	} else {
		rte_smp_wmb();
	}
	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

static force_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed; use SWTAG to forward the event */
	if (ws->cur_grp == grp)
		ssows_fwd_swtag(ws, ev, grp);
	else
	/*
	 * Group has changed for group-based work pipelining; use the
	 * deschedule/add_work operation to transfer the event to the
	 * new group/core.
	 */
		ssows_fwd_group(ws, ev, grp);
}

static force_inline void
ssows_release_event(struct ssows *ws)
{
	if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
		ssows_swtag_untag(ws);
}

force_inline uint16_t __hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;

	RTE_SET_USED(timeout_ticks);

	ssows_swtag_wait(ws);
	if (ws->swtag_req) {
		ws->swtag_req = 0;
		return 1;
	} else {
		return ssows_get_work(ws, ev);
	}
}

force_inline uint16_t __hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;
	uint64_t iter;
	uint16_t ret = 1;

	ssows_swtag_wait(ws);
	if (ws->swtag_req) {
		ws->swtag_req = 0;
	} else {
		ret = ssows_get_work(ws, ev);
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
			ret = ssows_get_work(ws, ev);
	}
	return ret;
}

uint16_t __hot
ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
		uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq(port, ev, timeout_ticks);
}

uint16_t __hot
ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
			uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq_timeout(port, ev, timeout_ticks);
}

force_inline uint16_t __hot
ssows_enq(void *port, const struct rte_event *ev)
{
	struct ssows *ws = port;
	uint16_t ret = 1;

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		ssows_new_event(ws, ev);
		break;
	case RTE_EVENT_OP_FORWARD:
		ssows_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		ssows_release_event(ws);
		break;
	default:
		ret = 0;
	}
	return ret;
}

uint16_t __hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	RTE_SET_USED(nb_events);
	return ssows_enq(port, ev);
}

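/*
 * Drain all events still pending on the given event queue (SSO group):
 * keep issuing grouped GET_WORK loads against the group until both
 * SSO_VHGRP_AQ_CNT and the CQ/DS fields of SSO_VHGRP_INT_CNT read back
 * as zero. Nothing is done if the group is disabled (SSO_VHGRP_QCTL
 * reads zero).
 */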
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id)
{
	uint32_t reg_off;
	uint64_t aq_cnt = 1;
	uint64_t cq_ds_cnt = 1;
	uint64_t enable, get_work0, get_work1;
	uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

	RTE_SET_USED(get_work0);
	RTE_SET_USED(get_work1);

	enable = ssovf_read64(base + SSO_VHGRP_QCTL);
	if (!enable)
		return;

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 17; /* Grouped */
	reg_off |= 1 << 16; /* WAIT */
	reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
	while (aq_cnt || cq_ds_cnt) {
		aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
		cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x1FFF1FFF0000;
		ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
	}
}

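/*
 * Clear any tag state held by the work slot: if a tag switch to
 * ORDERED/ATOMIC is still pending, deschedule the current work;
 * otherwise, if the slot holds an ORDERED/ATOMIC tag, switch it
 * to UNTAGGED.
 */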
void
ssows_reset(struct ssows *ws)
{
	uint64_t tag;
	uint64_t pend_tag;
	uint8_t pend_tt;
	uint8_t tt;

	tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
	pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

	if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
		pend_tt = (pend_tag >> 32) & 0x3;
		if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
			ssows_desched(ws);
	} else {
		tt = (tag >> 32) & 0x3;
		if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
			ssows_swtag_untag(ws);
	}
}