/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2022 Microsoft Corporation
 */

#include <ethdev_driver.h>
#include <rte_io.h>

#include "mana.h"

/*
 * Return a pointer to the WQE slot at the current queue head.
 * The mask math assumes queue->size is a power of two.
 */
uint8_t *
gdma_get_wqe_pointer(struct mana_gdma_queue *queue)
{
	uint32_t offset_in_bytes =
		(queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &
		(queue->size - 1);

	DP_LOG(DEBUG, "txq sq_head %u sq_size %u offset_in_bytes %u",
	       queue->head, queue->size, offset_in_bytes);

	if (offset_in_bytes + GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue->size)
		DP_LOG(ERR, "fatal error: offset_in_bytes %u too big",
		       offset_in_bytes);

	return ((uint8_t *)queue->buffer) + offset_in_bytes;
}

/*
 * Write the DMA out-of-band header followed by the inline client OOB data,
 * zero-padding the latter up to client_oob_size.
 * Returns the number of bytes written.
 */
static uint32_t
write_dma_client_oob(uint8_t *work_queue_buffer_pointer,
		     const struct gdma_work_request *work_request,
		     uint32_t client_oob_size)
{
	uint8_t *p = work_queue_buffer_pointer;

	struct gdma_wqe_dma_oob *header = (struct gdma_wqe_dma_oob *)p;

	memset(header, 0, sizeof(struct gdma_wqe_dma_oob));
	header->num_sgl_entries = work_request->num_sgl_elements;
	header->inline_client_oob_size_in_dwords =
		client_oob_size / sizeof(uint32_t);
	header->client_data_unit = work_request->client_data_unit;

	DP_LOG(DEBUG, "queue buf %p sgl %u oob_h %u du %u oob_buf %p oob_b %u",
	       work_queue_buffer_pointer, header->num_sgl_entries,
	       header->inline_client_oob_size_in_dwords,
	       header->client_data_unit, work_request->inline_oob_data,
	       work_request->inline_oob_size_in_bytes);

	p += sizeof(struct gdma_wqe_dma_oob);
	if (work_request->inline_oob_data &&
	    work_request->inline_oob_size_in_bytes > 0) {
		memcpy(p, work_request->inline_oob_data,
		       work_request->inline_oob_size_in_bytes);
		if (client_oob_size > work_request->inline_oob_size_in_bytes)
			memset(p + work_request->inline_oob_size_in_bytes, 0,
			       client_oob_size -
			       work_request->inline_oob_size_in_bytes);
	}

	return sizeof(struct gdma_wqe_dma_oob) + client_oob_size;
}

/*
 * Copy the scatter-gather list into the work queue, wrapping around to the
 * head of the queue buffer when the list crosses the queue end.
 */
static uint32_t
write_scatter_gather_list(uint8_t *work_queue_head_pointer,
			  uint8_t *work_queue_end_pointer,
			  uint8_t *work_queue_cur_pointer,
			  struct gdma_work_request *work_request)
{
	struct gdma_sgl_element *sge_list;
	struct gdma_sgl_element dummy_sgl[1];
	uint8_t *address;
	uint32_t size;
	uint32_t num_sge;
	uint32_t size_to_queue_end;
	uint32_t sge_list_size;

	DP_LOG(DEBUG, "work_queue_cur_pointer %p work_request->flags %x",
	       work_queue_cur_pointer, work_request->flags);

	num_sge = work_request->num_sgl_elements;
	sge_list = work_request->sgl;
	size_to_queue_end = (uint32_t)(work_queue_end_pointer -
				       work_queue_cur_pointer);

	if (num_sge == 0) {
		/* Per spec, the case of an empty SGL should be handled as
		 * follows to avoid corrupted WQE errors:
		 * Write one dummy SGL entry
		 * Set the address to 1, leave the rest as 0
		 */
		dummy_sgl[num_sge].address = 1;
		dummy_sgl[num_sge].size = 0;
		dummy_sgl[num_sge].memory_key = 0;
		num_sge++;
		sge_list = dummy_sgl;
	}

	sge_list_size = 0;
	{
		address = (uint8_t *)sge_list;
		size = sizeof(struct gdma_sgl_element) * num_sge;
		if (size_to_queue_end < size) {
			memcpy(work_queue_cur_pointer, address,
			       size_to_queue_end);
			work_queue_cur_pointer = work_queue_head_pointer;
			address += size_to_queue_end;
			size -= size_to_queue_end;
		}

		memcpy(work_queue_cur_pointer, address, size);
		sge_list_size = size;
	}

	DP_LOG(DEBUG, "sge %u address 0x%" PRIx64 " size %u key %u list_s %u",
	       num_sge, sge_list->address, sge_list->size,
	       sge_list->memory_key, sge_list_size);

	return sge_list_size;
}
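/*
 * Layout of a WQE as written into the queue by the two helpers above
 * (summarized from the code, not from the hardware spec):
 *
 *	struct gdma_wqe_dma_oob	- DMA OOB header, zeroed then filled in
 *	inline client OOB	- client_oob_size bytes, zero-padded
 *	SGL			- num_sge * sizeof(struct gdma_sgl_element),
 *				  wrapping at the end of the queue buffer
 *
 * gdma_post_work_request() below rounds the sum up to a multiple of
 * GDMA_WQE_ALIGNMENT_UNIT_SIZE before advancing queue->head.
 */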
/*
 * Post a work request to the queue.
 * Returns 0 on success, -EBUSY if the queue does not have enough room.
 */
int
gdma_post_work_request(struct mana_gdma_queue *queue,
		       struct gdma_work_request *work_req,
		       uint32_t *wqe_size_in_bu)
{
	uint32_t client_oob_size =
		work_req->inline_oob_size_in_bytes >
			INLINE_OOB_SMALL_SIZE_IN_BYTES ?
		INLINE_OOB_LARGE_SIZE_IN_BYTES :
		INLINE_OOB_SMALL_SIZE_IN_BYTES;

	uint32_t sgl_data_size = sizeof(struct gdma_sgl_element) *
		RTE_MAX((uint32_t)1, work_req->num_sgl_elements);
	uint32_t wqe_size =
		RTE_ALIGN(sizeof(struct gdma_wqe_dma_oob) +
			  client_oob_size + sgl_data_size,
			  GDMA_WQE_ALIGNMENT_UNIT_SIZE);
	uint8_t *wq_buffer_pointer;
	uint32_t queue_free_units = queue->count - (queue->head - queue->tail);

	if (wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue_free_units) {
		DP_LOG(DEBUG, "WQE size %u queue count %u head %u tail %u",
		       wqe_size, queue->count, queue->head, queue->tail);
		return -EBUSY;
	}

	DP_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u",
	       client_oob_size, sgl_data_size, wqe_size);

	*wqe_size_in_bu = wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;

	wq_buffer_pointer = gdma_get_wqe_pointer(queue);
	wq_buffer_pointer += write_dma_client_oob(wq_buffer_pointer, work_req,
						  client_oob_size);
	if (wq_buffer_pointer >= ((uint8_t *)queue->buffer) + queue->size)
		wq_buffer_pointer -= queue->size;

	write_scatter_gather_list((uint8_t *)queue->buffer,
				  (uint8_t *)queue->buffer + queue->size,
				  wq_buffer_pointer, work_req);

	queue->head += wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;

	return 0;
}

union gdma_doorbell_entry {
	uint64_t as_uint64;

	struct {
		uint64_t id	  : 24;
		uint64_t reserved : 8;
		uint64_t tail_ptr : 31;
		uint64_t arm	  : 1;
	} cq;

	struct {
		uint64_t id	  : 24;
		uint64_t wqe_cnt  : 8;
		uint64_t tail_ptr : 32;
	} rq;

	struct {
		uint64_t id	  : 24;
		uint64_t reserved : 8;
		uint64_t tail_ptr : 32;
	} sq;

	struct {
		uint64_t id	  : 16;
		uint64_t reserved : 16;
		uint64_t tail_ptr : 31;
		uint64_t arm	  : 1;
	} eq;
}; /* HW DATA */

enum {
	DOORBELL_OFFSET_SQ = 0x0,
	DOORBELL_OFFSET_RQ = 0x400,
	DOORBELL_OFFSET_CQ = 0x800,
	DOORBELL_OFFSET_EQ = 0xFF8,
};

/*
 * Write to the hardware doorbell to notify it of new activity.
 */
int
mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
		   uint32_t queue_id, uint32_t tail, uint8_t arm)
{
	uint8_t *addr = db_page;
	union gdma_doorbell_entry e = {};

	switch (queue_type) {
	case GDMA_QUEUE_SEND:
		e.sq.id = queue_id;
		e.sq.tail_ptr = tail;
		addr += DOORBELL_OFFSET_SQ;
		break;

	case GDMA_QUEUE_RECEIVE:
		e.rq.id = queue_id;
		e.rq.tail_ptr = tail;
		e.rq.wqe_cnt = arm;
		addr += DOORBELL_OFFSET_RQ;
		break;

	case GDMA_QUEUE_COMPLETION:
		e.cq.id = queue_id;
		e.cq.tail_ptr = tail;
		e.cq.arm = arm;
		addr += DOORBELL_OFFSET_CQ;
		break;

	default:
		DP_LOG(ERR, "Unsupported queue type %d", queue_type);
		return -1;
	}

	/* Ensure all writes are done before ringing doorbell */
	rte_wmb();

	DP_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u",
	       db_page, addr, queue_id, queue_type, tail, arm);

	rte_write64(e.as_uint64, addr);
	return 0;
}
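/*
 * Illustrative flow for posting a send WQE and notifying hardware; a sketch
 * only: "txq", "db_page", "lkey" and the mbuf wiring are assumed caller
 * state, and the tail value (head converted to bytes) mirrors what a send
 * path would pass. Only gdma_post_work_request() and mana_ring_doorbell()
 * are the functions defined in this file.
 *
 *	struct gdma_work_request req = {};
 *	struct gdma_sgl_element sge;
 *	uint32_t wqe_size_in_bu;
 *
 *	sge.address = rte_mbuf_data_iova(mbuf);
 *	sge.size = mbuf->data_len;
 *	sge.memory_key = lkey;
 *	req.sgl = &sge;
 *	req.num_sgl_elements = 1;
 *
 *	if (!gdma_post_work_request(&txq->gdma_sq, &req, &wqe_size_in_bu))
 *		mana_ring_doorbell(db_page, GDMA_QUEUE_SEND, txq->gdma_sq.id,
 *				   txq->gdma_sq.head *
 *				   GDMA_WQE_ALIGNMENT_UNIT_SIZE, 0);
 */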
/*
 * Poll the completion queue for up to max_comp completions.
 * Returns the number of completions harvested into gdma_comp.
 */
uint32_t
gdma_poll_completion_queue(struct mana_gdma_queue *cq,
			   struct gdma_comp *gdma_comp, uint32_t max_comp)
{
	struct gdma_hardware_completion_entry *cqe;
	uint32_t new_owner_bits, old_owner_bits;
	uint32_t cqe_owner_bits;
	uint32_t num_comp = 0;
	struct gdma_hardware_completion_entry *buffer = cq->buffer;

	while (num_comp < max_comp) {
		cqe = &buffer[cq->head % cq->count];
		new_owner_bits = (cq->head / cq->count) &
				 COMPLETION_QUEUE_OWNER_MASK;
		old_owner_bits = (cq->head / cq->count - 1) &
				 COMPLETION_QUEUE_OWNER_MASK;
		cqe_owner_bits = cqe->owner_bits;

		DP_LOG(DEBUG, "comp cqe bits 0x%x owner bits 0x%x",
		       cqe_owner_bits, old_owner_bits);

		/* No new entry */
		if (cqe_owner_bits == old_owner_bits)
			break;

		if (cqe_owner_bits != new_owner_bits) {
			DRV_LOG(ERR, "CQ overflowed, ID %u cqe 0x%x new 0x%x",
				cq->id, cqe_owner_bits, new_owner_bits);
			break;
		}

		gdma_comp[num_comp].cqe_data = cqe->dma_client_data;
		num_comp++;

		cq->head++;

		DP_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u",
		       new_owner_bits, old_owner_bits, cqe_owner_bits,
		       cqe->wq_num, cqe->is_sq, cq->head);
	}

	/* Make sure the CQE owner bits are checked before we access the data
	 * in CQE
	 */
	rte_rmb();

	return num_comp;
}
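/*
 * Illustrative CQ drain loop; a sketch only: process_completion() and the
 * burst size are hypothetical, and the exact tail value for re-arming may
 * need the wrapping the driver's CQ helpers apply. The two calls are the
 * functions defined above.
 *
 *	struct gdma_comp comp[32];
 *	uint32_t i, n;
 *
 *	n = gdma_poll_completion_queue(&cq, comp, RTE_DIM(comp));
 *	for (i = 0; i < n; i++)
 *		process_completion(comp[i].cqe_data);
 *
 *	// Re-arm so hardware signals when the next completion arrives
 *	mana_ring_doorbell(db_page, GDMA_QUEUE_COMPLETION, cq.id, cq.head, 1);
 */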