/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2022 Microsoft Corporation
 */

#include <ethdev_driver.h>
#include <rte_io.h>

#include "mana.h"

/*
 * Return a pointer to the WQE slot at the current queue head, wrapped to the
 * ring buffer size.
 */
uint8_t *
gdma_get_wqe_pointer(struct mana_gdma_queue *queue)
{
	uint32_t offset_in_bytes =
		(queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &
		(queue->size - 1);

	DRV_LOG(DEBUG, "txq sq_head %u sq_size %u offset_in_bytes %u",
		queue->head, queue->size, offset_in_bytes);

	if (offset_in_bytes + GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue->size)
		DRV_LOG(ERR, "fatal error: offset_in_bytes %u too big",
			offset_in_bytes);

	return ((uint8_t *)queue->buffer) + offset_in_bytes;
}

/*
 * Write the DMA out-of-band header and the inline client OOB data at the
 * start of a WQE. Returns the number of bytes consumed in the WQE (the
 * header plus the zero-padded client OOB region).
 */
static uint32_t
write_dma_client_oob(uint8_t *work_queue_buffer_pointer,
		     const struct gdma_work_request *work_request,
		     uint32_t client_oob_size)
{
	uint8_t *p = work_queue_buffer_pointer;

	struct gdma_wqe_dma_oob *header = (struct gdma_wqe_dma_oob *)p;

	memset(header, 0, sizeof(struct gdma_wqe_dma_oob));
	header->num_sgl_entries = work_request->num_sgl_elements;
	header->inline_client_oob_size_in_dwords =
		client_oob_size / sizeof(uint32_t);
	header->client_data_unit = work_request->client_data_unit;

	DRV_LOG(DEBUG, "queue buf %p sgl %u oob_h %u du %u oob_buf %p oob_b %u",
		work_queue_buffer_pointer, header->num_sgl_entries,
		header->inline_client_oob_size_in_dwords,
		header->client_data_unit, work_request->inline_oob_data,
		work_request->inline_oob_size_in_bytes);

	p += sizeof(struct gdma_wqe_dma_oob);
	if (work_request->inline_oob_data &&
	    work_request->inline_oob_size_in_bytes > 0) {
		memcpy(p, work_request->inline_oob_data,
		       work_request->inline_oob_size_in_bytes);
		if (client_oob_size > work_request->inline_oob_size_in_bytes)
			memset(p + work_request->inline_oob_size_in_bytes, 0,
			       client_oob_size -
			       work_request->inline_oob_size_in_bytes);
	}

	return sizeof(struct gdma_wqe_dma_oob) + client_oob_size;
}

/*
 * Copy the scatter gather list (or a dummy entry if it is empty) into the
 * WQE, wrapping around to the head of the ring buffer if needed.
 */
static uint32_t
write_scatter_gather_list(uint8_t *work_queue_head_pointer,
			  uint8_t *work_queue_end_pointer,
			  uint8_t *work_queue_cur_pointer,
			  struct gdma_work_request *work_request)
{
	struct gdma_sgl_element *sge_list;
	struct gdma_sgl_element dummy_sgl[1];
	uint8_t *address;
	uint32_t size;
	uint32_t num_sge;
	uint32_t size_to_queue_end;
	uint32_t sge_list_size;

	DRV_LOG(DEBUG, "work_queue_cur_pointer %p work_request->flags %x",
		work_queue_cur_pointer, work_request->flags);

	num_sge = work_request->num_sgl_elements;
	sge_list = work_request->sgl;
	size_to_queue_end = (uint32_t)(work_queue_end_pointer -
				       work_queue_cur_pointer);

	if (num_sge == 0) {
		/* Per spec, the case of an empty SGL should be handled as
		 * follows to avoid corrupted WQE errors:
		 * Write one dummy SGL entry
		 * Set the address to 1, leave the rest as 0
		 */
		dummy_sgl[num_sge].address = 1;
		dummy_sgl[num_sge].size = 0;
		dummy_sgl[num_sge].memory_key = 0;
		num_sge++;
		sge_list = dummy_sgl;
	}

	sge_list_size = 0;
	{
		address = (uint8_t *)sge_list;
		size = sizeof(struct gdma_sgl_element) * num_sge;
		if (size_to_queue_end < size) {
			memcpy(work_queue_cur_pointer, address,
			       size_to_queue_end);
			work_queue_cur_pointer = work_queue_head_pointer;
			address += size_to_queue_end;
			size -= size_to_queue_end;
		}

		memcpy(work_queue_cur_pointer, address, size);
		sge_list_size = size;
	}

	DRV_LOG(DEBUG, "sge %u address 0x%" PRIx64 " size %u key %u list_s %u",
		num_sge, sge_list->address, sge_list->size,
		sge_list->memory_key, sge_list_size);

	return sge_list_size;
}
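
/*
 * For reference, a sketch of the WQE layout that the helpers above produce
 * and gdma_post_work_request() below sizes (derived from the code in this
 * file, not an authoritative hardware description):
 *
 *   struct gdma_wqe_dma_oob header
 *   inline client OOB   (INLINE_OOB_SMALL/LARGE_SIZE_IN_BYTES, zero padded)
 *   SGL entries         (num_sgl_elements * sizeof(struct gdma_sgl_element))
 *
 * with the total rounded up to GDMA_WQE_ALIGNMENT_UNIT_SIZE.
 */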

/*
 * Post a work request to queue.
 */
int
gdma_post_work_request(struct mana_gdma_queue *queue,
		       struct gdma_work_request *work_req,
		       struct gdma_posted_wqe_info *wqe_info)
{
	uint32_t client_oob_size =
		work_req->inline_oob_size_in_bytes >
			INLINE_OOB_SMALL_SIZE_IN_BYTES ?
		INLINE_OOB_LARGE_SIZE_IN_BYTES :
		INLINE_OOB_SMALL_SIZE_IN_BYTES;

	uint32_t sgl_data_size = sizeof(struct gdma_sgl_element) *
		RTE_MAX((uint32_t)1, work_req->num_sgl_elements);
	uint32_t wqe_size =
		RTE_ALIGN(sizeof(struct gdma_wqe_dma_oob) +
			  client_oob_size + sgl_data_size,
			  GDMA_WQE_ALIGNMENT_UNIT_SIZE);
	uint8_t *wq_buffer_pointer;
	uint32_t queue_free_units = queue->count - (queue->head - queue->tail);

	if (wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue_free_units) {
		DRV_LOG(DEBUG, "WQE size %u queue count %u head %u tail %u",
			wqe_size, queue->count, queue->head, queue->tail);
		return -EBUSY;
	}

	DRV_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u",
		client_oob_size, sgl_data_size, wqe_size);

	if (wqe_info) {
		wqe_info->wqe_index =
			((queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &
			 (queue->size - 1)) / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
		wqe_info->unmasked_queue_offset = queue->head;
		wqe_info->wqe_size_in_bu =
			wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
	}

	wq_buffer_pointer = gdma_get_wqe_pointer(queue);
	wq_buffer_pointer += write_dma_client_oob(wq_buffer_pointer, work_req,
						  client_oob_size);
	if (wq_buffer_pointer >= ((uint8_t *)queue->buffer) + queue->size)
		wq_buffer_pointer -= queue->size;

	write_scatter_gather_list((uint8_t *)queue->buffer,
				  (uint8_t *)queue->buffer + queue->size,
				  wq_buffer_pointer, work_req);

	queue->head += wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;

	return 0;
}

union gdma_doorbell_entry {
	uint64_t as_uint64;

	struct {
		uint64_t id	  : 24;
		uint64_t reserved : 8;
		uint64_t tail_ptr : 31;
		uint64_t arm	  : 1;
	} cq;

	struct {
		uint64_t id	  : 24;
		uint64_t wqe_cnt  : 8;
		uint64_t tail_ptr : 32;
	} rq;

	struct {
		uint64_t id	  : 24;
		uint64_t reserved : 8;
		uint64_t tail_ptr : 32;
	} sq;

	struct {
		uint64_t id	  : 16;
		uint64_t reserved : 16;
		uint64_t tail_ptr : 31;
		uint64_t arm	  : 1;
	} eq;
}; /* HW DATA */

enum {
	DOORBELL_OFFSET_SQ = 0x0,
	DOORBELL_OFFSET_RQ = 0x400,
	DOORBELL_OFFSET_CQ = 0x800,
	DOORBELL_OFFSET_EQ = 0xFF8,
};

/*
 * Write to hardware doorbell to notify new activity.
 */
int
mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
		   uint32_t queue_id, uint32_t tail, uint8_t arm)
{
	uint8_t *addr = db_page;
	union gdma_doorbell_entry e = {};

	switch (queue_type) {
	case GDMA_QUEUE_SEND:
		e.sq.id = queue_id;
		e.sq.tail_ptr = tail;
		addr += DOORBELL_OFFSET_SQ;
		break;

	case GDMA_QUEUE_RECEIVE:
		e.rq.id = queue_id;
		e.rq.tail_ptr = tail;
		e.rq.wqe_cnt = arm;
		addr += DOORBELL_OFFSET_RQ;
		break;

	case GDMA_QUEUE_COMPLETION:
		e.cq.id = queue_id;
		e.cq.tail_ptr = tail;
		e.cq.arm = arm;
		addr += DOORBELL_OFFSET_CQ;
		break;

	default:
		DRV_LOG(ERR, "Unsupported queue type %d", queue_type);
		return -1;
	}

	/* Ensure all writes are done before ringing doorbell */
	rte_wmb();

	DRV_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u",
		db_page, addr, queue_id, queue_type, tail, arm);

	rte_write64(e.as_uint64, addr);
	return 0;
}

/*
 * Poll completion queue for completions.
 */
int
gdma_poll_completion_queue(struct mana_gdma_queue *cq, struct gdma_comp *comp)
{
	struct gdma_hardware_completion_entry *cqe;
	uint32_t head = cq->head % cq->count;
	uint32_t new_owner_bits, old_owner_bits;
	uint32_t cqe_owner_bits;
	struct gdma_hardware_completion_entry *buffer = cq->buffer;

	cqe = &buffer[head];
	new_owner_bits = (cq->head / cq->count) & COMPLETION_QUEUE_OWNER_MASK;
	old_owner_bits = (cq->head / cq->count - 1) &
			 COMPLETION_QUEUE_OWNER_MASK;
	cqe_owner_bits = cqe->owner_bits;

	DRV_LOG(DEBUG, "comp cqe bits 0x%x owner bits 0x%x",
		cqe_owner_bits, old_owner_bits);

	if (cqe_owner_bits == old_owner_bits)
		return 0; /* No new entry */

	if (cqe_owner_bits != new_owner_bits) {
		DRV_LOG(ERR, "CQ overflowed, ID %u cqe 0x%x new 0x%x",
			cq->id, cqe_owner_bits, new_owner_bits);
		return -1;
	}

	/* Ensure checking owner bits happens before reading from CQE */
	rte_rmb();

	comp->work_queue_number = cqe->wq_num;
	comp->send_work_queue = cqe->is_sq;

	memcpy(comp->completion_data, cqe->dma_client_data, GDMA_COMP_DATA_SIZE);

	cq->head++;

	DRV_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u",
		new_owner_bits, old_owner_bits, cqe_owner_bits,
		comp->work_queue_number, comp->send_work_queue, cq->head);
	return 1;
}
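
/*
 * Illustrative only: a rough sketch of how a send path could use the helpers
 * in this file. The names txq, gdma_sq, db_page, buf_iova, buf_len, mr_key
 * and tx_oob are placeholders assumed for this example, not driver fields,
 * and scaling the doorbell tail to bytes is likewise an assumption here.
 *
 *	struct gdma_sgl_element sgl[1];
 *	struct gdma_work_request req = {};
 *	struct gdma_posted_wqe_info info = {};
 *
 *	sgl[0].address = buf_iova;	// DMA address of the payload (assumed)
 *	sgl[0].size = buf_len;
 *	sgl[0].memory_key = mr_key;	// registered memory key (assumed)
 *
 *	req.sgl = sgl;
 *	req.num_sgl_elements = 1;
 *	req.inline_oob_data = &tx_oob;	// driver-built transmit OOB (assumed)
 *	req.inline_oob_size_in_bytes = sizeof(tx_oob);
 *	req.client_data_unit = 0;
 *
 *	if (gdma_post_work_request(&txq->gdma_sq, &req, &info) == 0)
 *		mana_ring_doorbell(db_page, GDMA_QUEUE_SEND, txq->gdma_sq.id,
 *				   txq->gdma_sq.head *
 *					GDMA_WQE_ALIGNMENT_UNIT_SIZE,
 *				   0);
 */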