Lines Matching +full:supports +full:- +full:cqe

2  * Copyright (c) 2006-2016 Chelsio, Inc. All rights reserved.
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
45 #include "cxgb4-abi.h"
122 context->ibv_ctx.cmd_fd = cmd_fd; in c4iw_alloc_context()
126 if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof cmd, in c4iw_alloc_context()
134 context->status_page_size = resp.status_page_size; in c4iw_alloc_context()
136 context->status_page = mmap(NULL, resp.status_page_size, in c4iw_alloc_context()
139 if (context->status_page == MAP_FAILED) in c4iw_alloc_context()
143 context->ibv_ctx.device = ibdev; in c4iw_alloc_context()
144 context->ibv_ctx.ops = c4iw_ctx_ops; in c4iw_alloc_context()
146 switch (rhp->chip_version) { in c4iw_alloc_context()
153 context->ibv_ctx.ops.async_event = c4iw_async_event; in c4iw_alloc_context()
154 context->ibv_ctx.ops.post_send = c4iw_post_send; in c4iw_alloc_context()
155 context->ibv_ctx.ops.post_recv = c4iw_post_receive; in c4iw_alloc_context()
156 context->ibv_ctx.ops.poll_cq = c4iw_poll_cq; in c4iw_alloc_context()
157 context->ibv_ctx.ops.req_notify_cq = c4iw_arm_cq; in c4iw_alloc_context()
161 rhp->chip_version); in c4iw_alloc_context()
166 if (!rhp->mmid2ptr) { in c4iw_alloc_context()
169 ret = ibv_cmd_query_device(&context->ibv_ctx, &attr, &raw_fw_ver, &qcmd, in c4iw_alloc_context()
173 rhp->max_mr = attr.max_mr; in c4iw_alloc_context()
174 rhp->mmid2ptr = calloc(attr.max_mr, sizeof(void *)); in c4iw_alloc_context()
175 if (!rhp->mmid2ptr) { in c4iw_alloc_context()
178 if (rhp->abi_version < 3) { in c4iw_alloc_context()
180 " than libcxgb4:: %d\n", rhp->abi_version); in c4iw_alloc_context()
181 rhp->max_qp = T4_QID_BASE + attr.max_qp; in c4iw_alloc_context()
183 rhp->max_qp = context->status_page->qp_start + in c4iw_alloc_context()
184 context->status_page->qp_size; in c4iw_alloc_context()
186 rhp->qpid2ptr = calloc(rhp->max_qp, sizeof(void *)); in c4iw_alloc_context()
187 if (!rhp->qpid2ptr) { in c4iw_alloc_context()
190 if (rhp->abi_version < 3) in c4iw_alloc_context()
191 rhp->max_cq = T4_QID_BASE + attr.max_cq; in c4iw_alloc_context()
193 rhp->max_cq = context->status_page->cq_start + in c4iw_alloc_context()
194 context->status_page->cq_size; in c4iw_alloc_context()
195 rhp->cqid2ptr = calloc(rhp->max_cq, sizeof(void *)); in c4iw_alloc_context()
196 if (!rhp->cqid2ptr) in c4iw_alloc_context()
204 if (t5_en_wc && !context->status_page->wc_supported) { in c4iw_alloc_context()
209 return &context->ibv_ctx; in c4iw_alloc_context()
212 munmap(context->status_page, context->status_page_size); in c4iw_alloc_context()
214 if (rhp->cqid2ptr) in c4iw_alloc_context()
215 free(rhp->cqid2ptr); in c4iw_alloc_context()
216 if (rhp->qpid2ptr) in c4iw_alloc_context()
217 free(rhp->qpid2ptr); in c4iw_alloc_context()
218 if (rhp->mmid2ptr) in c4iw_alloc_context()
219 free(rhp->mmid2ptr); in c4iw_alloc_context()
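
The c4iw_alloc_context() matches above size three id-to-pointer lookup tables (mmid2ptr, qpid2ptr, cqid2ptr) either from the queried device attributes plus T4_QID_BASE (ABI < 3) or from the mmapped status page (ABI >= 3), and release them on the error path. A minimal sketch of that sizing logic, using placeholder types and an illustrative T4_QID_BASE value rather than the provider's real definitions:

/*
 * Hedged sketch of the table-sizing logic above, not the provider's actual
 * code: simplified placeholder types and an illustrative T4_QID_BASE value.
 */
#include <stdio.h>
#include <stdlib.h>

#define T4_QID_BASE 1024		/* illustrative value only */

struct status_page_sketch {		/* stand-in for the mmapped status page */
	unsigned int qp_start, qp_size;
	unsigned int cq_start, cq_size;
};

/* ABI < 3: size the table from the queried attribute plus the QID base.
 * ABI >= 3: size it from the window advertised by the status page. */
static void **alloc_id_table(int abi_version, unsigned int attr_max,
			     unsigned int page_start, unsigned int page_size,
			     unsigned int *out_max)
{
	*out_max = abi_version < 3 ? T4_QID_BASE + attr_max
				   : page_start + page_size;
	return calloc(*out_max, sizeof(void *));
}

int main(void)
{
	struct status_page_sketch sp = { 32, 4096, 32, 4096 };
	unsigned int max_qp, max_cq;
	void **qpid2ptr = alloc_id_table(3, 0, sp.qp_start, sp.qp_size, &max_qp);
	void **cqid2ptr = alloc_id_table(3, 0, sp.cq_start, sp.cq_size, &max_cq);

	if (qpid2ptr && cqid2ptr)
		printf("max_qp=%u max_cq=%u\n", max_qp, max_cq);

	/* on any failure, each table must be freed exactly once */
	free(qpid2ptr);
	free(cqid2ptr);
	return 0;
}
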
228 if (context->status_page_size) in c4iw_free_context()
229 munmap(context->status_page, context->status_page_size); in c4iw_free_context()
249 chp->cq.cqid, chp->cq.queue, chp->cq.cidx, in dump_cq()
250 chp->cq.sw_queue, chp->cq.sw_cidx, chp->cq.sw_pidx, chp->cq.sw_in_use, in dump_cq()
251 … chp->cq.size, chp->cq.error, chp->cq.gen, chp->cq.cidx_inc, be64toh(chp->cq.bits_type_ts), in dump_cq()
252 t4_cq_notempty(&chp->cq)); in dump_cq()
254 for (i=0; i < chp->cq.size; i++) { in dump_cq()
255 u64 *p = (u64 *)(chp->cq.queue + i); in dump_cq()
258 if (i == chp->cq.cidx) in dump_cq()
259 fprintf(stderr, " <-- cidx\n"); in dump_cq()
286 qhp->wq.sq.qid, in dump_qp()
287 qhp->wq.error, in dump_qp()
288 qhp->wq.flushed, in dump_qp()
289 qhp->wq.qid_mask, in dump_qp()
290 qhp->wq.sq.qid, in dump_qp()
291 qhp->wq.sq.queue, in dump_qp()
292 qhp->wq.sq.sw_sq, in dump_qp()
293 qhp->wq.sq.cidx, in dump_qp()
294 qhp->wq.sq.pidx, in dump_qp()
295 qhp->wq.sq.in_use, in dump_qp()
296 qhp->wq.sq.wq_pidx, in dump_qp()
297 qhp->wq.sq.size, in dump_qp()
298 qhp->wq.sq.flags, in dump_qp()
299 qhp->wq.sq.flush_cidx, in dump_qp()
300 qhp->wq.rq.qid, in dump_qp()
301 qhp->wq.rq.queue, in dump_qp()
302 qhp->wq.rq.sw_rq, in dump_qp()
303 qhp->wq.rq.cidx, in dump_qp()
304 qhp->wq.rq.pidx, in dump_qp()
305 qhp->wq.rq.in_use, in dump_qp()
306 qhp->wq.rq.size); in dump_qp()
307 cidx = qhp->wq.sq.cidx; in dump_qp()
308 pidx = qhp->wq.sq.pidx; in dump_qp()
312 swsqe = &qhp->wq.sq.sw_sq[cidx]; in dump_qp()
315 "complete %u signaled %u cqe %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 "\n", in dump_qp()
317 swsqe->wr_id, in dump_qp()
318 swsqe->idx, in dump_qp()
319 swsqe->read_len, in dump_qp()
320 swsqe->opcode, in dump_qp()
321 swsqe->complete, in dump_qp()
322 swsqe->signaled, in dump_qp()
323 htobe64(((uint64_t *)&swsqe->cqe)[0]), in dump_qp()
324 htobe64(((uint64_t *)&swsqe->cqe)[1]), in dump_qp()
325 htobe64(((uint64_t *)&swsqe->cqe)[2]), in dump_qp()
326 htobe64(((uint64_t *)&swsqe->cqe)[3])); in dump_qp()
327 if (++cidx == qhp->wq.sq.size) in dump_qp()
332 p = (u64 *)qhp->wq.sq.queue; in dump_qp()
333 for (i=0; i < qhp->wq.sq.size * T4_SQ_NUM_SLOTS; i++) { in dump_qp()
337 if (j == 0 && i == qhp->wq.sq.wq_pidx) in dump_qp()
338 fprintf(stderr, " <-- pidx"); in dump_qp()
343 cidx = qhp->wq.rq.cidx; in dump_qp()
344 pidx = qhp->wq.rq.pidx; in dump_qp()
348 swrqe = &qhp->wq.rq.sw_rq[cidx]; in dump_qp()
351 swrqe->wr_id ); in dump_qp()
352 if (++cidx == qhp->wq.rq.size) in dump_qp()
357 p = (u64 *)qhp->wq.rq.queue; in dump_qp()
358 for (i=0; i < qhp->wq.rq.size * T4_RQ_NUM_SLOTS; i++) { in dump_qp()
362 if (j == 0 && i == qhp->wq.rq.pidx) in dump_qp()
363 fprintf(stderr, " <-- pidx"); in dump_qp()
364 if (j == 0 && i == qhp->wq.rq.cidx) in dump_qp()
365 fprintf(stderr, " <-- cidx"); in dump_qp()
379 //pthread_spin_lock(&dev->lock); in dump_state()
380 fprintf(stderr, "Device %s\n", dev->ibv_dev.name); in dump_state()
381 for (i=0; i < dev->max_cq; i++) { in dump_state()
382 if (dev->cqid2ptr[i]) { in dump_state()
383 struct c4iw_cq *chp = dev->cqid2ptr[i]; in dump_state()
384 //pthread_spin_lock(&chp->lock); in dump_state()
386 //pthread_spin_unlock(&chp->lock); in dump_state()
389 for (i=0; i < dev->max_qp; i++) { in dump_state()
390 if (dev->qpid2ptr[i]) { in dump_state()
391 struct c4iw_qp *qhp = dev->qpid2ptr[i]; in dump_state()
392 //pthread_spin_lock(&qhp->lock); in dump_state()
394 //pthread_spin_unlock(&qhp->lock); in dump_state()
397 //pthread_spin_unlock(&dev->lock); in dump_state()
406 * can know if the driver supports the kernel mode db ringing.
508 PDBG("libcxgb4: non-fatal firmware version mismatch. " in cxgb4_driver_init()
522 if (pthread_spin_init(&dev->lock, PTHREAD_PROCESS_PRIVATE)) in cxgb4_driver_init()
525 dev->ibv_dev.ops = &c4iw_dev_ops; in cxgb4_driver_init()
526 dev->chip_version = CHELSIO_CHIP_VERSION(hca_table[i].device >> 8); in cxgb4_driver_init()
527 dev->abi_version = abi_version; in cxgb4_driver_init()
558 return &dev->ibv_dev; in cxgb4_driver_init()
570 c4iw_page_mask = ~(c4iw_page_size - 1); in cxgb4_register_driver()
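
The cxgb4_register_driver() match above derives a page mask from the page size. A short worked example of that bit trick, assuming a 4 KiB page: ~(4096 - 1) == ~0xFFF == ...FFFFF000, so ANDing an address with the mask rounds it down to its page boundary.

/* Worked example of the page-mask computation above (illustrative only). */
#include <assert.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uintptr_t page_size = (uintptr_t)sysconf(_SC_PAGESIZE); /* e.g. 4096 */
	uintptr_t page_mask = ~(page_size - 1);
	uintptr_t addr = 0x12345678;

	/* addr & page_mask is addr rounded down to a page boundary */
	assert((addr & page_mask) % page_size == 0);
	return 0;
}
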
578 syslog(LOG_NOTICE, "cxgb4 stats - sends %lu recv %lu read %lu " in cs_fini()
579 "write %lu arm %lu cqe %lu mr %lu qp %lu cq %lu\n", in cs_fini()
581 c4iw_stats.write, c4iw_stats.arm, c4iw_stats.cqe, in cs_fini()