Lines Matching defs:ctx
62 int (*callback)(struct virtio_net **pdev, struct vhu_msg_context *ctx,
127 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
128 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
131 close_msg_fds(struct vhu_msg_context *ctx)
135 for (i = 0; i < ctx->fd_num; i++) {
136 int fd = ctx->fds[i];
141 ctx->fds[i] = -1;
151 validate_msg_fds(struct virtio_net *dev, struct vhu_msg_context *ctx, int expected_fds)
153 if (ctx->fd_num == expected_fds)
158 expected_fds, vhost_message_handlers[ctx->msg.request.frontend].description,
159 ctx->fd_num);
161 close_msg_fds(ctx);
327 struct vhu_msg_context *ctx __rte_unused,
335 struct vhu_msg_context *ctx __rte_unused,
352 struct vhu_msg_context *ctx,
360 ctx->msg.payload.u64 = features;
361 ctx->msg.size = sizeof(ctx->msg.payload.u64);
362 ctx->fd_num = 0;
372 struct vhu_msg_context *ctx,
380 ctx->msg.payload.u64 = (uint64_t)queue_num;
381 ctx->msg.size = sizeof(ctx->msg.payload.u64);
382 ctx->fd_num = 0;
392 struct vhu_msg_context *ctx,
396 uint64_t features = ctx->msg.payload.u64;
482 struct vhu_msg_context *ctx,
486 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
488 if (ctx->msg.payload.state.num > 32768) {
491 ctx->msg.payload.state.num);
495 vq->size = ctx->msg.payload.state.num;
976 struct vhu_msg_context *ctx,
981 struct vhost_vring_addr *addr = &ctx->msg.payload.addr;
988 vq = dev->virtqueue[ctx->msg.payload.addr.index];
1022 struct vhu_msg_context *ctx,
1026 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
1027 uint64_t val = ctx->msg.payload.state.num;
1045 vq->last_used_idx = ctx->msg.payload.state.num;
1046 vq->last_avail_idx = ctx->msg.payload.state.num;
1052 ctx->msg.payload.state.index, vq->last_used_idx, vq->last_avail_idx);
1236 struct vhu_msg_context *ctx)
1251 memory = &ctx->msg.payload.memory;
1258 ctx->fd_num = 0;
1259 send_vhost_reply(dev, main_fd, ctx);
1389 struct vhu_msg_context *ctx,
1393 struct VhostUserMemory *memory = &ctx->msg.payload.memory;
1400 if (validate_msg_fds(dev, ctx, memory->nregions) != 0)
1413 close_msg_fds(ctx);
1480 reg->fd = ctx->fds[i];
1486 ctx->fds[i] = -1;
1501 if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0)
1544 close_msg_fds(ctx);
1682 struct vhu_msg_context *ctx,
1693 if (ctx->msg.size != sizeof(ctx->msg.payload.inflight)) {
1696 ctx->msg.size);
1717 num_queues = ctx->msg.payload.inflight.num_queues;
1718 queue_size = ctx->msg.payload.inflight.queue_size;
1722 ctx->msg.payload.inflight.num_queues);
1725 ctx->msg.payload.inflight.queue_size);
1736 ctx->msg.payload.inflight.mmap_size = 0;
1752 dev->inflight_info->size = ctx->msg.payload.inflight.mmap_size = mmap_size;
1753 dev->inflight_info->fd = ctx->fds[0] = fd;
1754 ctx->msg.payload.inflight.mmap_offset = 0;
1755 ctx->fd_num = 1;
1771 ctx->msg.payload.inflight.mmap_size);
1774 ctx->msg.payload.inflight.mmap_offset);
1776 "send inflight fd: %d", ctx->fds[0]);
1783 struct vhu_msg_context *ctx,
1795 if (validate_msg_fds(dev, ctx, 1) != 0)
1798 fd = ctx->fds[0];
1799 if (ctx->msg.size != sizeof(ctx->msg.payload.inflight) || fd < 0) {
1802 ctx->msg.size, fd);
1806 mmap_size = ctx->msg.payload.inflight.mmap_size;
1807 mmap_offset = ctx->msg.payload.inflight.mmap_offset;
1808 num_queues = ctx->msg.payload.inflight.num_queues;
1809 queue_size = ctx->msg.payload.inflight.queue_size;
1893 struct vhu_msg_context *ctx,
1901 expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1902 if (validate_msg_fds(dev, ctx, expected_fds) != 0)
1905 file.index = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK;
1906 if (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1909 file.fd = ctx->fds[0];
1930 struct vhu_msg_context *ctx,
1936 expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1937 if (validate_msg_fds(dev, ctx, expected_fds) != 0)
1940 if (!(ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
1941 close(ctx->fds[0]);
2145 struct vhu_msg_context *ctx,
2153 expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
2154 if (validate_msg_fds(dev, ctx, expected_fds) != 0)
2157 file.index = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK;
2158 if (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)
2161 file.fd = ctx->fds[0];
2213 struct vhu_msg_context *ctx,
2217 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
2234 ctx->msg.payload.state.num = val;
2236 ctx->msg.payload.state.num = vq->last_avail_idx;
2241 ctx->msg.payload.state.index, ctx->msg.payload.state.num);
2273 ctx->msg.size = sizeof(ctx->msg.payload.state);
2274 ctx->fd_num = 0;
2292 struct vhu_msg_context *ctx,
2297 bool enable = !!ctx->msg.payload.state.num;
2298 int index = (int)ctx->msg.payload.state.index;
2322 struct vhu_msg_context *ctx,
2330 ctx->msg.payload.u64 = protocol_features;
2331 ctx->msg.size = sizeof(ctx->msg.payload.u64);
2332 ctx->fd_num = 0;
2339 struct vhu_msg_context *ctx,
2343 uint64_t protocol_features = ctx->msg.payload.u64;
2363 struct vhu_msg_context *ctx,
2367 int fd = ctx->fds[0];
2373 if (validate_msg_fds(dev, ctx, 1) != 0)
2381 if (ctx->msg.size != sizeof(VhostUserLog)) {
2384 ctx->msg.size, (int)sizeof(VhostUserLog));
2388 size = ctx->msg.payload.log.mmap_size;
2389 off = ctx->msg.payload.log.mmap_offset;
2449 ctx->msg.size = 0;
2450 ctx->fd_num = 0;
2455 close_msg_fds(ctx);
2460 struct vhu_msg_context *ctx,
2465 if (validate_msg_fds(dev, ctx, 1) != 0)
2468 close(ctx->fds[0]);
2484 struct vhu_msg_context *ctx,
2488 uint8_t *mac = (uint8_t *)&ctx->msg.payload.u64;
2513 struct vhu_msg_context *ctx,
2518 if (ctx->msg.payload.u64 < VIRTIO_MIN_MTU ||
2519 ctx->msg.payload.u64 > VIRTIO_MAX_MTU) {
2522 ctx->msg.payload.u64);
2527 dev->mtu = ctx->msg.payload.u64;
2534 struct vhu_msg_context *ctx,
2538 int fd = ctx->fds[0];
2540 if (validate_msg_fds(dev, ctx, 1) != 0)
2634 struct vhu_msg_context *ctx,
2641 if (validate_msg_fds(dev, ctx, 0) != 0)
2651 ctx->msg.payload.cfg.region,
2652 ctx->msg.payload.cfg.size);
2654 ctx->msg.size = 0;
2666 struct vhu_msg_context *ctx,
2673 if (validate_msg_fds(dev, ctx, 0) != 0)
2676 if (ctx->msg.payload.cfg.size > VHOST_USER_MAX_CONFIG_SIZE) {
2679 ctx->msg.payload.cfg.size, VHOST_USER_MAX_CONFIG_SIZE);
2690 ctx->msg.payload.cfg.region,
2691 ctx->msg.payload.cfg.offset,
2692 ctx->msg.payload.cfg.size,
2693 ctx->msg.payload.cfg.flags);
2708 struct vhu_msg_context *ctx,
2712 struct vhost_iotlb_msg *imsg = &ctx->msg.payload.iotlb;
2768 struct vhu_msg_context *ctx,
2793 ctx->fds[0] = dev->postcopy_ufd;
2794 ctx->fd_num = 1;
2799 ctx->fd_num = 0;
2807 struct vhu_msg_context *ctx __rte_unused,
2824 struct vhu_msg_context *ctx,
2835 ctx->msg.payload.u64 = 0;
2836 ctx->msg.size = sizeof(ctx->msg.payload.u64);
2837 ctx->fd_num = 0;
2844 struct vhu_msg_context *ctx,
2849 ctx->msg.payload.u64 = dev->status;
2850 ctx->msg.size = sizeof(ctx->msg.payload.u64);
2851 ctx->fd_num = 0;
2858 struct vhu_msg_context *ctx,
2864 if (ctx->msg.payload.u64 > UINT8_MAX) {
2867 ctx->msg.payload.u64);
2871 dev->status = ctx->msg.payload.u64;
2919 read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx)
2923 ret = read_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE,
2924 ctx->fds, VHOST_MEMORY_MAX_NREGIONS, &ctx->fd_num);
2934 if (ctx->msg.size) {
2935 if (ctx->msg.size > sizeof(ctx->msg.payload)) {
2937 ctx->msg.size);
2941 ret = read(sockfd, &ctx->msg.payload, ctx->msg.size);
2944 if (ret != (int)ctx->msg.size) {
2953 close_msg_fds(ctx);
2959 send_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx)
2961 if (!ctx)
2964 return send_fd_message(dev->ifname, sockfd, (char *)&ctx->msg,
2965 VHOST_USER_HDR_SIZE + ctx->msg.size, ctx->fds, ctx->fd_num);
2969 send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx)
2971 if (!ctx)
2974 ctx->msg.flags &= ~VHOST_USER_VERSION_MASK;
2975 ctx->msg.flags &= ~VHOST_USER_NEED_REPLY;
2976 ctx->msg.flags |= VHOST_USER_VERSION;
2977 ctx->msg.flags |= VHOST_USER_REPLY_MASK;
2979 return send_vhost_message(dev, sockfd, ctx);
2983 send_vhost_backend_message(struct virtio_net *dev, struct vhu_msg_context *ctx)
2985 return send_vhost_message(dev, dev->backend_req_fd, ctx);
2989 send_vhost_backend_message_process_reply(struct virtio_net *dev, struct vhu_msg_context *ctx)
2995 ret = send_vhost_backend_message(dev, ctx);
3012 if (msg_reply.msg.request.backend != ctx->msg.request.backend) {
3015 msg_reply.msg.request.backend, ctx->msg.request.backend);
3031 struct vhu_msg_context *ctx)
3035 switch (ctx->msg.request.frontend) {
3039 vring_idx = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK;
3045 vring_idx = ctx->msg.payload.state.index;
3048 vring_idx = ctx->msg.payload.addr.index;
3051 vring_idx = ctx->msg.payload.inflight.num_queues - 1;
3108 struct vhu_msg_context ctx;
3132 ctx.msg.request.frontend = VHOST_USER_NONE;
3133 ret = read_vhost_message(dev, fd, &ctx);
3139 request = ctx.msg.request.frontend;
3166 ret = vhost_user_check_and_alloc_queue_pair(dev, &ctx);
3189 msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
3192 send_vhost_reply(dev, fd, &ctx);
3207 if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0) {
3210 msg_result = msg_handler->callback(&dev, &ctx, fd);
3230 send_vhost_reply(dev, fd, &ctx);
3241 msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
3244 send_vhost_reply(dev, fd, &ctx);
3260 close_msg_fds(&ctx);
3269 if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
3270 ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR;
3271 ctx.msg.size = sizeof(ctx.msg.payload.u64);
3272 ctx.fd_num = 0;
3273 send_vhost_reply(dev, fd, &ctx);
3314 blk_call_fd = ctx.msg.payload.u64 & VHOST_USER_VRING_IDX_MASK;
3337 struct vhu_msg_context ctx = {
3341 .size = sizeof(ctx.msg.payload.iotlb),
3350 ret = send_vhost_message(dev, dev->backend_req_fd, &ctx);
3364 struct vhu_msg_context ctx = {
3379 ret = send_vhost_backend_message(dev, &ctx);
3381 ctx.msg.flags |= VHOST_USER_NEED_REPLY;
3382 ret = send_vhost_backend_message_process_reply(dev, &ctx);
3396 struct vhu_msg_context ctx = {
3400 .size = sizeof(ctx.msg.payload.area),
3410 ctx.msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
3412 ctx.fds[0] = fd;
3413 ctx.fd_num = 1;
3416 ret = send_vhost_backend_message_process_reply(dev, &ctx);
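
Two patterns recur across these matches. The first is the file-descriptor lifecycle tied to each message context: read_vhost_message() fills ctx->fds[] and ctx->fd_num from the socket's ancillary data, validate_msg_fds() rejects a request that arrived with an unexpected fd count, and close_msg_fds() closes and poisons every slot on error paths so descriptors cannot leak. The following stand-alone sketch models that flow; struct demo_msg_ctx and DEMO_MAX_FDS are simplified stand-ins for DPDK's struct vhu_msg_context and VHOST_MEMORY_MAX_NREGIONS, not the real definitions.

/* Stand-alone model of the ctx fd lifecycle (a sketch under assumed,
 * simplified types; struct demo_msg_ctx and DEMO_MAX_FDS are stand-ins
 * for struct vhu_msg_context and VHOST_MEMORY_MAX_NREGIONS). */
#include <stdio.h>
#include <unistd.h>

#define DEMO_MAX_FDS 8

struct demo_msg_ctx {
	int fds[DEMO_MAX_FDS];	/* fds received as SCM_RIGHTS ancillary data */
	int fd_num;		/* number of valid slots in fds[] */
};

/* Modeled on close_msg_fds(): close every attached fd, poison the slot,
 * and reset the count. */
static void
demo_close_msg_fds(struct demo_msg_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->fd_num; i++) {
		int fd = ctx->fds[i];

		if (fd == -1)
			continue;
		close(fd);
		ctx->fds[i] = -1;
	}
	ctx->fd_num = 0;
}

/* Modeled on validate_msg_fds(): a request carrying an unexpected number
 * of fds is rejected and its fds are closed immediately. */
static int
demo_validate_msg_fds(struct demo_msg_ctx *ctx, int expected_fds)
{
	if (ctx->fd_num == expected_fds)
		return 0;

	fprintf(stderr, "expected %d fds, received %d\n",
		expected_fds, ctx->fd_num);
	demo_close_msg_fds(ctx);
	return -1;
}

int
main(void)
{
	struct demo_msg_ctx ctx;
	int i;

	for (i = 0; i < DEMO_MAX_FDS; i++)
		ctx.fds[i] = -1;

	/* Pretend the peer attached one fd (stdin duplicated for the demo). */
	ctx.fds[0] = dup(0);
	ctx.fd_num = 1;

	/* A handler that accepts no fds (expected_fds == 0) must reject it. */
	if (demo_validate_msg_fds(&ctx, 0) != 0)
		printf("message rejected, remaining fd_num=%d\n", ctx.fd_num);
	return 0;
}

Handlers whose request may optionally carry an fd (kick, call, err) derive expected_fds from VHOST_USER_VRING_NOFD_MASK in payload.u64, which is why the matches above compute expected_fds as 0 or 1 before calling validate_msg_fds().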
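
The second pattern is the in-place reply: handlers such as the GET_FEATURES and GET_QUEUE_NUM ones overwrite ctx->msg.payload, set ctx->msg.size to the number of payload bytes actually used, clear ctx->fd_num, and send_vhost_reply() then normalizes the header flags before the same context is written back to the socket. The sketch below reproduces that flow with the same simplified stand-in types; the DEMO_* flag values are illustrative placeholders, not DPDK's VHOST_USER_* definitions.

/* Stand-alone model of the in-place reply pattern (a sketch under assumed,
 * simplified types; DEMO_* constants are placeholders for VHOST_USER_*). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_VERSION		0x1u		/* stand-in for VHOST_USER_VERSION */
#define DEMO_VERSION_MASK	0x3u		/* stand-in for VHOST_USER_VERSION_MASK */
#define DEMO_REPLY_MASK		(0x1u << 2)	/* stand-in for VHOST_USER_REPLY_MASK */
#define DEMO_NEED_REPLY		(0x1u << 3)	/* stand-in for VHOST_USER_NEED_REPLY */

struct demo_msg_ctx {
	struct {
		uint32_t flags;
		uint32_t size;		/* payload bytes following the header */
		union {
			uint64_t u64;
		} payload;
	} msg;
	int fd_num;
};

/* Handler side: write the answer into the context that carried the request,
 * as the GET_FEATURES-style handlers in the listing do. */
static void
demo_handle_get_u64(struct demo_msg_ctx *ctx, uint64_t value)
{
	ctx->msg.payload.u64 = value;
	ctx->msg.size = sizeof(ctx->msg.payload.u64);
	ctx->fd_num = 0;
}

/* Reply side: normalize the header flags before sending, mirroring the
 * flag fixups done in send_vhost_reply(). */
static void
demo_prepare_reply_flags(struct demo_msg_ctx *ctx)
{
	ctx->msg.flags &= ~DEMO_VERSION_MASK;
	ctx->msg.flags &= ~DEMO_NEED_REPLY;
	ctx->msg.flags |= DEMO_VERSION;
	ctx->msg.flags |= DEMO_REPLY_MASK;
}

int
main(void)
{
	struct demo_msg_ctx ctx;

	memset(&ctx, 0, sizeof(ctx));
	ctx.msg.flags = DEMO_VERSION | DEMO_NEED_REPLY;	/* as received */

	demo_handle_get_u64(&ctx, UINT64_C(0x1234));	/* arbitrary demo value */
	demo_prepare_reply_flags(&ctx);

	printf("reply: flags=0x%x size=%u payload=0x%llx fd_num=%d\n",
	       (unsigned)ctx.msg.flags, (unsigned)ctx.msg.size,
	       (unsigned long long)ctx.msg.payload.u64, ctx.fd_num);
	return 0;
}

The same convention covers the NEED_REPLY path at the bottom of the message loop, where payload.u64 carries the success/failure result, size is sizeof(u64), and fd_num is reset before send_vhost_reply() is called.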