Lines Matching +full:pd +full:- +full:disable

In mlx4_en_fill_qp_context() (lines 45-80):

void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn,
			     int user_prio, struct mlx4_qp_context *context)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	if_t dev = priv->dev;

	memset(context, 0, sizeof(*context));
	context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
	context->pd = cpu_to_be32(mdev->priv_pdn);
	context->mtu_msgmax = 0xff;
	if (!is_tx && !rss)
		context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
	if (is_tx)
		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
	else
		context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
	context->local_qpn = cpu_to_be32(qpn);
	context->pri_path.ackto = 1 & 0x07;
	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
	if (user_prio >= 0) {
		context->pri_path.sched_queue |= user_prio << 3;
		context->pri_path.feup = 1 << 6;
	}
	context->pri_path.counter_index = (u8)(priv->counter_index);
	if (!rss &&
	    (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
	    context->pri_path.counter_index != 0xFF) {
		/* disable multicast loopback to QPs with the same counter */
		context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
		context->pri_path.vlan_control |=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
	}
	context->cqn_send = cpu_to_be32(cqn);
	context->cqn_recv = cpu_to_be32(cqn);
	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
	/* in the driver this assignment is guarded by a check of dev's
	 * hardware VLAN offload settings */
	context->param3 |= cpu_to_be32(1 << 30);
}
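
The filled-in mlx4_qp_context is what the driver subsequently hands to the firmware to bring the QP up. As a rough sketch of how the function above is typically invoked for a plain receive QP, assuming priv, ring, qpn and cqn describe an already-allocated RX ring and its completion queue (these caller-side names are illustrative, not taken from the matched lines):

	struct mlx4_qp_context context;

	/* receive QP: is_tx = 0, rss = 0, no user priority (-1) */
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride,
				0, 0, qpn, ring->cqn, -1, &context);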

In mlx4_en_map_buffer() (lines 89-104):

int mlx4_en_map_buffer(struct mlx4_buf *buf)
{
	struct page **pages;
	int i;

	/* if nbufs == 1 there is no need to vmap; a non-NULL buf->direct.buf
	 * means the vmap was already done by mlx4_buf_alloc() */
	if (buf->direct.buf != NULL || buf->nbufs == 1)
		return 0;

	pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	for (i = 0; i < buf->nbufs; ++i)
		pages[i] = virt_to_page(buf->page_list[i].buf);
	buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!buf->direct.buf)
		return -ENOMEM;
	return 0;
}

In mlx4_en_unmap_buffer() (lines 111-114):

void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return;
	vunmap(buf->direct.buf);
}
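
For reference, a minimal sketch of how the driver's ring setup and teardown paths pair these two helpers, assuming ring->wqres holds the ring's allocated work-queue resources and err is a local status variable (both names are illustrative, not taken from the matched lines):

	/* setup: make the possibly multi-chunk WQ buffer virtually contiguous */
	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err)
		return err;
	ring->buf = ring->wqres.buf.direct.buf;

	/* ... descriptors are then written through ring->buf ... */

	/* teardown: release the vmap created above (no-op if none was made) */
	mlx4_en_unmap_buffer(&ring->wqres.buf);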