Lines Matching defs:cqe (uses of struct mlx5_cqe64 in the mlx5en(4) receive path)

The matches fall into three groups: mlx5e_lro_update_hdr(), which patches the aggregated TCP/IP header after hardware LRO; mlx5e_build_rx_mbuf(), which copies CQE metadata into the mbuf packet header; and the receive completion-queue poll loop that drives both.

132 mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
148 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
153 tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;
170 if (get_cqe_lro_tcppsh(cqe))
175 th->th_ack = cqe->lro_ack_seq_num;
176 th->th_win = cqe->lro_tcp_win;
189 if (get_cqe_lro_timestamp_valid(cqe) &&
194 * cqe->timestamp is 64bit long.
198 ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
199 ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
203 ip4->ip_ttl = cqe->lro_min_ttl;
208 ip6->ip6_hlim = cqe->lro_min_ttl;
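
Read together, the mlx5e_lro_update_hdr() lines patch the headers of an LRO-aggregated packet so the stack sees one consistent TCP segment: total length and minimum TTL (or hop limit for IPv6) go into the IP header, while the PSH flag, ACK number, window, and the RFC 7323 timestamp option come from the CQE. Below is a minimal sketch of the IPv4 case, assuming the struct mlx5_cqe64 fields and get_cqe_lro_*() helpers named above; option-layout checks, IPv6 handling, and checksum recomputation are elided.

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <net/ethernet.h>
    #include <netinet/in.h>
    #include <netinet/in_systm.h>
    #include <netinet/ip.h>
    #include <netinet/tcp.h>

    /*
     * Sketch only, IPv4 case: patch an LRO-aggregated mbuf from the CQE.
     * struct mlx5_cqe64 and the get_cqe_lro_*() helpers come from the
     * driver headers; be32_to_cpu() is from the linuxkpi compat layer.
     */
    static void
    lro_update_hdr_sketch(struct mbuf *mb, struct mlx5_cqe64 *cqe)
    {
            struct ether_header *eh = mtod(mb, struct ether_header *);
            struct ip *ip4 = (struct ip *)(eh + 1);
            struct tcphdr *th = (struct tcphdr *)(ip4 + 1);
            uint32_t *ts_ptr = (uint32_t *)(th + 1);

            /* total IP length is the CQE byte count minus the L2 header */
            ip4->ip_len = htons(be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN);
            ip4->ip_ttl = cqe->lro_min_ttl; /* smallest TTL seen in the burst */

            /* PSH is reported if any aggregated segment carried it */
            if (get_cqe_lro_tcppsh(cqe))
                    th->th_flags |= TH_PUSH;

            /* ACK and window come from the last segment, already big-endian */
            th->th_ack = cqe->lro_ack_seq_num;
            th->th_win = cqe->lro_tcp_win;

            /* cqe->timestamp is 64 bits: bytes [0-3] hold the TSval word,
             * bytes [4-7] the TSecr word of the TCP timestamp option */
            if (get_cqe_lro_timestamp_valid(cqe)) {
                    ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
                    ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
            }
    }
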
280 mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
289 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
291 mlx5e_lro_update_hdr(mb, cqe);
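
In mlx5e_build_rx_mbuf(), the LRO segment count is packed into the top byte of the CQE's srqn field, and only multi-segment completions need the header fixup. A sketch, with the statistics counter names assumed:

    /* Sketch: srqn's top byte doubles as the LRO segment count */
    lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
    if (lro_num_seg > 1) {
            mlx5e_lro_update_hdr(mb, cqe);
            rq->stats.lro_packets++;        /* assumed counter names */
            rq->stats.lro_bytes += be32_to_cpu(cqe->byte_cnt);
    }
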
314 if (cqe->rss_hash_type != 0) {
315 mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
318 switch (cqe->rss_hash_type &
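
A non-zero rss_hash_type means the CQE carries a valid RSS hash, which is exported through m_pkthdr.flowid; the type bits are then mapped to an M_HASHTYPE_RSS_* constant so the stack knows which header fields were hashed. A sketch of the pattern, with the CQE_RSS_*_SKETCH names invented for illustration:

    /* Sketch: export the hardware RSS hash to the network stack.
     * CQE_RSS_TYPE_MASK_SKETCH and CQE_RSS_TCP_IPV4_SKETCH are
     * placeholders for the bit definitions in the driver headers. */
    if (cqe->rss_hash_type != 0) {
            mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
            switch (cqe->rss_hash_type & CQE_RSS_TYPE_MASK_SKETCH) {
            case CQE_RSS_TCP_IPV4_SKETCH:
                    M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4);
                    break;
            default:
                    /* valid hash of a type the stack has no name for */
                    M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
                    break;
            }
    } else {
            /* no hardware hash: fall back to the queue index (rq->ix
             * assumed) so a queue's packets stay on one CPU */
            mb->m_pkthdr.flowid = rq->ix;
            M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
    }
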
348 if (cqe_is_tunneled(cqe))
358 if (cqe_is_tunneled(cqe)) {
364 if (((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK)) ==
372 if (likely((cqe->hds_ip_ext & CQE_L4_OK) == CQE_L4_OK)) {
381 ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
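
The hds_ip_ext tests decide how much checksum work the stack may skip; the two cqe_is_tunneled() branches above exist because tunneled packets are validated in two layers (outer L2/L3, then inner L3/L4). A sketch of the plain, non-tunneled case using the standard FreeBSD csum_flags convention:

    /* Sketch: translate CQE checksum status bits into mbuf flags */
    if ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
        (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) {
            mb->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
                CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            mb->m_pkthdr.csum_data = htons(0xffff);
    } else {
            /* leave csum_flags clear; the stack verifies in software */
            mb->m_pkthdr.csum_flags = 0;
    }

Setting csum_data to 0xffff alongside CSUM_DATA_VALID | CSUM_PSEUDO_HDR is the conventional way for a FreeBSD driver to say the L4 checksum has already been fully verified.
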
391 if (cqe_has_vlan(cqe)) {
392 mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
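
VLAN stripping follows the usual FreeBSD pattern: the tag removed by the NIC is stored in the packet header, and M_VLANTAG signals that it is no longer in the frame:

    /* Sketch: record a hardware-stripped VLAN tag */
    if (cqe_has_vlan(cqe)) {
            mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
            mb->m_flags |= M_VLANTAG;
    }
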
398 tstmp = mlx5e_mbuf_tstmp(c->priv, be64_to_cpu(cqe->timestamp));
402 * instead of the cqe generation.
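
The raw CQE timestamp is a hardware counter value; mlx5e_mbuf_tstmp() converts it to a usable time, and the quoted comment fragment indicates the current time is substituted when that conversion cannot be trusted (for example across a clock generation change). A sketch of the success path, assuming the stack's rcv_tstmp/M_TSTMP convention:

    /* Sketch: attach the converted hardware timestamp to the mbuf */
    uint64_t tstmp;

    tstmp = mlx5e_mbuf_tstmp(c->priv, be64_to_cpu(cqe->timestamp));
    if (tstmp != 0) {
            mb->m_pkthdr.rcv_tstmp = tstmp;
            mb->m_flags |= M_TSTMP;         /* rcv_tstmp is valid */
    }
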
412 switch (get_cqe_tls_offload(cqe)) {
425 mlx5e_accel_ipsec_handle_rx(mb, cqe, mr);
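
The tail of mlx5e_build_rx_mbuf() dispatches inline crypto offload results: a switch on get_cqe_tls_offload() marks records the NIC already decrypted for kTLS, and mlx5e_accel_ipsec_handle_rx() attaches IPsec metadata to the mbuf. A sketch, assuming the CQE_TLS_OFFLOAD_DECRYPTED status value and the CSUM_TLS_DECRYPTED mbuf flag used by the FreeBSD kTLS receive path:

    /* Sketch: mark NIC-decrypted TLS records so kTLS skips them;
     * constant names assumed per the FreeBSD kTLS RX convention */
    switch (get_cqe_tls_offload(cqe)) {
    case CQE_TLS_OFFLOAD_DECRYPTED:
            mb->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
            break;
    default:
            break;
    }

    /* IPsec: attach any security-association metadata to the mbuf */
    mlx5e_accel_ipsec_handle_rx(mb, cqe, mr);
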
511 struct mlx5_cqe64 *cqe;
517 cqe = mlx5e_get_cqe(&rq->cq);
518 if (!cqe)
521 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
526 wqe_counter_be = cqe->wqe_counter;
529 byte_cnt = be32_to_cpu(cqe->byte_cnt);
535 if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
536 mlx5e_dump_err_cqe(&rq->cq, rq->rqn, (const void *)cqe);
571 if (!mlx5e_accel_ipsec_flow(cqe) /* tag is already assigned
590 mlx5e_build_rx_mbuf(cqe, rq, mb, &rq->mbuf[wqe_counter],
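
The last group is the loop that drains the receive completion queue. A skeleton of one pass, using only the helpers visible above; CQE decompression, budget accounting, and WQE recycling are elided:

    /* Sketch: drain the RX completion queue one CQE at a time */
    for (;;) {
            struct mlx5_cqe64 *cqe;
            uint32_t byte_cnt;
            uint16_t wqe_counter;

            cqe = mlx5e_get_cqe(&rq->cq);
            if (cqe == NULL)
                    break;                  /* queue is empty */

            if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
                    /* a compressed entry stands for a whole batch of
                     * completions; expanding it is elided in this sketch */
            }

            wqe_counter = be16_to_cpu(cqe->wqe_counter);    /* WQE index */
            byte_cnt = be32_to_cpu(cqe->byte_cnt);          /* bytes received */

            /* anything but a normal receive completion is an error */
            if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                    mlx5e_dump_err_cqe(&rq->cq, rq->rqn, (const void *)cqe);
                    continue;
            }

            /* fill the mbuf from the CQE and pass it up the stack, e.g.
             * mlx5e_build_rx_mbuf(cqe, rq, mb, &rq->mbuf[wqe_counter], ...);
             * mbuf allocation and hand-off are elided */
    }
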