/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_ip.h>
#include <rte_tcp.h>

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_tx.h"
#include "sfc_ev.h"

/** Standard TSO header buffer length */
#define SFC_TSOH_STD_LEN	256

/** The number of TSO option descriptors that precede the packet descriptors */
#define SFC_TSO_OPDESCS_IDX_SHIFT	2

int
sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			    unsigned int txq_entries, unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
						    SFC_TSOH_STD_LEN,
						    RTE_CACHE_LINE_SIZE,
						    socket_id);
		if (sw_ring[i].tsoh == NULL)
			goto fail_alloc_tsoh_objs;
	}

	return 0;

fail_alloc_tsoh_objs:
	/* Roll back the buffers allocated before the failure */
	while (i > 0)
		rte_free(sw_ring[--i].tsoh);

	return ENOMEM;
}

void
sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			   unsigned int txq_entries)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		rte_free(sw_ring[i].tsoh);
		sw_ring[i].tsoh = NULL;
	}
}

static void
sfc_efx_tso_prepare_header(struct sfc_efx_txq *txq, struct rte_mbuf **in_seg,
			   size_t *in_off, unsigned int idx, size_t bytes_left)
{
	struct rte_mbuf *m = *in_seg;
	size_t bytes_to_copy = 0;
	uint8_t *tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;

	/* Gather the header pieces from the mbuf chain into the TSO buffer */
	do {
		bytes_to_copy = MIN(bytes_left, m->data_len);

		rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
			   bytes_to_copy);

		bytes_left -= bytes_to_copy;
		tsoh += bytes_to_copy;

		if (bytes_left > 0) {
			m = m->next;
			SFC_ASSERT(m != NULL);
		}
	} while (bytes_left > 0);

	/*
	 * Report where the payload begins: either at the start of the next
	 * segment (the current one has been consumed entirely) or at an
	 * offset within the current segment.
	 */
	if (bytes_to_copy == m->data_len) {
		*in_seg = m->next;
		*in_off = 0;
	} else {
		*in_seg = m;
		*in_off = bytes_to_copy;
	}
}

int
sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
	       struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
	       unsigned int *pkt_descs, size_t *pkt_len)
{
	uint8_t *tsoh;
	const struct tcp_hdr *th;
	efsys_dma_addr_t header_paddr;
	uint16_t packet_id;
	uint32_t sent_seq;
	struct rte_mbuf *m = *in_seg;
	size_t nh_off = m->l2_len; /* IP header offset */
	size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
	size_t header_len = m->l2_len + m->l3_len + m->l4_len;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);

	idx += SFC_TSO_OPDESCS_IDX_SHIFT;

	/* Packets with headers that are too big must be discarded */
	if (unlikely(header_len > SFC_TSOH_STD_LEN))
		return EMSGSIZE;

	/*
	 * The TCP header must start at most 208 bytes into the frame.
	 * If it starts later than that, the NIC will not recognise the
	 * packet as TCP and the TSO edits will not be applied.
	 */
	if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
		return EMSGSIZE;

	header_paddr = rte_pktmbuf_iova(m);

	/*
	 * Sometimes the headers may be split across multiple mbufs. In such
	 * cases we need to glue those pieces together and store them in some
	 * temporary place. In addition, packet headers must be contiguous in
	 * memory so that they can be referred to with a single DMA descriptor.
	 * EF10 imposes no restrictions on DMA descriptor data crossing
	 * address boundaries.
	 */
	if (m->data_len < header_len) {
		sfc_efx_tso_prepare_header(txq, in_seg, in_off, idx,
					   header_len);
		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;

		header_paddr = rte_malloc_virt2iova((void *)tsoh);
	} else {
		if (m->data_len == header_len) {
			*in_off = 0;
			*in_seg = m->next;
		} else {
			*in_off = header_len;
		}

		tsoh = rte_pktmbuf_mtod(m, uint8_t *);
	}

	/*
	 * Handle the IP header. For IPv4 the IP ID is extracted so that the
	 * NIC can increment it for each generated segment; IPv6 has no such
	 * field, so zero is used. rte_memcpy() copes with the header fields
	 * being potentially unaligned.
	 */
	if (m->ol_flags & PKT_TX_IPV4) {
		const struct ipv4_hdr *iphe4;

		iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
		rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
		packet_id = rte_be_to_cpu_16(packet_id);
	} else if (m->ol_flags & PKT_TX_IPV6) {
		packet_id = 0;
	} else {
		return EINVAL;
	}

	/* Handle the TCP header: extract the initial sequence number */
	th = (const struct tcp_hdr *)(tsoh + tcph_off);

	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
	sent_seq = rte_be_to_cpu_32(sent_seq);

	/* Post FATSOv2 option descriptors ahead of the packet descriptors */
	efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz,
				 *pend, EFX_TX_FATSOV2_OPT_NDESCS);

	*pend += EFX_TX_FATSOV2_OPT_NDESCS;
	*pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;

	/* Post a separate DMA descriptor covering the whole header */
	efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
				B_FALSE, (*pend)++);
	(*pkt_descs)++;
	*pkt_len -= header_len;

	return 0;
}
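
/*
 * Illustrative sketch (an addition, not part of the driver sources): roughly
 * how a transmit loop might drive sfc_efx_tso_do() before posting the payload
 * DMA descriptors. The surrounding variables ("added", "m_seg", "in_off",
 * "pend", "pkt_descs", "pkt_len", "pkt") are assumptions mirroring the
 * parameters above; the fragment is compiled out and kept for reference only.
 */
#if 0
	if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
		/* l2_len, l3_len and l4_len must be set by the application */
		if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
				   &pkt_descs, &pkt_len) != 0) {
			/*
			 * A positive errno (EMSGSIZE, EINVAL) means the
			 * headers cannot be handled; drop the packet.
			 */
			rte_pktmbuf_free(pkt);
			continue;
		}
	}
	/* ... then post DMA descriptors for the remaining payload ... */
#endif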