/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 *
 */
#ifndef __DPAA_ETHDEV_H__
#define __DPAA_ETHDEV_H__

/* System headers */
#include <stdbool.h>
#include <ethdev_driver.h>
#include <rte_compat.h>
#include <rte_event_eth_rx_adapter.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <dpaa_of.h>
#include <netcfg.h>

#define MAX_DPAA_CORES			4
#define DPAA_MBUF_HW_ANNOTATION		64
#define DPAA_FD_PTA_SIZE		64

/* we will re-use the HEADROOM for annotation in RX */
#define DPAA_HW_BUF_RESERVE		0
#define DPAA_PACKET_LAYOUT_ALIGN	64

/* Alignment to use for cpu-local structs to avoid coherency problems. */
#define MAX_CACHELINE			64

#define DPAA_MAX_RX_PKT_LEN		10240

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */

/* Maximum SG segments supported on all cores */
#define DPAA_MAX_SGS			128
/* SG pool size */
#define DPAA_POOL_SIZE			2048
/* SG pool cache size */
#define DPAA_POOL_CACHE_SIZE		256

/* RX queue tail drop threshold (CGR Based) in frame count */
#define CGR_RX_PERFQ_THRESH		256
#define CGR_TX_CGR_THRESH		512

/* max mac filter for memac(8), including primary mac addr */
#define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1)

/* Maximum number of slots available in TX ring */
#define DPAA_TX_BURST_SIZE		7

/* Optimal burst size for RX and TX as default */
#define DPAA_DEF_RX_BURST_SIZE		7
#define DPAA_DEF_TX_BURST_SIZE		DPAA_TX_BURST_SIZE

#ifndef VLAN_TAG_SIZE
#define VLAN_TAG_SIZE	4 /**< VLAN Header Length */
#endif

#define DPAA_ETH_MAX_LEN (RTE_ETHER_MTU + \
			  RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
			  VLAN_TAG_SIZE)

/* PCD frame queues */
#define DPAA_DEFAULT_NUM_PCD_QUEUES	1
#define DPAA_VSP_PROFILE_MAX_NUM	8
#define DPAA_MAX_NUM_PCD_QUEUES	DPAA_VSP_PROFILE_MAX_NUM
/* Same as VSP profile number */

#define DPAA_IF_TX_PRIORITY		3
#define DPAA_IF_RX_PRIORITY		0
#define DPAA_IF_DEBUG_PRIORITY		7

#define DPAA_IF_RX_ANNOTATION_STASH	1
#define DPAA_IF_RX_DATA_STASH		1
#define DPAA_IF_RX_CONTEXT_STASH	0

/* Each "debug" FQ is represented by one of these */
#define DPAA_DEBUG_FQ_RX_ERROR		0
#define DPAA_DEBUG_FQ_TX_ERROR		1

#define DPAA_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_L2_PAYLOAD | \
	RTE_ETH_RSS_IP | \
	RTE_ETH_RSS_UDP | \
	RTE_ETH_RSS_TCP | \
	RTE_ETH_RSS_SCTP)

#define DPAA_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
				    RTE_MBUF_F_TX_TCP_CKSUM | \
				    RTE_MBUF_F_TX_UDP_CKSUM)

/* DPAA Frame descriptor macros */

#define DPAA_FD_CMD_FCO			0x80000000
/**< Frame queue Context Override */
#define DPAA_FD_CMD_RPD			0x40000000
/**< Read Prepended Data */
#define DPAA_FD_CMD_UPD			0x20000000
/**< Update Prepended Data */
#define DPAA_FD_CMD_DTC			0x10000000
/**< Do IP/TCP/UDP Checksum */
#define DPAA_FD_CMD_DCL4C		0x10000000
/**< Didn't calculate L4 Checksum */
#define DPAA_FD_CMD_CFQ			0x00ffffff
/**< Confirmation Frame Queue */

#define DPAA_DEFAULT_RXQ_VSP_ID		1

#define FMC_FILE "/tmp/fmc.bin"

extern struct rte_mempool *dpaa_tx_sg_pool;
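/*
 * Usage sketch (illustrative only, not part of the driver API): a TX path
 * typically tests an mbuf's offload flags against DPAA_TX_CKSUM_OFFLOAD_MASK
 * (defined above) to decide whether the hardware checksum path is required.
 * The helper name below is hypothetical:
 *
 *	static inline bool
 *	dpaa_mbuf_needs_tx_cksum(const struct rte_mbuf *m)
 *	{
 *		return (m->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) != 0;
 *	}
 */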
/* Structure to free external and indirect
 * buffers.
 */
struct dpaa_sw_buf_free {
	/* To which packet this segment belongs */
	uint16_t pkt_id;
	/* The actual segment */
	struct rte_mbuf *seg;
};

/* Each network interface is represented by one of these */
struct dpaa_if {
	int valid;
	char *name;
	const struct fm_eth_port_cfg *cfg;
	struct qman_fq *rx_queues;
	struct qman_cgr *cgr_rx;
	struct qman_fq *tx_queues;
	struct qman_cgr *cgr_tx;
	struct qman_fq debug_queues[2];
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;
	uint32_t ifid;
	struct dpaa_bp_info *bp_info;
	struct rte_eth_fc_conf *fc_conf;
	void *port_handle;
	void *netenv_handle;
	void *scheme_handle[2];
	uint32_t scheme_count;

	void *vsp_handle[DPAA_VSP_PROFILE_MAX_NUM];
	uint32_t vsp_bpid[DPAA_VSP_PROFILE_MAX_NUM];
};

struct dpaa_if_stats {
	/* Rx Statistics Counter */
	uint64_t reoct;		/**< Rx Eth Octets Counter */
	uint64_t roct;		/**< Rx Octet Counters */
	uint64_t raln;		/**< Rx Alignment Error Counter */
	uint64_t rxpf;		/**< Rx valid Pause Frame */
	uint64_t rfrm;		/**< Rx Frame counter */
	uint64_t rfcs;		/**< Rx frame check seq error */
	uint64_t rvlan;		/**< Rx Vlan Frame Counter */
	uint64_t rerr;		/**< Rx Frame error */
	uint64_t ruca;		/**< Rx Unicast */
	uint64_t rmca;		/**< Rx Multicast */
	uint64_t rbca;		/**< Rx Broadcast */
	uint64_t rdrp;		/**< Rx Dropped Packet */
	uint64_t rpkt;		/**< Rx packet */
	uint64_t rund;		/**< Rx undersized packets */
	uint32_t res_x[14];
	uint64_t rovr;		/**< Rx oversized but good */
	uint64_t rjbr;		/**< Rx oversized with bad csum */
	uint64_t rfrg;		/**< Rx fragment Packet */
	uint64_t rcnp;		/**< Rx control packets (0x8808) */
	uint64_t rdrntp;	/**< Rx dropped due to FIFO overflow */
	uint32_t res01d0[12];
	/* Tx Statistics Counter */
	uint64_t teoct;		/**< Tx eth octets */
	uint64_t toct;		/**< Tx Octets */
	uint32_t res0210[2];
	uint64_t txpf;		/**< Tx valid pause frame */
	uint64_t tfrm;		/**< Tx frame counter */
	uint64_t tfcs;		/**< Tx FCS error */
	uint64_t tvlan;		/**< Tx Vlan Frame */
	uint64_t terr;		/**< Tx frame error */
	uint64_t tuca;		/**< Tx Unicast */
	uint64_t tmca;		/**< Tx Multicast */
	uint64_t tbca;		/**< Tx Broadcast */
	uint32_t res0258[2];
	uint64_t tpkt;		/**< Tx Packet */
	uint64_t tund;		/**< Tx Undersized */
};

__rte_internal
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

__rte_internal
int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
		    struct qman_portal *qm __always_unused,
		    struct qman_fq *fq,
		    const struct qm_dqrr_entry *dqrr,
		    void **bufs);
enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
		  struct qman_portal *qm __always_unused,
		  struct qman_fq *fq,
		  const struct qm_dqrr_entry *dqrr,
		  void **bufs);
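/*
 * Usage sketch (illustrative only): dpaa_eth_eventq_attach()/detach() are the
 * internal hooks invoked when an eventdev Rx adapter binds or unbinds a DPAA
 * Rx queue; the scheduling type in the queue configuration is what normally
 * selects the atomic or parallel DQRR callback declared above.  Variable
 * names (eth_dev, rx_qid, ch_id, ev_qid) are hypothetical:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {0};
 *
 *	qconf.ev.queue_id = ev_qid;
 *	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ret = dpaa_eth_eventq_attach(eth_dev, rx_qid, ch_id, &qconf);
 *	...
 *	ret = dpaa_eth_eventq_detach(eth_dev, rx_qid);
 */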
/* PMD related logs */
extern int dpaa_logtype_pmd;
#define RTE_LOGTYPE_DPAA_PMD dpaa_logtype_pmd

#define DPAA_PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, dpaa_logtype_pmd, "%s(): " fmt "\n", \
		__func__, ##args)

#define PMD_INIT_FUNC_TRACE() DPAA_PMD_LOG(DEBUG, " >>")

#define DPAA_PMD_DEBUG(fmt, args...) \
	DPAA_PMD_LOG(DEBUG, fmt, ## args)
#define DPAA_PMD_ERR(fmt, args...) \
	DPAA_PMD_LOG(ERR, fmt, ## args)
#define DPAA_PMD_INFO(fmt, args...) \
	DPAA_PMD_LOG(INFO, fmt, ## args)
#define DPAA_PMD_WARN(fmt, args...) \
	DPAA_PMD_LOG(WARNING, fmt, ## args)

/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA_DP_LOG(level, fmt, args...) \
	RTE_LOG_DP(level, DPAA_PMD, fmt, ## args)

#endif