/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017 NXP
 *
 */
#ifndef __DPAA_ETHDEV_H__
#define __DPAA_ETHDEV_H__

/* System headers */
#include <stdbool.h>
#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <of.h>
#include <netcfg.h>

#define DPAA_MBUF_HW_ANNOTATION		64
#define DPAA_FD_PTA_SIZE		64

#if (DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
#endif

/* mbuf->seqn is used to store the event entry index for driver-specific
 * usage. Parallel mode queues set the invalid index; atomic mode queues
 * use a valid value in the range 1 to 16.
 */
#define DPAA_INVALID_MBUF_SEQN		0

/* We re-use the HEADROOM for annotation in RX */
#define DPAA_HW_BUF_RESERVE		0
#define DPAA_PACKET_LAYOUT_ALIGN	64

/* Alignment to use for cpu-local structs to avoid coherency problems. */
#define MAX_CACHELINE			64

#define DPAA_MAX_RX_PKT_LEN		10240

#define DPAA_SGT_MAX_ENTRIES		16	/* maximum number of entries in SG Table */

/* RX queue tail drop threshold (CGR based), in frame count */
#define CGR_RX_PERFQ_THRESH		256

/* Max MAC filters for memac (8), including the primary MAC address */
#define DPAA_MAX_MAC_FILTER		(MEMAC_NUM_OF_PADDRS + 1)

/* Maximum number of slots available in the TX ring */
#define DPAA_TX_BURST_SIZE		7

/* Default (optimal) burst sizes for RX and TX */
#define DPAA_DEF_RX_BURST_SIZE		7
#define DPAA_DEF_TX_BURST_SIZE		DPAA_TX_BURST_SIZE

#ifndef VLAN_TAG_SIZE
#define VLAN_TAG_SIZE			4	/**< VLAN header length */
#endif

/* PCD frame queues */
#define DPAA_PCD_FQID_START		0x400
#define DPAA_PCD_FQID_MULTIPLIER	0x100
#define DPAA_DEFAULT_NUM_PCD_QUEUES	1
#define DPAA_MAX_NUM_PCD_QUEUES		4

#define DPAA_IF_TX_PRIORITY		3
#define DPAA_IF_RX_PRIORITY		0
#define DPAA_IF_DEBUG_PRIORITY		7

#define DPAA_IF_RX_ANNOTATION_STASH	1
#define DPAA_IF_RX_DATA_STASH		1
#define DPAA_IF_RX_CONTEXT_STASH	0

/* Each "debug" FQ is represented by one of these */
#define DPAA_DEBUG_FQ_RX_ERROR		0
#define DPAA_DEBUG_FQ_TX_ERROR		1

#define DPAA_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IP | \
	ETH_RSS_UDP | \
	ETH_RSS_TCP | \
	ETH_RSS_SCTP)

#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
	PKT_TX_IP_CKSUM | \
	PKT_TX_TCP_CKSUM | \
	PKT_TX_UDP_CKSUM)

/* DPAA Frame descriptor command macros */

#define DPAA_FD_CMD_FCO			0x80000000
/**< Frame queue Context Override */
#define DPAA_FD_CMD_RPD			0x40000000
/**< Read Prepended Data */
#define DPAA_FD_CMD_UPD			0x20000000
/**< Update Prepended Data */
#define DPAA_FD_CMD_DTC			0x10000000
/**< Do IP/TCP/UDP Checksum */
#define DPAA_FD_CMD_DCL4C		0x10000000
/**< Didn't calculate L4 Checksum */
#define DPAA_FD_CMD_CFQ			0x00ffffff
/**< Confirmation Frame Queue */
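
/*
 * Illustrative sketch (not part of the driver API): on the transmit path,
 * mbuf offload flags covered by DPAA_TX_CKSUM_OFFLOAD_MASK are typically
 * translated into the frame descriptor command bits above, e.g. requesting
 * hardware checksum generation with DPAA_FD_CMD_RPD and DPAA_FD_CMD_DTC.
 * The helper name and the fd->cmd access are assumptions made for this
 * example only:
 *
 *	static inline void
 *	example_fd_request_cksum(struct qm_fd *fd, const struct rte_mbuf *mbuf)
 *	{
 *		if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
 *			fd->cmd |= DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
 *	}
 */
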
/* Each network interface is represented by one of these */
struct dpaa_if {
	int valid;
	char *name;
	const struct fm_eth_port_cfg *cfg;
	struct qman_fq *rx_queues;
	struct qman_cgr *cgr_rx;
	struct qman_fq *tx_queues;
	struct qman_fq debug_queues[2];
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;
	uint32_t ifid;
	struct fman_if *fif;
	struct dpaa_bp_info *bp_info;
	struct rte_eth_fc_conf *fc_conf;
};

struct dpaa_if_stats {
	/* Rx Statistics Counters */
	uint64_t reoct;		/**< Rx Eth Octets Counter */
	uint64_t roct;		/**< Rx Octets Counter */
	uint64_t raln;		/**< Rx Alignment Error Counter */
	uint64_t rxpf;		/**< Rx valid Pause Frames */
	uint64_t rfrm;		/**< Rx Frame Counter */
	uint64_t rfcs;		/**< Rx frame check sequence errors */
	uint64_t rvlan;		/**< Rx VLAN Frame Counter */
	uint64_t rerr;		/**< Rx Frame errors */
	uint64_t ruca;		/**< Rx Unicast */
	uint64_t rmca;		/**< Rx Multicast */
	uint64_t rbca;		/**< Rx Broadcast */
	uint64_t rdrp;		/**< Rx Dropped Packets */
	uint64_t rpkt;		/**< Rx Packets */
	uint64_t rund;		/**< Rx undersized packets */
	uint32_t res_x[14];
	uint64_t rovr;		/**< Rx oversized but good */
	uint64_t rjbr;		/**< Rx oversized with bad csum */
	uint64_t rfrg;		/**< Rx fragmented Packets */
	uint64_t rcnp;		/**< Rx control packets (0x8808) */
	uint64_t rdrntp;	/**< Rx dropped due to FIFO overflow */
	uint32_t res01d0[12];
	/* Tx Statistics Counters */
	uint64_t teoct;		/**< Tx Eth Octets */
	uint64_t toct;		/**< Tx Octets */
	uint32_t res0210[2];
	uint64_t txpf;		/**< Tx valid Pause Frames */
	uint64_t tfrm;		/**< Tx Frame Counter */
	uint64_t tfcs;		/**< Tx FCS errors */
	uint64_t tvlan;		/**< Tx VLAN Frames */
	uint64_t terr;		/**< Tx Frame errors */
	uint64_t tuca;		/**< Tx Unicast */
	uint64_t tmca;		/**< Tx Multicast */
	uint64_t tbca;		/**< Tx Broadcast */
	uint32_t res0258[2];
	uint64_t tpkt;		/**< Tx Packets */
	uint64_t tund;		/**< Tx Undersized */
};

int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
		    struct qman_portal *qm __always_unused,
		    struct qman_fq *fq,
		    const struct qm_dqrr_entry *dqrr,
		    void **bufs);
enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
		  struct qman_portal *qm __always_unused,
		  struct qman_fq *fq,
		  const struct qm_dqrr_entry *dqrr,
		  void **bufs);

#endif /* __DPAA_ETHDEV_H__ */
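
/*
 * Usage sketch (illustrative only; the event queue id, channel id and error
 * handling below are assumptions, not part of this header): an eventdev Rx
 * adapter binds a DPAA Rx queue to a QMan channel with
 * dpaa_eth_eventq_attach(). The scheduling type in queue_conf->ev.sched_type
 * selects between the parallel and atomic dequeue callbacks declared above;
 * in the atomic case the entry index is tracked via mbuf->seqn (see
 * DPAA_INVALID_MBUF_SEQN).
 *
 *	struct rte_event_eth_rx_adapter_queue_conf conf = {
 *		.ev.queue_id = ev_queue_id,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	};
 *
 *	if (dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, &conf) < 0)
 *		handle_error();
 *	...
 *	dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
 */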