/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */
#include "cpfl_ethdev.h"

#include "cpfl_fxp_rule.h"
#include "cpfl_logs.h"

#define CTLQ_SEND_RETRIES 100
#define CTLQ_RECEIVE_RETRIES 100

int
cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
		   struct idpf_ctlq_msg q_msg[])
{
	struct idpf_ctlq_msg **msg_ptr_list;
	u16 clean_count = 0;
	int num_cleaned = 0;
	int retries = 0;
	int ret = 0;

	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
	if (!msg_ptr_list) {
		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
		ret = -ENOMEM;
		goto err;
	}

	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
	if (ret) {
		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
		goto send_err;
	}

	while (retries <= CTLQ_SEND_RETRIES) {
		clean_count = num_q_msg - num_cleaned;
		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
					       &msg_ptr_list[num_cleaned]);
		if (ret) {
			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
			goto send_err;
		}

		num_cleaned += clean_count;
		retries++;
		if (num_cleaned >= num_q_msg)
			break;
		rte_delay_us_sleep(10);
	}

	if (retries > CTLQ_SEND_RETRIES) {
		PMD_INIT_LOG(ERR, "timed out while polling for completions");
		ret = -1;
		goto send_err;
	}

send_err:
	free(msg_ptr_list);
err:
	return ret;
}

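/* Poll the receive control queue for responses to previously sent rule
 * configuration messages, check each completion status and give the
 * indirect DMA buffers back to the hardware receive queue.
 */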
int
cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
		      struct idpf_ctlq_msg q_msg[])
{
	int retries = 0;
	struct idpf_dma_mem *dma;
	u16 i;
	uint16_t buff_cnt;
	int ret = 0;

	retries = 0;
	while (retries <= CTLQ_RECEIVE_RETRIES) {
		rte_delay_us_sleep(10);
		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);

		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK && ret != CPFL_ERR_CTLQ_ERROR &&
		    ret != CPFL_ERR_CTLQ_EMPTY) {
			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x", ret);
			retries++;
			continue;
		}

		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
			retries++;
			continue;
		}

		if (ret == CPFL_ERR_CTLQ_EMPTY)
			break;

		/* TODO - process rx controlq message */
		for (i = 0; i < num_q_msg; i++) {
			ret = q_msg[i].status;
			if (ret != CPFL_CFG_PKT_ERR_OK &&
			    q_msg[i].opcode != cpfl_ctlq_sem_query_del_rule_hash_addr) {
				PMD_INIT_LOG(ERR, "Failed to process rx_ctrlq msg: %s",
					     cpfl_cfg_pkt_errormsg[ret]);
				return ret;
			}

			if (q_msg[i].data_len > 0)
				dma = q_msg[i].ctx.indirect.payload;
			else
				dma = NULL;

			buff_cnt = dma ? 1 : 0;
			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
			if (ret)
				PMD_INIT_LOG(WARNING, "could not post recv bufs");
		}
		break;
	}

	if (retries > CTLQ_RECEIVE_RETRIES) {
		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
		ret = -1;
	}

	return ret;
}

static int
cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
		   struct idpf_ctlq_msg *msg)
{
	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
	union cpfl_rule_cfg_pkt_record *blob = NULL;
	struct cpfl_rule_cfg_data cfg = {0};

	/* prepare rule blob */
	if (!dma->va) {
		PMD_INIT_LOG(ERR, "dma mem passed to %s is null", __func__);
		return -1;
	}
	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
	memset(blob, 0, sizeof(*blob));
	memset(&cfg, 0, sizeof(cfg));

	/* fill info for both query and add/update */
	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
				   minfo->pin_mod_content,
				   minfo->mod_index,
				   &cfg.ext.mod_content);

	/* only fill content for add/update */
	memcpy(blob->mod_blob, minfo->mod_content,
	       minfo->mod_content_byte_len);

#define NO_HOST_NEEDED 0
	/* pack message */
	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
				       rinfo->cookie,
				       0, /* vsi_id not used for mod */
				       rinfo->port_num,
				       NO_HOST_NEEDED,
				       0, /* time_sel */
				       0, /* time_sel_val */
				       0, /* cache_wr_thru */
				       rinfo->resp_req,
				       (u16)sizeof(*blob),
				       (void *)dma,
				       &cfg.common);
	cpfl_prep_rule_desc(&cfg, msg);
	return 0;
}

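/* Pack a SEM rule into a control queue message: build the rule blob in the
 * caller-provided DMA buffer and prepare the descriptor for either an add or
 * a delete operation, depending on the 'add' flag.
 */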
static int
cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
		       struct idpf_ctlq_msg *msg, bool add)
{
	union cpfl_rule_cfg_pkt_record *blob = NULL;
	enum cpfl_ctlq_rule_cfg_opc opc;
	struct cpfl_rule_cfg_data cfg = {0};
	uint16_t cfg_ctrl;

	if (!dma->va) {
		PMD_INIT_LOG(ERR, "dma mem passed to %s is null", __func__);
		return -1;
	}
	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
	memset(blob, 0, sizeof(*blob));
	memset(msg, 0, sizeof(*msg));

	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
							  rinfo->sem.sub_prof_id,
							  rinfo->sem.pin_to_cache,
							  rinfo->sem.fixed_fetch);
		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
					rinfo->act_bytes, rinfo->act_byte_len,
					cfg_ctrl, blob);
		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
	} else {
		PMD_INIT_LOG(ERR, "rule type %d is not supported", rinfo->type);
		return -1;
	}

	cpfl_fill_rule_cfg_data_common(opc,
				       rinfo->cookie,
				       rinfo->vsi,
				       rinfo->port_num,
				       rinfo->host_id,
				       0, /* time_sel */
				       0, /* time_sel_val */
				       0, /* cache_wr_thru */
				       rinfo->resp_req,
				       sizeof(union cpfl_rule_cfg_pkt_record),
				       dma,
				       &cfg.common);
	cpfl_prep_rule_desc(&cfg, msg);
	return 0;
}

static int
cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
	       struct idpf_ctlq_msg *msg, bool add)
{
	int ret = 0;

	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
			ret = -1;
	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
			ret = -1;
	} else {
		PMD_INIT_LOG(ERR, "Invalid type of rule");
		ret = -1;
	}

	return ret;
}

int
cpfl_rule_process(struct cpfl_itf *itf,
		  struct idpf_ctlq_info *tx_cq,
		  struct idpf_ctlq_info *rx_cq,
		  struct cpfl_rule_info *rinfo,
		  int rule_num,
		  bool add)
{
	struct idpf_hw *hw = &itf->adapter->base.hw;
	int i;
	int ret = 0;

	if (rule_num == 0)
		return 0;

	for (i = 0; i < rule_num; i++) {
		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
		if (ret) {
			PMD_INIT_LOG(ERR, "Could not pack rule");
			return ret;
		}
	}
	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to send control message");
		return ret;
	}
	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to update rule");
		return ret;
	}

	return 0;
}