/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>

#include "bnxt.h"
#include "tf_core.h"
#include "tf_session.h"
#include "tfp.h"

#include "tf_msg_common.h"
#include "tf_msg.h"
#include "hsi_struct_def_dpdk.h"
#include "hwrm_tf.h"

/**
 * Endian converts min and max values from the HW response to the query
 */
#define TF_HW_RESP_TO_QUERY(query, index, response, element) do {            \
	(query)->hw_query[index].min =                                       \
		tfp_le_to_cpu_16(response. element ## _min);                 \
	(query)->hw_query[index].max =                                       \
		tfp_le_to_cpu_16(response. element ## _max);                 \
} while (0)
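
/*
 * For illustration, an invocation such as
 *   TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp, mirrors);
 * expands to
 *   (query)->hw_query[TF_RESC_TYPE_HW_MIRROR].min =
 *           tfp_le_to_cpu_16(resp.mirrors_min);
 *   (query)->hw_query[TF_RESC_TYPE_HW_MIRROR].max =
 *           tfp_le_to_cpu_16(resp.mirrors_max);
 */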

/**
 * Endian converts the number of entries from the alloc to the request
 */
#define TF_HW_ALLOC_TO_REQ(alloc, index, request, element)                   \
	(request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))

/**
 * Endian converts the start and stride value from the free to the request
 */
#define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do {            \
	request.element ## _start =                                          \
		tfp_cpu_to_le_16(hw_entry[index].start);                     \
	request.element ## _stride =                                         \
		tfp_cpu_to_le_16(hw_entry[index].stride);                    \
} while (0)
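
/*
 * For illustration, TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR,
 * req, upar) expands to
 *   req.upar_start =
 *           tfp_cpu_to_le_16(hw_entry[TF_RESC_TYPE_HW_UPAR].start);
 *   req.upar_stride =
 *           tfp_cpu_to_le_16(hw_entry[TF_RESC_TYPE_HW_UPAR].stride);
 */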

/**
 * Endian converts the start and stride from the HW response to the
 * alloc
 */
#define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do {         \
	hw_entry[index].start =                                              \
		tfp_le_to_cpu_16(response.element ## _start);                \
	hw_entry[index].stride =                                             \
		tfp_le_to_cpu_16(response.element ## _stride);               \
} while (0)

/**
 * Endian converts min and max values from the SRAM response to the
 * query
 */
#define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do {          \
	(query)->sram_query[index].min =                                     \
		tfp_le_to_cpu_16(response.element ## _min);                  \
	(query)->sram_query[index].max =                                     \
		tfp_le_to_cpu_16(response.element ## _max);                  \
} while (0)

/**
 * Endian converts the number of entries from the action (alloc) to
 * the request
 */
#define TF_SRAM_ALLOC_TO_REQ(action, index, request, element)                \
	(request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))

/**
 * Endian converts the start and stride value from the free to the request
 */
#define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do {        \
	request.element ## _start =                                          \
		tfp_cpu_to_le_16(sram_entry[index].start);                   \
	request.element ## _stride =                                         \
		tfp_cpu_to_le_16(sram_entry[index].stride);                  \
} while (0)

/**
 * Endian converts the start and stride from the HW response to the
 * alloc
 */
#define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do {     \
	sram_entry[index].start =                                            \
		tfp_le_to_cpu_16(response.element ## _start);                \
	sram_entry[index].stride =                                           \
		tfp_le_to_cpu_16(response.element ## _stride);               \
} while (0)

/**
 * Maximum size of data, in bytes, that can be transported inline in a
 * regular HWRM message
 */
#define TF_PCI_BUF_SIZE_MAX 88

/**
 * DMA buffer descriptor, used when the data is bigger than
 * TF_PCI_BUF_SIZE_MAX and must be passed to firmware by DMA
 */
struct tf_msg_dma_buf {
	void *va_addr;
	uint64_t pa_addr;
};
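
/*
 * Typical usage, as in tf_msg_tcam_entry_set() below: allocate the
 * buffer with tf_msg_get_dma_buf(), copy the payload to va_addr, pass
 * pa_addr to firmware in the request so it can DMA the data, then
 * release va_addr with tfp_free() once the message completes.
 */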

static int
tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
		   uint32_t *hwrm_type)
{
	int rc = 0;

	switch (tcam_type) {
	case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
		*hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY;
		break;
	case TF_TCAM_TBL_TYPE_PROF_TCAM:
		*hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY;
		break;
	case TF_TCAM_TBL_TYPE_WC_TCAM:
		*hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY;
		break;
	case TF_TCAM_TBL_TYPE_VEB_TCAM:
	case TF_TCAM_TBL_TYPE_SP_TCAM:
	case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}
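
/*
 * Example: tf_tcam_tbl_2_hwrm(TF_TCAM_TBL_TYPE_WC_TCAM, &req.type)
 * stores TF_DEV_DATA_TYPE_TF_WC_ENTRY in req.type and returns 0;
 * TCAM table types without firmware support yield -EOPNOTSUPP.
 */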

/**
 * Sends session open request to TF Firmware
 */
int
tf_msg_session_open(struct tf *tfp,
		    char *ctrl_chan_name,
		    uint8_t *fw_session_id)
{
	int rc;
	struct hwrm_tf_session_open_input req = { 0 };
	struct hwrm_tf_session_open_output resp = { 0 };
	struct tfp_send_msg_parms parms = { 0 };

	/* Populate the request */
	memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);

	parms.tf_type = HWRM_TF_SESSION_OPEN;
	parms.req_data = (uint32_t *)&req;
	parms.req_size = sizeof(req);
	parms.resp_data = (uint32_t *)&resp;
	parms.resp_size = sizeof(resp);
	parms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &parms);
	if (rc)
		return rc;

	*fw_session_id = resp.fw_session_id;

	return rc;
}
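
/*
 * A minimal caller sketch (hypothetical variable names): the session
 * layer opens the channel once and keeps the returned fw_session_id
 * for all subsequent messages, e.g.
 *
 *   uint8_t fw_session_id;
 *
 *   rc = tf_msg_session_open(tfp, ctrl_chan_name, &fw_session_id);
 *   if (rc)
 *           return rc;
 */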

/**
 * Sends session attach request to TF Firmware (not yet supported)
 */
int
tf_msg_session_attach(struct tf *tfp __rte_unused,
		      char *ctrl_chan_name __rte_unused,
		      uint8_t tf_fw_session_id __rte_unused)
{
	return -EOPNOTSUPP;
}

/**
 * Sends session close request to TF Firmware
 */
int
tf_msg_session_close(struct tf *tfp)
{
	int rc;
	struct hwrm_tf_session_close_input req = { 0 };
	struct hwrm_tf_session_close_output resp = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
	struct tfp_send_msg_parms parms = { 0 };

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);

	parms.tf_type = HWRM_TF_SESSION_CLOSE;
	parms.req_data = (uint32_t *)&req;
	parms.req_size = sizeof(req);
	parms.resp_data = (uint32_t *)&resp;
	parms.resp_size = sizeof(resp);
	parms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &parms);
	return rc;
}

/**
 * Sends session query config request to TF Firmware
 */
int
tf_msg_session_qcfg(struct tf *tfp)
{
	int rc;
	struct hwrm_tf_session_qcfg_input req = { 0 };
	struct hwrm_tf_session_qcfg_output resp = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
	struct tfp_send_msg_parms parms = { 0 };

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);

	parms.tf_type = HWRM_TF_SESSION_QCFG;
	parms.req_data = (uint32_t *)&req;
	parms.req_size = sizeof(req);
	parms.resp_data = (uint32_t *)&resp;
	parms.resp_size = sizeof(resp);
	parms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &parms);
	return rc;
}

/**
 * Sends session HW resource query capability request to TF Firmware
 */
int
tf_msg_session_hw_resc_qcaps(struct tf *tfp,
			     enum tf_dir dir,
			     struct tf_rm_hw_query *query)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_session_hw_resc_qcaps_input req = { 0 };
	struct tf_session_hw_resc_qcaps_output resp = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	memset(query, 0, sizeof(*query));

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);

	MSG_PREP(parms,
		 TF_KONG_MB,
		 HWRM_TF,
		 HWRM_TFT_SESSION_HW_RESC_QCAPS,
		 req,
		 resp);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	/* Process the response */
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
			    l2_ctx_tcam_entries);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
			    prof_func);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
			    prof_tcam_entries);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
			    em_prof_id);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
			    em_record_entries);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
			    wc_tcam_prof_id);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
			    wc_tcam_entries);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
			    meter_profiles);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST, resp,
			    meter_inst);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
			    mirrors);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
			    upar);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
			    sp_tcam_entries);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
			    l2_func);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
			    flex_key_templ);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
			    tbl_scope);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
			    epoch0_entries);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
			    epoch1_entries);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
			    metadata);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
			    ct_state);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
			    range_prof);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
			    range_entries);
	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
			    lag_tbl_entries);

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

/**
 * Sends session HW resource allocation request to TF Firmware
 */
int
tf_msg_session_hw_resc_alloc(struct tf *tfp,
			     enum tf_dir dir,
			     struct tf_rm_hw_alloc *hw_alloc,
			     struct tf_rm_entry *hw_entry)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_session_hw_resc_alloc_input req = { 0 };
	struct tf_session_hw_resc_alloc_output resp = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	memset(hw_entry, 0, sizeof(*hw_entry));

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);

	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
			   l2_ctx_tcam_entries);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
			   prof_func_entries);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
			   prof_tcam_entries);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
			   em_prof_id);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
			   em_record_entries);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
			   wc_tcam_prof_id);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
			   wc_tcam_entries);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
			   meter_profiles);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
			   meter_inst);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
			   mirrors);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
			   upar);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
			   sp_tcam_entries);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
			   l2_func);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
			   flex_key_templ);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
			   tbl_scope);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
			   epoch0_entries);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
			   epoch1_entries);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
			   metadata);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
			   ct_state);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
			   range_prof);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
			   range_entries);
	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
			   lag_tbl_entries);

	MSG_PREP(parms,
		 TF_KONG_MB,
		 HWRM_TF,
		 HWRM_TFT_SESSION_HW_RESC_ALLOC,
		 req,
		 resp);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	/* Process the response */
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
			    l2_ctx_tcam_entries);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
			    prof_func);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
			    prof_tcam_entries);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
			    em_prof_id);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
			    em_record_entries);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
			    wc_tcam_prof_id);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
			    wc_tcam_entries);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
			    meter_profiles);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
			    meter_inst);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
			    mirrors);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
			    upar);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
			    sp_tcam_entries);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
			    l2_func);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
			    flex_key_templ);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
			    tbl_scope);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
			    epoch0_entries);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
			    epoch1_entries);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
			    metadata);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
			    ct_state);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
			    range_prof);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
			    range_entries);
	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
			    lag_tbl_entries);

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

/**
 * Sends session HW resource free request to TF Firmware
 */
int
tf_msg_session_hw_resc_free(struct tf *tfp,
			    enum tf_dir dir,
			    struct tf_rm_entry *hw_entry)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_session_hw_resc_free_input req = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* Populate the request from hw_entry; the entries must not be
	 * cleared before the start/stride values are copied out.
	 */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);

	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
			  l2_ctx_tcam_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
			  prof_func);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
			  prof_tcam_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
			  em_prof_id);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
			  em_record_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
			  wc_tcam_prof_id);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
			  wc_tcam_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
			  meter_profiles);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
			  meter_inst);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
			  mirrors);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
			  upar);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
			  sp_tcam_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
			  l2_func);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
			  flex_key_templ);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
			  tbl_scope);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
			  epoch0_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
			  epoch1_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
			  metadata);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
			  ct_state);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
			  range_prof);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
			  range_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
			  lag_tbl_entries);

	MSG_PREP_NO_RESP(parms,
			 TF_KONG_MB,
			 HWRM_TF,
			 HWRM_TFT_SESSION_HW_RESC_FREE,
			 req);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

/**
 * Sends session HW resource flush request to TF Firmware
 */
int
tf_msg_session_hw_resc_flush(struct tf *tfp,
			     enum tf_dir dir,
			     struct tf_rm_entry *hw_entry)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_session_hw_resc_free_input req = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);

	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
			  l2_ctx_tcam_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
			  prof_func);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
			  prof_tcam_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
			  em_prof_id);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
			  em_record_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
			  wc_tcam_prof_id);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
			  wc_tcam_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
			  meter_profiles);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
			  meter_inst);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
			  mirrors);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
			  upar);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
			  sp_tcam_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
			  l2_func);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
			  flex_key_templ);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
			  tbl_scope);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
			  epoch0_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
			  epoch1_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
			  metadata);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
			  ct_state);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
			  range_prof);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
			  range_entries);
	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
			  lag_tbl_entries);

	MSG_PREP_NO_RESP(parms,
			 TF_KONG_MB,
			 TF_TYPE_TRUFLOW,
			 HWRM_TFT_SESSION_HW_RESC_FLUSH,
			 req);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

/**
 * Sends session SRAM resource query capability request to TF Firmware
 */
int
tf_msg_session_sram_resc_qcaps(struct tf *tfp,
			       enum tf_dir dir,
			       struct tf_rm_sram_query *query)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_session_sram_resc_qcaps_input req = { 0 };
	struct tf_session_sram_resc_qcaps_output resp = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);

	MSG_PREP(parms,
		 TF_KONG_MB,
		 HWRM_TF,
		 HWRM_TFT_SESSION_SRAM_RESC_QCAPS,
		 req,
		 resp);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	/* Process the response */
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp,
			      full_action);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp,
			      mcg);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
			      encap_8b);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
			      encap_16b);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
			      encap_64b);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
			      sp_smac);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp,
			      sp_smac_ipv4);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp,
			      sp_smac_ipv6);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
			      counter_64b);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
			      nat_sport);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
			      nat_dport);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
			      nat_s_ipv4);
	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
			      nat_d_ipv4);

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

/**
 * Sends session SRAM resource allocation request to TF Firmware
 */
int
tf_msg_session_sram_resc_alloc(struct tf *tfp,
			       enum tf_dir dir,
			       struct tf_rm_sram_alloc *sram_alloc,
			       struct tf_rm_entry *sram_entry)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_session_sram_resc_alloc_input req = { 0 };
	struct tf_session_sram_resc_alloc_output resp = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);

	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
			     full_action);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req,
			     mcg);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
			     encap_8b);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
			     encap_16b);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
			     encap_64b);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req,
			     sp_smac);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
			     req, sp_smac_ipv4);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
			     req, sp_smac_ipv6);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B,
			     req, counter_64b);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
			     nat_sport);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
			     nat_dport);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
			     nat_s_ipv4);
	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
			     nat_d_ipv4);

	MSG_PREP(parms,
		 TF_KONG_MB,
		 HWRM_TF,
		 HWRM_TFT_SESSION_SRAM_RESC_ALLOC,
		 req,
		 resp);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	/* Process the response */
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION,
			      resp, full_action);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp,
			      mcg);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
			      encap_8b);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
			      encap_16b);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
			      encap_64b);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
			      sp_smac);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
			      resp, sp_smac_ipv4);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
			      resp, sp_smac_ipv6);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
			      counter_64b);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
			      nat_sport);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
			      nat_dport);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
			      nat_s_ipv4);
	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
			      nat_d_ipv4);

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

/**
 * Sends session SRAM resource free request to TF Firmware
 */
int
tf_msg_session_sram_resc_free(struct tf *tfp,
			      enum tf_dir dir,
			      struct tf_rm_entry *sram_entry)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_session_sram_resc_free_input req = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);

	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
			    full_action);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
			    mcg);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
			    encap_8b);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
			    encap_16b);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
			    encap_64b);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
			    sp_smac);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
			    sp_smac_ipv4);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
			    sp_smac_ipv6);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
			    counter_64b);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
			    nat_sport);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
			    nat_dport);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
			    nat_s_ipv4);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
			    nat_d_ipv4);

	MSG_PREP_NO_RESP(parms,
			 TF_KONG_MB,
			 HWRM_TF,
			 HWRM_TFT_SESSION_SRAM_RESC_FREE,
			 req);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

/**
 * Sends session SRAM resource flush request to TF Firmware
 */
int
tf_msg_session_sram_resc_flush(struct tf *tfp,
			       enum tf_dir dir,
			       struct tf_rm_entry *sram_entry)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_session_sram_resc_free_input req = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);

	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
			    full_action);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
			    mcg);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
			    encap_8b);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
			    encap_16b);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
			    encap_64b);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
			    sp_smac);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
			    sp_smac_ipv4);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
			    sp_smac_ipv6);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
			    counter_64b);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
			    nat_sport);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
			    nat_dport);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
			    nat_s_ipv4);
	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
			    nat_d_ipv4);

	MSG_PREP_NO_RESP(parms,
			 TF_KONG_MB,
			 TF_TYPE_TRUFLOW,
			 HWRM_TFT_SESSION_SRAM_RESC_FLUSH,
			 req);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

/**
 * Sends EM mem register request to Firmware
 */
int tf_msg_em_mem_rgtr(struct tf *tfp,
		       int           page_lvl,
		       int           page_size,
		       uint64_t      dma_addr,
		       uint16_t     *ctx_id)
{
	int rc;
	struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
	struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
	struct tfp_send_msg_parms parms = { 0 };

	req.page_level = page_lvl;
	req.page_size = page_size;
	req.page_dir = tfp_cpu_to_le_64(dma_addr);

	parms.tf_type = HWRM_TF_CTXT_MEM_RGTR;
	parms.req_data = (uint32_t *)&req;
	parms.req_size = sizeof(req);
	parms.resp_data = (uint32_t *)&resp;
	parms.resp_size = sizeof(resp);
	parms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &parms);
	if (rc)
		return rc;

	*ctx_id = tfp_le_to_cpu_16(resp.ctx_id);

	return rc;
}

/**
 * Sends EM mem unregister request to Firmware
 */
int tf_msg_em_mem_unrgtr(struct tf *tfp,
			 uint16_t  *ctx_id)
{
	int rc;
	struct hwrm_tf_ctxt_mem_unrgtr_input req = { 0 };
	struct hwrm_tf_ctxt_mem_unrgtr_output resp = { 0 };
	struct tfp_send_msg_parms parms = { 0 };

	req.ctx_id = tfp_cpu_to_le_32(*ctx_id);

	parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;
	parms.req_data = (uint32_t *)&req;
	parms.req_size = sizeof(req);
	parms.resp_data = (uint32_t *)&resp;
	parms.resp_size = sizeof(resp);
	parms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &parms);
	return rc;
}

/**
 * Sends EM qcaps request to Firmware
 */
int tf_msg_em_qcaps(struct tf *tfp,
		    int dir,
		    struct tf_em_caps *em_caps)
{
	int rc;
	struct hwrm_tf_ext_em_qcaps_input req = { 0 };
	struct hwrm_tf_ext_em_qcaps_output resp = { 0 };
	uint32_t flags;
	struct tfp_send_msg_parms parms = { 0 };

	flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX :
		 HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX);
	req.flags = tfp_cpu_to_le_32(flags);

	parms.tf_type = HWRM_TF_EXT_EM_QCAPS;
	parms.req_data = (uint32_t *)&req;
	parms.req_size = sizeof(req);
	parms.resp_data = (uint32_t *)&resp;
	parms.resp_size = sizeof(resp);
	parms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &parms);
	if (rc)
		return rc;

	em_caps->supported = tfp_le_to_cpu_32(resp.supported);
	em_caps->max_entries_supported =
		tfp_le_to_cpu_32(resp.max_entries_supported);
	em_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size);
	em_caps->record_entry_size =
		tfp_le_to_cpu_16(resp.record_entry_size);
	em_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size);

	return rc;
}

/**
 * Sends EM config request to Firmware
 */
int tf_msg_em_cfg(struct tf *tfp,
		  uint32_t   num_entries,
		  uint16_t   key0_ctx_id,
		  uint16_t   key1_ctx_id,
		  uint16_t   record_ctx_id,
		  uint16_t   efc_ctx_id,
		  uint8_t    flush_interval,
		  int        dir)
{
	int rc;
	struct hwrm_tf_ext_em_cfg_input req = { 0 };
	struct hwrm_tf_ext_em_cfg_output resp = { 0 };
	uint32_t flags;
	struct tfp_send_msg_parms parms = { 0 };

	flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
		 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
	flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;

	req.flags = tfp_cpu_to_le_32(flags);
	req.num_entries = tfp_cpu_to_le_32(num_entries);

	req.flush_interval = flush_interval;

	req.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id);
	req.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id);
	req.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id);
	req.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id);

	parms.tf_type = HWRM_TF_EXT_EM_CFG;
	parms.req_data = (uint32_t *)&req;
	parms.req_size = sizeof(req);
	parms.resp_data = (uint32_t *)&resp;
	parms.resp_size = sizeof(resp);
	parms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &parms);
	return rc;
}

/**
 * Sends EM operation request to Firmware
 */
int tf_msg_em_op(struct tf *tfp,
		 int        dir,
		 uint16_t   op)
{
	int rc;
	struct hwrm_tf_ext_em_op_input req = { 0 };
	struct hwrm_tf_ext_em_op_output resp = { 0 };
	uint32_t flags;
	struct tfp_send_msg_parms parms = { 0 };

	flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
		 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
	req.flags = tfp_cpu_to_le_32(flags);
	req.op = tfp_cpu_to_le_16(op);

	parms.tf_type = HWRM_TF_EXT_EM_OP;
	parms.req_data = (uint32_t *)&req;
	parms.req_size = sizeof(req);
	parms.resp_data = (uint32_t *)&resp;
	parms.resp_size = sizeof(resp);
	parms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &parms);
	return rc;
}

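/**
 * Sends table entry set request to TF Firmware
 */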
int
tf_msg_set_tbl_entry(struct tf *tfp,
		     enum tf_dir dir,
		     enum tf_tbl_type type,
		     uint16_t size,
		     uint8_t *data,
		     uint32_t index)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_tbl_type_set_input req = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);
	req.type = tfp_cpu_to_le_32(type);
	req.size = tfp_cpu_to_le_16(size);
	req.index = tfp_cpu_to_le_32(index);

	tfp_memcpy(&req.data,
		   data,
		   size);

	MSG_PREP_NO_RESP(parms,
			 TF_KONG_MB,
			 HWRM_TF,
			 HWRM_TFT_TBL_TYPE_SET,
			 req);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

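/**
 * Sends table entry get request to TF Firmware
 */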
int
tf_msg_get_tbl_entry(struct tf *tfp,
		     enum tf_dir dir,
		     enum tf_tbl_type type,
		     uint16_t size,
		     uint8_t *data,
		     uint32_t index)
{
	int rc;
	struct tfp_send_msg_parms parms = { 0 };
	struct tf_tbl_type_get_input req = { 0 };
	struct tf_tbl_type_get_output resp = { 0 };
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* Populate the request */
	req.fw_session_id =
		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
	req.flags = tfp_cpu_to_le_16(dir);
	req.type = tfp_cpu_to_le_32(type);
	req.index = tfp_cpu_to_le_32(index);

	MSG_PREP(parms,
		 TF_KONG_MB,
		 HWRM_TF,
		 HWRM_TFT_TBL_TYPE_GET,
		 req,
		 resp);

	rc = tfp_send_msg_tunneled(tfp, &parms);
	if (rc)
		return rc;

	/* Verify that the response holds at least the requested data */
	if (resp.size < size)
		return -EINVAL;

	/* Copy only the requested amount so a larger response cannot
	 * overrun the caller's buffer.
	 */
	tfp_memcpy(data,
		   &resp.data,
		   size);

	return tfp_le_to_cpu_32(parms.tf_resp_code);
}

#define TF_BYTES_PER_SLICE(tfp) 12
#define NUM_SLICES(tfp, bytes) \
	(((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
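/*
 * Example: with 12 bytes per slice, NUM_SLICES(tfp, 24) yields 2
 * while NUM_SLICES(tfp, 25) rounds up to 3.
 */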

static int
tf_msg_get_dma_buf(struct tf_msg_dma_buf *buf, int size)
{
	struct tfp_calloc_parms alloc_parms;
	int rc;

	/* Allocate a DMA-able buffer for the message payload */
	alloc_parms.nitems = 1;
	alloc_parms.size = size;
	alloc_parms.alignment = 0;
	rc = tfp_calloc(&alloc_parms);
	if (rc) {
		/* Log error */
		PMD_DRV_LOG(ERR,
			    "Failed to allocate tcam dma entry, rc:%d\n",
			    rc);
		return -ENOMEM;
	}

	buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
	buf->va_addr = alloc_parms.mem_va;

	return 0;
}

int
tf_msg_tcam_entry_set(struct tf *tfp,
		      struct tf_set_tcam_entry_parms *parms)
{
	int rc;
	struct tfp_send_msg_parms mparms = { 0 };
	struct hwrm_tf_tcam_set_input req = { 0 };
	struct hwrm_tf_tcam_set_output resp = { 0 };
	uint16_t key_bytes =
		TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
	uint16_t result_bytes =
		TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
	struct tf_msg_dma_buf buf = { 0 };
	uint8_t *data = NULL;
	int data_size = 0;

	rc = tf_tcam_tbl_2_hwrm(parms->tcam_tbl_type, &req.type);
	if (rc != 0)
		return rc;

	req.idx = tfp_cpu_to_le_16(parms->idx);
	if (parms->dir == TF_DIR_TX)
		req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;

	req.key_size = key_bytes;
	req.mask_offset = key_bytes;
	/* Result follows after key and mask, thus multiply by 2 */
	req.result_offset = 2 * key_bytes;
	req.result_size = result_bytes;
	data_size = 2 * req.key_size + req.result_size;
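	/*
	 * Worked example: a 16-byte key and an 8-byte result give
	 * key_size = 16, mask_offset = 16, result_offset = 32 and
	 * data_size = 40, which fits the inline PCI buffer
	 * (TF_PCI_BUF_SIZE_MAX = 88); larger entries fall back to DMA.
	 */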

	if (data_size <= TF_PCI_BUF_SIZE_MAX) {
		/* use pci buffer */
		data = &req.dev_data[0];
	} else {
		/* use dma buffer */
		req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
		rc = tf_msg_get_dma_buf(&buf, data_size);
		if (rc != 0)
			return rc;
		data = buf.va_addr;
		memcpy(&req.dev_data[0], &buf.pa_addr, sizeof(buf.pa_addr));
	}

	memcpy(&data[0], parms->key, key_bytes);
	memcpy(&data[key_bytes], parms->mask, key_bytes);
	memcpy(&data[req.result_offset], parms->result, result_bytes);

	mparms.tf_type = HWRM_TF_TCAM_SET;
	mparms.req_data = (uint32_t *)&req;
	mparms.req_size = sizeof(req);
	mparms.resp_data = (uint32_t *)&resp;
	mparms.resp_size = sizeof(resp);
	mparms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &mparms);

	/* Release the DMA buffer even when the send failed, so the
	 * error path does not leak it.
	 */
	if (buf.va_addr != NULL)
		tfp_free(buf.va_addr);

	return rc;
}

int
tf_msg_tcam_entry_free(struct tf *tfp,
		       struct tf_free_tcam_entry_parms *in_parms)
{
	int rc;
	struct hwrm_tf_tcam_free_input req = { 0 };
	struct hwrm_tf_tcam_free_output resp = { 0 };
	struct tfp_send_msg_parms parms = { 0 };

	/* Populate the request */
	rc = tf_tcam_tbl_2_hwrm(in_parms->tcam_tbl_type, &req.type);
	if (rc != 0)
		return rc;

	req.count = 1;
	req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
	if (in_parms->dir == TF_DIR_TX)
		req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;

	parms.tf_type = HWRM_TF_TCAM_FREE;
	parms.req_data = (uint32_t *)&req;
	parms.req_size = sizeof(req);
	parms.resp_data = (uint32_t *)&resp;
	parms.resp_size = sizeof(resp);
	parms.mailbox = TF_KONG_MB;

	rc = tfp_send_msg_direct(tfp,
				 &parms);
	return rc;
}