// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
/* Copyright (C) 2014-2017 aQuantia Corporation. */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../atl_types.h"
#include "hw_atl_b0.h"

#include "../atl_hw_regs.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"
#include "../atl_logs.h"

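/* Issue a firmware-assisted soft reset, then request the MPI_RESET
 * firmware state.
 */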
int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
	int err = 0;

	err = hw_atl_utils_soft_reset(self);
	if (err)
		return err;

	self->aq_fw_ops->set_state(self, MPI_RESET);

	return err;
}

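/* Enable or disable XOFF (Rx pause) generation for one traffic class,
 * based on the AQ_NIC_FC_RX flag.
 */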
int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
	return 0;
}

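/* Program the Tx scheduler, the per-TC packet buffer sizes and
 * watermarks, and the 802.1p priority to TC mapping (all priorities
 * map to TC 0).
 */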
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
	u32 tc = 0U;
	u32 buff_size = 0U;
	unsigned int i_priority = 0U;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

	/* Tx buf size */
	buff_size = HW_ATL_B0_TXBUF_MAX;

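	/* Buffer sizes are in KB; (size * 1024 / 32) converts them to the
	 * 32-byte units the threshold registers use, putting the high and
	 * low watermarks at 66% and 50% of the buffer.
	 */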
	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 66U) /
						   100U, tc);
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 50U) /
						   100U, tc);

	/* QoS Rx buf size per TC */
	tc = 0;
	buff_size = HW_ATL_B0_RXBUF_MAX;

	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 66U) /
						   100U, tc);
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 50U) /
						   100U, tc);
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);

	/* QoS 802.1p priority -> TC mapping */
	for (i_priority = 8U; i_priority--;)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

	return aq_hw_err_from_flags(self);
}

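/* Flags OR'ed into the RSS control register (0x5040) written below. */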
/* calc hash only in IPv4 header, regardless of presence of TCP */
#define pif_rpf_rss_ipv4_hdr_only_i     (1 << 4)
/* calc hash only if TCP header and IPv4 */
#define pif_rpf_rss_ipv4_tcp_hdr_only_i (1 << 3)
/* calc hash only in IPv6 header, regardless of presence of TCP */
#define pif_rpf_rss_ipv6_hdr_only_i     (1 << 2)
/* calc hash only if TCP header and IPv6 */
#define pif_rpf_rss_ipv6_tcp_hdr_only_i (1 << 1)
/* bug 5124 - rss hashing types - FIXME */
#define pif_rpf_rss_dont_use_udp_i      (1 << 0)

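/* Select which headers feed the RSS hash: IPv4-header-only hashing,
 * plus extra RPF2 control bits on chips that support them.
 */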
static int hw_atl_b0_hw_rss_hash_type_set(struct aq_hw_s *self)
{
	/* misc */
	unsigned int control_reg_val =
		IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U;

	/* RSS hash type set for IP/TCP */
	control_reg_val |= pif_rpf_rss_ipv4_hdr_only_i; /* 0x1EU */

	aq_hw_write_reg(self, 0x5040U, control_reg_val);

	return aq_hw_err_from_flags(self);
}

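/* Write the 40-byte RSS secret key as 10 dwords (highest index first),
 * polling each key write for completion, then select the RSS rings and
 * hash type.
 */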
int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
			      struct aq_rss_parameters *rss_params)
{
	struct aq_hw_cfg_s *cfg = self->aq_nic_cfg;
	int err = 0;
	unsigned int i = 0U;
	unsigned int addr = 0U;

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			htonl(rss_params->hash_secret_key[i]) : 0U;
		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
		hw_atl_rpf_rss_key_addr_set(self, addr);
		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
			       1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self,
				cfg->is_rss ? 0xB3333333U : 0x00000000U);
	hw_atl_b0_hw_rss_hash_type_set(self);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

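/* Fill the RSS redirection table: each entry is a 3-bit queue index,
 * packed into the 16-bit words of the redirection table registers.
 */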
int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
			 struct aq_rss_parameters *rss_params)
{
	u8 *indirection_table = rss_params->indirection_table;
	u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
	u32 i = 0;
	u32 addr = 0;
	u32 val = 0;
	u32 shift = 0;
	int err = 0;

	for (i = 0; i < HW_ATL_B0_RSS_REDIRECTION_MAX; i++) {
		val |= (u32)(indirection_table[i] % num_rss_queues) << shift;
		shift += 3;

		if (shift < 16)
			continue;

		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, val & 0xffff);
		hw_atl_rpf_rss_redir_tbl_addr_set(self, addr);

		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
			       1000U, 10U);

		if (err < 0)
			goto err_exit;

		shift -= 16;
		val >>= 16;
		addr++;
	}

err_exit:
	return err;
}

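/* Enable Tx/Rx checksum and LSO offloads, and configure LRO (enabled
 * only when the NIC config requests it).
 */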
static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self)
				    /*struct aq_nic_cfg_s *aq_nic_cfg)*/
{
	unsigned int i;

	/* TX checksum offloads */
	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksum offloads */
	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);

	/* LSO offloads */
	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	/* LRO offloads */
	{
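		/* val encodes the per-session LRO descriptor limit as a
		 * power of two: 0x3/0x2/0x1/0x0 -> 8/4/2/1 descriptors,
		 * clamped to HW_ATL_B0_LRO_RXD_MAX.
		 */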
		unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
			((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
			hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

		hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
		hw_atl_rpo_lro_inactive_interval_set(self, 0);
		hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);

		hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

		hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

		hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);

		hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

		hw_atl_rpo_lro_pkt_lim_set(self, 1U);

		hw_atl_rpo_lro_en_set(self,
				self->aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
	}
	return aq_hw_err_from_flags(self);
}

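/* One-time Tx datapath init: TC mode and LSO TCP flag masks; descriptor
 * write-back interrupts and DCA are left disabled.
 */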
static
int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
	/* Tx TC/RSS number config */
	hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);

	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);

	/* misc */
	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
			0x00010000U : 0x00000000U);
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
}

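/* One-time Rx datapath init: TC mode, flow control, RSS ring selection,
 * L2 unicast/multicast/broadcast filters and VLAN handling.
 */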
static
int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_hw_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U); /* 1: 4TC/8Queues */

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					0xB3333333U : 0x00000000U);

	/* Multicast filters */
	for (i = HW_ATL_B0_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	/* Vlan filters */
	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

	/* VLAN promisc by default */
	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

	/* Rx Interrupts */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);

	hw_atl_b0_hw_rss_hash_type_set(self);

	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	hw_atl_rpfl2broadcast_en_set(self, 1U);

	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}

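/* Program the station MAC address into L2 unicast filter HW_ATL_B0_MAC:
 * the MSW register holds bytes 0-1, the LSW register bytes 2-5.
 */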
static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
	int err = 0;
	unsigned int h = 0U;
	unsigned int l = 0U;

	if (!mac_addr) {
		err = -EINVAL;
		goto err_exit;
	}
	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

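/* Full hardware bring-up: datapath init, MAC address, link speed and
 * firmware state, QoS/RSS configuration, PCIe request limits, offloads
 * and interrupt mapping.
 */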
int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		{ 0x20000080U, 0x20000080U }, /* AQ_IRQ_INVALID */
		{ 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
		{ 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
		{ 0x200000A2U, 0x200000A6U }  /* AQ_IRQ_MSIX */
	};

	int err = 0;
	u32 val;

	struct aq_hw_cfg_s *aq_nic_cfg = self->aq_nic_cfg;

	hw_atl_b0_hw_init_tx_path(self);
	hw_atl_b0_hw_init_rx_path(self);

	hw_atl_b0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl_b0_hw_qos_set(self);
	hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Force limit MRRS on RDM/TDM to 2K */
	val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
	aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
			(val & ~0x707) | 0x404);

	/* TX DMA total request limit. B0 hardware is not capable of
	 * handling more than (8K-MRRS) of incoming DMA data.
	 * The value 24 is in 256-byte units.
	 */
	aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts: global control, keyed by IRQ type and single vs.
	 * multi-vector operation
	 */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
					 [(aq_nic_cfg->vecs > 1U) ?
					 1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, 0xffffffff);

	/* Interrupts */
	hw_atl_reg_gen_irq_map_set(self, 0, 0);
	hw_atl_reg_gen_irq_map_set(self, 0x80 | ATL_IRQ_CAUSE_LINK, 3);

	hw_atl_b0_hw_offload_set(self);

err_exit:
	return err;
}

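/* The ring start/stop helpers below toggle descriptor processing for a
 * single Tx or Rx ring.
 */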
int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, int index)
{
	hw_atl_tdm_tx_desc_en_set(self, 1, index);
	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, int index)
{
	hw_atl_rdm_rx_desc_en_set(self, 1, index);
	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
	hw_atl_tpb_tx_buff_en_set(self, 1);
	hw_atl_rpb_rx_buff_en_set(self, 1);
	return aq_hw_err_from_flags(self);
}

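/* Writing the tail pointer hands newly prepared descriptors over to
 * the hardware.
 */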
int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, int tail, int index)
{
	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, tail, index);
	return 0;
}

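/* Set up one Rx descriptor ring: base address, ring length (programmed
 * in units of 8 descriptors), data buffer size (in 1 KB units) and
 * interrupt vector mapping; header split and DCA stay disabled.
 */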
int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, uint64_t base_addr,
		int index, int size, int buff_size, int cpu, int vec)
{
	u32 dma_desc_addr_lsw = (u32)base_addr;
	u32 dma_desc_addr_msw = (u32)(base_addr >> 32);

	hw_atl_rdm_rx_desc_en_set(self, false, index);

	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, index);

	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
						  index);

	hw_atl_reg_rx_dma_desc_base_addressmswset(self, dma_desc_addr_msw,
						  index);

	hw_atl_rdm_rx_desc_len_set(self, size / 8U, index);

	hw_atl_rdm_rx_desc_data_buff_size_set(self, buff_size / 1024U, index);

	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, index);
	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, index);
	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, index);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_rx_set(self, vec, index);
	hw_atl_itr_irq_map_en_rx_set(self, true, index);

	hw_atl_rdm_cpu_id_set(self, cpu, index);
	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, index);
	hw_atl_rdm_rx_head_dca_en_set(self, 0U, index);
	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, index);

	return aq_hw_err_from_flags(self);
}

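/* Set up one Tx descriptor ring: base address, ring length (in units
 * of 8 descriptors), write-back threshold and interrupt vector mapping.
 */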
int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr,
			      int index, int size, int cpu, int vec)
{
	u32 dma_desc_lsw_addr = (u32)base_addr;
	u32 dma_desc_msw_addr = (u32)(base_addr >> 32);

	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
						  index);

	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
						  index);

	hw_atl_tdm_tx_desc_len_set(self, size / 8U, index);

	hw_atl_b0_hw_tx_ring_tail_update(self, 0, index);

	/* Set Tx threshold */
	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, index);

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_tx_set(self, vec, index);
	hw_atl_itr_irq_map_en_tx_set(self, true, index);

	hw_atl_tdm_cpu_id_set(self, cpu, index);
	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, index);

	return aq_hw_err_from_flags(self);
}

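/* Only the lower 32 interrupt mask/status bits are touched here;
 * LODWORD() drops the upper half of the 64-bit mask.
 */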
int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
	*mask = hw_atl_itr_irq_statuslsw_get(self);
	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, int index)
{
	hw_atl_tdm_tx_desc_en_set(self, 0U, index);
	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, int index)
{
	hw_atl_rdm_rx_desc_en_set(self, 0U, index);
	return aq_hw_err_from_flags(self);
}