xref: /openbsd-src/sys/dev/ic/qwx.c (revision 54fbbda3b5f8c42357b8601b12a514e2d25a2771)
1 /*	$OpenBSD: qwx.c,v 1.67 2024/09/01 03:08:56 jsg Exp $	*/
2 
3 /*
4  * Copyright 2023 Stefan Sperling <stsp@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Copyright (c) 2018-2019 The Linux Foundation.
21  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc.
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted (subject to the limitations in the disclaimer
26  * below) provided that the following conditions are met:
27  *
28  *  * Redistributions of source code must retain the above copyright notice,
29  *    this list of conditions and the following disclaimer.
30  *
31  *  * Redistributions in binary form must reproduce the above copyright
32  *    notice, this list of conditions and the following disclaimer in the
33  *    documentation and/or other materials provided with the distribution.
34  *
35  *  * Neither the name of [Owner Organization] nor the names of its
36  *    contributors may be used to endorse or promote products derived from
37  *    this software without specific prior written permission.
38  *
39  * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
40  * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
41  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
42  * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
43  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
44  * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
45  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
46  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
47  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
48  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
49  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
50  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51  */
52 
53 /*
54  * Driver for Qualcomm Technologies 802.11ax chipset.
55  */
56 
57 #include "bpfilter.h"
58 
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/device.h>
62 #include <sys/rwlock.h>
63 #include <sys/systm.h>
64 #include <sys/socket.h>
65 #include <sys/sockio.h>
66 
67 #include <sys/refcnt.h>
68 #include <sys/task.h>
69 
70 #include <machine/bus.h>
71 #include <machine/intr.h>
72 
73 #ifdef __HAVE_FDT
74 #include <dev/ofw/openfirm.h>
75 #endif
76 
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #endif
80 #include <net/if.h>
81 #include <net/if_media.h>
82 
83 #include <netinet/in.h>
84 #include <netinet/if_ether.h>
85 
86 #include <net80211/ieee80211_var.h>
87 #include <net80211/ieee80211_radiotap.h>
88 
/* XXX linux porting goo */
#ifdef __LP64__
#define BITS_PER_LONG		64
#else
#define BITS_PER_LONG		32
#endif
/* Contiguous bit mask covering bits l..h inclusive (Linux GENMASK). */
#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
/* Shift distance of the lowest set bit of a mask; used by FIELD_GET/PREP. */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
/* 1-based index of the first zero bit, mirroring ffs() semantics. */
#define ffz(x) ffs(~(x))
/* Extract/insert a bit field described by mask _m from/into a word. */
#define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m)))
#define FIELD_PREP(_m, _v) (((typeof(_m))(_v) << __bf_shf(_m)) & (_m))
#define BIT(x)               (1UL << (x))
/*
 * NB: unlike their Linux namesakes these operate on a plain integer
 * word, not on a bitmap array, and are not atomic.
 */
#define test_bit(i, a)  ((a) & (1 << (i)))
#define clear_bit(i, a) ((a)) &= ~(1 << (i))
#define set_bit(i, a)   ((a)) |= (1 << (i))
#define container_of(ptr, type, member) ({			\
	const __typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
107 
108 /* #define QWX_DEBUG */
109 
110 #include <dev/ic/qwxreg.h>
111 #include <dev/ic/qwxvar.h>
112 
#ifdef QWX_DEBUG
/*
 * Debug print mask; enable additional message categories by
 * uncommenting the corresponding QWX_D_* flags below.
 */
uint32_t	qwx_debug = 0
		    | QWX_D_MISC
/*		    | QWX_D_MHI */
/*		    | QWX_D_QMI */
/*		    | QWX_D_WMI */
/*		    | QWX_D_HTC */
/*		    | QWX_D_HTT */
/*		    | QWX_D_MAC */
/*		    | QWX_D_MGMT */
		;
#endif
125 
126 int qwx_ce_init_pipes(struct qwx_softc *);
127 int qwx_hal_srng_src_num_free(struct qwx_softc *, struct hal_srng *, int);
128 int qwx_ce_per_engine_service(struct qwx_softc *, uint16_t);
129 int qwx_hal_srng_setup(struct qwx_softc *, enum hal_ring_type, int, int,
130     struct hal_srng_params *);
131 int qwx_ce_send(struct qwx_softc *, struct mbuf *, uint8_t, uint16_t);
132 int qwx_htc_connect_service(struct qwx_htc *, struct qwx_htc_svc_conn_req *,
133     struct qwx_htc_svc_conn_resp *);
134 void qwx_hal_srng_shadow_update_hp_tp(struct qwx_softc *, struct hal_srng *);
135 void qwx_wmi_free_dbring_caps(struct qwx_softc *);
136 int qwx_wmi_set_peer_param(struct qwx_softc *, uint8_t *, uint32_t,
137     uint32_t, uint32_t, uint32_t);
138 int qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *, int, int,
139     uint8_t *, uint64_t, uint8_t, uint8_t, uint32_t);
140 const void **qwx_wmi_tlv_parse_alloc(struct qwx_softc *, const void *, size_t);
141 int qwx_core_init(struct qwx_softc *);
142 int qwx_qmi_event_server_arrive(struct qwx_softc *);
143 int qwx_mac_register(struct qwx_softc *);
144 int qwx_mac_start(struct qwx_softc *);
145 void qwx_mac_scan_finish(struct qwx_softc *);
146 int qwx_mac_mgmt_tx_wmi(struct qwx_softc *, struct qwx_vif *, uint8_t,
147     struct ieee80211_node *, struct mbuf *);
148 int qwx_dp_tx(struct qwx_softc *, struct qwx_vif *, uint8_t,
149     struct ieee80211_node *, struct mbuf *);
150 int qwx_dp_tx_send_reo_cmd(struct qwx_softc *, struct dp_rx_tid *,
151     enum hal_reo_cmd_type , struct ath11k_hal_reo_cmd *,
152     void (*func)(struct qwx_dp *, void *, enum hal_reo_cmd_status));
153 void qwx_dp_rx_deliver_msdu(struct qwx_softc *, struct qwx_rx_msdu *);
154 void qwx_dp_service_mon_ring(void *);
155 void qwx_peer_frags_flush(struct qwx_softc *, struct ath11k_peer *);
156 int qwx_wmi_vdev_install_key(struct qwx_softc *,
157     struct wmi_vdev_install_key_arg *, uint8_t);
158 int qwx_dp_peer_rx_pn_replay_config(struct qwx_softc *, struct qwx_vif *,
159     struct ieee80211_node *, struct ieee80211_key *, int);
160 void qwx_setkey_clear(struct qwx_softc *);
161 void qwx_vif_free_all(struct qwx_softc *);
162 
163 int qwx_scan(struct qwx_softc *);
164 void qwx_scan_abort(struct qwx_softc *);
165 int qwx_auth(struct qwx_softc *);
166 int qwx_deauth(struct qwx_softc *);
167 int qwx_run(struct qwx_softc *);
168 int qwx_run_stop(struct qwx_softc *);
169 
170 struct ieee80211_node *
171 qwx_node_alloc(struct ieee80211com *ic)
172 {
173 	struct qwx_node *nq;
174 
175 	nq = malloc(sizeof(struct qwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
176 	if (nq != NULL)
177 		nq->peer.peer_id = HAL_INVALID_PEERID;
178 	return (struct ieee80211_node *)nq;
179 }
180 
/*
 * Bring the hardware up and (re-)initialize the interface.
 *
 * Boots the firmware via QRTR/QMI, attaches to net80211 on the first
 * invocation, and begins an initial scan when the interface is
 * administratively up.  Called from qwx_ioctl() with sc->ioctl_rwl
 * held.  May sleep.  Returns 0 on success or an errno.
 */
int
qwx_init(struct ifnet *ifp)
{
	int error;
	struct qwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
	/*
	 * There are several known hardware/software crypto issues
	 * on wcn6855 devices, firmware 0x1106196e. It is unclear
	 * if these are driver or firmware bugs.
	 *
	 * 1) Broadcast/Multicast frames will only be received on
	 *    encrypted networks if hardware crypto is used and a
	 *    CCMP group key is used. Otherwise such frames never
	 *    even trigger an interrupt. This breaks ARP and IPv6.
	 *    This issue is known to affect the Linux ath11k vendor
	 *    driver when software crypto mode is selected.
	 *    Workaround: Use hardware crypto on WPA2 networks.
	 *    However, even with hardware crypto broadcast frames
	 *    are never received if TKIP is used as the WPA2 group
	 *    cipher and we have no workaround for this.
	 *
	 * 2) Adding WEP keys for hardware crypto crashes the firmware.
	 *    Presumably, lack of WEP support is deliberate because the
	 *    Linux ath11k vendor driver rejects attempts to install
	 *    WEP keys to hardware.
	 *    Workaround: Use software crypto if WEP is enabled.
	 *    This suffers from the broadcast issues mentioned above.
	 *
	 * 3) A WPA1 group key handshake message from the AP is never
	 *    received if hardware crypto is used.
	 *    Workaround: Use software crypto if WPA1 is enabled.
	 *    This suffers from the broadcast issues mentioned above,
	 *    even on WPA2 networks when WPA1 and WPA2 are both enabled.
	 *    On OpenBSD, WPA1 is disabled by default.
	 *
	 * The only known fully working configurations are unencrypted
	 * networks, and WPA2/CCMP-only networks provided WPA1 remains
	 * disabled.
	 */
	if ((ic->ic_flags & IEEE80211_F_WEPON) ||
	    (ic->ic_rsnprotos & IEEE80211_PROTO_WPA))
		sc->crypto_mode = ATH11K_CRYPT_MODE_SW;
	else
		sc->crypto_mode = ATH11K_CRYPT_MODE_HW;
	sc->frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
	ic->ic_state = IEEE80211_S_INIT;
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH11K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWX_11D_INVALID_VDEV_ID;

	error = qwx_core_init(sc);
	if (error)
		return error;

	memset(&sc->qrtr_server, 0, sizeof(sc->qrtr_server));
	sc->qrtr_server.node = QRTR_NODE_BCAST;

	/*
	 * Wait for QRTR init to be done.  The node field is presumably
	 * overwritten (and we are woken) by the QRTR layer once the
	 * firmware's QMI server announces itself -- see the matching
	 * wakeup elsewhere in the driver.
	 */
	while (sc->qrtr_server.node == QRTR_NODE_BCAST) {
		error = tsleep_nsec(&sc->qrtr_server, 0, "qwxqrtr",
		    SEC_TO_NSEC(5));
		if (error) {
			printf("%s: qrtr init timeout\n", sc->sc_dev.dv_xname);
			return error;
		}
	}

	error = qwx_qmi_event_server_arrive(sc);
	if (error)
		return error;

	if (sc->attached) {
		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(ic->ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);
	} else {
		sc->attached = 1;

		/* Configure channel information obtained from firmware. */
		ieee80211_channel_init(ifp);

		/* Configure initial MAC address. */
		error = if_setlladdr(ifp, ic->ic_myaddr);
		if (error)
			printf("%s: could not set MAC address %s: %d\n",
			    sc->sc_dev.dv_xname, ether_sprintf(ic->ic_myaddr),
			    error);

		ieee80211_media_init(ifp, qwx_media_change,
		    ieee80211_media_status);
	}

	if (ifp->if_flags & IFF_UP) {
		refcnt_init(&sc->task_refs);

		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_flags |= IFF_RUNNING;

		error = qwx_mac_start(sc);
		if (error)
			return error;

		ieee80211_begin_scan(ifp);
	}

	return 0;
}
291 
292 void
293 qwx_add_task(struct qwx_softc *sc, struct taskq *taskq, struct task *task)
294 {
295 	int s = splnet();
296 
297 	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
298 		splx(s);
299 		return;
300 	}
301 
302 	refcnt_take(&sc->task_refs);
303 	if (!task_add(taskq, task))
304 		refcnt_rele_wake(&sc->task_refs);
305 	splx(s);
306 }
307 
308 void
309 qwx_del_task(struct qwx_softc *sc, struct taskq *taskq, struct task *task)
310 {
311 	if (task_del(taskq, task))
312 		refcnt_rele(&sc->task_refs);
313 }
314 
/*
 * Stop the interface and power off the hardware.
 *
 * Cancels pending tasks, waits for in-flight tasks to drain, resets
 * net80211 state to INIT, and tears down the firmware core.  Must be
 * called with sc->ioctl_rwl write-locked (asserted below); may sleep
 * in refcnt_finalize().
 */
void
qwx_stop(struct ifnet *ifp)
{
	struct qwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int s = splnet();

	rw_assert_wrlock(&sc->ioctl_rwl);

	timeout_del(&sc->mon_reap_timer);

	/* Disallow new tasks. */
	set_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);

	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->init_task);
	qwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
	qwx_del_task(sc, systq, &sc->setkey_task);
	refcnt_finalize(&sc->task_refs, "qwxstop");

	/* Drop node references still held by queued set-key requests. */
	qwx_setkey_clear(sc);

	clear_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);

	ifp->if_timer = sc->sc_tx_timer = 0;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Call the saved net80211 handler directly; our own hook defers. */
	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH11K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWX_11D_INVALID_VDEV_ID;
	sc->pdevs_active = 0;

	/* power off hardware */
	qwx_core_deinit(sc);

	qwx_vif_free_all(sc);

	splx(s);
}
357 
358 void
359 qwx_free_firmware(struct qwx_softc *sc)
360 {
361 	int i;
362 
363 	for (i = 0; i < nitems(sc->fw_img); i++) {
364 		free(sc->fw_img[i].data, M_DEVBUF, sc->fw_img[i].size);
365 		sc->fw_img[i].data = NULL;
366 		sc->fw_img[i].size = 0;
367 	}
368 }
369 
370 int
371 qwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
372 {
373 	struct qwx_softc *sc = ifp->if_softc;
374 	int s, err = 0;
375 
376 	/*
377 	 * Prevent processes from entering this function while another
378 	 * process is tsleep'ing in it.
379 	 */
380 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
381 	if (err)
382 		return err;
383 	s = splnet();
384 
385 	switch (cmd) {
386 	case SIOCSIFADDR:
387 		ifp->if_flags |= IFF_UP;
388 		/* FALLTHROUGH */
389 	case SIOCSIFFLAGS:
390 		if (ifp->if_flags & IFF_UP) {
391 			if (!(ifp->if_flags & IFF_RUNNING)) {
392 				/* Force reload of firmware image from disk. */
393 				qwx_free_firmware(sc);
394 				err = qwx_init(ifp);
395 			}
396 		} else {
397 			if (ifp->if_flags & IFF_RUNNING)
398 				qwx_stop(ifp);
399 		}
400 		break;
401 
402 	default:
403 		err = ieee80211_ioctl(ifp, cmd, data);
404 	}
405 
406 	if (err == ENETRESET) {
407 		err = 0;
408 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
409 		    (IFF_UP | IFF_RUNNING)) {
410 			qwx_stop(ifp);
411 			err = qwx_init(ifp);
412 		}
413 	}
414 
415 	splx(s);
416 	rw_exit(&sc->ioctl_rwl);
417 
418 	return err;
419 }
420 
421 int
422 qwx_tx(struct qwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
423 {
424 	struct ieee80211_frame *wh;
425 	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
426 	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
427 	uint8_t frame_type;
428 
429 	wh = mtod(m, struct ieee80211_frame *);
430 	frame_type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
431 
432 #if NBPFILTER > 0
433 	if (sc->sc_drvbpf != NULL) {
434 		struct qwx_tx_radiotap_header *tap = &sc->sc_txtap;
435 
436 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
437 		    m, BPF_DIRECTION_OUT);
438 	}
439 #endif
440 
441 	if (frame_type == IEEE80211_FC0_TYPE_MGT)
442 		return qwx_mac_mgmt_tx_wmi(sc, arvif, pdev_id, ni, m);
443 
444 	return qwx_dp_tx(sc, arvif, pdev_id, ni, m);
445 }
446 
/*
 * ifnet start routine: drain pending management frames and, while in
 * RUN state, data frames from the send queue, handing each one to
 * qwx_tx().
 */
void
qwx_start(struct ifnet *ifp)
{
	struct qwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* Management frames carry their node in ph_cookie. */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}

		if (ic->ic_state != IEEE80211_S_RUN ||
		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
			break;

		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* Make sure the Ethernet header is contiguous for encap. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (qwx_tx(sc, m, ni) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the watchdog while a transmission is outstanding. */
		if (ifp->if_flags & IFF_UP)
			ifp->if_timer = 1;
	}
}
509 
510 void
511 qwx_watchdog(struct ifnet *ifp)
512 {
513 	struct qwx_softc *sc = ifp->if_softc;
514 
515 	ifp->if_timer = 0;
516 
517 	if (sc->sc_tx_timer > 0) {
518 		if (--sc->sc_tx_timer == 0) {
519 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
520 			if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
521 				task_add(systq, &sc->init_task);
522 			ifp->if_oerrors++;
523 			return;
524 		}
525 		ifp->if_timer = 1;
526 	}
527 
528 	ieee80211_watchdog(ifp);
529 }
530 
531 int
532 qwx_media_change(struct ifnet *ifp)
533 {
534 	int err;
535 
536 	err = ieee80211_media_change(ifp);
537 	if (err != ENETRESET)
538 		return err;
539 
540 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
541 	    (IFF_UP | IFF_RUNNING)) {
542 		qwx_stop(ifp);
543 		err = qwx_init(ifp);
544 	}
545 
546 	return err;
547 }
548 
549 int
550 qwx_queue_setkey_cmd(struct ieee80211com *ic, struct ieee80211_node *ni,
551     struct ieee80211_key *k, int cmd)
552 {
553 	struct qwx_softc *sc = ic->ic_softc;
554 	struct qwx_setkey_task_arg *a;
555 
556 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg) ||
557 	    k->k_id > WMI_MAX_KEY_INDEX)
558 		return ENOSPC;
559 
560 	a = &sc->setkey_arg[sc->setkey_cur];
561 	a->ni = ieee80211_ref_node(ni);
562 	a->k = k;
563 	a->cmd = cmd;
564 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
565 	sc->setkey_nkeys++;
566 	qwx_add_task(sc, systq, &sc->setkey_task);
567 	return EBUSY;
568 }
569 
570 int
571 qwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
572     struct ieee80211_key *k)
573 {
574 	struct qwx_softc *sc = ic->ic_softc;
575 
576 	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags) ||
577 	    k->k_cipher == IEEE80211_CIPHER_WEP40 ||
578 	    k->k_cipher == IEEE80211_CIPHER_WEP104)
579 		return ieee80211_set_key(ic, ni, k);
580 
581 	return qwx_queue_setkey_cmd(ic, ni, k, QWX_ADD_KEY);
582 }
583 
584 void
585 qwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
586     struct ieee80211_key *k)
587 {
588 	struct qwx_softc *sc = ic->ic_softc;
589 
590 	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags) ||
591 	    k->k_cipher == IEEE80211_CIPHER_WEP40 ||
592 	    k->k_cipher == IEEE80211_CIPHER_WEP104) {
593 		ieee80211_delete_key(ic, ni, k);
594 		return;
595 	}
596 
597 	if (ic->ic_state != IEEE80211_S_RUN) {
598 		/* Keys removed implicitly when firmware station is removed. */
599 		return;
600 	}
601 
602 	/*
603 	 * net80211 calls us with a NULL node when deleting group keys,
604 	 * but firmware expects a MAC address in the command.
605 	 */
606 	if (ni == NULL)
607 		ni = ic->ic_bss;
608 
609 	qwx_queue_setkey_cmd(ic, ni, k, QWX_DEL_KEY);
610 }
611 
612 int
613 qwx_wmi_install_key_cmd(struct qwx_softc *sc, struct qwx_vif *arvif,
614     uint8_t *macaddr, struct ieee80211_key *k, uint32_t flags,
615     int delete_key)
616 {
617 	int ret;
618 	struct wmi_vdev_install_key_arg arg = {
619 		.vdev_id = arvif->vdev_id,
620 		.key_idx = k->k_id,
621 		.key_len = k->k_len,
622 		.key_data = k->k_key,
623 		.key_flags = flags,
624 		.macaddr = macaddr,
625 	};
626 	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
627 #ifdef notyet
628 	lockdep_assert_held(&arvif->ar->conf_mutex);
629 
630 	reinit_completion(&ar->install_key_done);
631 #endif
632 	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
633 		return 0;
634 
635 	if (delete_key) {
636 		arg.key_cipher = WMI_CIPHER_NONE;
637 		arg.key_data = NULL;
638 	} else {
639 		switch (k->k_cipher) {
640 		case IEEE80211_CIPHER_CCMP:
641 			arg.key_cipher = WMI_CIPHER_AES_CCM;
642 #if 0
643 			/* TODO: Re-check if flag is valid */
644 			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
645 #endif
646 			break;
647 		case IEEE80211_CIPHER_TKIP:
648 			arg.key_cipher = WMI_CIPHER_TKIP;
649 			arg.key_txmic_len = 8;
650 			arg.key_rxmic_len = 8;
651 			break;
652 #if 0
653 		case WLAN_CIPHER_SUITE_CCMP_256:
654 			arg.key_cipher = WMI_CIPHER_AES_CCM;
655 			break;
656 		case WLAN_CIPHER_SUITE_GCMP:
657 		case WLAN_CIPHER_SUITE_GCMP_256:
658 			arg.key_cipher = WMI_CIPHER_AES_GCM;
659 			break;
660 #endif
661 		default:
662 			printf("%s: cipher %u is not supported\n",
663 			    sc->sc_dev.dv_xname, k->k_cipher);
664 			return EOPNOTSUPP;
665 		}
666 #if 0
667 		if (test_bit(ATH11K_FLAG_RAW_MODE, &ar->ab->dev_flags))
668 			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
669 				      IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
670 #endif
671 	}
672 
673 	sc->install_key_done = 0;
674 	ret = qwx_wmi_vdev_install_key(sc, &arg, pdev_id);
675 	if (ret)
676 		return ret;
677 
678 	while (!sc->install_key_done) {
679 		ret = tsleep_nsec(&sc->install_key_done, 0, "qwxinstkey",
680 		    SEC_TO_NSEC(1));
681 		if (ret) {
682 			printf("%s: install key timeout\n",
683 			    sc->sc_dev.dv_xname);
684 			return -1;
685 		}
686 	}
687 
688 	return sc->install_key_status;
689 }
690 
691 int
692 qwx_add_sta_key(struct qwx_softc *sc, struct ieee80211_node *ni,
693     struct ieee80211_key *k)
694 {
695 	struct ieee80211com *ic = &sc->sc_ic;
696 	struct qwx_node *nq = (struct qwx_node *)ni;
697 	struct ath11k_peer *peer = &nq->peer;
698 	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
699 	int ret = 0;
700 	uint32_t flags = 0;
701 	const int want_keymask = (QWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
702 	    QWX_NODE_FLAG_HAVE_GROUP_KEY);
703 
704 	/*
705 	 * Flush the fragments cache during key (re)install to
706 	 * ensure all frags in the new frag list belong to the same key.
707 	 */
708 	qwx_peer_frags_flush(sc, peer);
709 
710 	if (k->k_flags & IEEE80211_KEY_GROUP)
711 		flags |= WMI_KEY_GROUP;
712 	else
713 		flags |= WMI_KEY_PAIRWISE;
714 
715 	ret = qwx_wmi_install_key_cmd(sc, arvif, ni->ni_macaddr, k, flags, 0);
716 	if (ret) {
717 		printf("%s: installing crypto key failed (%d)\n",
718 		    sc->sc_dev.dv_xname, ret);
719 		return ret;
720 	}
721 
722 	ret = qwx_dp_peer_rx_pn_replay_config(sc, arvif, ni, k, 0);
723 	if (ret) {
724 		printf("%s: failed to offload PN replay detection %d\n",
725 		    sc->sc_dev.dv_xname, ret);
726 		return ret;
727 	}
728 
729 	if (k->k_flags & IEEE80211_KEY_GROUP)
730 		nq->flags |= QWX_NODE_FLAG_HAVE_GROUP_KEY;
731 	else
732 		nq->flags |= QWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
733 
734 	if ((nq->flags & want_keymask) == want_keymask) {
735 		DPRINTF("marking port %s valid\n",
736 		    ether_sprintf(ni->ni_macaddr));
737 		ni->ni_port_valid = 1;
738 		ieee80211_set_link_state(ic, LINK_STATE_UP);
739 	}
740 
741 	return 0;
742 }
743 
744 int
745 qwx_del_sta_key(struct qwx_softc *sc, struct ieee80211_node *ni,
746     struct ieee80211_key *k)
747 {
748 	struct qwx_node *nq = (struct qwx_node *)ni;
749 	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
750 	int ret = 0;
751 
752 	ret = qwx_wmi_install_key_cmd(sc, arvif, ni->ni_macaddr, k, 0, 1);
753 	if (ret) {
754 		printf("%s: deleting crypto key failed (%d)\n",
755 		    sc->sc_dev.dv_xname, ret);
756 		return ret;
757 	}
758 
759 	ret = qwx_dp_peer_rx_pn_replay_config(sc, arvif, ni, k, 1);
760 	if (ret) {
761 		printf("%s: failed to disable PN replay detection %d\n",
762 		    sc->sc_dev.dv_xname, ret);
763 		return ret;
764 	}
765 
766 	if (k->k_flags & IEEE80211_KEY_GROUP)
767 		nq->flags &= ~QWX_NODE_FLAG_HAVE_GROUP_KEY;
768 	else
769 		nq->flags &= ~QWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
770 
771 	return 0;
772 }
773 
/*
 * Task-context worker that drains the set-key request ring in FIFO
 * order.  On the first error, or when the driver is being torn down,
 * the loop stops; remaining entries stay queued until
 * qwx_setkey_clear() releases them.  Keys are only installed or
 * deleted while in RUN state; otherwise the request is dropped.
 */
void
qwx_setkey_task(void *arg)
{
	struct qwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		KASSERT(a->cmd == QWX_ADD_KEY || a->cmd == QWX_DEL_KEY);
		if (ic->ic_state == IEEE80211_S_RUN) {
			if (a->cmd == QWX_ADD_KEY)
				err = qwx_add_sta_key(sc, a->ni, a->k);
			else
				err = qwx_del_sta_key(sc, a->ni, a->k);
		}
		/* Drop the node reference taken by qwx_queue_setkey_cmd(). */
		ieee80211_release_node(ic, a->ni);
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
804 
805 void
806 qwx_setkey_clear(struct qwx_softc *sc)
807 {
808 	struct ieee80211com *ic = &sc->sc_ic;
809 	struct qwx_setkey_task_arg *a;
810 
811 	while (sc->setkey_nkeys > 0) {
812 		a = &sc->setkey_arg[sc->setkey_tail];
813 		ieee80211_release_node(ic, a->ni);
814 		a->ni = NULL;
815 		sc->setkey_tail = (sc->setkey_tail + 1) %
816 		    nitems(sc->setkey_arg);
817 		sc->setkey_nkeys--;
818 	}
819 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
820 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
821 }
822 
823 int
824 qwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
825 {
826 	struct ifnet *ifp = &ic->ic_if;
827 	struct qwx_softc *sc = ifp->if_softc;
828 
829 	/*
830 	 * Prevent attempts to transition towards the same state, unless
831 	 * we are scanning in which case a SCAN -> SCAN transition
832 	 * triggers another scan iteration. And AUTH -> AUTH is needed
833 	 * to support band-steering.
834 	 */
835 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
836 	    nstate != IEEE80211_S_AUTH)
837 		return 0;
838 	if (ic->ic_state == IEEE80211_S_RUN) {
839 #if 0
840 		qwx_del_task(sc, systq, &sc->ba_task);
841 #endif
842 		qwx_del_task(sc, systq, &sc->setkey_task);
843 		qwx_setkey_clear(sc);
844 #if 0
845 		qwx_del_task(sc, systq, &sc->bgscan_done_task);
846 #endif
847 	}
848 
849 	sc->ns_nstate = nstate;
850 	sc->ns_arg = arg;
851 
852 	qwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
853 
854 	return 0;
855 }
856 
/*
 * Deferred state transition worker.
 *
 * Walks down the state ladder when moving to a lower state (stopping
 * RUN, deauthenticating), then performs the firmware work for the new
 * state, and finally calls the saved net80211 newstate handler.  Bails
 * out whenever ATH11K_FLAG_CRASH_FLUSH is set, since qwx_stop() is
 * draining tasks.  On firmware command failure the init task is
 * scheduled to reset the device.
 */
void
qwx_newstate_task(void *arg)
{
	struct qwx_softc *sc = (struct qwx_softc *)arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	enum ieee80211_state nstate = sc->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int err = 0, s = splnet();

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		/* qwx_stop() is waiting for us. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (ostate == IEEE80211_S_SCAN) {
		if (nstate == ostate) {
			if (sc->scan.state != ATH11K_SCAN_IDLE) {
				/* Firmware is still scanning; do nothing. */
				refcnt_rele_wake(&sc->task_refs);
				splx(s);
				return;
			}
			/* Firmware is no longer scanning. Do another scan. */
			goto next_scan;
		}
	}

	if (nstate <= ostate) {
		/* Undo each state above the target, top to bottom. */
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = qwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = qwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
			break;
		}

		/* Die now if qwx_stop() was called while we were sleeping. */
		if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
			refcnt_rele_wake(&sc->task_refs);
			splx(s);
			return;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
next_scan:
		err = qwx_scan(sc);
		if (err)
			break;
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: %s -> %s\n", ifp->if_xname,
			    ieee80211_state_name[ic->ic_state],
			    ieee80211_state_name[IEEE80211_S_SCAN]);
#if 0
		if ((sc->sc_flags & QWX_FLAG_BGSCAN) == 0) {
#endif
			ieee80211_set_link_state(ic, LINK_STATE_DOWN);
			ieee80211_node_cleanup(ic, ic->ic_bss);
#if 0
		}
#endif
		/* SCAN bypasses the saved sc_newstate() handler below. */
		ic->ic_state = IEEE80211_S_SCAN;
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;

	case IEEE80211_S_AUTH:
		err = qwx_auth(sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = qwx_run(sc);
		break;
	}
out:
	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		if (err)
			task_add(systq, &sc->init_task);
		else
			sc->sc_newstate(ic, nstate, sc->ns_arg);
	}
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
961 
/* Autoconf glue: device class definition for qwx(4) network interfaces. */
struct cfdriver qwx_cd = {
	NULL, "qwx", DV_IFNET
};
965 
966 void
967 qwx_init_wmi_config_qca6390(struct qwx_softc *sc,
968     struct target_resource_config *config)
969 {
970 	config->num_vdevs = 4;
971 	config->num_peers = 16;
972 	config->num_tids = 32;
973 
974 	config->num_offload_peers = 3;
975 	config->num_offload_reorder_buffs = 3;
976 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
977 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
978 	config->tx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
979 	config->rx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
980 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
981 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
982 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
983 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
984 	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
985 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
986 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
987 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
988 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
989 	config->num_mcast_groups = 0;
990 	config->num_mcast_table_elems = 0;
991 	config->mcast2ucast_mode = 0;
992 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
993 	config->num_wds_entries = 0;
994 	config->dma_burst_size = 0;
995 	config->rx_skip_defrag_timeout_dup_detection_check = 0;
996 	config->vow_config = TARGET_VOW_CONFIG;
997 	config->gtk_offload_max_vdev = 2;
998 	config->num_msdu_desc = 0x400;
999 	config->beacon_tx_offload_max_vdev = 2;
1000 	config->rx_batchmode = TARGET_RX_BATCHMODE;
1001 
1002 	config->peer_map_unmap_v2_support = 0;
1003 	config->use_pdev_id = 1;
1004 	config->max_frag_entries = 0xa;
1005 	config->num_tdls_vdevs = 0x1;
1006 	config->num_tdls_conn_table_entries = 8;
1007 	config->beacon_tx_offload_max_vdev = 0x2;
1008 	config->num_multicast_filter_entries = 0x20;
1009 	config->num_wow_filters = 0x16;
1010 	config->num_keep_alive_pattern = 0;
1011 	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
1012 }
1013 
/*
 * Program the REO (rx reorder engine) for ipq8074-family hardware:
 * select the fragment destination ring, set per-queue aging timeouts,
 * and install the hash map that spreads rx flows across the four
 * REO2SW destination rings.
 */
void
qwx_hw_ipq8074_reo_setup(struct qwx_softc *sc)
{
	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	uint32_t val;
	/* Each hash entry uses three bits to map to a particular ring. */
	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
	    HAL_HASH_ROUTING_RING_SW2 << 3 |
	    HAL_HASH_ROUTING_RING_SW3 << 6 |
	    HAL_HASH_ROUTING_RING_SW4 << 9 |
	    HAL_HASH_ROUTING_RING_SW1 << 12 |
	    HAL_HASH_ROUTING_RING_SW2 << 15 |
	    HAL_HASH_ROUTING_RING_SW3 << 18 |
	    HAL_HASH_ROUTING_RING_SW4 << 21;

	/* Read-modify-write: keep unrelated bits of the enable register. */
	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);

	val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
	    HAL_SRNG_RING_ID_REO2SW1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);

	/* Same default aging timeout for all four reorder queues. */
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);

	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
}
1056 
/*
 * Build the WMI resource configuration sent to IPQ8074-family firmware
 * during initialization.  Peer and TID counts are scaled by the number
 * of radios (single, DBS, or DBS+SBS); most other limits are fixed
 * target constants.
 */
void
qwx_init_wmi_config_ipq8074(struct qwx_softc *sc,
    struct target_resource_config *config)
{
	config->num_vdevs = sc->num_radios * TARGET_NUM_VDEVS(sc);

	if (sc->num_radios == 2) {
		config->num_peers = TARGET_NUM_PEERS(sc, DBS);
		config->num_tids = TARGET_NUM_TIDS(sc, DBS);
	} else if (sc->num_radios == 3) {
		config->num_peers = TARGET_NUM_PEERS(sc, DBS_SBS);
		config->num_tids = TARGET_NUM_TIDS(sc, DBS_SBS);
	} else {
		/* Control should not reach here */
		config->num_peers = TARGET_NUM_PEERS(sc, SINGLE);
		config->num_tids = TARGET_NUM_TIDS(sc, SINGLE);
	}
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	/* Enable one chain mask bit per available RF chain. */
	config->tx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = sc->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	config->peer_map_unmap_v2_support = 1;
	config->twt_ap_pdev_count = sc->num_radios;
	config->twt_ap_sta_count = 1000;
	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
	config->flag1 |= WMI_RSRC_CFG_FLAG1_ACK_RSSI;
	config->ema_max_vap_cnt = sc->num_radios;
	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
	/* Reserve extra beacon-offload vdevs for EMA (multiple BSSID). */
	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
}
1116 
/*
 * Program the REO (RX reorder) block for WCN6855.  Unlike IPQ8074,
 * the fragment destination ring lives in the MISC_CTL register, and
 * only destination ring control registers 2 and 3 are written here.
 */
void
qwx_hw_wcn6855_reo_setup(struct qwx_softc *sc)
{
	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	uint32_t val;
	/* Each hash entry uses four bits to map to a particular ring. */
	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
	    HAL_HASH_ROUTING_RING_SW2 << 4 |
	    HAL_HASH_ROUTING_RING_SW3 << 8 |
	    HAL_HASH_ROUTING_RING_SW4 << 12 |
	    HAL_HASH_ROUTING_RING_SW1 << 16 |
	    HAL_HASH_ROUTING_RING_SW2 << 20 |
	    HAL_HASH_ROUTING_RING_SW3 << 24 |
	    HAL_HASH_ROUTING_RING_SW4 << 28;

	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);

	/* Fragments are delivered to the REO2SW1 ring. */
	val = sc->ops.read32(sc, reo_base + HAL_REO1_MISC_CTL(sc));
	val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
	val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING,
	    HAL_SRNG_RING_ID_REO2SW1);
	sc->ops.write32(sc, reo_base + HAL_REO1_MISC_CTL(sc), val);

	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);

	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
	    ring_hash_map);
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
	    ring_hash_map);
}
1157 
/*
 * Program the REO (RX reorder) block for IPQ5018: route fragments to
 * the REO2SW1 ring, enable aging, set aging timeouts, and install the
 * RX hash map in all four destination ring control registers.
 */
void
qwx_hw_ipq5018_reo_setup(struct qwx_softc *sc)
{
	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	uint32_t val;

	/*
	 * Each hash entry uses four bits to map to a particular ring
	 * (note the 4-bit strides below, unlike the 3-bit IPQ8074 map).
	 */
	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
	    HAL_HASH_ROUTING_RING_SW2 << 4 |
	    HAL_HASH_ROUTING_RING_SW3 << 8 |
	    HAL_HASH_ROUTING_RING_SW4 << 12 |
	    HAL_HASH_ROUTING_RING_SW1 << 16 |
	    HAL_HASH_ROUTING_RING_SW2 << 20 |
	    HAL_HASH_ROUTING_RING_SW3 << 24 |
	    HAL_HASH_ROUTING_RING_SW4 << 28;

	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);

	val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
	    HAL_SRNG_RING_ID_REO2SW1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);

	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);

	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
	    ring_hash_map);
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
	    ring_hash_map);
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
	    ring_hash_map);
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
	    ring_hash_map);
}
1201 
int
qwx_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw, int mac_id)
{
	/* On IPQ8074-family hardware MAC IDs map one-to-one to pdev IDs. */
	int pdev_id = mac_id;

	return pdev_id;
}
1207 
int
qwx_hw_mac_id_to_srng_id_ipq8074(struct ath11k_hw_params *hw, int mac_id)
{
	/* IPQ8074-family chips use a single SRNG regardless of MAC ID. */
	const int srng_id = 0;

	return srng_id;
}
1213 
int
qwx_hw_mac_id_to_pdev_id_qca6390(struct ath11k_hw_params *hw, int mac_id)
{
	/* QCA6390-family chips expose a single pdev. */
	const int pdev_id = 0;

	return pdev_id;
}
1219 
int
qwx_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw, int mac_id)
{
	/* On QCA6390-family hardware the SRNG index equals the MAC ID. */
	int srng_id = mac_id;

	return srng_id;
}
1225 
1226 int
1227 qwx_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
1228 {
1229 	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
1230 	    le32toh(desc->u.ipq8074.msdu_end.info2));
1231 }
1232 
1233 uint8_t
1234 qwx_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
1235 {
1236 	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
1237 	    le32toh(desc->u.ipq8074.msdu_end.info2));
1238 }
1239 
1240 uint8_t *
1241 qwx_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
1242 {
1243 	return desc->u.ipq8074.hdr_status;
1244 }
1245 
1246 int
1247 qwx_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
1248 {
1249 	return le32toh(desc->u.ipq8074.mpdu_start.info1) &
1250 	       RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
1251 }
1252 
1253 uint32_t
1254 qwx_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
1255 {
1256 	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
1257 	    le32toh(desc->u.ipq8074.mpdu_start.info2));
1258 }
1259 
1260 uint8_t
1261 qwx_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
1262 {
1263 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
1264 	    le32toh(desc->u.ipq8074.msdu_start.info2));
1265 }
1266 
1267 uint8_t
1268 qwx_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
1269 {
1270 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
1271 	    le32toh(desc->u.ipq8074.msdu_start.info2));
1272 }
1273 
1274 int
1275 qwx_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
1276 {
1277 	return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
1278 	    le32toh(desc->u.ipq8074.msdu_start.info2));
1279 }
1280 
1281 int
1282 qwx_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
1283 {
1284 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
1285 	      le32toh(desc->u.ipq8074.mpdu_start.info1));
1286 }
1287 
1288 int
1289 qwx_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
1290 {
1291 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
1292 	      le32toh(desc->u.ipq8074.mpdu_start.info1));
1293 }
1294 
1295 uint16_t
1296 qwx_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
1297 {
1298 	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
1299 	    le32toh(desc->u.ipq8074.mpdu_start.info1));
1300 }
1301 
1302 uint16_t
1303 qwx_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
1304 {
1305 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
1306 	    le32toh(desc->u.ipq8074.msdu_start.info1));
1307 }
1308 
1309 uint8_t
1310 qwx_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
1311 {
1312 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
1313 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1314 }
1315 
1316 uint8_t
1317 qwx_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
1318 {
1319 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
1320 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1321 }
1322 
1323 uint8_t
1324 qwx_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
1325 {
1326 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
1327 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1328 }
1329 
1330 uint32_t
1331 qwx_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
1332 {
1333 	return le32toh(desc->u.ipq8074.msdu_start.phy_meta_data);
1334 }
1335 
1336 uint8_t
1337 qwx_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
1338 {
1339 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
1340 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1341 }
1342 
1343 uint8_t
1344 qwx_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
1345 {
1346 	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
1347 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1348 }
1349 
1350 uint8_t
1351 qwx_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
1352 {
1353 	return FIELD_GET(RX_MPDU_START_INFO2_TID,
1354 	    le32toh(desc->u.ipq8074.mpdu_start.info2));
1355 }
1356 
1357 uint16_t
1358 qwx_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
1359 {
1360 	return le16toh(desc->u.ipq8074.mpdu_start.sw_peer_id);
1361 }
1362 
1363 void
1364 qwx_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
1365 				       struct hal_rx_desc *ldesc)
1366 {
1367 	memcpy((uint8_t *)&fdesc->u.ipq8074.msdu_end, (uint8_t *)&ldesc->u.ipq8074.msdu_end,
1368 	       sizeof(struct rx_msdu_end_ipq8074));
1369 	memcpy((uint8_t *)&fdesc->u.ipq8074.attention, (uint8_t *)&ldesc->u.ipq8074.attention,
1370 	       sizeof(struct rx_attention));
1371 	memcpy((uint8_t *)&fdesc->u.ipq8074.mpdu_end, (uint8_t *)&ldesc->u.ipq8074.mpdu_end,
1372 	       sizeof(struct rx_mpdu_end));
1373 }
1374 
1375 uint32_t
1376 qwx_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
1377 {
1378 	return FIELD_GET(HAL_TLV_HDR_TAG,
1379 	    le32toh(desc->u.ipq8074.mpdu_start_tag));
1380 }
1381 
1382 uint32_t
1383 qwx_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
1384 {
1385 	return le16toh(desc->u.ipq8074.mpdu_start.phy_ppdu_id);
1386 }
1387 
1388 void
1389 qwx_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
1390 {
1391 	uint32_t info = le32toh(desc->u.ipq8074.msdu_start.info1);
1392 
1393 	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
1394 	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
1395 
1396 	desc->u.ipq8074.msdu_start.info1 = htole32(info);
1397 }
1398 
/*
 * Chip-independent wrapper: ask the hw_ops implementation whether this
 * RX descriptor marks the first MSDU of an MPDU.
 */
int
qwx_dp_rx_h_msdu_end_first_msdu(struct qwx_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}
1404 
1405 int
1406 qwx_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
1407 {
1408 	return le32toh(desc->u.ipq8074.mpdu_start.info1) &
1409 	       RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
1410 }
1411 
1412 uint8_t *
1413 qwx_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
1414 {
1415 	return desc->u.ipq8074.mpdu_start.addr2;
1416 }
1417 
1418 struct rx_attention *
1419 qwx_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
1420 {
1421 	return &desc->u.ipq8074.attention;
1422 }
1423 
1424 uint8_t *
1425 qwx_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
1426 {
1427 	return &desc->u.ipq8074.msdu_payload[0];
1428 }
1429 
1430 int
1431 qwx_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
1432 {
1433 	return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU,
1434 	      le16toh(desc->u.qcn9074.msdu_end.info4));
1435 }
1436 
1437 int
1438 qwx_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
1439 {
1440 	return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU,
1441 	      le16toh(desc->u.qcn9074.msdu_end.info4));
1442 }
1443 
1444 uint8_t
1445 qwx_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
1446 {
1447 	return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING,
1448 	    le16toh(desc->u.qcn9074.msdu_end.info4));
1449 }
1450 
1451 uint8_t *
1452 qwx_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
1453 {
1454 	return desc->u.qcn9074.hdr_status;
1455 }
1456 
1457 int
1458 qwx_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
1459 {
1460 	return le32toh(desc->u.qcn9074.mpdu_start.info11) &
1461 	       RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID;
1462 }
1463 
1464 uint32_t
1465 qwx_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
1466 {
1467 	return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE,
1468 	    le32toh(desc->u.qcn9074.mpdu_start.info9));
1469 }
1470 
1471 uint8_t
1472 qwx_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
1473 {
1474 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
1475 	    le32toh(desc->u.qcn9074.msdu_start.info2));
1476 }
1477 
1478 uint8_t
1479 qwx_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
1480 {
1481 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
1482 	    le32toh(desc->u.qcn9074.msdu_start.info2));
1483 }
1484 
1485 int
1486 qwx_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
1487 {
1488 	return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
1489 	    le32toh(desc->u.qcn9074.msdu_start.info2));
1490 }
1491 
1492 int
1493 qwx_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
1494 {
1495 	return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
1496 	      le32toh(desc->u.qcn9074.mpdu_start.info11));
1497 }
1498 
1499 int
1500 qwx_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
1501 {
1502 	return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID,
1503 	      le32toh(desc->u.qcn9074.mpdu_start.info11));
1504 }
1505 
1506 uint16_t
1507 qwx_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
1508 {
1509 	return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM,
1510 	    le32toh(desc->u.qcn9074.mpdu_start.info11));
1511 }
1512 
1513 uint16_t
1514 qwx_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
1515 {
1516 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
1517 	    le32toh(desc->u.qcn9074.msdu_start.info1));
1518 }
1519 
1520 uint8_t
1521 qwx_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
1522 {
1523 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
1524 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1525 }
1526 
1527 uint8_t
1528 qwx_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
1529 {
1530 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
1531 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1532 }
1533 
1534 uint8_t
1535 qwx_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
1536 {
1537 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
1538 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1539 }
1540 
1541 uint32_t
1542 qwx_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
1543 {
1544 	return le32toh(desc->u.qcn9074.msdu_start.phy_meta_data);
1545 }
1546 
1547 uint8_t
1548 qwx_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
1549 {
1550 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
1551 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1552 }
1553 
1554 uint8_t
1555 qwx_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
1556 {
1557 	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
1558 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1559 }
1560 
1561 uint8_t
1562 qwx_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
1563 {
1564 	return FIELD_GET(RX_MPDU_START_INFO9_TID,
1565 	    le32toh(desc->u.qcn9074.mpdu_start.info9));
1566 }
1567 
1568 uint16_t
1569 qwx_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
1570 {
1571 	return le16toh(desc->u.qcn9074.mpdu_start.sw_peer_id);
1572 }
1573 
1574 void
1575 qwx_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
1576 				       struct hal_rx_desc *ldesc)
1577 {
1578 	memcpy((uint8_t *)&fdesc->u.qcn9074.msdu_end, (uint8_t *)&ldesc->u.qcn9074.msdu_end,
1579 	       sizeof(struct rx_msdu_end_qcn9074));
1580 	memcpy((uint8_t *)&fdesc->u.qcn9074.attention, (uint8_t *)&ldesc->u.qcn9074.attention,
1581 	       sizeof(struct rx_attention));
1582 	memcpy((uint8_t *)&fdesc->u.qcn9074.mpdu_end, (uint8_t *)&ldesc->u.qcn9074.mpdu_end,
1583 	       sizeof(struct rx_mpdu_end));
1584 }
1585 
1586 uint32_t
1587 qwx_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
1588 {
1589 	return FIELD_GET(HAL_TLV_HDR_TAG,
1590 	    le32toh(desc->u.qcn9074.mpdu_start_tag));
1591 }
1592 
1593 uint32_t
1594 qwx_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
1595 {
1596 	return le16toh(desc->u.qcn9074.mpdu_start.phy_ppdu_id);
1597 }
1598 
1599 void
1600 qwx_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
1601 {
1602 	uint32_t info = le32toh(desc->u.qcn9074.msdu_start.info1);
1603 
1604 	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
1605 	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
1606 
1607 	desc->u.qcn9074.msdu_start.info1 = htole32(info);
1608 }
1609 
1610 struct rx_attention *
1611 qwx_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc)
1612 {
1613 	return &desc->u.qcn9074.attention;
1614 }
1615 
1616 uint8_t *
1617 qwx_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
1618 {
1619 	return &desc->u.qcn9074.msdu_payload[0];
1620 }
1621 
1622 int
1623 qwx_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
1624 {
1625 	return le32toh(desc->u.qcn9074.mpdu_start.info11) &
1626 	       RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
1627 }
1628 
1629 uint8_t *
1630 qwx_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
1631 {
1632 	return desc->u.qcn9074.mpdu_start.addr2;
1633 }
1634 
1635 int
1636 qwx_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
1637 {
1638 	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
1639 	      le32toh(desc->u.wcn6855.msdu_end.info2));
1640 }
1641 
1642 int
1643 qwx_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
1644 {
1645 	return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU_WCN6855,
1646 	      le32toh(desc->u.wcn6855.msdu_end.info2));
1647 }
1648 
1649 uint8_t
1650 qwx_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
1651 {
1652 	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
1653 	    le32toh(desc->u.wcn6855.msdu_end.info2));
1654 }
1655 
1656 uint8_t *
1657 qwx_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
1658 {
1659 	return desc->u.wcn6855.hdr_status;
1660 }
1661 
1662 int
1663 qwx_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
1664 {
1665 	return le32toh(desc->u.wcn6855.mpdu_start.info1) &
1666 	       RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
1667 }
1668 
1669 uint32_t
1670 qwx_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
1671 {
1672 	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
1673 	    le32toh(desc->u.wcn6855.mpdu_start.info2));
1674 }
1675 
1676 uint8_t
1677 qwx_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc *desc)
1678 {
1679 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
1680 	    le32toh(desc->u.wcn6855.msdu_start.info2));
1681 }
1682 
1683 uint8_t
1684 qwx_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
1685 {
1686 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
1687 	    le32toh(desc->u.wcn6855.msdu_start.info2));
1688 }
1689 
1690 int
1691 qwx_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
1692 {
1693 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
1694 	      le32toh(desc->u.wcn6855.mpdu_start.info1));
1695 }
1696 
1697 int
1698 qwx_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
1699 {
1700 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
1701 	      le32toh(desc->u.wcn6855.mpdu_start.info1));
1702 }
1703 
1704 uint16_t
1705 qwx_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
1706 {
1707 	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
1708 	    le32toh(desc->u.wcn6855.mpdu_start.info1));
1709 }
1710 
1711 uint16_t
1712 qwx_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
1713 {
1714 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
1715 	    le32toh(desc->u.wcn6855.msdu_start.info1));
1716 }
1717 
1718 uint8_t
1719 qwx_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
1720 {
1721 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
1722 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1723 }
1724 
1725 uint8_t
1726 qwx_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
1727 {
1728 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
1729 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1730 }
1731 
1732 uint8_t
1733 qwx_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
1734 {
1735 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
1736 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1737 }
1738 
1739 uint32_t
1740 qwx_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
1741 {
1742 	return le32toh(desc->u.wcn6855.msdu_start.phy_meta_data);
1743 }
1744 
1745 uint8_t
1746 qwx_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
1747 {
1748 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
1749 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1750 }
1751 
1752 uint8_t
1753 qwx_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
1754 {
1755 	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
1756 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1757 }
1758 
1759 uint8_t
1760 qwx_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
1761 {
1762 	return FIELD_GET(RX_MPDU_START_INFO2_TID_WCN6855,
1763 	    le32toh(desc->u.wcn6855.mpdu_start.info2));
1764 }
1765 
1766 uint16_t
1767 qwx_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
1768 {
1769 	return le16toh(desc->u.wcn6855.mpdu_start.sw_peer_id);
1770 }
1771 
1772 void
1773 qwx_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
1774     struct hal_rx_desc *ldesc)
1775 {
1776 	memcpy((uint8_t *)&fdesc->u.wcn6855.msdu_end, (uint8_t *)&ldesc->u.wcn6855.msdu_end,
1777 	       sizeof(struct rx_msdu_end_wcn6855));
1778 	memcpy((uint8_t *)&fdesc->u.wcn6855.attention, (uint8_t *)&ldesc->u.wcn6855.attention,
1779 	       sizeof(struct rx_attention));
1780 	memcpy((uint8_t *)&fdesc->u.wcn6855.mpdu_end, (uint8_t *)&ldesc->u.wcn6855.mpdu_end,
1781 	       sizeof(struct rx_mpdu_end));
1782 }
1783 
1784 uint32_t
1785 qwx_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
1786 {
1787 	return FIELD_GET(HAL_TLV_HDR_TAG,
1788 	    le32toh(desc->u.wcn6855.mpdu_start_tag));
1789 }
1790 
1791 uint32_t
1792 qwx_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
1793 {
1794 	return le16toh(desc->u.wcn6855.mpdu_start.phy_ppdu_id);
1795 }
1796 
1797 void
1798 qwx_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
1799 {
1800 	uint32_t info = le32toh(desc->u.wcn6855.msdu_start.info1);
1801 
1802 	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
1803 	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
1804 
1805 	desc->u.wcn6855.msdu_start.info1 = htole32(info);
1806 }
1807 
1808 struct rx_attention *
1809 qwx_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc *desc)
1810 {
1811 	return &desc->u.wcn6855.attention;
1812 }
1813 
1814 uint8_t *
1815 qwx_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
1816 {
1817 	return &desc->u.wcn6855.msdu_payload[0];
1818 }
1819 
1820 int
1821 qwx_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
1822 {
1823 	return le32toh(desc->u.wcn6855.mpdu_start.info1) &
1824 	       RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
1825 }
1826 
1827 uint8_t *
1828 qwx_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
1829 {
1830 	return desc->u.wcn6855.mpdu_start.addr2;
1831 }
1832 
1833 /* Map from pdev index to hw mac index */
1834 uint8_t
1835 qwx_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
1836 {
1837 	switch (pdev_idx) {
1838 	case 0:
1839 		return 0;
1840 	case 1:
1841 		return 2;
1842 	case 2:
1843 		return 1;
1844 	default:
1845 		return ATH11K_INVALID_HW_MAC_ID;
1846 	}
1847 }
1848 
uint8_t
qwx_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
{
	/* IPQ6018 maps pdev indices one-to-one onto hw MAC indices. */
	uint8_t hw_mac = pdev_idx;

	return hw_mac;
}
1854 
1855 static inline int
1856 qwx_hw_get_mac_from_pdev_id(struct qwx_softc *sc, int pdev_idx)
1857 {
1858 	if (sc->hw_params.hw_ops->get_hw_mac_from_pdev_id)
1859 		return sc->hw_params.hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
1860 
1861 	return 0;
1862 }
1863 
/*
 * HW ops for IPQ8074-family chips.  Entries under "notyet" are not yet
 * ported from Linux ath11k and remain disabled.
 */
const struct ath11k_hw_ops ipq8074_ops = {
	.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
	.wmi_init_config = qwx_init_wmi_config_ipq8074,
	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
#if notyet
	.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
#endif
	.rx_desc_get_first_msdu = qwx_hw_ipq8074_rx_desc_get_first_msdu,
#if notyet
	.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
#endif
	.rx_desc_get_l3_pad_bytes = qwx_hw_ipq8074_rx_desc_get_l3_pad_bytes,
	.rx_desc_get_hdr_status = qwx_hw_ipq8074_rx_desc_get_hdr_status,
	.rx_desc_encrypt_valid = qwx_hw_ipq8074_rx_desc_encrypt_valid,
	.rx_desc_get_encrypt_type = qwx_hw_ipq8074_rx_desc_get_encrypt_type,
	.rx_desc_get_decap_type = qwx_hw_ipq8074_rx_desc_get_decap_type,
#ifdef notyet
	.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
	.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
	.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
#endif
	.rx_desc_get_msdu_len = qwx_hw_ipq8074_rx_desc_get_msdu_len,
#ifdef notyet
	.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
	.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
	.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
#endif
	.rx_desc_get_msdu_freq = qwx_hw_ipq8074_rx_desc_get_msdu_freq,
#ifdef notyet
	.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
	.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
	.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
	.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
	.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
	.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
	.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
#endif
	.rx_desc_get_attention = qwx_hw_ipq8074_rx_desc_get_attention,
#ifdef notyet
	.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
#endif
	.reo_setup = qwx_hw_ipq8074_reo_setup,
#ifdef notyet
	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
	.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
#endif
};
1917 
/*
 * HW ops for IPQ6018: identical to ipq8074_ops except for the
 * pdev-to-MAC mapping, which is the identity on this chip.
 */
const struct ath11k_hw_ops ipq6018_ops = {
	.get_hw_mac_from_pdev_id = qwx_hw_ipq6018_mac_from_pdev_id,
	.wmi_init_config = qwx_init_wmi_config_ipq8074,
	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
#if notyet
	.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
#endif
	.rx_desc_get_first_msdu = qwx_hw_ipq8074_rx_desc_get_first_msdu,
#if notyet
	.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
#endif
	.rx_desc_get_l3_pad_bytes = qwx_hw_ipq8074_rx_desc_get_l3_pad_bytes,
	.rx_desc_get_hdr_status = qwx_hw_ipq8074_rx_desc_get_hdr_status,
	.rx_desc_encrypt_valid = qwx_hw_ipq8074_rx_desc_encrypt_valid,
	.rx_desc_get_encrypt_type = qwx_hw_ipq8074_rx_desc_get_encrypt_type,
	.rx_desc_get_decap_type = qwx_hw_ipq8074_rx_desc_get_decap_type,
#ifdef notyet
	.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
	.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
	.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
#endif
	.rx_desc_get_msdu_len = qwx_hw_ipq8074_rx_desc_get_msdu_len,
#ifdef notyet
	.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
	.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
	.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
#endif
	.rx_desc_get_msdu_freq = qwx_hw_ipq8074_rx_desc_get_msdu_freq,
#ifdef notyet
	.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
	.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
	.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
	.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
	.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
	.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
	.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
#endif
	.rx_desc_get_attention = qwx_hw_ipq8074_rx_desc_get_attention,
#ifdef notyet
	.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
#endif
	.reo_setup = qwx_hw_ipq8074_reo_setup,
#ifdef notyet
	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
	.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
#endif
};
1971 
1972 const struct ath11k_hw_ops qca6390_ops = {
1973 	.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
1974 	.wmi_init_config = qwx_init_wmi_config_qca6390,
1975 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
1976 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
1977 #if notyet
1978 	.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
1979 #endif
1980 	.rx_desc_get_first_msdu = qwx_hw_ipq8074_rx_desc_get_first_msdu,
1981 #if notyet
1982 	.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
1983 #endif
1984 	.rx_desc_get_l3_pad_bytes = qwx_hw_ipq8074_rx_desc_get_l3_pad_bytes,
1985 	.rx_desc_get_hdr_status = qwx_hw_ipq8074_rx_desc_get_hdr_status,
1986 	.rx_desc_encrypt_valid = qwx_hw_ipq8074_rx_desc_encrypt_valid,
1987 	.rx_desc_get_encrypt_type = qwx_hw_ipq8074_rx_desc_get_encrypt_type,
1988 	.rx_desc_get_decap_type = qwx_hw_ipq8074_rx_desc_get_decap_type,
1989 #ifdef notyet
1990 	.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
1991 	.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
1992 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
1993 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
1994 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
1995 #endif
1996 	.rx_desc_get_msdu_len = qwx_hw_ipq8074_rx_desc_get_msdu_len,
1997 #ifdef notyet
1998 	.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
1999 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
2000 	.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
2001 #endif
2002 	.rx_desc_get_msdu_freq = qwx_hw_ipq8074_rx_desc_get_msdu_freq,
2003 #ifdef notyet
2004 	.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
2005 	.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
2006 	.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
2007 	.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
2008 	.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
2009 	.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
2010 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
2011 	.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
2012 #endif
2013 	.rx_desc_get_attention = qwx_hw_ipq8074_rx_desc_get_attention,
2014 #ifdef notyet
2015 	.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
2016 #endif
2017 	.reo_setup = qwx_hw_ipq8074_reo_setup,
2018 #ifdef notyet
2019 	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
2020 	.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
2021 	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
2022 	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
2023 #endif
2024 };
2025 
2026 const struct ath11k_hw_ops qcn9074_ops = {
2027 	.get_hw_mac_from_pdev_id = qwx_hw_ipq6018_mac_from_pdev_id,
2028 	.wmi_init_config = qwx_init_wmi_config_ipq8074,
2029 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
2030 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
2031 #if notyet
2032 	.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
2033 #endif
2034 	.rx_desc_get_first_msdu = qwx_hw_qcn9074_rx_desc_get_first_msdu,
2035 #if notyet
2036 	.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
2037 #endif
2038 	.rx_desc_get_l3_pad_bytes = qwx_hw_qcn9074_rx_desc_get_l3_pad_bytes,
2039 	.rx_desc_get_hdr_status = qwx_hw_qcn9074_rx_desc_get_hdr_status,
2040 	.rx_desc_encrypt_valid = qwx_hw_qcn9074_rx_desc_encrypt_valid,
2041 	.rx_desc_get_encrypt_type = qwx_hw_qcn9074_rx_desc_get_encrypt_type,
2042 	.rx_desc_get_decap_type = qwx_hw_qcn9074_rx_desc_get_decap_type,
2043 #ifdef notyet
2044 	.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
2045 	.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
2046 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
2047 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
2048 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
2049 #endif
2050 	.rx_desc_get_msdu_len = qwx_hw_qcn9074_rx_desc_get_msdu_len,
2051 #ifdef notyet
2052 	.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
2053 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
2054 	.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
2055 #endif
2056 	.rx_desc_get_msdu_freq = qwx_hw_qcn9074_rx_desc_get_msdu_freq,
2057 #ifdef notyet
2058 	.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
2059 	.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
2060 	.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
2061 	.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
2062 	.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
2063 	.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
2064 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
2065 	.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
2066 #endif
2067 	.rx_desc_get_attention = qwx_hw_qcn9074_rx_desc_get_attention,
2068 #ifdef notyet
2069 	.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
2070 #endif
2071 	.reo_setup = qwx_hw_ipq8074_reo_setup,
2072 #ifdef notyet
2073 	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
2074 	.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
2075 	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
2076 	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
2077 #endif
2078 };
2079 
2080 const struct ath11k_hw_ops wcn6855_ops = {
2081 	.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
2082 	.wmi_init_config = qwx_init_wmi_config_qca6390,
2083 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
2084 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
2085 #if notyet
2086 	.tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable,
2087 #endif
2088 	.rx_desc_get_first_msdu = qwx_hw_wcn6855_rx_desc_get_first_msdu,
2089 #if notyet
2090 	.rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu,
2091 #endif
2092 	.rx_desc_get_l3_pad_bytes = qwx_hw_wcn6855_rx_desc_get_l3_pad_bytes,
2093 	.rx_desc_get_hdr_status = qwx_hw_wcn6855_rx_desc_get_hdr_status,
2094 	.rx_desc_encrypt_valid = qwx_hw_wcn6855_rx_desc_encrypt_valid,
2095 	.rx_desc_get_encrypt_type = qwx_hw_wcn6855_rx_desc_get_encrypt_type,
2096 	.rx_desc_get_decap_type = qwx_hw_wcn6855_rx_desc_get_decap_type,
2097 #ifdef notyet
2098 	.rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
2099 	.rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
2100 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
2101 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
2102 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
2103 #endif
2104 	.rx_desc_get_msdu_len = qwx_hw_wcn6855_rx_desc_get_msdu_len,
2105 #ifdef notyet
2106 	.rx_desc_get_msdu_sgi = ath11k_hw_wcn6855_rx_desc_get_msdu_sgi,
2107 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs,
2108 	.rx_desc_get_msdu_rx_bw = ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw,
2109 #endif
2110 	.rx_desc_get_msdu_freq = qwx_hw_wcn6855_rx_desc_get_msdu_freq,
2111 #ifdef notyet
2112 	.rx_desc_get_msdu_pkt_type = ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type,
2113 	.rx_desc_get_msdu_nss = ath11k_hw_wcn6855_rx_desc_get_msdu_nss,
2114 	.rx_desc_get_mpdu_tid = ath11k_hw_wcn6855_rx_desc_get_mpdu_tid,
2115 	.rx_desc_get_mpdu_peer_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id,
2116 	.rx_desc_copy_attn_end_tlv = ath11k_hw_wcn6855_rx_desc_copy_attn_end,
2117 	.rx_desc_get_mpdu_start_tag = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag,
2118 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id,
2119 	.rx_desc_set_msdu_len = ath11k_hw_wcn6855_rx_desc_set_msdu_len,
2120 #endif
2121 	.rx_desc_get_attention = qwx_hw_wcn6855_rx_desc_get_attention,
2122 #ifdef notyet
2123 	.rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
2124 #endif
2125 	.reo_setup = qwx_hw_wcn6855_reo_setup,
2126 #ifdef notyet
2127 	.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
2128 	.rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
2129 	.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
2130 	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
2131 #endif
2132 };
2133 
2134 const struct ath11k_hw_ops wcn6750_ops = {
2135 	.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
2136 	.wmi_init_config = qwx_init_wmi_config_qca6390,
2137 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
2138 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
2139 #if notyet
2140 	.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
2141 #endif
2142 	.rx_desc_get_first_msdu = qwx_hw_qcn9074_rx_desc_get_first_msdu,
2143 #if notyet
2144 	.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
2145 #endif
2146 	.rx_desc_get_l3_pad_bytes = qwx_hw_qcn9074_rx_desc_get_l3_pad_bytes,
2147 	.rx_desc_get_hdr_status = qwx_hw_qcn9074_rx_desc_get_hdr_status,
2148 	.rx_desc_encrypt_valid = qwx_hw_qcn9074_rx_desc_encrypt_valid,
2149 	.rx_desc_get_encrypt_type = qwx_hw_qcn9074_rx_desc_get_encrypt_type,
2150 	.rx_desc_get_decap_type = qwx_hw_qcn9074_rx_desc_get_decap_type,
2151 #ifdef notyet
2152 	.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
2153 	.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
2154 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
2155 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
2156 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
2157 #endif
2158 	.rx_desc_get_msdu_len = qwx_hw_qcn9074_rx_desc_get_msdu_len,
2159 #ifdef notyet
2160 	.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
2161 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
2162 	.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
2163 #endif
2164 	.rx_desc_get_msdu_freq = qwx_hw_qcn9074_rx_desc_get_msdu_freq,
2165 #ifdef notyet
2166 	.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
2167 	.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
2168 	.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
2169 	.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
2170 	.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
2171 	.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
2172 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
2173 	.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
2174 #endif
2175 	.rx_desc_get_attention = qwx_hw_qcn9074_rx_desc_get_attention,
2176 #ifdef notyet
2177 	.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
2178 #endif
2179 	.reo_setup = qwx_hw_wcn6855_reo_setup,
2180 #ifdef notyet
2181 	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
2182 	.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
2183 	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
2184 	.get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
2185 #endif
2186 };
2187 
/*
 * Per-ring interrupt mask bits used by the ath11k_hw_ring_mask tables
 * below.  TX masks are written with BIT(n); the remaining masks use
 * equivalent plain hex values.
 */
#define ATH11K_TX_RING_MASK_0 BIT(0)
#define ATH11K_TX_RING_MASK_1 BIT(1)
#define ATH11K_TX_RING_MASK_2 BIT(2)
#define ATH11K_TX_RING_MASK_3 BIT(3)
#define ATH11K_TX_RING_MASK_4 BIT(4)

#define ATH11K_RX_RING_MASK_0 0x1
#define ATH11K_RX_RING_MASK_1 0x2
#define ATH11K_RX_RING_MASK_2 0x4
#define ATH11K_RX_RING_MASK_3 0x8

#define ATH11K_RX_ERR_RING_MASK_0 0x1

#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1

#define ATH11K_REO_STATUS_RING_MASK_0 0x1

#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4

#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
2216 
/*
 * Ring interrupt mask table for IPQ8074.  Each field is an array of
 * masks; the array slot a non-zero mask occupies selects which entry
 * services that ring type (NOTE(review): the slot index appears to
 * correspond to an interrupt group/vector -- confirm against the
 * interrupt setup code).  Here rx_mon_status uses slots 4-6 and rx
 * uses slots 7-10.
 */
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
	.tx  = {
		ATH11K_TX_RING_MASK_0,
		ATH11K_TX_RING_MASK_1,
		ATH11K_TX_RING_MASK_2,
	},
	.rx_mon_status = {
		0, 0, 0, 0,
		ATH11K_RX_MON_STATUS_RING_MASK_0,
		ATH11K_RX_MON_STATUS_RING_MASK_1,
		ATH11K_RX_MON_STATUS_RING_MASK_2,
	},
	.rx = {
		0, 0, 0, 0, 0, 0, 0,
		ATH11K_RX_RING_MASK_0,
		ATH11K_RX_RING_MASK_1,
		ATH11K_RX_RING_MASK_2,
		ATH11K_RX_RING_MASK_3,
	},
	.rx_err = {
		ATH11K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		ATH11K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		ATH11K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		ATH11K_RXDMA2HOST_RING_MASK_0,
		ATH11K_RXDMA2HOST_RING_MASK_1,
		ATH11K_RXDMA2HOST_RING_MASK_2,
	},
	.host2rxdma = {
		ATH11K_HOST2RXDMA_RING_MASK_0,
		ATH11K_HOST2RXDMA_RING_MASK_1,
		ATH11K_HOST2RXDMA_RING_MASK_2,
	},
};
2256 
/*
 * Ring interrupt mask table for QCA6390.  Like the IPQ8074 table but
 * with a single TX ring and no host2rxdma rings.
 */
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
	.tx  = {
		ATH11K_TX_RING_MASK_0,
	},
	.rx_mon_status = {
		0, 0, 0, 0,
		ATH11K_RX_MON_STATUS_RING_MASK_0,
		ATH11K_RX_MON_STATUS_RING_MASK_1,
		ATH11K_RX_MON_STATUS_RING_MASK_2,
	},
	.rx = {
		0, 0, 0, 0, 0, 0, 0,
		ATH11K_RX_RING_MASK_0,
		ATH11K_RX_RING_MASK_1,
		ATH11K_RX_RING_MASK_2,
		ATH11K_RX_RING_MASK_3,
	},
	.rx_err = {
		ATH11K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		ATH11K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		ATH11K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		ATH11K_RXDMA2HOST_RING_MASK_0,
		ATH11K_RXDMA2HOST_RING_MASK_1,
		ATH11K_RXDMA2HOST_RING_MASK_2,
	},
	.host2rxdma = {
	},
};
2291 
/*
 * Ring interrupt mask table for QCN9074.  Unlike IPQ8074/QCA6390,
 * most single-ring masks here sit at slot 3 instead of slot 0, and
 * rx/rx_mon_status start at slots 4 and 3 respectively.
 */
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
	.tx  = {
		ATH11K_TX_RING_MASK_0,
		ATH11K_TX_RING_MASK_1,
		ATH11K_TX_RING_MASK_2,
	},
	.rx_mon_status = {
		0, 0, 0,
		ATH11K_RX_MON_STATUS_RING_MASK_0,
		ATH11K_RX_MON_STATUS_RING_MASK_1,
		ATH11K_RX_MON_STATUS_RING_MASK_2,
	},
	.rx = {
		0, 0, 0, 0,
		ATH11K_RX_RING_MASK_0,
		ATH11K_RX_RING_MASK_1,
		ATH11K_RX_RING_MASK_2,
		ATH11K_RX_RING_MASK_3,
	},
	.rx_err = {
		0, 0, 0,
		ATH11K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		0, 0, 0,
		ATH11K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		0, 0, 0,
		ATH11K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		0, 0, 0,
		ATH11K_RXDMA2HOST_RING_MASK_0,
	},
	.host2rxdma = {
		0, 0, 0,
		ATH11K_HOST2RXDMA_RING_MASK_0,
	},
};
2332 
/*
 * Ring interrupt mask table for WCN6750.  TX uses rings 0, 2 and 4
 * (slots 1 and 3 intentionally zero); rx_err/rx_wbm_rel/reo_status
 * sit at slot 1; no host2rxdma rings.
 */
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
	.tx  = {
		ATH11K_TX_RING_MASK_0,
		0,
		ATH11K_TX_RING_MASK_2,
		0,
		ATH11K_TX_RING_MASK_4,
	},
	.rx_mon_status = {
		0, 0, 0, 0, 0, 0,
		ATH11K_RX_MON_STATUS_RING_MASK_0,
	},
	.rx = {
		0, 0, 0, 0, 0, 0, 0,
		ATH11K_RX_RING_MASK_0,
		ATH11K_RX_RING_MASK_1,
		ATH11K_RX_RING_MASK_2,
		ATH11K_RX_RING_MASK_3,
	},
	.rx_err = {
		0, ATH11K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		0, ATH11K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		0, ATH11K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		ATH11K_RXDMA2HOST_RING_MASK_0,
		ATH11K_RXDMA2HOST_RING_MASK_1,
		ATH11K_RXDMA2HOST_RING_MASK_2,
	},
	.host2rxdma = {
	},
};
2369 
/*
 * Target firmware's Copy Engine configuration (IPQ8074 family).
 * All fields are stored little-endian via htole32() because this
 * table is consumed by the target firmware.
 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT; interrupts disabled for this pipe */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(0),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(65535),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(65535),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE9 host->target HTT */
	{
		.pipenum = htole32(9),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE10 target->host HTT; zero entries/bytes */
	{
		.pipenum = htole32(10),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE11 Not used */
};
2484 
2485 /* Map from service/endpoint to Copy Engine.
2486  * This table is derived from the CE_PCI TABLE, above.
2487  * It is passed to the Target at startup for use by firmware.
2488  */
/*
 * IPQ8074: WMI data/control services use CE3 (out) and CE2 (in);
 * per-MAC WMI control uses CE7 (MAC1) and CE9 (MAC2); HTT uses CE4
 * (out) and CE1 (in); pktlog arrives on CE5.  Terminated by a
 * zero-filled entry.
 */
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[] = {
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(7),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(9),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(4),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_PKT_LOG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(5),
	},

	/* (Additions here) */

	{ /* terminator entry */ }
};
2600 
/*
 * IPQ6018: same service-to-CE mapping as IPQ8074 but without the
 * WMI control entries for a second MAC (no MAC2/CE9 pair).
 * Terminated by a zero-filled entry.
 */
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[] = {
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(7),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(4),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_PKT_LOG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(5),
	},

	/* (Additions here) */

	{ /* terminator entry */ }
};
2702 
/*
 * Target firmware's Copy Engine configuration (QCA6390).  All fields
 * are stored little-endian via htole32() because this table is
 * consumed by the target firmware.  Differs from the IPQ8074 table
 * in CE6-CE8 sizing/direction; CEs 9-11 belong to the MHI driver.
 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT; interrupts disabled for this pipe */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},
	/* CE 9, 10, 11 are used by MHI driver */
};
2796 
/* Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 *
 * Each entry is { HTC service id, pipe direction, CE pipe number },
 * in little-endian byte order as consumed by the target.
 * An all-zero entry terminates the list.
 */
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = {
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(0),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(4),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(1),
	},

	/* (Additions here) */

	{ /* must be last */
		htole32(0),
		htole32(0),
		htole32(0),
	},
};
2881 
/* Target firmware's Copy Engine configuration (QCN9074).
 * All fields are little-endian, as consumed by the target at startup.
 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT; interrupts disabled (polled TX path) */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},
	/* CE 9, 10, 11 are used by MHI driver */
};
2975 
/* Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 *
 * Each entry is { HTC service id, pipe direction, CE pipe number },
 * in little-endian byte order as consumed by the target.
 * An all-zero entry terminates the list.
 */
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[] = {
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(3),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(2),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(0),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(1),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(0),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(1),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		htole32(4),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(1),
	},
	{
		htole32(ATH11K_HTC_SVC_ID_PKT_LOG),
		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		htole32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		htole32(0),
		htole32(0),
		htole32(0),
	},
};
3075 
#define QWX_CE_COUNT_IPQ8074	21

/* Host-side Copy Engine attributes for IPQ8074-family chips.
 * src_* fields describe the host->target (send) ring, dest_* fields
 * the target->host (receive) ring; send_cb/recv_cb handle completed
 * transfers on the corresponding ring.
 *
 * NOTE(review): only CE0-CE11 are initialized below; the remaining
 * QWX_CE_COUNT_IPQ8074 (21) slots are implicitly zero-filled.
 * Confirm the count of 21 is intended and not a mix-up with the
 * service-to-CE map length used in ath11k_hw_params.
 */
const struct ce_attr qwx_host_ce_config_ipq8074[QWX_CE_COUNT_IPQ8074] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT; no send_cb and interrupts disabled */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
3183 
#define QWX_CE_COUNT_QCA6390	9

/* Host-side Copy Engine attributes for QCA6390/WCN6855/WCN6750.
 * src_* fields describe the host->target (send) ring, dest_* fields
 * the target->host (receive) ring; send_cb/recv_cb handle completed
 * transfers on the corresponding ring.
 */
const struct ce_attr qwx_host_ce_config_qca6390[QWX_CE_COUNT_QCA6390] = {
	/* CE0: host->target HTC control and raw streams; no send_cb here,
	 * unlike the ipq8074 table */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT; no send_cb and interrupts disabled */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy.
	 * NOTE(review): unlike CE8 in the ipq8074 table above, this entry
	 * does not set CE_ATTR_DIS_INTR; verify against upstream ath11k. */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

};
3265 
#define QWX_CE_COUNT_QCN9074	6

/* Host-side Copy Engine attributes for QCN9074.
 * src_* fields describe the host->target (send) ring, dest_* fields
 * the target->host (receive) ring; send_cb/recv_cb handle completed
 * transfers on the corresponding ring.
 */
const struct ce_attr qwx_host_ce_config_qcn9074[QWX_CE_COUNT_QCN9074] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI; note the smaller 32-entry receive ring
	 * compared to the 512 used in the other host CE tables */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT; no send_cb and interrupts disabled */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_dp_htt_htc_t2h_msg_handler,
	},
};
3321 
/* Per-chip mapping from TCL (TX) ring to its WBM (completion) ring
 * and return-buffer-manager (RBM) id. IPQ8074-family uses a 1:1
 * ring mapping with SW0/SW1/SW2 buffer managers.
 */
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
	{
		.tcl_ring_num = 0,
		.wbm_ring_num = 0,
		.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
	},
	{
		.tcl_ring_num = 1,
		.wbm_ring_num = 1,
		.rbm_id = HAL_RX_BUF_RBM_SW1_BM,
	},
	{
		.tcl_ring_num = 2,
		.wbm_ring_num = 2,
		.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
	},
};
3339 
/* TCL->WBM/RBM mapping for WCN6750.
 * Unlike the ipq8074 table, TCL ring 1 maps to WBM ring 4 with the
 * SW4 buffer manager; rings 0 and 2 remain 1:1.
 */
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
	{
		.tcl_ring_num = 0,
		.wbm_ring_num = 0,
		.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
	},
	{
		.tcl_ring_num = 1,
		.wbm_ring_num = 4,
		.rbm_id = HAL_RX_BUF_RBM_SW4_BM,
	},
	{
		.tcl_ring_num = 2,
		.wbm_ring_num = 2,
		.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
	},
};
3357 
3358 
/* HAL parameters: RX buffer return-buffer-manager id plus the
 * TCL->WBM ring map for each chip family. */
static const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
	.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
3363 
/* QCA6390/WCN6855: RX buffers via SW1 BM, ipq8074-style TCL->WBM map. */
static const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
	.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
3368 
/* WCN6750: RX buffers via SW1 BM, with the WCN6750-specific TCL->WBM map. */
static const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
	.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
};
3373 
3374 static const struct ath11k_hw_params ath11k_hw_params[] = {
3375 	{
3376 		.hw_rev = ATH11K_HW_IPQ8074,
3377 		.name = "ipq8074 hw2.0",
3378 		.fw = {
3379 			.dir = "ipq8074-hw2.0",
3380 			.board_size = 256 * 1024,
3381 			.cal_offset = 128 * 1024,
3382 		},
3383 		.max_radios = 3,
3384 		.bdf_addr = 0x4B0C0000,
3385 		.hw_ops = &ipq8074_ops,
3386 		.ring_mask = &ath11k_hw_ring_mask_ipq8074,
3387 		.internal_sleep_clock = false,
3388 		.regs = &ipq8074_regs,
3389 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
3390 		.host_ce_config = qwx_host_ce_config_ipq8074,
3391 		.ce_count = QWX_CE_COUNT_IPQ8074,
3392 		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
3393 		.target_ce_count = 11,
3394 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
3395 		.svc_to_ce_map_len = 21,
3396 		.single_pdev_only = false,
3397 		.rxdma1_enable = true,
3398 		.num_rxmda_per_pdev = 1,
3399 		.rx_mac_buf_ring = false,
3400 		.vdev_start_delay = false,
3401 		.htt_peer_map_v2 = true,
3402 #if notyet
3403 		.spectral = {
3404 			.fft_sz = 2,
3405 			/* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes.
3406 			 * so added pad size as 2 bytes to compensate the BIN size
3407 			 */
3408 			.fft_pad_sz = 2,
3409 			.summary_pad_sz = 0,
3410 			.fft_hdr_len = 16,
3411 			.max_fft_bins = 512,
3412 			.fragment_160mhz = true,
3413 		},
3414 
3415 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3416 					BIT(NL80211_IFTYPE_AP) |
3417 					BIT(NL80211_IFTYPE_MESH_POINT),
3418 		.supports_monitor = true,
3419 		.full_monitor_mode = false,
3420 #endif
3421 		.supports_shadow_regs = false,
3422 		.idle_ps = false,
3423 		.supports_sta_ps = false,
3424 		.cold_boot_calib = true,
3425 		.cbcal_restart_fw = true,
3426 		.fw_mem_mode = 0,
3427 		.num_vdevs = 16 + 1,
3428 		.num_peers = 512,
3429 		.supports_suspend = false,
3430 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
3431 		.supports_regdb = false,
3432 		.fix_l1ss = true,
3433 		.credit_flow = false,
3434 		.max_tx_ring = DP_TCL_NUM_RING_MAX,
3435 		.hal_params = &ath11k_hw_hal_params_ipq8074,
3436 #if notyet
3437 		.supports_dynamic_smps_6ghz = false,
3438 		.alloc_cacheable_memory = true,
3439 		.supports_rssi_stats = false,
3440 #endif
3441 		.fw_wmi_diag_event = false,
3442 		.current_cc_support = false,
3443 		.dbr_debug_support = true,
3444 		.global_reset = false,
3445 #ifdef notyet
3446 		.bios_sar_capa = NULL,
3447 #endif
3448 		.m3_fw_support = false,
3449 		.fixed_bdf_addr = true,
3450 		.fixed_mem_region = true,
3451 		.static_window_map = false,
3452 #if notyet
3453 		.hybrid_bus_type = false,
3454 		.fixed_fw_mem = false,
3455 		.support_off_channel_tx = false,
3456 		.supports_multi_bssid = false,
3457 
3458 		.sram_dump = {},
3459 
3460 		.tcl_ring_retry = true,
3461 #endif
3462 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3463 #ifdef notyet
3464 		.smp2p_wow_exit = false,
3465 #endif
3466 	},
3467 	{
3468 		.hw_rev = ATH11K_HW_IPQ6018_HW10,
3469 		.name = "ipq6018 hw1.0",
3470 		.fw = {
3471 			.dir = "ipq6018-hw1.0",
3472 			.board_size = 256 * 1024,
3473 			.cal_offset = 128 * 1024,
3474 		},
3475 		.max_radios = 2,
3476 		.bdf_addr = 0x4ABC0000,
3477 		.hw_ops = &ipq6018_ops,
3478 		.ring_mask = &ath11k_hw_ring_mask_ipq8074,
3479 		.internal_sleep_clock = false,
3480 		.regs = &ipq8074_regs,
3481 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
3482 		.host_ce_config = qwx_host_ce_config_ipq8074,
3483 		.ce_count = QWX_CE_COUNT_IPQ8074,
3484 		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
3485 		.target_ce_count = 11,
3486 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
3487 		.svc_to_ce_map_len = 19,
3488 		.single_pdev_only = false,
3489 		.rxdma1_enable = true,
3490 		.num_rxmda_per_pdev = 1,
3491 		.rx_mac_buf_ring = false,
3492 		.vdev_start_delay = false,
3493 		.htt_peer_map_v2 = true,
3494 #if notyet
3495 		.spectral = {
3496 			.fft_sz = 4,
3497 			.fft_pad_sz = 0,
3498 			.summary_pad_sz = 0,
3499 			.fft_hdr_len = 16,
3500 			.max_fft_bins = 512,
3501 			.fragment_160mhz = true,
3502 		},
3503 
3504 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3505 					BIT(NL80211_IFTYPE_AP) |
3506 					BIT(NL80211_IFTYPE_MESH_POINT),
3507 		.supports_monitor = true,
3508 		.full_monitor_mode = false,
3509 #endif
3510 		.supports_shadow_regs = false,
3511 		.idle_ps = false,
3512 		.supports_sta_ps = false,
3513 		.cold_boot_calib = true,
3514 		.cbcal_restart_fw = true,
3515 		.fw_mem_mode = 0,
3516 		.num_vdevs = 16 + 1,
3517 		.num_peers = 512,
3518 		.supports_suspend = false,
3519 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
3520 		.supports_regdb = false,
3521 		.fix_l1ss = true,
3522 		.credit_flow = false,
3523 		.max_tx_ring = DP_TCL_NUM_RING_MAX,
3524 		.hal_params = &ath11k_hw_hal_params_ipq8074,
3525 #if notyet
3526 		.supports_dynamic_smps_6ghz = false,
3527 		.alloc_cacheable_memory = true,
3528 		.supports_rssi_stats = false,
3529 #endif
3530 		.fw_wmi_diag_event = false,
3531 		.current_cc_support = false,
3532 		.dbr_debug_support = true,
3533 		.global_reset = false,
3534 #ifdef notyet
3535 		.bios_sar_capa = NULL,
3536 #endif
3537 		.m3_fw_support = false,
3538 		.fixed_bdf_addr = true,
3539 		.fixed_mem_region = true,
3540 		.static_window_map = false,
3541 		.hybrid_bus_type = false,
3542 		.fixed_fw_mem = false,
3543 #if notyet
3544 		.support_off_channel_tx = false,
3545 		.supports_multi_bssid = false,
3546 
3547 		.sram_dump = {},
3548 
3549 		.tcl_ring_retry = true,
3550 #endif
3551 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3552 #ifdef notyet
3553 		.smp2p_wow_exit = false,
3554 #endif
3555 	},
3556 	{
3557 		.name = "qca6390 hw2.0",
3558 		.hw_rev = ATH11K_HW_QCA6390_HW20,
3559 		.fw = {
3560 			.dir = "qca6390-hw2.0",
3561 			.board_size = 256 * 1024,
3562 			.cal_offset = 128 * 1024,
3563 		},
3564 		.max_radios = 3,
3565 		.bdf_addr = 0x4B0C0000,
3566 		.hw_ops = &qca6390_ops,
3567 		.ring_mask = &ath11k_hw_ring_mask_qca6390,
3568 		.internal_sleep_clock = true,
3569 		.regs = &qca6390_regs,
3570 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
3571 		.host_ce_config = qwx_host_ce_config_qca6390,
3572 		.ce_count = QWX_CE_COUNT_QCA6390,
3573 		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
3574 		.target_ce_count = 9,
3575 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
3576 		.svc_to_ce_map_len = 14,
3577 		.single_pdev_only = true,
3578 		.rxdma1_enable = false,
3579 		.num_rxmda_per_pdev = 2,
3580 		.rx_mac_buf_ring = true,
3581 		.vdev_start_delay = true,
3582 		.htt_peer_map_v2 = false,
3583 #if notyet
3584 		.spectral = {
3585 			.fft_sz = 0,
3586 			.fft_pad_sz = 0,
3587 			.summary_pad_sz = 0,
3588 			.fft_hdr_len = 0,
3589 			.max_fft_bins = 0,
3590 			.fragment_160mhz = false,
3591 		},
3592 
3593 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3594 					BIT(NL80211_IFTYPE_AP),
3595 		.supports_monitor = false,
3596 		.full_monitor_mode = false,
3597 #endif
3598 		.supports_shadow_regs = true,
3599 		.idle_ps = true,
3600 		.supports_sta_ps = true,
3601 		.cold_boot_calib = false,
3602 		.cbcal_restart_fw = false,
3603 		.fw_mem_mode = 0,
3604 		.num_vdevs = 16 + 1,
3605 		.num_peers = 512,
3606 		.supports_suspend = true,
3607 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
3608 		.supports_regdb = false,
3609 		.fix_l1ss = true,
3610 		.credit_flow = true,
3611 		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
3612 		.hal_params = &ath11k_hw_hal_params_qca6390,
3613 #if notyet
3614 		.supports_dynamic_smps_6ghz = false,
3615 		.alloc_cacheable_memory = false,
3616 		.supports_rssi_stats = true,
3617 #endif
3618 		.fw_wmi_diag_event = true,
3619 		.current_cc_support = true,
3620 		.dbr_debug_support = false,
3621 		.global_reset = true,
3622 #ifdef notyet
3623 		.bios_sar_capa = NULL,
3624 #endif
3625 		.m3_fw_support = true,
3626 		.fixed_bdf_addr = false,
3627 		.fixed_mem_region = false,
3628 		.static_window_map = false,
3629 		.hybrid_bus_type = false,
3630 		.fixed_fw_mem = false,
3631 #if notyet
3632 		.support_off_channel_tx = true,
3633 		.supports_multi_bssid = true,
3634 
3635 		.sram_dump = {
3636 			.start = 0x01400000,
3637 			.end = 0x0171ffff,
3638 		},
3639 
3640 		.tcl_ring_retry = true,
3641 #endif
3642 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3643 #ifdef notyet
3644 		.smp2p_wow_exit = false,
3645 #endif
3646 	},
3647 	{
3648 		.name = "qcn9074 hw1.0",
3649 		.hw_rev = ATH11K_HW_QCN9074_HW10,
3650 		.fw = {
3651 			.dir = "qcn9074-hw1.0",
3652 			.board_size = 256 * 1024,
3653 			.cal_offset = 128 * 1024,
3654 		},
3655 		.max_radios = 1,
3656 #if notyet
3657 		.single_pdev_only = false,
3658 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074,
3659 #endif
3660 		.hw_ops = &qcn9074_ops,
3661 		.ring_mask = &ath11k_hw_ring_mask_qcn9074,
3662 		.internal_sleep_clock = false,
3663 		.regs = &qcn9074_regs,
3664 		.host_ce_config = qwx_host_ce_config_qcn9074,
3665 		.ce_count = QWX_CE_COUNT_QCN9074,
3666 		.target_ce_config = ath11k_target_ce_config_wlan_qcn9074,
3667 		.target_ce_count = 9,
3668 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
3669 		.svc_to_ce_map_len = 18,
3670 		.rxdma1_enable = true,
3671 		.num_rxmda_per_pdev = 1,
3672 		.rx_mac_buf_ring = false,
3673 		.vdev_start_delay = false,
3674 		.htt_peer_map_v2 = true,
3675 #if notyet
3676 		.spectral = {
3677 			.fft_sz = 2,
3678 			.fft_pad_sz = 0,
3679 			.summary_pad_sz = 16,
3680 			.fft_hdr_len = 24,
3681 			.max_fft_bins = 1024,
3682 			.fragment_160mhz = false,
3683 		},
3684 
3685 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3686 					BIT(NL80211_IFTYPE_AP) |
3687 					BIT(NL80211_IFTYPE_MESH_POINT),
3688 		.supports_monitor = true,
3689 		.full_monitor_mode = true,
3690 #endif
3691 		.supports_shadow_regs = false,
3692 		.idle_ps = false,
3693 		.supports_sta_ps = false,
3694 		.cold_boot_calib = false,
3695 		.cbcal_restart_fw = false,
3696 		.fw_mem_mode = 2,
3697 		.num_vdevs = 8,
3698 		.num_peers = 128,
3699 		.supports_suspend = false,
3700 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
3701 		.supports_regdb = false,
3702 		.fix_l1ss = true,
3703 		.credit_flow = false,
3704 		.max_tx_ring = DP_TCL_NUM_RING_MAX,
3705 		.hal_params = &ath11k_hw_hal_params_ipq8074,
3706 #if notyet
3707 		.supports_dynamic_smps_6ghz = true,
3708 		.alloc_cacheable_memory = true,
3709 		.supports_rssi_stats = false,
3710 #endif
3711 		.fw_wmi_diag_event = false,
3712 		.current_cc_support = false,
3713 		.dbr_debug_support = true,
3714 		.global_reset = false,
3715 #ifdef notyet
3716 		.bios_sar_capa = NULL,
3717 #endif
3718 		.m3_fw_support = true,
3719 		.fixed_bdf_addr = false,
3720 		.fixed_mem_region = false,
3721 		.static_window_map = true,
3722 		.hybrid_bus_type = false,
3723 		.fixed_fw_mem = false,
3724 #if notyet
3725 		.support_off_channel_tx = false,
3726 		.supports_multi_bssid = false,
3727 
3728 		.sram_dump = {},
3729 
3730 		.tcl_ring_retry = true,
3731 #endif
3732 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3733 #ifdef notyet
3734 		.smp2p_wow_exit = false,
3735 #endif
3736 	},
3737 	{
3738 		.name = "wcn6855 hw2.0",
3739 		.hw_rev = ATH11K_HW_WCN6855_HW20,
3740 		.fw = {
3741 			.dir = "wcn6855-hw2.0",
3742 			.board_size = 256 * 1024,
3743 			.cal_offset = 128 * 1024,
3744 		},
3745 		.max_radios = 3,
3746 		.bdf_addr = 0x4B0C0000,
3747 		.hw_ops = &wcn6855_ops,
3748 		.ring_mask = &ath11k_hw_ring_mask_qca6390,
3749 		.internal_sleep_clock = true,
3750 		.regs = &wcn6855_regs,
3751 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
3752 		.host_ce_config = qwx_host_ce_config_qca6390,
3753 		.ce_count = QWX_CE_COUNT_QCA6390,
3754 		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
3755 		.target_ce_count = 9,
3756 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
3757 		.svc_to_ce_map_len = 14,
3758 		.single_pdev_only = true,
3759 		.rxdma1_enable = false,
3760 		.num_rxmda_per_pdev = 2,
3761 		.rx_mac_buf_ring = true,
3762 		.vdev_start_delay = true,
3763 		.htt_peer_map_v2 = false,
3764 #if notyet
3765 		.spectral = {
3766 			.fft_sz = 0,
3767 			.fft_pad_sz = 0,
3768 			.summary_pad_sz = 0,
3769 			.fft_hdr_len = 0,
3770 			.max_fft_bins = 0,
3771 			.fragment_160mhz = false,
3772 		},
3773 
3774 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3775 					BIT(NL80211_IFTYPE_AP),
3776 		.supports_monitor = false,
3777 		.full_monitor_mode = false,
3778 #endif
3779 		.supports_shadow_regs = true,
3780 		.idle_ps = true,
3781 		.supports_sta_ps = true,
3782 		.cold_boot_calib = false,
3783 		.cbcal_restart_fw = false,
3784 		.fw_mem_mode = 0,
3785 		.num_vdevs = 16 + 1,
3786 		.num_peers = 512,
3787 		.supports_suspend = true,
3788 		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
3789 		.supports_regdb = true,
3790 		.fix_l1ss = false,
3791 		.credit_flow = true,
3792 		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
3793 		.hal_params = &ath11k_hw_hal_params_qca6390,
3794 #if notyet
3795 		.supports_dynamic_smps_6ghz = false,
3796 		.alloc_cacheable_memory = false,
3797 		.supports_rssi_stats = true,
3798 #endif
3799 		.fw_wmi_diag_event = true,
3800 		.current_cc_support = true,
3801 		.dbr_debug_support = false,
3802 		.global_reset = true,
3803 #ifdef notyet
3804 		.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
3805 #endif
3806 		.m3_fw_support = true,
3807 		.fixed_bdf_addr = false,
3808 		.fixed_mem_region = false,
3809 		.static_window_map = false,
3810 		.hybrid_bus_type = false,
3811 		.fixed_fw_mem = false,
3812 #if notyet
3813 		.support_off_channel_tx = true,
3814 		.supports_multi_bssid = true,
3815 
3816 		.sram_dump = {
3817 			.start = 0x01400000,
3818 			.end = 0x0177ffff,
3819 		},
3820 
3821 		.tcl_ring_retry = true,
3822 #endif
3823 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3824 #ifdef notyet
3825 		.smp2p_wow_exit = false,
3826 #endif
3827 	},
3828 	{
3829 		.name = "wcn6855 hw2.1",
3830 		.hw_rev = ATH11K_HW_WCN6855_HW21,
3831 		.fw = {
3832 			.dir = "wcn6855-hw2.1",
3833 			.board_size = 256 * 1024,
3834 			.cal_offset = 128 * 1024,
3835 		},
3836 		.max_radios = 3,
3837 		.bdf_addr = 0x4B0C0000,
3838 		.hw_ops = &wcn6855_ops,
3839 		.ring_mask = &ath11k_hw_ring_mask_qca6390,
3840 		.internal_sleep_clock = true,
3841 		.regs = &wcn6855_regs,
3842 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
3843 		.host_ce_config = qwx_host_ce_config_qca6390,
3844 		.ce_count = QWX_CE_COUNT_QCA6390,
3845 		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
3846 		.target_ce_count = 9,
3847 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
3848 		.svc_to_ce_map_len = 14,
3849 		.single_pdev_only = true,
3850 		.rxdma1_enable = false,
3851 		.num_rxmda_per_pdev = 2,
3852 		.rx_mac_buf_ring = true,
3853 		.vdev_start_delay = true,
3854 		.htt_peer_map_v2 = false,
3855 #if notyet
3856 		.spectral = {
3857 			.fft_sz = 0,
3858 			.fft_pad_sz = 0,
3859 			.summary_pad_sz = 0,
3860 			.fft_hdr_len = 0,
3861 			.max_fft_bins = 0,
3862 			.fragment_160mhz = false,
3863 		},
3864 
3865 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3866 					BIT(NL80211_IFTYPE_AP),
3867 		.supports_monitor = false,
3868 #endif
3869 		.supports_shadow_regs = true,
3870 		.idle_ps = true,
3871 		.supports_sta_ps = true,
3872 		.cold_boot_calib = false,
3873 		.cbcal_restart_fw = false,
3874 		.fw_mem_mode = 0,
3875 		.num_vdevs = 16 + 1,
3876 		.num_peers = 512,
3877 		.supports_suspend = true,
3878 		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
3879 		.supports_regdb = true,
3880 		.fix_l1ss = false,
3881 		.credit_flow = true,
3882 		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
3883 		.hal_params = &ath11k_hw_hal_params_qca6390,
3884 #if notyet
3885 		.supports_dynamic_smps_6ghz = false,
3886 		.alloc_cacheable_memory = false,
3887 		.supports_rssi_stats = true,
3888 #endif
3889 		.fw_wmi_diag_event = true,
3890 		.current_cc_support = true,
3891 		.dbr_debug_support = false,
3892 		.global_reset = true,
3893 #ifdef notyet
3894 		.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
3895 #endif
3896 		.m3_fw_support = true,
3897 		.fixed_bdf_addr = false,
3898 		.fixed_mem_region = false,
3899 		.static_window_map = false,
3900 		.hybrid_bus_type = false,
3901 		.fixed_fw_mem = false,
3902 #if notyet
3903 		.support_off_channel_tx = true,
3904 		.supports_multi_bssid = true,
3905 
3906 		.sram_dump = {
3907 			.start = 0x01400000,
3908 			.end = 0x0177ffff,
3909 		},
3910 
3911 		.tcl_ring_retry = true,
3912 #endif
3913 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3914 #ifdef notyet
3915 		.smp2p_wow_exit = false,
3916 #endif
3917 	},
3918 	{
3919 		.name = "wcn6750 hw1.0",
3920 		.hw_rev = ATH11K_HW_WCN6750_HW10,
3921 		.fw = {
3922 			.dir = "wcn6750-hw1.0",
3923 			.board_size = 256 * 1024,
3924 			.cal_offset = 128 * 1024,
3925 		},
3926 		.max_radios = 1,
3927 		.bdf_addr = 0x4B0C0000,
3928 		.hw_ops = &wcn6750_ops,
3929 		.ring_mask = &ath11k_hw_ring_mask_wcn6750,
3930 		.internal_sleep_clock = false,
3931 		.regs = &wcn6750_regs,
3932 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
3933 		.host_ce_config = qwx_host_ce_config_qca6390,
3934 		.ce_count = QWX_CE_COUNT_QCA6390,
3935 		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
3936 		.target_ce_count = 9,
3937 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
3938 		.svc_to_ce_map_len = 14,
3939 		.single_pdev_only = true,
3940 		.rxdma1_enable = false,
3941 		.num_rxmda_per_pdev = 1,
3942 		.rx_mac_buf_ring = true,
3943 		.vdev_start_delay = true,
3944 		.htt_peer_map_v2 = false,
3945 #if notyet
3946 		.spectral = {
3947 			.fft_sz = 0,
3948 			.fft_pad_sz = 0,
3949 			.summary_pad_sz = 0,
3950 			.fft_hdr_len = 0,
3951 			.max_fft_bins = 0,
3952 			.fragment_160mhz = false,
3953 		},
3954 
3955 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3956 					BIT(NL80211_IFTYPE_AP),
3957 		.supports_monitor = false,
3958 #endif
3959 		.supports_shadow_regs = true,
3960 		.idle_ps = true,
3961 		.supports_sta_ps = true,
3962 		.cold_boot_calib = true,
3963 		.cbcal_restart_fw = false,
3964 		.fw_mem_mode = 0,
3965 		.num_vdevs = 16 + 1,
3966 		.num_peers = 512,
3967 		.supports_suspend = false,
3968 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
3969 		.supports_regdb = true,
3970 		.fix_l1ss = false,
3971 		.credit_flow = true,
3972 		.max_tx_ring = DP_TCL_NUM_RING_MAX,
3973 		.hal_params = &ath11k_hw_hal_params_wcn6750,
3974 #if notyet
3975 		.supports_dynamic_smps_6ghz = false,
3976 		.alloc_cacheable_memory = false,
3977 		.supports_rssi_stats = true,
3978 #endif
3979 		.fw_wmi_diag_event = false,
3980 		.current_cc_support = true,
3981 		.dbr_debug_support = false,
3982 		.global_reset = false,
3983 #ifdef notyet
3984 		.bios_sar_capa = NULL,
3985 #endif
3986 		.m3_fw_support = false,
3987 		.fixed_bdf_addr = false,
3988 		.fixed_mem_region = false,
3989 		.static_window_map = true,
3990 		.hybrid_bus_type = true,
3991 		.fixed_fw_mem = true,
3992 #if notyet
3993 		.support_off_channel_tx = true,
3994 		.supports_multi_bssid = true,
3995 
3996 		.sram_dump = {},
3997 
3998 		.tcl_ring_retry = false,
3999 #endif
4000 		.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
4001 #ifdef notyet
4002 		.smp2p_wow_exit = true,
4003 #endif
4004 	},
4005 };
4006 
/*
 * HAL register offset table for IPQ8074 hardware.
 * Per-chip tables of this type are selected through the hw_params
 * .regs pointer; offsets differ between chip generations, so these
 * values must match the IPQ8074 register map exactly.
 */
const struct ath11k_hw_regs ipq8074_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000510,
	.hal_tcl1_ring_base_msb = 0x00000514,
	.hal_tcl1_ring_id = 0x00000518,
	.hal_tcl1_ring_misc = 0x00000520,
	.hal_tcl1_ring_tp_addr_lsb = 0x0000052c,
	.hal_tcl1_ring_tp_addr_msb = 0x00000530,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000540,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000544,
	.hal_tcl1_ring_msi1_base_lsb = 0x00000558,
	.hal_tcl1_ring_msi1_base_msb = 0x0000055c,
	.hal_tcl1_ring_msi1_data = 0x00000560,
	.hal_tcl2_ring_base_lsb = 0x00000568,
	.hal_tcl_ring_base_lsb = 0x00000618,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000720,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x0000029c,
	.hal_reo1_ring_base_msb = 0x000002a0,
	.hal_reo1_ring_id = 0x000002a4,
	.hal_reo1_ring_misc = 0x000002ac,
	.hal_reo1_ring_hp_addr_lsb = 0x000002b0,
	.hal_reo1_ring_hp_addr_msb = 0x000002b4,
	.hal_reo1_ring_producer_int_setup = 0x000002c0,
	.hal_reo1_ring_msi1_base_lsb = 0x000002e4,
	.hal_reo1_ring_msi1_base_msb = 0x000002e8,
	.hal_reo1_ring_msi1_data = 0x000002ec,
	.hal_reo2_ring_base_lsb = 0x000002f4,
	.hal_reo1_aging_thresh_ix_0 = 0x00000564,
	.hal_reo1_aging_thresh_ix_1 = 0x00000568,
	.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
	.hal_reo1_aging_thresh_ix_3 = 0x00000570,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003038,
	.hal_reo1_ring_tp = 0x0000303c,
	.hal_reo2_ring_hp = 0x00003040,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003fc,
	.hal_reo_tcl_ring_hp = 0x00003058,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x00000504,
	.hal_reo_status_hp = 0x00003070,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000860,
	.hal_wbm_idle_link_ring_misc = 0x00000870,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001d8,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000910,
	.hal_wbm1_release_ring_base_lsb = 0x00000968,

	/* PCIe base address */
	/* Zero: IPQ8074 is an AHB part, so no PCIe-specific registers apply. */
	.pcie_qserdes_sysclk_en_sel = 0x0,
	.pcie_pcs_osc_dtct_config_base = 0x0,

	/* Shadow register area */
	/* Zero: shadow registers unused (see .supports_shadow_regs hw_params). */
	.hal_shadow_base_addr = 0x0,

	/* REO misc control register, not used in IPQ8074 */
	.hal_reo1_misc_ctl = 0x0,
};
4091 
/*
 * HAL register offset table for QCA6390 hardware.
 * Selected through the hw_params .regs pointer; values must match the
 * QCA6390 register map exactly.
 */
const struct ath11k_hw_regs qca6390_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000684,
	.hal_tcl1_ring_base_msb = 0x00000688,
	.hal_tcl1_ring_id = 0x0000068c,
	.hal_tcl1_ring_misc = 0x00000694,
	.hal_tcl1_ring_tp_addr_lsb = 0x000006a0,
	.hal_tcl1_ring_tp_addr_msb = 0x000006a4,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006b4,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006b8,
	.hal_tcl1_ring_msi1_base_lsb = 0x000006cc,
	.hal_tcl1_ring_msi1_base_msb = 0x000006d0,
	.hal_tcl1_ring_msi1_data = 0x000006d4,
	.hal_tcl2_ring_base_lsb = 0x000006dc,
	.hal_tcl_ring_base_lsb = 0x0000078c,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000894,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x00000244,
	.hal_reo1_ring_base_msb = 0x00000248,
	.hal_reo1_ring_id = 0x0000024c,
	.hal_reo1_ring_misc = 0x00000254,
	.hal_reo1_ring_hp_addr_lsb = 0x00000258,
	.hal_reo1_ring_hp_addr_msb = 0x0000025c,
	.hal_reo1_ring_producer_int_setup = 0x00000268,
	.hal_reo1_ring_msi1_base_lsb = 0x0000028c,
	.hal_reo1_ring_msi1_base_msb = 0x00000290,
	.hal_reo1_ring_msi1_data = 0x00000294,
	.hal_reo2_ring_base_lsb = 0x0000029c,
	.hal_reo1_aging_thresh_ix_0 = 0x0000050c,
	.hal_reo1_aging_thresh_ix_1 = 0x00000510,
	.hal_reo1_aging_thresh_ix_2 = 0x00000514,
	.hal_reo1_aging_thresh_ix_3 = 0x00000518,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003030,
	.hal_reo1_ring_tp = 0x00003034,
	.hal_reo2_ring_hp = 0x00003038,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003a4,
	.hal_reo_tcl_ring_hp = 0x00003050,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x000004ac,
	.hal_reo_status_hp = 0x00003068,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000860,
	.hal_wbm_idle_link_ring_misc = 0x00000870,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001d8,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000910,
	.hal_wbm1_release_ring_base_lsb = 0x00000968,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
	.pcie_pcs_osc_dtct_config_base = 0x01e0c628,

	/* Shadow register area */
	.hal_shadow_base_addr = 0x000008fc,

	/* REO misc control register, not used in QCA6390 */
	.hal_reo1_misc_ctl = 0x0,
};
4176 
/*
 * HAL register offset table for QCN9074 hardware.
 * Selected through the hw_params .regs pointer; values must match the
 * QCN9074 register map exactly.
 */
const struct ath11k_hw_regs qcn9074_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x000004f0,
	.hal_tcl1_ring_base_msb = 0x000004f4,
	.hal_tcl1_ring_id = 0x000004f8,
	.hal_tcl1_ring_misc = 0x00000500,
	.hal_tcl1_ring_tp_addr_lsb = 0x0000050c,
	.hal_tcl1_ring_tp_addr_msb = 0x00000510,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524,
	.hal_tcl1_ring_msi1_base_lsb = 0x00000538,
	.hal_tcl1_ring_msi1_base_msb = 0x0000053c,
	.hal_tcl1_ring_msi1_data = 0x00000540,
	.hal_tcl2_ring_base_lsb = 0x00000548,
	.hal_tcl_ring_base_lsb = 0x000005f8,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000700,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x0000029c,
	.hal_reo1_ring_base_msb = 0x000002a0,
	.hal_reo1_ring_id = 0x000002a4,
	.hal_reo1_ring_misc = 0x000002ac,
	.hal_reo1_ring_hp_addr_lsb = 0x000002b0,
	.hal_reo1_ring_hp_addr_msb = 0x000002b4,
	.hal_reo1_ring_producer_int_setup = 0x000002c0,
	.hal_reo1_ring_msi1_base_lsb = 0x000002e4,
	.hal_reo1_ring_msi1_base_msb = 0x000002e8,
	.hal_reo1_ring_msi1_data = 0x000002ec,
	.hal_reo2_ring_base_lsb = 0x000002f4,
	.hal_reo1_aging_thresh_ix_0 = 0x00000564,
	.hal_reo1_aging_thresh_ix_1 = 0x00000568,
	.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
	.hal_reo1_aging_thresh_ix_3 = 0x00000570,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003038,
	.hal_reo1_ring_tp = 0x0000303c,
	.hal_reo2_ring_hp = 0x00003040,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003fc,
	.hal_reo_tcl_ring_hp = 0x00003058,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x00000504,
	.hal_reo_status_hp = 0x00003070,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
	.hal_wbm_idle_link_ring_misc = 0x00000884,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001ec,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000924,
	.hal_wbm1_release_ring_base_lsb = 0x0000097c,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
	.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,

	/* Shadow register area */
	/* Zero: shadow registers unused on this chip. */
	.hal_shadow_base_addr = 0x0,

	/* REO misc control register, not used in QCN9074 */
	.hal_reo1_misc_ctl = 0x0,
};
4261 
4262 const struct ath11k_hw_regs wcn6855_regs = {
4263 	/* SW2TCL(x) R0 ring configuration address */
4264 	.hal_tcl1_ring_base_lsb = 0x00000690,
4265 	.hal_tcl1_ring_base_msb = 0x00000694,
4266 	.hal_tcl1_ring_id = 0x00000698,
4267 	.hal_tcl1_ring_misc = 0x000006a0,
4268 	.hal_tcl1_ring_tp_addr_lsb = 0x000006ac,
4269 	.hal_tcl1_ring_tp_addr_msb = 0x000006b0,
4270 	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c0,
4271 	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c4,
4272 	.hal_tcl1_ring_msi1_base_lsb = 0x000006d8,
4273 	.hal_tcl1_ring_msi1_base_msb = 0x000006dc,
4274 	.hal_tcl1_ring_msi1_data = 0x000006e0,
4275 	.hal_tcl2_ring_base_lsb = 0x000006e8,
4276 	.hal_tcl_ring_base_lsb = 0x00000798,
4277 
4278 	/* TCL STATUS ring address */
4279 	.hal_tcl_status_ring_base_lsb = 0x000008a0,
4280 
4281 	/* REO2SW(x) R0 ring configuration address */
4282 	.hal_reo1_ring_base_lsb = 0x00000244,
4283 	.hal_reo1_ring_base_msb = 0x00000248,
4284 	.hal_reo1_ring_id = 0x0000024c,
4285 	.hal_reo1_ring_misc = 0x00000254,
4286 	.hal_reo1_ring_hp_addr_lsb = 0x00000258,
4287 	.hal_reo1_ring_hp_addr_msb = 0x0000025c,
4288 	.hal_reo1_ring_producer_int_setup = 0x00000268,
4289 	.hal_reo1_ring_msi1_base_lsb = 0x0000028c,
4290 	.hal_reo1_ring_msi1_base_msb = 0x00000290,
4291 	.hal_reo1_ring_msi1_data = 0x00000294,
4292 	.hal_reo2_ring_base_lsb = 0x0000029c,
4293 	.hal_reo1_aging_thresh_ix_0 = 0x000005bc,
4294 	.hal_reo1_aging_thresh_ix_1 = 0x000005c0,
4295 	.hal_reo1_aging_thresh_ix_2 = 0x000005c4,
4296 	.hal_reo1_aging_thresh_ix_3 = 0x000005c8,
4297 
4298 	/* REO2SW(x) R2 ring pointers (head/tail) address */
4299 	.hal_reo1_ring_hp = 0x00003030,
4300 	.hal_reo1_ring_tp = 0x00003034,
4301 	.hal_reo2_ring_hp = 0x00003038,
4302 
4303 	/* REO2TCL R0 ring configuration address */
4304 	.hal_reo_tcl_ring_base_lsb = 0x00000454,
4305 	.hal_reo_tcl_ring_hp = 0x00003060,
4306 
4307 	/* REO CMD ring address */
4308 	.hal_reo_cmd_ring_base_lsb = 0x00000194,
4309 	.hal_reo_cmd_ring_hp = 0x00003020,
4310 
4311 	/* REO status address */
4312 	.hal_reo_status_ring_base_lsb = 0x0000055c,
4313 	.hal_reo_status_hp = 0x00003078,
4314 
4315 	/* SW2REO ring address */
4316 	.hal_sw2reo_ring_base_lsb = 0x000001ec,
4317 	.hal_sw2reo_ring_hp = 0x00003028,
4318 
4319 	/* WCSS relative address */
4320 	.hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
4321 	.hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
4322 	.hal_seq_wcss_umac_ce1_src_reg = 0x1b82000,
4323 	.hal_seq_wcss_umac_ce1_dst_reg = 0x1b83000,
4324 
4325 	/* WBM Idle address */
4326 	.hal_wbm_idle_link_ring_base_lsb = 0x00000870,
4327 	.hal_wbm_idle_link_ring_misc = 0x00000880,
4328 
4329 	/* SW2WBM release address */
4330 	.hal_wbm_release_ring_base_lsb = 0x000001e8,
4331 
4332 	/* WBM2SW release address */
4333 	.hal_wbm0_release_ring_base_lsb = 0x00000920,
4334 	.hal_wbm1_release_ring_base_lsb = 0x00000978,
4335 
4336 	/* PCIe base address */
4337 	.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
4338 	.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
4339 
4340 	/* Shadow register area */
4341 	.hal_shadow_base_addr = 0x000008fc,
4342 
4343 	/* REO misc control register, used for fragment
4344 	 * destination ring config in WCN6855.
4345 	 */
4346 	.hal_reo1_misc_ctl = 0x00000630,
4347 };
4348 
/*
 * HAL register offset table for WCN6750 hardware.
 * Referenced by the "wcn6750 hw1.0" hw_params entry above via .regs;
 * values must match the WCN6750 register map exactly.
 */
const struct ath11k_hw_regs wcn6750_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000694,
	.hal_tcl1_ring_base_msb = 0x00000698,
	.hal_tcl1_ring_id = 0x0000069c,
	.hal_tcl1_ring_misc = 0x000006a4,
	.hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
	.hal_tcl1_ring_tp_addr_msb = 0x000006b4,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
	.hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
	.hal_tcl1_ring_msi1_base_msb = 0x000006e0,
	.hal_tcl1_ring_msi1_data = 0x000006e4,
	.hal_tcl2_ring_base_lsb = 0x000006ec,
	.hal_tcl_ring_base_lsb = 0x0000079c,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x000008a4,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x000001ec,
	.hal_reo1_ring_base_msb = 0x000001f0,
	.hal_reo1_ring_id = 0x000001f4,
	.hal_reo1_ring_misc = 0x000001fc,
	.hal_reo1_ring_hp_addr_lsb = 0x00000200,
	.hal_reo1_ring_hp_addr_msb = 0x00000204,
	.hal_reo1_ring_producer_int_setup = 0x00000210,
	.hal_reo1_ring_msi1_base_lsb = 0x00000234,
	.hal_reo1_ring_msi1_base_msb = 0x00000238,
	.hal_reo1_ring_msi1_data = 0x0000023c,
	.hal_reo2_ring_base_lsb = 0x00000244,
	.hal_reo1_aging_thresh_ix_0 = 0x00000564,
	.hal_reo1_aging_thresh_ix_1 = 0x00000568,
	.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
	.hal_reo1_aging_thresh_ix_3 = 0x00000570,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003028,
	.hal_reo1_ring_tp = 0x0000302c,
	.hal_reo2_ring_hp = 0x00003030,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003fc,
	.hal_reo_tcl_ring_hp = 0x00003058,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x000000e4,
	.hal_reo_cmd_ring_hp = 0x00003010,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x00000504,
	.hal_reo_status_hp = 0x00003070,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x0000013c,
	.hal_sw2reo_ring_hp = 0x00003018,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
	.hal_wbm_idle_link_ring_misc = 0x00000884,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001ec,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000924,
	.hal_wbm1_release_ring_base_lsb = 0x0000097c,

	/* PCIe base address */
	/* Zero: presumably unused on this hybrid-bus part — TODO confirm. */
	.pcie_qserdes_sysclk_en_sel = 0x0,
	.pcie_pcs_osc_dtct_config_base = 0x0,

	/* Shadow register area */
	.hal_shadow_base_addr = 0x00000504,

	/* REO misc control register, used for fragment
	 * destination ring config in WCN6750.
	 */
	.hal_reo1_misc_ctl = 0x000005d8,
};
4435 
/*
 * Platform capability bit flags.
 * NOTE(review): semantics inferred from the names; presumably used when
 * building the QMI host capability request — confirm against callers.
 */
#define QWX_SLEEP_CLOCK_SELECT_INTERNAL_BIT	0x02
#define QWX_HOST_CSTATE_BIT			0x04
#define QWX_PLATFORM_CAP_PCIE_GLOBAL_RESET	0x08
#define QWX_PLATFORM_CAP_PCIE_PME_D3COLD	0x10
4440 
/*
 * QMI wire-format description for the common response structure
 * (struct qmi_response_type_v01): two signed 16-bit enums, "result"
 * followed by "error", terminated by a QMI_EOTI sentinel entry.
 * Shared by response-message tables below via their .ei_array pointers.
 */
const struct qmi_elem_info qmi_response_type_v01_ei[] = {
	{
		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct qmi_response_type_v01, result),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct qmi_response_type_v01, error),
		.ei_array	= NULL,
	},
	{
		/* End-of-table sentinel. */
		.data_type	= QMI_EOTI,
		.elem_len	= 0,
		.elem_size	= 0,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= 0,
		.ei_array	= NULL,
	},
};
4470 
/*
 * QMI wire-format description for the WLAN firmware "indication
 * register" request message.  Each optional field is encoded as a
 * pair of table entries sharing one TLV type: a QMI_OPT_FLAG entry
 * for the *_valid member followed by the value entry itself.
 * Terminated by a QMI_EOTI sentinel entry.
 */
const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
	/* TLV 0x10: fw_ready_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_ready_enable),
	},
	/* TLV 0x11: initiate_cal_download_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_download_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_download_enable),
	},
	/* TLV 0x12: initiate_cal_update_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_update_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_update_enable),
	},
	/* TLV 0x13: msa_ready_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   msa_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   msa_ready_enable),
	},
	/* TLV 0x14: pin_connect_result_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   pin_connect_result_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   pin_connect_result_enable),
	},
	/* TLV 0x15: client_id (optional, 32-bit value) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   client_id_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   client_id),
	},
	/* TLV 0x16: request_mem_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   request_mem_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   request_mem_enable),
	},
	/* TLV 0x17: fw_mem_ready_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_mem_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_mem_ready_enable),
	},
	/* TLV 0x18: fw_init_done_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_init_done_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_init_done_enable),
	},

	/* TLV 0x19: rejuvenate_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   rejuvenate_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   rejuvenate_enable),
	},
	/* TLV 0x1A: xo_cal_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   xo_cal_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   xo_cal_enable),
	},
	/* TLV 0x1B: cal_done_enable (optional) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   cal_done_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   cal_done_enable),
	},
	{
		/* End-of-table sentinel. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4695 
/*
 * QMI wire-format description for the WLAN firmware "indication
 * register" response message: the mandatory common response struct
 * (TLV 0x02, decoded via qmi_response_type_v01_ei) plus an optional
 * 64-bit fw_status field (TLV 0x10, flag + value entry pair).
 * Terminated by a QMI_EOTI sentinel entry.
 */
const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   fw_status_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   fw_status),
	},
	{
		/* End-of-table sentinel. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4731 
4732 const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
4733 	{
4734 		.data_type	= QMI_OPT_FLAG,
4735 		.elem_len	= 1,
4736 		.elem_size	= sizeof(uint8_t),
4737 		.array_type	= NO_ARRAY,
4738 		.tlv_type	= 0x10,
4739 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4740 					   num_clients_valid),
4741 	},
4742 	{
4743 		.data_type	= QMI_UNSIGNED_4_BYTE,
4744 		.elem_len	= 1,
4745 		.elem_size	= sizeof(uint32_t),
4746 		.array_type	= NO_ARRAY,
4747 		.tlv_type	= 0x10,
4748 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4749 					   num_clients),
4750 	},
4751 	{
4752 		.data_type	= QMI_OPT_FLAG,
4753 		.elem_len	= 1,
4754 		.elem_size	= sizeof(uint8_t),
4755 		.array_type	= NO_ARRAY,
4756 		.tlv_type	= 0x11,
4757 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4758 					   wake_msi_valid),
4759 	},
4760 	{
4761 		.data_type	= QMI_UNSIGNED_4_BYTE,
4762 		.elem_len	= 1,
4763 		.elem_size	= sizeof(uint32_t),
4764 		.array_type	= NO_ARRAY,
4765 		.tlv_type	= 0x11,
4766 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4767 					   wake_msi),
4768 	},
4769 	{
4770 		.data_type	= QMI_OPT_FLAG,
4771 		.elem_len	= 1,
4772 		.elem_size	= sizeof(uint8_t),
4773 		.array_type	= NO_ARRAY,
4774 		.tlv_type	= 0x12,
4775 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4776 					   gpios_valid),
4777 	},
4778 	{
4779 		.data_type	= QMI_DATA_LEN,
4780 		.elem_len	= 1,
4781 		.elem_size	= sizeof(uint8_t),
4782 		.array_type	= NO_ARRAY,
4783 		.tlv_type	= 0x12,
4784 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4785 					   gpios_len),
4786 	},
4787 	{
4788 		.data_type	= QMI_UNSIGNED_4_BYTE,
4789 		.elem_len	= QMI_WLFW_MAX_NUM_GPIO_V01,
4790 		.elem_size	= sizeof(uint32_t),
4791 		.array_type	= VAR_LEN_ARRAY,
4792 		.tlv_type	= 0x12,
4793 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4794 					   gpios),
4795 	},
4796 	{
4797 		.data_type	= QMI_OPT_FLAG,
4798 		.elem_len	= 1,
4799 		.elem_size	= sizeof(uint8_t),
4800 		.array_type	= NO_ARRAY,
4801 		.tlv_type	= 0x13,
4802 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4803 					   nm_modem_valid),
4804 	},
4805 	{
4806 		.data_type	= QMI_UNSIGNED_1_BYTE,
4807 		.elem_len	= 1,
4808 		.elem_size	= sizeof(uint8_t),
4809 		.array_type	= NO_ARRAY,
4810 		.tlv_type	= 0x13,
4811 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4812 					   nm_modem),
4813 	},
4814 	{
4815 		.data_type	= QMI_OPT_FLAG,
4816 		.elem_len	= 1,
4817 		.elem_size	= sizeof(uint8_t),
4818 		.array_type	= NO_ARRAY,
4819 		.tlv_type	= 0x14,
4820 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4821 					   bdf_support_valid),
4822 	},
4823 	{
4824 		.data_type	= QMI_UNSIGNED_1_BYTE,
4825 		.elem_len	= 1,
4826 		.elem_size	= sizeof(uint8_t),
4827 		.array_type	= NO_ARRAY,
4828 		.tlv_type	= 0x14,
4829 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4830 					   bdf_support),
4831 	},
4832 	{
4833 		.data_type	= QMI_OPT_FLAG,
4834 		.elem_len	= 1,
4835 		.elem_size	= sizeof(uint8_t),
4836 		.array_type	= NO_ARRAY,
4837 		.tlv_type	= 0x15,
4838 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4839 					   bdf_cache_support_valid),
4840 	},
4841 	{
4842 		.data_type	= QMI_UNSIGNED_1_BYTE,
4843 		.elem_len	= 1,
4844 		.elem_size	= sizeof(uint8_t),
4845 		.array_type	= NO_ARRAY,
4846 		.tlv_type	= 0x15,
4847 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4848 					   bdf_cache_support),
4849 	},
4850 	{
4851 		.data_type	= QMI_OPT_FLAG,
4852 		.elem_len	= 1,
4853 		.elem_size	= sizeof(uint8_t),
4854 		.array_type	= NO_ARRAY,
4855 		.tlv_type	= 0x16,
4856 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4857 					   m3_support_valid),
4858 	},
4859 	{
4860 		.data_type	= QMI_UNSIGNED_1_BYTE,
4861 		.elem_len	= 1,
4862 		.elem_size	= sizeof(uint8_t),
4863 		.array_type	= NO_ARRAY,
4864 		.tlv_type	= 0x16,
4865 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4866 					   m3_support),
4867 	},
4868 	{
4869 		.data_type	= QMI_OPT_FLAG,
4870 		.elem_len	= 1,
4871 		.elem_size	= sizeof(uint8_t),
4872 		.array_type	= NO_ARRAY,
4873 		.tlv_type	= 0x17,
4874 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4875 					   m3_cache_support_valid),
4876 	},
4877 	{
4878 		.data_type	= QMI_UNSIGNED_1_BYTE,
4879 		.elem_len	= 1,
4880 		.elem_size	= sizeof(uint8_t),
4881 		.array_type	= NO_ARRAY,
4882 		.tlv_type	= 0x17,
4883 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4884 					   m3_cache_support),
4885 	},
4886 	{
4887 		.data_type	= QMI_OPT_FLAG,
4888 		.elem_len	= 1,
4889 		.elem_size	= sizeof(uint8_t),
4890 		.array_type	= NO_ARRAY,
4891 		.tlv_type	= 0x18,
4892 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4893 					   cal_filesys_support_valid),
4894 	},
4895 	{
4896 		.data_type	= QMI_UNSIGNED_1_BYTE,
4897 		.elem_len	= 1,
4898 		.elem_size	= sizeof(uint8_t),
4899 		.array_type	= NO_ARRAY,
4900 		.tlv_type	= 0x18,
4901 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4902 					   cal_filesys_support),
4903 	},
4904 	{
4905 		.data_type	= QMI_OPT_FLAG,
4906 		.elem_len	= 1,
4907 		.elem_size	= sizeof(uint8_t),
4908 		.array_type	= NO_ARRAY,
4909 		.tlv_type	= 0x19,
4910 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4911 					   cal_cache_support_valid),
4912 	},
4913 	{
4914 		.data_type	= QMI_UNSIGNED_1_BYTE,
4915 		.elem_len	= 1,
4916 		.elem_size	= sizeof(uint8_t),
4917 		.array_type	= NO_ARRAY,
4918 		.tlv_type	= 0x19,
4919 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4920 					   cal_cache_support),
4921 	},
4922 	{
4923 		.data_type	= QMI_OPT_FLAG,
4924 		.elem_len	= 1,
4925 		.elem_size	= sizeof(uint8_t),
4926 		.array_type	= NO_ARRAY,
4927 		.tlv_type	= 0x1A,
4928 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4929 					   cal_done_valid),
4930 	},
4931 	{
4932 		.data_type	= QMI_UNSIGNED_1_BYTE,
4933 		.elem_len	= 1,
4934 		.elem_size	= sizeof(uint8_t),
4935 		.array_type	= NO_ARRAY,
4936 		.tlv_type	= 0x1A,
4937 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4938 					   cal_done),
4939 	},
4940 	{
4941 		.data_type	= QMI_OPT_FLAG,
4942 		.elem_len	= 1,
4943 		.elem_size	= sizeof(uint8_t),
4944 		.array_type	= NO_ARRAY,
4945 		.tlv_type	= 0x1B,
4946 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4947 					   mem_bucket_valid),
4948 	},
4949 	{
4950 		.data_type	= QMI_UNSIGNED_4_BYTE,
4951 		.elem_len	= 1,
4952 		.elem_size	= sizeof(uint32_t),
4953 		.array_type	= NO_ARRAY,
4954 		.tlv_type	= 0x1B,
4955 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4956 					   mem_bucket),
4957 	},
4958 	{
4959 		.data_type	= QMI_OPT_FLAG,
4960 		.elem_len	= 1,
4961 		.elem_size	= sizeof(uint8_t),
4962 		.array_type	= NO_ARRAY,
4963 		.tlv_type	= 0x1C,
4964 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4965 					   mem_cfg_mode_valid),
4966 	},
4967 	{
4968 		.data_type	= QMI_UNSIGNED_1_BYTE,
4969 		.elem_len	= 1,
4970 		.elem_size	= sizeof(uint8_t),
4971 		.array_type	= NO_ARRAY,
4972 		.tlv_type	= 0x1C,
4973 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
4974 					   mem_cfg_mode),
4975 	},
4976 	{
4977 		.data_type	= QMI_EOTI,
4978 		.array_type	= NO_ARRAY,
4979 		.tlv_type	= QMI_COMMON_TLV_TYPE,
4980 	},
4981 };
4982 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_host_cap_resp_msg_v01.
 * The response carries only the standard QMI result struct (TLV 0x02),
 * decoded via the nested qmi_response_type_v01_ei table.
 */
const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4999 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_mem_cfg_s_v01
 * (one memory-region configuration: offset, size, secure flag).
 * tlv_type is 0 for all members because this struct is only embedded
 * inside other messages via .ei_array, never sent as a top-level TLV.
 */
const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5031 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_mem_seg_s_v01
 * (one firmware memory-segment request: size, type, and a variable-length
 * array of up to QMI_WLANFW_MAX_NUM_MEM_CFG_V01 mem_cfg sub-structs).
 * The QMI_DATA_LEN entry (mem_cfg_len) carries the element count for the
 * VAR_LEN_ARRAY entry that follows it.
 */
const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01,
				  size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
		.ei_array	= qmi_wlanfw_mem_cfg_s_v01_ei,
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5073 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_request_mem_ind_msg_v01,
 * the firmware's indication requesting host memory: a variable-length array
 * (TLV 0x01) of up to ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 memory segments.
 */
const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
	{
		/* Element count for the mem_seg array below. */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
					   mem_seg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_seg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
					   mem_seg),
		.ei_array	= qmi_wlanfw_mem_seg_s_v01_ei,
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5100 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_mem_seg_resp_s_v01
 * (one memory segment granted back to firmware: addr, size, type, restore).
 * Embedded only via .ei_array, hence tlv_type 0 throughout.
 */
const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5140 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_respond_mem_req_msg_v01,
 * the host's reply granting memory segments to firmware: a variable-length
 * array (TLV 0x01) of mem_seg_resp sub-structs.
 */
const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
	{
		/* Element count for the mem_seg array below. */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
					   mem_seg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
					   mem_seg),
		.ei_array	= qmi_wlanfw_mem_seg_resp_s_v01_ei,
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5167 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_respond_mem_resp_msg_v01.
 * Result-only response (standard QMI result struct in TLV 0x02).
 */
const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5185 
/*
 * QMI TLV element descriptors for the capability request message.
 * The request carries no payload, so the table is only the terminator.
 */
const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5193 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_rf_chip_info_s_v01
 * (chip_id and chip_family). Embedded only via .ei_array (tlv_type 0).
 */
const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
					   chip_id),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
					   chip_family),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5219 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_rf_board_info_s_v01
 * (board_id only). Embedded only via .ei_array (tlv_type 0).
 */
const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
					   board_id),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5236 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_soc_info_s_v01
 * (soc_id only). Embedded only via .ei_array (tlv_type 0).
 */
const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5252 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_fw_version_info_s_v01
 * (numeric fw_version plus a NUL-terminated build-timestamp string whose
 * maximum encoded length is ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01).
 */
const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
					   fw_version),
	},
	{
		/* elem_len includes room for the terminating NUL. */
		.data_type	= QMI_STRING,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
					   fw_build_timestamp),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5278 
5279 const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
5280 	{
5281 		.data_type	= QMI_STRUCT,
5282 		.elem_len	= 1,
5283 		.elem_size	= sizeof(struct qmi_response_type_v01),
5284 		.array_type	= NO_ARRAY,
5285 		.tlv_type	= 0x02,
5286 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
5287 		.ei_array	= qmi_response_type_v01_ei,
5288 	},
5289 	{
5290 		.data_type	= QMI_OPT_FLAG,
5291 		.elem_len	= 1,
5292 		.elem_size	= sizeof(uint8_t),
5293 		.array_type	= NO_ARRAY,
5294 		.tlv_type	= 0x10,
5295 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5296 					   chip_info_valid),
5297 	},
5298 	{
5299 		.data_type	= QMI_STRUCT,
5300 		.elem_len	= 1,
5301 		.elem_size	= sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
5302 		.array_type	= NO_ARRAY,
5303 		.tlv_type	= 0x10,
5304 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5305 					   chip_info),
5306 		.ei_array	= qmi_wlanfw_rf_chip_info_s_v01_ei,
5307 	},
5308 	{
5309 		.data_type	= QMI_OPT_FLAG,
5310 		.elem_len	= 1,
5311 		.elem_size	= sizeof(uint8_t),
5312 		.array_type	= NO_ARRAY,
5313 		.tlv_type	= 0x11,
5314 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5315 					   board_info_valid),
5316 	},
5317 	{
5318 		.data_type	= QMI_STRUCT,
5319 		.elem_len	= 1,
5320 		.elem_size	= sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
5321 		.array_type	= NO_ARRAY,
5322 		.tlv_type	= 0x11,
5323 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5324 					   board_info),
5325 		.ei_array	= qmi_wlanfw_rf_board_info_s_v01_ei,
5326 	},
5327 	{
5328 		.data_type	= QMI_OPT_FLAG,
5329 		.elem_len	= 1,
5330 		.elem_size	= sizeof(uint8_t),
5331 		.array_type	= NO_ARRAY,
5332 		.tlv_type	= 0x12,
5333 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5334 					   soc_info_valid),
5335 	},
5336 	{
5337 		.data_type	= QMI_STRUCT,
5338 		.elem_len	= 1,
5339 		.elem_size	= sizeof(struct qmi_wlanfw_soc_info_s_v01),
5340 		.array_type	= NO_ARRAY,
5341 		.tlv_type	= 0x12,
5342 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5343 					   soc_info),
5344 		.ei_array	= qmi_wlanfw_soc_info_s_v01_ei,
5345 	},
5346 	{
5347 		.data_type	= QMI_OPT_FLAG,
5348 		.elem_len	= 1,
5349 		.elem_size	= sizeof(uint8_t),
5350 		.array_type	= NO_ARRAY,
5351 		.tlv_type	= 0x13,
5352 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5353 					   fw_version_info_valid),
5354 	},
5355 	{
5356 		.data_type	= QMI_STRUCT,
5357 		.elem_len	= 1,
5358 		.elem_size	= sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
5359 		.array_type	= NO_ARRAY,
5360 		.tlv_type	= 0x13,
5361 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5362 					   fw_version_info),
5363 		.ei_array	= qmi_wlanfw_fw_version_info_s_v01_ei,
5364 	},
5365 	{
5366 		.data_type	= QMI_OPT_FLAG,
5367 		.elem_len	= 1,
5368 		.elem_size	= sizeof(uint8_t),
5369 		.array_type	= NO_ARRAY,
5370 		.tlv_type	= 0x14,
5371 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5372 					   fw_build_id_valid),
5373 	},
5374 	{
5375 		.data_type	= QMI_STRING,
5376 		.elem_len	= ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
5377 		.elem_size	= sizeof(char),
5378 		.array_type	= NO_ARRAY,
5379 		.tlv_type	= 0x14,
5380 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5381 					   fw_build_id),
5382 	},
5383 	{
5384 		.data_type	= QMI_OPT_FLAG,
5385 		.elem_len	= 1,
5386 		.elem_size	= sizeof(uint8_t),
5387 		.array_type	= NO_ARRAY,
5388 		.tlv_type	= 0x15,
5389 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5390 					   num_macs_valid),
5391 	},
5392 	{
5393 		.data_type	= QMI_UNSIGNED_1_BYTE,
5394 		.elem_len	= 1,
5395 		.elem_size	= sizeof(uint8_t),
5396 		.array_type	= NO_ARRAY,
5397 		.tlv_type	= 0x15,
5398 		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5399 					   num_macs),
5400 	},
5401 	{
5402 		.data_type      = QMI_OPT_FLAG,
5403 		.elem_len       = 1,
5404 		.elem_size      = sizeof(uint8_t),
5405 		.array_type     = NO_ARRAY,
5406 		.tlv_type       = 0x16,
5407 		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5408 					   voltage_mv_valid),
5409 	},
5410 	{
5411 		.data_type      = QMI_UNSIGNED_4_BYTE,
5412 		.elem_len       = 1,
5413 		.elem_size      = sizeof(uint32_t),
5414 		.array_type     = NO_ARRAY,
5415 		.tlv_type       = 0x16,
5416 		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5417 					   voltage_mv),
5418 	},
5419 	{
5420 		.data_type      = QMI_OPT_FLAG,
5421 		.elem_len       = 1,
5422 		.elem_size      = sizeof(uint8_t),
5423 		.array_type     = NO_ARRAY,
5424 		.tlv_type       = 0x17,
5425 		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5426 					   time_freq_hz_valid),
5427 	},
5428 	{
5429 		.data_type      = QMI_UNSIGNED_4_BYTE,
5430 		.elem_len       = 1,
5431 		.elem_size      = sizeof(uint32_t),
5432 		.array_type     = NO_ARRAY,
5433 		.tlv_type       = 0x17,
5434 		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5435 					   time_freq_hz),
5436 	},
5437 	{
5438 		.data_type      = QMI_OPT_FLAG,
5439 		.elem_len       = 1,
5440 		.elem_size      = sizeof(uint8_t),
5441 		.array_type     = NO_ARRAY,
5442 		.tlv_type       = 0x18,
5443 		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5444 					   otp_version_valid),
5445 	},
5446 	{
5447 		.data_type      = QMI_UNSIGNED_4_BYTE,
5448 		.elem_len       = 1,
5449 		.elem_size      = sizeof(uint32_t),
5450 		.array_type     = NO_ARRAY,
5451 		.tlv_type       = 0x18,
5452 		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5453 					   otp_version),
5454 	},
5455 	{
5456 		.data_type      = QMI_OPT_FLAG,
5457 		.elem_len       = 1,
5458 		.elem_size      = sizeof(uint8_t),
5459 		.array_type     = NO_ARRAY,
5460 		.tlv_type       = 0x19,
5461 		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5462 					   eeprom_read_timeout_valid),
5463 	},
5464 	{
5465 		.data_type      = QMI_UNSIGNED_4_BYTE,
5466 		.elem_len       = 1,
5467 		.elem_size      = sizeof(uint32_t),
5468 		.array_type     = NO_ARRAY,
5469 		.tlv_type       = 0x19,
5470 		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
5471 					   eeprom_read_timeout),
5472 	},
5473 	{
5474 		.data_type	= QMI_EOTI,
5475 		.array_type	= NO_ARRAY,
5476 		.tlv_type	= QMI_COMMON_TLV_TYPE,
5477 	},
5478 };
5479 
5480 const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
5481 	{
5482 		.data_type	= QMI_UNSIGNED_1_BYTE,
5483 		.elem_len	= 1,
5484 		.elem_size	= sizeof(uint8_t),
5485 		.array_type	= NO_ARRAY,
5486 		.tlv_type	= 0x01,
5487 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5488 					   valid),
5489 	},
5490 	{
5491 		.data_type	= QMI_OPT_FLAG,
5492 		.elem_len	= 1,
5493 		.elem_size	= sizeof(uint8_t),
5494 		.array_type	= NO_ARRAY,
5495 		.tlv_type	= 0x10,
5496 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5497 					   file_id_valid),
5498 	},
5499 	{
5500 		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
5501 		.elem_len	= 1,
5502 		.elem_size	= sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
5503 		.array_type	= NO_ARRAY,
5504 		.tlv_type	= 0x10,
5505 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5506 					   file_id),
5507 	},
5508 	{
5509 		.data_type	= QMI_OPT_FLAG,
5510 		.elem_len	= 1,
5511 		.elem_size	= sizeof(uint8_t),
5512 		.array_type	= NO_ARRAY,
5513 		.tlv_type	= 0x11,
5514 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5515 					   total_size_valid),
5516 	},
5517 	{
5518 		.data_type	= QMI_UNSIGNED_4_BYTE,
5519 		.elem_len	= 1,
5520 		.elem_size	= sizeof(uint32_t),
5521 		.array_type	= NO_ARRAY,
5522 		.tlv_type	= 0x11,
5523 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5524 					   total_size),
5525 	},
5526 	{
5527 		.data_type	= QMI_OPT_FLAG,
5528 		.elem_len	= 1,
5529 		.elem_size	= sizeof(uint8_t),
5530 		.array_type	= NO_ARRAY,
5531 		.tlv_type	= 0x12,
5532 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5533 					   seg_id_valid),
5534 	},
5535 	{
5536 		.data_type	= QMI_UNSIGNED_4_BYTE,
5537 		.elem_len	= 1,
5538 		.elem_size	= sizeof(uint32_t),
5539 		.array_type	= NO_ARRAY,
5540 		.tlv_type	= 0x12,
5541 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5542 					   seg_id),
5543 	},
5544 	{
5545 		.data_type	= QMI_OPT_FLAG,
5546 		.elem_len	= 1,
5547 		.elem_size	= sizeof(uint8_t),
5548 		.array_type	= NO_ARRAY,
5549 		.tlv_type	= 0x13,
5550 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5551 					   data_valid),
5552 	},
5553 	{
5554 		.data_type	= QMI_DATA_LEN,
5555 		.elem_len	= 1,
5556 		.elem_size	= sizeof(uint16_t),
5557 		.array_type	= NO_ARRAY,
5558 		.tlv_type	= 0x13,
5559 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5560 					   data_len),
5561 	},
5562 	{
5563 		.data_type	= QMI_UNSIGNED_1_BYTE,
5564 		.elem_len	= QMI_WLANFW_MAX_DATA_SIZE_V01,
5565 		.elem_size	= sizeof(uint8_t),
5566 		.array_type	= VAR_LEN_ARRAY,
5567 		.tlv_type	= 0x13,
5568 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5569 					   data),
5570 	},
5571 	{
5572 		.data_type	= QMI_OPT_FLAG,
5573 		.elem_len	= 1,
5574 		.elem_size	= sizeof(uint8_t),
5575 		.array_type	= NO_ARRAY,
5576 		.tlv_type	= 0x14,
5577 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5578 					   end_valid),
5579 	},
5580 	{
5581 		.data_type	= QMI_UNSIGNED_1_BYTE,
5582 		.elem_len	= 1,
5583 		.elem_size	= sizeof(uint8_t),
5584 		.array_type	= NO_ARRAY,
5585 		.tlv_type	= 0x14,
5586 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5587 					   end),
5588 	},
5589 	{
5590 		.data_type	= QMI_OPT_FLAG,
5591 		.elem_len	= 1,
5592 		.elem_size	= sizeof(uint8_t),
5593 		.array_type	= NO_ARRAY,
5594 		.tlv_type	= 0x15,
5595 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5596 					   bdf_type_valid),
5597 	},
5598 	{
5599 		.data_type	= QMI_UNSIGNED_1_BYTE,
5600 		.elem_len	= 1,
5601 		.elem_size	= sizeof(uint8_t),
5602 		.array_type	= NO_ARRAY,
5603 		.tlv_type	= 0x15,
5604 		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
5605 					   bdf_type),
5606 	},
5607 
5608 	{
5609 		.data_type	= QMI_EOTI,
5610 		.array_type	= NO_ARRAY,
5611 		.tlv_type	= QMI_COMMON_TLV_TYPE,
5612 	},
5613 };
5614 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_bdf_download_resp_msg_v01.
 * Result-only response (standard QMI result struct in TLV 0x02).
 */
const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5632 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_m3_info_req_msg_v01:
 * address (TLV 0x01) and size (TLV 0x02) of the M3 firmware image buffer.
 */
const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5656 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_m3_info_resp_msg_v01.
 * Result-only response (standard QMI result struct in TLV 0x02).
 */
const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5673 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_wlan_ini_req_msg_v01:
 * optional enablefwlog flag (TLV 0x10 valid/value pair).
 */
const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
					   enablefwlog_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
					   enablefwlog),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5699 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_wlan_ini_resp_msg_v01.
 * Result-only response (standard QMI result struct in TLV 0x02).
 */
const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5717 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
 * (one copy-engine target pipe configuration: pipe_num, pipe_dir, nentries,
 * nbytes_max, flags). Embedded only via .ei_array (tlv_type 0).
 */
const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_num),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   nentries),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   nbytes_max),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   flags),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5770 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
 * (one service-to-copy-engine pipe mapping: service_id, pipe_dir, pipe_num).
 * Embedded only via .ei_array (tlv_type 0).
 */
const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   service_id),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   pipe_num),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5805 
5806 const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
5807 	{
5808 		.data_type	= QMI_UNSIGNED_2_BYTE,
5809 		.elem_len	= 1,
5810 		.elem_size	= sizeof(uint16_t),
5811 		.array_type	= NO_ARRAY,
5812 		.tlv_type	= 0,
5813 		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
5814 	},
5815 	{
5816 		.data_type	= QMI_UNSIGNED_2_BYTE,
5817 		.elem_len	= 1,
5818 		.elem_size	= sizeof(uint16_t),
5819 		.array_type	= NO_ARRAY,
5820 		.tlv_type	= 0,
5821 		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
5822 					   offset),
5823 	},
5824 	{
5825 		.data_type	= QMI_EOTI,
5826 		.array_type	= QMI_COMMON_TLV_TYPE,
5827 	},
5828 };
5829 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01
 * (v2 shadow register mapping: a single 32-bit addr).
 * Embedded only via .ei_array (tlv_type 0).
 */
const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
					   addr),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5846 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_wlan_mode_req_msg_v01:
 * mandatory mode (TLV 0x01) plus an optional hw_debug flag
 * (TLV 0x10 valid/value pair).
 */
const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   mode),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   hw_debug_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   hw_debug),
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5881 
/*
 * QMI TLV element descriptors for struct qmi_wlanfw_wlan_mode_resp_msg_v01.
 * Result-only response (standard QMI result struct in TLV 0x02).
 */
const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table marker. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5899 
/*
 * QMI element info (wire-format description) for the WLANFW "wlan cfg"
 * request message. Each entry describes one TLV element: its QMI data
 * type, maximum length, TLV type byte, and the offset of the matching
 * field in struct qmi_wlanfw_wlan_cfg_req_msg_v01.
 * All elements here are optional (each value TLV is preceded by an
 * OPT_FLAG entry for its *_valid field). The array is terminated by
 * a QMI_EOTI entry.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
	/* TLV 0x10: host version string */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version_valid),
	},
	{
		.data_type	= QMI_STRING,
		.elem_len	= QMI_WLANFW_MAX_STR_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version),
	},
	/* TLV 0x11: copy-engine target pipe configuration array */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_CE_V01,
		.elem_size	= sizeof(
				struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg),
		.ei_array	= qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
	},
	/* TLV 0x12: copy-engine service pipe configuration array */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SVC_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg),
		.ei_array	= qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
	},
	/* TLV 0x13: shadow register configuration array */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg),
		.ei_array	= qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
	},
	/* TLV 0x14: shadow register configuration array, version 2 */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2),
		.ei_array	= qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
6038 
/*
 * QMI element info for the WLANFW "wlan cfg" response message,
 * which only carries the standard QMI result TLV (type 0x02).
 */
const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
6055 
6056 int
6057 qwx_ce_intr(void *arg)
6058 {
6059 	struct qwx_ce_pipe *pipe = arg;
6060 	struct qwx_softc *sc = pipe->sc;
6061 
6062 	if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, sc->sc_flags) ||
6063 	    ((sc->msi_ce_irqmask & (1 << pipe->pipe_num)) == 0)) {
6064 		DPRINTF("%s: unexpected interrupt on pipe %d\n",
6065 		    __func__, pipe->pipe_num);
6066 		return 1;
6067 	}
6068 
6069 	return qwx_ce_per_engine_service(sc, pipe->pipe_num);
6070 }
6071 
6072 int
6073 qwx_ext_intr(void *arg)
6074 {
6075 	struct qwx_ext_irq_grp *irq_grp = arg;
6076 	struct qwx_softc *sc = irq_grp->sc;
6077 
6078 	if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags)) {
6079 		DPRINTF("%s: unexpected interrupt for ext group %d\n",
6080 		    __func__, irq_grp->grp_id);
6081 		return 1;
6082 	}
6083 
6084 	return qwx_dp_service_srng(sc, irq_grp->grp_id);
6085 }
6086 
/*
 * Human-readable names for QMI element data types, used by debug
 * output below. Indexed by the numeric data type value, so the order
 * of entries must match the QMI data type enumeration (QMI_EOTI,
 * QMI_OPT_FLAG, ..., QMI_STRING).
 */
const char *qmi_data_type_name[QMI_NUM_DATA_TYPES] = {
	"EOTI",
	"OPT_FLAG",
	"DATA_LEN",
	"UNSIGNED_1_BYTE",
	"UNSIGNED_2_BYTE",
	"UNSIGNED_4_BYTE",
	"UNSIGNED_8_BYTE",
	"SIGNED_2_BYTE_ENUM",
	"SIGNED_4_BYTE_ENUM",
	"STRUCT",
	"STRING"
};
6100 
6101 const struct qmi_elem_info *
6102 qwx_qmi_decode_get_elem(const struct qmi_elem_info *ei, uint8_t elem_type)
6103 {
6104 	while (ei->data_type != QMI_EOTI && ei->tlv_type != elem_type)
6105 		ei++;
6106 
6107 	DNPRINTF(QWX_D_QMI, "%s: found elem 0x%x data type 0x%x\n", __func__,
6108 	    ei->tlv_type, ei->data_type);
6109 	return ei;
6110 }
6111 
/*
 * Compute the minimum number of bytes the QMI element described by
 * 'ei' occupies on the wire. Used to sanity-check length fields of
 * received messages before decoding.
 * 'nested' is the QMI_STRUCT nesting depth of this element; strings
 * nested inside structs carry an in-band length field which adds to
 * their minimum size.
 */
size_t
qwx_qmi_decode_min_elem_size(const struct qmi_elem_info *ei, int nested)
{
	size_t min_size = 0;

	switch (ei->data_type) {
	case QMI_EOTI:
	case QMI_OPT_FLAG:
		/* No payload of their own. */
		break;
	case QMI_DATA_LEN:
		/* One- or two-byte length field, depending on elem_len. */
		if (ei->elem_len == 1)
			min_size += sizeof(uint8_t);
		else
			min_size += sizeof(uint16_t);
		break;
	case QMI_UNSIGNED_1_BYTE:
	case QMI_UNSIGNED_2_BYTE:
	case QMI_UNSIGNED_4_BYTE:
	case QMI_UNSIGNED_8_BYTE:
	case QMI_SIGNED_2_BYTE_ENUM:
	case QMI_SIGNED_4_BYTE_ENUM:
		min_size += ei->elem_len * ei->elem_size;
		break;
	case QMI_STRUCT:
		/*
		 * NOTE(review): excessive nesting is only reported here;
		 * the recursion below still proceeds regardless.
		 */
		if (nested > 2) {
			printf("%s: QMI struct element 0x%x with "
			    "data type %s (0x%x) is nested too "
			    "deeply\n", __func__,
			    ei->tlv_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type);
		}
		/* Sum the minimum sizes of all struct members. */
		ei = ei->ei_array;
		while (ei->data_type != QMI_EOTI) {
			min_size += qwx_qmi_decode_min_elem_size(ei,
			    nested + 1);
			ei++;
		}
		break;
	case QMI_STRING:
		min_size += 1;
		/* Strings nested in structs use an in-band length field. */
		if (nested) {
			if (ei->elem_len <= 0xff)
				min_size += sizeof(uint8_t);
			else
				min_size += sizeof(uint16_t);
		}
		break;
	default:
		printf("%s: unhandled data type 0x%x\n", __func__,
		    ei->data_type);
		break;
	}

	return min_size;
}
6169 
/*
 * Parse and validate one QMI TLV header found at 'input'.
 * On success, *next_ei points at the element info entry matching the
 * TLV type byte (or at the QMI_EOTI terminator entry if the element
 * is not recognized), and *actual_size holds the element size
 * announced by the header. The caller remains responsible for
 * advancing past the header and decoding the element payload.
 * Returns 0 on success, -1 if the message is malformed.
 */
int
qwx_qmi_decode_tlv_hdr(struct qwx_softc *sc,
    const struct qmi_elem_info **next_ei, uint16_t *actual_size,
    size_t output_len, const struct qmi_elem_info *ei0,
    uint8_t *input, size_t input_len)
{
	uint8_t *p = input;
	size_t remain = input_len;
	uint8_t elem_type;
	uint16_t elem_size = 0;
	const struct qmi_elem_info *ei;

	*next_ei = NULL;
	*actual_size = 0;

	/* A TLV header is one type byte plus a two-byte length. */
	if (remain < 3) {
		printf("%s: QMI message TLV header too short\n",
		   sc->sc_dev.dv_xname);
		return -1;
	}
	elem_type = *p;
	p++;
	remain--;

	/*
	 * By relying on TLV type information we can skip over EIs which
	 * describe optional elements that have not been encoded.
	 * Such elements will be left at their default value (zero) in
	 * the decoded output struct.
	 * XXX We currently allow elements to appear in any order and
	 * we do not detect duplicates.
	 */
	ei = qwx_qmi_decode_get_elem(ei0, elem_type);

	DNPRINTF(QWX_D_QMI,
	    "%s: decoding element 0x%x with data type %s (0x%x)\n",
	    __func__, elem_type, qmi_data_type_name[ei->data_type],
	    ei->data_type);

	if (remain < 2) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	/*
	 * NOTE(review): a one-byte QMI_DATA_LEN element reads only a
	 * single size byte here, while the caller always skips a 3-byte
	 * header; confirm this matches the encoder's wire format.
	 */
	if (ei->data_type == QMI_DATA_LEN && ei->elem_len == 1) {
		elem_size = p[0];
		p++;
		remain--;
	} else {
		elem_size = (p[0] | (p[1] << 8));
		p += 2;
		remain -= 2;
	}

	*next_ei = ei;
	*actual_size = elem_size;

	/* Unknown TLV type: report it to the caller, which skips it. */
	if (ei->data_type == QMI_EOTI) {
		DNPRINTF(QWX_D_QMI,
		    "%s: unrecognized QMI element type 0x%x size %u\n",
		    sc->sc_dev.dv_xname, elem_type, elem_size);
		return 0;
	}

	/*
	 * Is this an optional element which has been encoded?
	 * If so, use info about this optional element for verification.
	 */
	if (ei->data_type == QMI_OPT_FLAG)
		ei++;

	DNPRINTF(QWX_D_QMI, "%s: ei->size %u, actual size %u\n", __func__,
	    ei->elem_size, *actual_size);

	/* Verify the announced size against the element description. */
	switch (ei->data_type) {
	case QMI_UNSIGNED_1_BYTE:
	case QMI_UNSIGNED_2_BYTE:
	case QMI_UNSIGNED_4_BYTE:
	case QMI_UNSIGNED_8_BYTE:
	case QMI_SIGNED_2_BYTE_ENUM:
	case QMI_SIGNED_4_BYTE_ENUM:
		/* Fixed-size elements must match exactly. */
		if (elem_size != ei->elem_size) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	case QMI_DATA_LEN:
		break;
	case QMI_STRING:
	case QMI_STRUCT:
		/* Variable-size elements must meet their minimum size. */
		if (elem_size < qwx_qmi_decode_min_elem_size(ei, 0)) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	default:
		printf("%s: unexpected QMI message element "
		    "data type 0x%x\n", sc->sc_dev.dv_xname,
		    ei->data_type);
		return -1;
	}

	if (remain < elem_size) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	/* The decoded value must fit within the output struct. */
	if (ei->offset + ei->elem_size > output_len) {
		printf("%s: QMI message element type 0x%x too large: %u\n",
		    sc->sc_dev.dv_xname, elem_type, ei->elem_size);
		return -1;
	}

	return 0;
}
6293 
6294 int
6295 qwx_qmi_decode_byte(void *output, const struct qmi_elem_info *ei, void *input)
6296 {
6297 	if (ei->elem_size != sizeof(uint8_t)) {
6298 		printf("%s: bad element size\n", __func__);
6299 		return -1;
6300 	}
6301 
6302 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
6303 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
6304 	memcpy(output, input, ei->elem_size);
6305 	return 0;
6306 }
6307 
6308 int
6309 qwx_qmi_decode_word(void *output, const struct qmi_elem_info *ei, void *input)
6310 {
6311 	if (ei->elem_size != sizeof(uint16_t)) {
6312 		printf("%s: bad element size\n", __func__);
6313 		return -1;
6314 	}
6315 
6316 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
6317 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
6318 	memcpy(output, input, ei->elem_size);
6319 	return 0;
6320 }
6321 
6322 int
6323 qwx_qmi_decode_dword(void *output, const struct qmi_elem_info *ei, void *input)
6324 {
6325 	if (ei->elem_size != sizeof(uint32_t)) {
6326 		printf("%s: bad element size\n", __func__);
6327 		return -1;
6328 	}
6329 
6330 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
6331 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
6332 	memcpy(output, input, ei->elem_size);
6333 	return 0;
6334 }
6335 
6336 int
6337 qwx_qmi_decode_qword(void *output, const struct qmi_elem_info *ei, void *input)
6338 {
6339 	if (ei->elem_size != sizeof(uint64_t)) {
6340 		printf("%s: bad element size\n", __func__);
6341 		return -1;
6342 	}
6343 
6344 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
6345 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
6346 	memcpy(output, input, ei->elem_size);
6347 	return 0;
6348 }
6349 
6350 int
6351 qwx_qmi_decode_datalen(struct qwx_softc *sc, size_t *used, uint32_t *datalen,
6352     void *output, size_t output_len, const struct qmi_elem_info *ei,
6353     uint8_t *input, uint16_t input_len)
6354 {
6355 	uint8_t *p = input;
6356 	size_t remain = input_len;
6357 
6358 	*datalen = 0;
6359 
6360 	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
6361 	for (int i = 0; i < input_len; i++) {
6362 		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
6363 	}
6364 	DNPRINTF(QWX_D_QMI, "\n");
6365 
6366 	if (remain < ei->elem_size) {
6367 		printf("%s: QMI message too short: remain=%zu elem_size=%u\n", __func__, remain, ei->elem_size);
6368 		return -1;
6369 	}
6370 
6371 	switch (ei->elem_size) {
6372 	case sizeof(uint8_t):
6373 		*datalen = p[0];
6374 		break;
6375 	case sizeof(uint16_t):
6376 		*datalen = p[0] | (p[1] << 8);
6377 		break;
6378 	default:
6379 		printf("%s: bad datalen element size %u\n",
6380 		    sc->sc_dev.dv_xname, ei->elem_size);
6381 		return -1;
6382 
6383 	}
6384 	*used = ei->elem_size;
6385 
6386 	if (ei->offset + sizeof(*datalen) > output_len) {
6387 		printf("%s: QMI message element type 0x%x too large\n",
6388 		    sc->sc_dev.dv_xname, ei->tlv_type);
6389 		return -1;
6390 	}
6391 	memcpy(output + ei->offset, datalen, sizeof(*datalen));
6392 	return 0;
6393 }
6394 
6395 int
6396 qwx_qmi_decode_string(struct qwx_softc *sc, size_t *used_total,
6397     void *output, size_t output_len, const struct qmi_elem_info *ei,
6398     uint8_t *input, uint16_t input_len, uint16_t elem_size, int nested)
6399 {
6400 	uint8_t *p = input;
6401 	uint16_t len;
6402 	size_t remain = input_len;
6403 
6404 	*used_total = 0;
6405 
6406 	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
6407 	for (int i = 0; i < input_len; i++) {
6408 		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
6409 	}
6410 	DNPRINTF(QWX_D_QMI, "\n");
6411 
6412 	if (nested) {
6413 		/* Strings nested in structs use an in-band length field. */
6414 		if (ei->elem_len <= 0xff) {
6415 			if (remain == 0) {
6416 				printf("%s: QMI string length header exceeds "
6417 				    "input buffer size\n", __func__);
6418 				return -1;
6419 			}
6420 			len = p[0];
6421 			p++;
6422 			(*used_total)++;
6423 			remain--;
6424 		} else {
6425 			if (remain < 2) {
6426 				printf("%s: QMI string length header exceeds "
6427 				    "input buffer size\n", __func__);
6428 				return -1;
6429 			}
6430 			len = p[0] | (p[1] << 8);
6431 			p += 2;
6432 			*used_total += 2;
6433 			remain -= 2;
6434 		}
6435 	} else
6436 		len = elem_size;
6437 
6438 	if (len > ei->elem_len) {
6439 		printf("%s: QMI string element of length %u exceeds "
6440 		    "maximum length %u\n", __func__, len, ei->elem_len);
6441 		return -1;
6442 	}
6443 	if (len > remain) {
6444 		printf("%s: QMI string element of length %u exceeds "
6445 		    "input buffer size %zu\n", __func__, len, remain);
6446 		return -1;
6447 	}
6448 	if (len > output_len) {
6449 		printf("%s: QMI string element of length %u exceeds "
6450 		    "output buffer size %zu\n", __func__, len, output_len);
6451 		return -1;
6452 	}
6453 
6454 	memcpy(output, p, len);
6455 
6456 	p = output;
6457 	p[len] = '\0';
6458 	DNPRINTF(QWX_D_QMI, "%s: string (len %u): %s\n", __func__, len, p);
6459 
6460 	*used_total += len;
6461 	return 0;
6462 }
6463 
/*
 * Decode a QMI_STRUCT element: walk the struct's element info array
 * (struct_ei->ei_array) and decode each member from 'input' into
 * 'output'. Decoding stops once at least the struct's minimum encoded
 * size has been consumed, or when the element info list ends.
 * 'nested' tracks the struct nesting depth; recursion is bounded.
 * On success *used_total is set to the number of input bytes consumed.
 * Returns 0 on success, -1 on malformed input.
 */
int
qwx_qmi_decode_struct(struct qwx_softc *sc, size_t *used_total,
    void *output, size_t output_len,
    const struct qmi_elem_info *struct_ei,
    uint8_t *input, uint16_t input_len,
    int nested)
{
	const struct qmi_elem_info *ei = struct_ei->ei_array;
	uint32_t min_size;
	uint8_t *p = input;
	size_t remain = input_len;
	size_t used = 0;

	*used_total = 0;

	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
	for (int i = 0; i < input_len; i++) {
		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
	}
	DNPRINTF(QWX_D_QMI, "\n");

	min_size = qwx_qmi_decode_min_elem_size(struct_ei, 0);
	DNPRINTF(QWX_D_QMI, "%s: minimum struct size: %u\n", __func__, min_size);
	while (*used_total < min_size && ei->data_type != QMI_EOTI) {
		if (remain == 0) {
			printf("%s: QMI message too short\n", __func__);
			return -1;
		}

		if (ei->data_type == QMI_DATA_LEN) {
			uint32_t datalen;

			/* Read the in-band length of the following member. */
			used = 0;
			if (qwx_qmi_decode_datalen(sc, &used, &datalen,
			    output, output_len, ei, p, remain))
				return -1;
			DNPRINTF(QWX_D_QMI, "%s: datalen %u used %zu bytes\n",
			    __func__, datalen, used);
			p += used;
			remain -= used;
			*used_total += used;
			if (remain < datalen) {
				printf("%s: QMI message too short\n", __func__);
				return -1;
			}
			ei++;
			DNPRINTF(QWX_D_QMI, "%s: datalen is for data_type=0x%x "
			    "tlv_type=0x%x elem_size=%u(0x%x) remain=%zu\n",
			    __func__, ei->data_type, ei->tlv_type,
			    ei->elem_size, ei->elem_size, remain);
			/* Zero-length member: skip past its element info. */
			if (datalen == 0) {
				ei++;
				DNPRINTF(QWX_D_QMI,
				    "%s: skipped to data_type=0x%x "
				    "tlv_type=0x%x elem_size=%u(0x%x) "
				    "remain=%zu\n", __func__,
				    ei->data_type, ei->tlv_type,
				    ei->elem_size, ei->elem_size, remain);
				continue;
			}
		} else {
			if (remain < ei->elem_size) {
				printf("%s: QMI message too short\n",
				    __func__);
				return -1;
			}
		}

		/* The decoded member must fit within the output struct. */
		if (ei->offset + ei->elem_size > output_len) {
			printf("%s: QMI message struct member element "
			    "type 0x%x too large: %u\n", sc->sc_dev.dv_xname,
			    ei->tlv_type, ei->elem_size);
			return -1;
		}

		DNPRINTF(QWX_D_QMI,
		    "%s: decoding struct member element 0x%x with "
		    "data type %s (0x%x) size=%u(0x%x) remain=%zu\n", __func__,
		    ei->tlv_type, qmi_data_type_name[ei->data_type],
		    ei->data_type, ei->elem_size, ei->elem_size, remain);
		switch (ei->data_type) {
		case QMI_UNSIGNED_1_BYTE:
			if (qwx_qmi_decode_byte(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_2_BYTE:
		case QMI_SIGNED_2_BYTE_ENUM:
			if (qwx_qmi_decode_word(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_4_BYTE:
		case QMI_SIGNED_4_BYTE_ENUM:
			if (qwx_qmi_decode_dword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_8_BYTE:
			if (qwx_qmi_decode_qword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_STRUCT:
			/* Bound the recursion depth. */
			if (nested > 2) {
				printf("%s: QMI struct element data type 0x%x "
				    "is nested too deeply\n",
				    sc->sc_dev.dv_xname, ei->data_type);
				return -1;
			}
			used = 0;
			if (qwx_qmi_decode_struct(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, nested + 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		case QMI_STRING:
			used = 0;
			if (qwx_qmi_decode_string(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, 0, 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		default:
			printf("%s: unhandled QMI struct element "
			    "data type 0x%x\n", sc->sc_dev.dv_xname,
			    ei->data_type);
			return -1;
		}

		ei++;
		DNPRINTF(QWX_D_QMI, "%s: next ei 0x%x ei->data_type=0x%x\n",
		    __func__, ei->tlv_type, ei->data_type);
	}

	DNPRINTF(QWX_D_QMI, "%s: used_total=%zu ei->data_type=0x%x\n",
	    __func__, *used_total, ei->data_type);

	return 0;
}
6618 
6619 int
6620 qwx_qmi_decode_msg(struct qwx_softc *sc, void *output, size_t output_len,
6621     const struct qmi_elem_info *ei0, uint8_t *input, uint16_t input_len)
6622 {
6623 	uint8_t *p = input;
6624 	size_t remain = input_len, used;
6625 	const struct qmi_elem_info *ei = ei0;
6626 
6627 	memset(output, 0, output_len);
6628 
6629 	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
6630 	for (int i = 0; i < input_len; i++) {
6631 		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
6632 	}
6633 	DNPRINTF(QWX_D_QMI, "\n");
6634 
6635 	while (remain > 0 && ei->data_type != QMI_EOTI) {
6636 		uint32_t nelem = 1, i;
6637 		uint16_t datalen;
6638 
6639 		if (qwx_qmi_decode_tlv_hdr(sc, &ei, &datalen, output_len,
6640 		    ei0, p, remain))
6641 			return -1;
6642 
6643 		/* Skip unrecognized elements. */
6644 		if (ei->data_type == QMI_EOTI) {
6645 			p += 3 + datalen;
6646 			remain -= 3 + datalen;
6647 			ei = ei0;
6648 			continue;
6649 		}
6650 
6651 		/* Set 'valid' flag for optional fields in output struct. */
6652 		if (ei->data_type == QMI_OPT_FLAG) {
6653 			uint8_t *pvalid;
6654 
6655 			if (ei->offset + ei->elem_size > output_len) {
6656 				printf("%s: QMI message element type 0x%x "
6657 				    "too large: %u\n", sc->sc_dev.dv_xname,
6658 				    ei->tlv_type, ei->elem_size);
6659 			}
6660 
6661 			pvalid = (uint8_t *)output + ei->offset;
6662 			*pvalid = 1;
6663 
6664 			ei++;
6665 		}
6666 
6667 		p += 3;
6668 		remain -= 3;
6669 
6670 		if (ei->data_type == QMI_DATA_LEN) {
6671 			const struct qmi_elem_info *datalen_ei = ei;
6672 			uint8_t elem_type = ei->tlv_type;
6673 
6674 			/*
6675 			 * Size info in TLV header indicates the
6676 			 * total length of element data that follows.
6677 			 */
6678 			if (remain < datalen) {
6679 				printf("%s:%d QMI message too short\n",
6680 				    __func__, __LINE__);
6681 				return -1;
6682 			}
6683 
6684 			ei++;
6685 			DNPRINTF(QWX_D_QMI,
6686 			    "%s: next ei data_type=0x%x tlv_type=0x%x "
6687 			    "dst elem_size=%u(0x%x) src total size=%u "
6688 			    "remain=%zu\n", __func__, ei->data_type,
6689 			    ei->tlv_type, ei->elem_size, ei->elem_size,
6690 			    datalen, remain);
6691 
6692 			/* Related EIs must have the same type. */
6693 			if (ei->tlv_type != elem_type) {
6694 				printf("%s: unexpected element type 0x%x; "
6695 				    "expected 0x%x\n", __func__,
6696 				    ei->tlv_type, elem_type);
6697 				return -1;
6698 			}
6699 
6700 			if (datalen == 0) {
6701 				if (ei->data_type != QMI_EOTI)
6702 					ei++;
6703 				continue;
6704 			}
6705 
6706 			/*
6707 			 * For variable length arrays a one- or two-byte
6708 			 * value follows the header, indicating the number
6709 			 * of elements in the array.
6710 			 */
6711 			if (ei->array_type == VAR_LEN_ARRAY) {
6712 				DNPRINTF(QWX_D_QMI,
6713 				    "%s: variable length array\n", __func__);
6714 				used = 0;
6715 				if (qwx_qmi_decode_datalen(sc, &used, &nelem,
6716 				    output, output_len, datalen_ei, p, remain))
6717 					return -1;
6718 				p += used;
6719 				remain -= used;
6720 				/*
6721 				 * Previous datalen value included the total
6722 				 * amount of bytes following the DATALEN TLV
6723 				 * header.
6724 				 */
6725 				datalen -= used;
6726 
6727 				if (nelem == 0) {
6728 					if (ei->data_type != QMI_EOTI)
6729 						ei++;
6730 					continue;
6731 				}
6732 
6733 				DNPRINTF(QWX_D_QMI,
6734 				    "%s: datalen %u used %zu bytes\n",
6735 				    __func__, nelem, used);
6736 
6737 				DNPRINTF(QWX_D_QMI,
6738 				    "%s: decoding %u array elements with "
6739 				    "src size %u dest size %u\n", __func__,
6740 				    nelem, datalen / nelem, ei->elem_size);
6741 			}
6742 		}
6743 
6744 		if (remain < datalen) {
6745 			printf("%s:%d QMI message too short: remain=%zu, "
6746 			    "datalen=%u\n", __func__, __LINE__, remain,
6747 			    datalen);
6748 			return -1;
6749 		}
6750 		if (output_len < nelem * ei->elem_size) {
6751 			printf("%s: QMI output buffer too short: remain=%zu "
6752 			    "nelem=%u ei->elem_size=%u\n", __func__, remain,
6753 			    nelem, ei->elem_size);
6754 			return -1;
6755 		}
6756 
6757 		for (i = 0; i < nelem && remain > 0; i++) {
6758 			size_t outoff;
6759 
6760 			outoff = ei->offset + (ei->elem_size * i);
6761 			switch (ei->data_type) {
6762 			case QMI_STRUCT:
6763 				used = 0;
6764 				if (qwx_qmi_decode_struct(sc, &used,
6765 				    output + outoff, output_len - outoff,
6766 				    ei, p, remain, 0))
6767 					return -1;
6768 				remain -= used;
6769 				p += used;
6770 				if (used != datalen) {
6771 					DNPRINTF(QWX_D_QMI,
6772 					    "%s struct used only %zu bytes "
6773 					    "of %u input bytes\n", __func__,
6774 					    used, datalen);
6775 				} else {
6776 					DNPRINTF(QWX_D_QMI,
6777 					    "%s: struct used %zu bytes "
6778 					    "of input\n", __func__, used);
6779 				}
6780 				break;
6781 			case QMI_STRING:
6782 				used = 0;
6783 				if (qwx_qmi_decode_string(sc, &used,
6784 				    output + outoff, output_len - outoff,
6785 				    ei, p, remain, datalen, 0))
6786 					return -1;
6787 				remain -= used;
6788 				p += used;
6789 				if (used != datalen) {
6790 					DNPRINTF(QWX_D_QMI,
6791 					    "%s: string used only %zu bytes "
6792 					    "of %u input bytes\n", __func__,
6793 					    used, datalen);
6794 				} else {
6795 					DNPRINTF(QWX_D_QMI,
6796 					    "%s: string used %zu bytes "
6797 					    "of input\n", __func__, used);
6798 				}
6799 				break;
6800 			case QMI_UNSIGNED_1_BYTE:
6801 				if (remain < ei->elem_size) {
6802 					printf("%s: QMI message too "
6803 					    "short\n", __func__);
6804 					return -1;
6805 				}
6806 				if (qwx_qmi_decode_byte(output + outoff,
6807 				    ei, p))
6808 					return -1;
6809 				remain -= ei->elem_size;
6810 				p += ei->elem_size;
6811 				break;
6812 			case QMI_UNSIGNED_2_BYTE:
6813 			case QMI_SIGNED_2_BYTE_ENUM:
6814 				if (remain < ei->elem_size) {
6815 					printf("%s: QMI message too "
6816 					    "short\n", __func__);
6817 					return -1;
6818 				}
6819 				if (qwx_qmi_decode_word(output + outoff,
6820 				    ei, p))
6821 					return -1;
6822 				remain -= ei->elem_size;
6823 				p += ei->elem_size;
6824 				break;
6825 			case QMI_UNSIGNED_4_BYTE:
6826 			case QMI_SIGNED_4_BYTE_ENUM:
6827 				if (remain < ei->elem_size) {
6828 					printf("%s: QMI message too "
6829 					    "short\n", __func__);
6830 					return -1;
6831 				}
6832 				if (qwx_qmi_decode_dword(output + outoff,
6833 				    ei, p))
6834 					return -1;
6835 				remain -= ei->elem_size;
6836 				p += ei->elem_size;
6837 				break;
6838 			case QMI_UNSIGNED_8_BYTE:
6839 				if (remain < ei->elem_size) {
6840 					printf("%s: QMI message too "
6841 					    "short 4\n", __func__);
6842 					return -1;
6843 				}
6844 				if (qwx_qmi_decode_qword(output + outoff,
6845 				    ei, p))
6846 					return -1;
6847 				remain -= ei->elem_size;
6848 				p += ei->elem_size;
6849 				break;
6850 			default:
6851 				printf("%s: unhandled QMI message element "
6852 				    "data type 0x%x\n",
6853 				    sc->sc_dev.dv_xname, ei->data_type);
6854 				return -1;
6855 			}
6856 		}
6857 
6858 		ei++;
6859 		DNPRINTF(QWX_D_QMI,
6860 		    "%s: next ei 0x%x ei->data_type=0x%x remain=%zu\n",
6861 		    __func__, ei->tlv_type, ei->data_type, remain);
6862 
6863 		DNPRINTF(QWX_D_QMI, "%s: remaining input: ", __func__);
6864 		for (int i = 0; i < remain; i++)
6865 			DNPRINTF(QWX_D_QMI, " %02x", p[i]);
6866 		DNPRINTF(QWX_D_QMI, "\n");
6867 	}
6868 
6869 	return 0;
6870 }
6871 
6872 void
6873 qwx_qmi_recv_wlanfw_ind_register_req_v1(struct qwx_softc *sc, struct mbuf *m,
6874     uint16_t txn_id, uint16_t msg_len)
6875 {
6876 	struct qmi_wlanfw_ind_register_resp_msg_v01 resp;
6877 	const struct qmi_elem_info *ei;
6878 	uint8_t *msg = mtod(m, uint8_t *);
6879 
6880 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
6881 
6882 	ei = qmi_wlanfw_ind_register_resp_msg_v01_ei;
6883 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6884 		return;
6885 
6886 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
6887 	    __func__, le16toh(resp.resp.result));
6888 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
6889 	    __func__, le16toh(resp.resp.error));
6890 	DNPRINTF(QWX_D_QMI, "%s: resp.fw_status=0x%llx\n",
6891 	   __func__, le64toh(resp.fw_status));
6892 
6893 	sc->qmi_resp.result = le16toh(resp.resp.result);
6894 	sc->qmi_resp.error = le16toh(resp.resp.error);
6895 	wakeup(&sc->qmi_resp);
6896 }
6897 
6898 void
6899 qwx_qmi_recv_wlanfw_host_cap_resp_v1(struct qwx_softc *sc, struct mbuf *m,
6900     uint16_t txn_id, uint16_t msg_len)
6901 {
6902 	struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
6903 	const struct qmi_elem_info *ei;
6904 	uint8_t *msg = mtod(m, uint8_t *);
6905 
6906 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
6907 
6908 	ei = qmi_wlanfw_host_cap_resp_msg_v01_ei;
6909 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6910 		return;
6911 
6912 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
6913 	    __func__, le16toh(resp.resp.result));
6914 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
6915 	    __func__, le16toh(resp.resp.error));
6916 
6917 	sc->qmi_resp.result = le16toh(resp.resp.result);
6918 	sc->qmi_resp.error = le16toh(resp.resp.error);
6919 	wakeup(&sc->qmi_resp);
6920 }
6921 
6922 void
6923 qwx_qmi_recv_wlanfw_respond_mem_resp_v1(struct qwx_softc *sc, struct mbuf *m,
6924     uint16_t txn_id, uint16_t msg_len)
6925 {
6926 	struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
6927 	const struct qmi_elem_info *ei;
6928 	uint8_t *msg = mtod(m, uint8_t *);
6929 
6930 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
6931 
6932 	ei = qmi_wlanfw_respond_mem_resp_msg_v01_ei;
6933 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6934 		return;
6935 
6936 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
6937 	    __func__, le16toh(resp.resp.result));
6938 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
6939 	    __func__, le16toh(resp.resp.error));
6940 
6941 	sc->qmi_resp.result = le16toh(resp.resp.result);
6942 	sc->qmi_resp.error = le16toh(resp.resp.error);
6943 	wakeup(&sc->qmi_resp);
6944 }
6945 
/*
 * Handle a firmware capabilities QMI response: decode it, cache the
 * reported target information in sc->qmi_target, and wake the task
 * sleeping on sc->qmi_resp with the response status.
 */
void
qwx_qmi_recv_wlanfw_cap_resp_v1(struct qwx_softc *sc, struct mbuf *m,
    uint16_t txn_id, uint16_t msg_len)
{
	struct qmi_wlanfw_cap_resp_msg_v01 resp;
	const struct qmi_elem_info *ei;
	uint8_t *msg = mtod(m, uint8_t *);

	DNPRINTF(QWX_D_QMI, "%s\n", __func__);

	memset(&resp, 0, sizeof(resp));

	ei = qmi_wlanfw_cap_resp_msg_v01_ei;
	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
		return;

	/* Optional TLVs: only consume fields the firmware actually sent. */
	if (resp.chip_info_valid) {
		sc->qmi_target.chip_id = resp.chip_info.chip_id;
		sc->qmi_target.chip_family = resp.chip_info.chip_family;
	}

	if (resp.board_info_valid)
		sc->qmi_target.board_id = resp.board_info.board_id;
	else
		sc->qmi_target.board_id = 0xFF;

	if (resp.soc_info_valid)
		sc->qmi_target.soc_id = resp.soc_info.soc_id;

	if (resp.fw_version_info_valid) {
		sc->qmi_target.fw_version = resp.fw_version_info.fw_version;
		strlcpy(sc->qmi_target.fw_build_timestamp,
			resp.fw_version_info.fw_build_timestamp,
			sizeof(sc->qmi_target.fw_build_timestamp));
	}

	if (resp.fw_build_id_valid)
		strlcpy(sc->qmi_target.fw_build_id, resp.fw_build_id,
			sizeof(sc->qmi_target.fw_build_id));

	/* A non-zero timeout indicates calibration data lives in EEPROM. */
	if (resp.eeprom_read_timeout_valid) {
		sc->qmi_target.eeprom_caldata = resp.eeprom_read_timeout;
		DNPRINTF(QWX_D_QMI,
		    "%s: qmi cal data supported from eeprom\n", __func__);
	}

	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
	    __func__, le16toh(resp.resp.result));
	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
	    __func__, le16toh(resp.resp.error));

	/* Publish status for the task sleeping in the request sender. */
	sc->qmi_resp.result = le16toh(resp.resp.result);
	sc->qmi_resp.error = le16toh(resp.resp.error);
	wakeup(&sc->qmi_resp);
}
7001 
7002 void
7003 qwx_qmi_recv_wlanfw_bdf_download_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7004     uint16_t txn_id, uint16_t msg_len)
7005 {
7006 	struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
7007 	const struct qmi_elem_info *ei;
7008 	uint8_t *msg = mtod(m, uint8_t *);
7009 
7010 	memset(&resp, 0, sizeof(resp));
7011 
7012 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7013 
7014 	ei = qmi_wlanfw_bdf_download_resp_msg_v01_ei;
7015 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7016 		return;
7017 
7018 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7019 	    __func__, le16toh(resp.resp.result));
7020 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7021 	    __func__, le16toh(resp.resp.error));
7022 
7023 	sc->qmi_resp.result = le16toh(resp.resp.result);
7024 	sc->qmi_resp.error = le16toh(resp.resp.error);
7025 	wakeup(&sc->qmi_resp);
7026 }
7027 
7028 void
7029 qwx_qmi_recv_wlanfw_m3_info_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7030     uint16_t txn_id, uint16_t msg_len)
7031 {
7032 	struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
7033 	const struct qmi_elem_info *ei;
7034 	uint8_t *msg = mtod(m, uint8_t *);
7035 
7036 	memset(&resp, 0, sizeof(resp));
7037 
7038 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7039 
7040 	ei = qmi_wlanfw_m3_info_resp_msg_v01_ei;
7041 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7042 		return;
7043 
7044 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7045 	    __func__, le16toh(resp.resp.result));
7046 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7047 	    __func__, le16toh(resp.resp.error));
7048 
7049 	sc->qmi_resp.result = le16toh(resp.resp.result);
7050 	sc->qmi_resp.error = le16toh(resp.resp.error);
7051 	wakeup(&sc->qmi_resp);
7052 }
7053 
7054 void
7055 qwx_qmi_recv_wlanfw_wlan_ini_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7056     uint16_t txn_id, uint16_t msg_len)
7057 {
7058 	struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp;
7059 	const struct qmi_elem_info *ei;
7060 	uint8_t *msg = mtod(m, uint8_t *);
7061 
7062 	memset(&resp, 0, sizeof(resp));
7063 
7064 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7065 
7066 	ei = qmi_wlanfw_wlan_ini_resp_msg_v01_ei;
7067 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7068 		return;
7069 
7070 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7071 	    __func__, le16toh(resp.resp.result));
7072 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7073 	    __func__, le16toh(resp.resp.error));
7074 
7075 	sc->qmi_resp.result = le16toh(resp.resp.result);
7076 	sc->qmi_resp.error = le16toh(resp.resp.error);
7077 	wakeup(&sc->qmi_resp);
7078 }
7079 
7080 void
7081 qwx_qmi_recv_wlanfw_wlan_cfg_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7082     uint16_t txn_id, uint16_t msg_len)
7083 {
7084 	struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
7085 	const struct qmi_elem_info *ei;
7086 	uint8_t *msg = mtod(m, uint8_t *);
7087 
7088 	memset(&resp, 0, sizeof(resp));
7089 
7090 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7091 
7092 	ei = qmi_wlanfw_wlan_cfg_resp_msg_v01_ei;
7093 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7094 		return;
7095 
7096 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7097 	    __func__, le16toh(resp.resp.result));
7098 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7099 	    __func__, le16toh(resp.resp.error));
7100 
7101 	sc->qmi_resp.result = le16toh(resp.resp.result);
7102 	sc->qmi_resp.error = le16toh(resp.resp.error);
7103 	wakeup(&sc->qmi_resp);
7104 }
7105 
7106 void
7107 qwx_qmi_recv_wlanfw_wlan_mode_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7108     uint16_t txn_id, uint16_t msg_len)
7109 {
7110 	struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
7111 	const struct qmi_elem_info *ei;
7112 	uint8_t *msg = mtod(m, uint8_t *);
7113 
7114 	memset(&resp, 0, sizeof(resp));
7115 
7116 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7117 
7118 	ei = qmi_wlanfw_wlan_mode_resp_msg_v01_ei;
7119 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7120 		return;
7121 
7122 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7123 	    __func__, le16toh(resp.resp.result));
7124 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7125 	    __func__, le16toh(resp.resp.error));
7126 
7127 	sc->qmi_resp.result = le16toh(resp.resp.result);
7128 	sc->qmi_resp.error = le16toh(resp.resp.error);
7129 	wakeup(&sc->qmi_resp);
7130 }
7131 
7132 void
7133 qwx_qmi_recv_response(struct qwx_softc *sc, struct mbuf *m,
7134     uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
7135 {
7136 	switch (msg_id) {
7137 	case QMI_WLANFW_IND_REGISTER_REQ_V01:
7138 		qwx_qmi_recv_wlanfw_ind_register_req_v1(sc, m, txn_id, msg_len);
7139 		break;
7140 	case QMI_WLFW_HOST_CAP_RESP_V01:
7141 		qwx_qmi_recv_wlanfw_host_cap_resp_v1(sc, m, txn_id, msg_len);
7142 		break;
7143 	case QMI_WLFW_RESPOND_MEM_RESP_V01:
7144 		qwx_qmi_recv_wlanfw_respond_mem_resp_v1(sc, m, txn_id, msg_len);
7145 		break;
7146 	case QMI_WLANFW_CAP_RESP_V01:
7147 		qwx_qmi_recv_wlanfw_cap_resp_v1(sc, m, txn_id, msg_len);
7148 		break;
7149 	case QMI_WLANFW_BDF_DOWNLOAD_RESP_V01:
7150 		qwx_qmi_recv_wlanfw_bdf_download_resp_v1(sc, m, txn_id,
7151 		    msg_len);
7152 		break;
7153 	case QMI_WLANFW_M3_INFO_RESP_V01:
7154 		qwx_qmi_recv_wlanfw_m3_info_resp_v1(sc, m, txn_id, msg_len);
7155 		break;
7156 	case QMI_WLANFW_WLAN_INI_RESP_V01:
7157 		qwx_qmi_recv_wlanfw_wlan_ini_resp_v1(sc, m, txn_id, msg_len);
7158 		break;
7159 	case QMI_WLANFW_WLAN_CFG_RESP_V01:
7160 		qwx_qmi_recv_wlanfw_wlan_cfg_resp_v1(sc, m, txn_id, msg_len);
7161 		break;
7162 	case QMI_WLANFW_WLAN_MODE_RESP_V01:
7163 		qwx_qmi_recv_wlanfw_wlan_mode_resp_v1(sc, m, txn_id, msg_len);
7164 		break;
7165 	default:
7166 		printf("%s: unhandled QMI response 0x%x\n",
7167 		    sc->sc_dev.dv_xname, msg_id);
7168 		break;
7169 	}
7170 }
7171 
7172 void
7173 qwx_qmi_recv_wlanfw_request_mem_indication(struct qwx_softc *sc, struct mbuf *m,
7174     uint16_t txn_id, uint16_t msg_len)
7175 {
7176 	struct qmi_wlanfw_request_mem_ind_msg_v01 *ind = NULL;
7177 	const struct qmi_elem_info *ei;
7178 	uint8_t *msg = mtod(m, uint8_t *);
7179 
7180 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7181 
7182 	if (!sc->expect_fwmem_req || sc->sc_req_mem_ind != NULL)
7183 		return;
7184 
7185 	/* This structure is too large for the stack. */
7186 	ind = malloc(sizeof(*ind), M_DEVBUF, M_NOWAIT | M_ZERO);
7187 	if (ind == NULL)
7188 		return;
7189 
7190 	ei = qmi_wlanfw_request_mem_ind_msg_v01_ei;
7191 	if (qwx_qmi_decode_msg(sc, ind, sizeof(*ind), ei, msg, msg_len)) {
7192 		free(ind, M_DEVBUF, sizeof(*ind));
7193 		return;
7194 	}
7195 
7196 	/* Handled by qwx_qmi_mem_seg_send() in process context */
7197 	sc->sc_req_mem_ind = ind;
7198 	wakeup(&sc->sc_req_mem_ind);
7199 }
7200 
7201 void
7202 qwx_qmi_recv_indication(struct qwx_softc *sc, struct mbuf *m,
7203     uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
7204 {
7205 	switch (msg_id) {
7206 	case QMI_WLFW_REQUEST_MEM_IND_V01:
7207 		qwx_qmi_recv_wlanfw_request_mem_indication(sc, m,
7208 		    txn_id, msg_len);
7209 		break;
7210 	case QMI_WLFW_FW_MEM_READY_IND_V01:
7211 		sc->fwmem_ready = 1;
7212 		wakeup(&sc->fwmem_ready);
7213 		break;
7214 	case QMI_WLFW_FW_INIT_DONE_IND_V01:
7215 		sc->fw_init_done = 1;
7216 		wakeup(&sc->fw_init_done);
7217 		break;
7218 	default:
7219 		printf("%s: unhandled QMI indication 0x%x\n",
7220 		    sc->sc_dev.dv_xname, msg_id);
7221 		break;
7222 	}
7223 }
7224 
7225 void
7226 qwx_qrtr_recv_data(struct qwx_softc *sc, struct mbuf *m, size_t size)
7227 {
7228 	struct qmi_header hdr;
7229 	uint16_t txn_id, msg_id, msg_len;
7230 
7231 	if (size < sizeof(hdr)) {
7232 		printf("%s: QMI message too short: %zu bytes\n",
7233 		    sc->sc_dev.dv_xname, size);
7234 		return;
7235 	}
7236 
7237 	memcpy(&hdr, mtod(m, void *), sizeof(hdr));
7238 
7239 	DNPRINTF(QWX_D_QMI,
7240 	    "%s: QMI message type=0x%x txn=0x%x id=0x%x len=%u\n",
7241 	    __func__, hdr.type, le16toh(hdr.txn_id),
7242 	    le16toh(hdr.msg_id), le16toh(hdr.msg_len));
7243 
7244 	txn_id = le16toh(hdr.txn_id);
7245 	msg_id = le16toh(hdr.msg_id);
7246 	msg_len = le16toh(hdr.msg_len);
7247 	if (sizeof(hdr) + msg_len != size) {
7248 		printf("%s: bad length in QMI message header: %u\n",
7249 		    sc->sc_dev.dv_xname, msg_len);
7250 		return;
7251 	}
7252 
7253 	switch (hdr.type) {
7254 	case QMI_RESPONSE:
7255 		m_adj(m, sizeof(hdr));
7256 		qwx_qmi_recv_response(sc, m, txn_id, msg_id, msg_len);
7257 		break;
7258 	case QMI_INDICATION:
7259 		m_adj(m, sizeof(hdr));
7260 		qwx_qmi_recv_indication(sc, m, txn_id, msg_id, msg_len);
7261 		break;
7262 	default:
7263 		printf("%s: unhandled QMI message type %u\n",
7264 		    sc->sc_dev.dv_xname, hdr.type);
7265 		break;
7266 	}
7267 }
7268 
7269 int
7270 qwx_qrtr_say_hello(struct qwx_softc *sc)
7271 {
7272 	struct qrtr_hdr_v1 hdr;
7273 	struct qrtr_ctrl_pkt pkt;
7274 	struct mbuf *m;
7275 	size_t totlen, padlen;
7276 	int err;
7277 
7278 	totlen = sizeof(hdr) + sizeof(pkt);
7279 	padlen = roundup(totlen, 4);
7280 
7281 	m = m_gethdr(M_DONTWAIT, MT_DATA);
7282 	if (m == NULL) {
7283 		err = ENOBUFS;
7284 		goto done;
7285 	}
7286 
7287 	if (padlen <= MCLBYTES)
7288 		MCLGET(m, M_DONTWAIT);
7289 	else
7290 		MCLGETL(m, M_DONTWAIT, padlen);
7291 	if ((m->m_flags & M_EXT) == 0) {
7292 		err = ENOBUFS;
7293 		goto done;
7294 	}
7295 
7296 	m->m_len = m->m_pkthdr.len = padlen;
7297 
7298 	memset(&hdr, 0, sizeof(hdr));
7299 	hdr.version = htole32(QRTR_PROTO_VER_1);
7300 	hdr.type = htole32(QRTR_TYPE_HELLO);
7301 	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
7302 	hdr.src_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
7303 	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
7304 	hdr.dst_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
7305 	hdr.size = htole32(sizeof(pkt));
7306 
7307 	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
7308 	if (err)
7309 		goto done;
7310 
7311 	memset(&pkt, 0, sizeof(pkt));
7312 	pkt.cmd = htole32(QRTR_TYPE_HELLO);
7313 
7314 	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
7315 	if (err)
7316 		goto done;
7317 
7318 	/* Zero-pad the mbuf */
7319 	if (padlen != totlen) {
7320 		uint32_t pad = 0;
7321 		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
7322 		if (err)
7323 			goto done;
7324 	}
7325 
7326 	err = sc->ops.submit_xfer(sc, m);
7327 done:
7328 	if (err)
7329 		m_freem(m);
7330 	return err;
7331 }
7332 
7333 int
7334 qwx_qrtr_resume_tx(struct qwx_softc *sc)
7335 {
7336 	struct qrtr_hdr_v1 hdr;
7337 	struct qrtr_ctrl_pkt pkt;
7338 	struct mbuf *m;
7339 	size_t totlen, padlen;
7340 	int err;
7341 
7342 	totlen = sizeof(hdr) + sizeof(pkt);
7343 	padlen = roundup(totlen, 4);
7344 
7345 	m = m_gethdr(M_DONTWAIT, MT_DATA);
7346 	if (m == NULL) {
7347 		err = ENOBUFS;
7348 		goto done;
7349 	}
7350 
7351 	if (padlen <= MCLBYTES)
7352 		MCLGET(m, M_DONTWAIT);
7353 	else
7354 		MCLGETL(m, M_DONTWAIT, padlen);
7355 	if ((m->m_flags & M_EXT) == 0) {
7356 		err = ENOBUFS;
7357 		goto done;
7358 	}
7359 
7360 	m->m_len = m->m_pkthdr.len = padlen;
7361 
7362 	memset(&hdr, 0, sizeof(hdr));
7363 	hdr.version = htole32(QRTR_PROTO_VER_1);
7364 	hdr.type = htole32(QRTR_TYPE_RESUME_TX);
7365 	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
7366 	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
7367 	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
7368 	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
7369 	hdr.size = htole32(sizeof(pkt));
7370 
7371 	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
7372 	if (err)
7373 		goto done;
7374 
7375 	memset(&pkt, 0, sizeof(pkt));
7376 	pkt.cmd = htole32(QRTR_TYPE_RESUME_TX);
7377 	pkt.client.node = htole32(0x01);
7378 	pkt.client.port = htole32(0x4000);
7379 
7380 	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
7381 	if (err)
7382 		goto done;
7383 
7384 	/* Zero-pad the mbuf */
7385 	if (padlen != totlen) {
7386 		uint32_t pad = 0;
7387 		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
7388 		if (err)
7389 			goto done;
7390 	}
7391 
7392 	err = sc->ops.submit_xfer(sc, m);
7393 done:
7394 	if (err)
7395 		m_freem(m);
7396 	return err;
7397 }
7398 
/*
 * Demultiplex an incoming qrtr frame: parse its v1 or v2 header,
 * dispatch by message type, and acknowledge the frame via RESUME_TX
 * if the sender requested confirmation.
 *
 * NOTE(review): header fields (v1->type etc.) are dereferenced before
 * any check that the mbuf actually holds a full header; presumably the
 * transport layer guarantees a minimum frame size — confirm against
 * the caller in the bus-specific rx path.
 */
void
qwx_qrtr_recv_msg(struct qwx_softc *sc, struct mbuf *m)
{
	struct qrtr_hdr_v1 *v1 = mtod(m, struct qrtr_hdr_v1 *);
	struct qrtr_hdr_v2 *v2 = mtod(m, struct qrtr_hdr_v2 *);
	struct qrtr_ctrl_pkt *pkt;
	uint32_t type, size, hdrsize;
	uint8_t ver, confirm_rx;

	/* The first byte of either header version is the version number. */
	ver = *mtod(m, uint8_t *);
	switch (ver) {
	case QRTR_PROTO_VER_1:
		DNPRINTF(QWX_D_QMI,
		    "%s: type %u size %u confirm_rx %u\n", __func__,
		    letoh32(v1->type), letoh32(v1->size),
		    letoh32(v1->confirm_rx));
		type = letoh32(v1->type);
		size = letoh32(v1->size);
		confirm_rx = !!letoh32(v1->confirm_rx);
		hdrsize = sizeof(*v1);
		break;
	case QRTR_PROTO_VER_2:
		DNPRINTF(QWX_D_QMI,
		    "%s: type %u size %u confirm_rx %u\n", __func__,
		    v2->type, letoh32(v2->size),
		    !!(v2->flags & QRTR_FLAGS_CONFIRM_RX));
		type = v2->type;
		size = letoh32(v2->size);
		confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
		hdrsize = sizeof(*v2);
		break;
	default:
		printf("%s: unsupported qrtr version %u\n",
		    sc->sc_dev.dv_xname, ver);
		return;
	}

	/* The payload size claimed by the header must fit the mbuf. */
	if (size > m->m_pkthdr.len) {
		printf("%s: bad size in qrtr message header: %u\n",
		    sc->sc_dev.dv_xname, size);
		return;
	}

	switch (type) {
	case QRTR_TYPE_DATA:
		/* QMI payload; strip the qrtr header first. */
		m_adj(m, hdrsize);
		qwx_qrtr_recv_data(sc, m, size);
		break;
	case QRTR_TYPE_HELLO:
		/* Peer said hello; reply in kind. */
		qwx_qrtr_say_hello(sc);
		break;
	case QRTR_TYPE_NEW_SERVER:
		/* Record the announced QMI service endpoint. */
		m_adj(m, hdrsize);
		pkt = mtod(m, struct qrtr_ctrl_pkt *);
		sc->qrtr_server.service = le32toh(pkt->server.service);
		sc->qrtr_server.instance = le32toh(pkt->server.instance);
		sc->qrtr_server.node = le32toh(pkt->server.node);
		sc->qrtr_server.port = le32toh(pkt->server.port);
		DNPRINTF(QWX_D_QMI,
		    "%s: new server: service=0x%x instance=0x%x node=0x%x "
		    "port=0x%x\n", __func__, sc->qrtr_server.service,
		    sc->qrtr_server.instance,
		    sc->qrtr_server.node, sc->qrtr_server.port);
		wakeup(&sc->qrtr_server);
		break;
	default:
		DPRINTF("%s: unhandled qrtr type %u\n",
		    sc->sc_dev.dv_xname, type);
		return;
	}

	/* Sender asked for an acknowledgment of this frame. */
	if (confirm_rx)
		qwx_qrtr_resume_tx(sc);
}
7473 
/*
 * No-op placeholders: unlike Linux, we do not implement QMI as a
 * network service, so there is nothing to set up or tear down.
 */
#define qwx_qmi_init_service(sc)	(0)
#define qwx_qmi_deinit_service(sc)	(0)
7477 
7478 int
7479 qwx_qmi_encode_datalen(uint8_t *p, uint32_t *datalen,
7480     const struct qmi_elem_info *ei, void *input)
7481 {
7482 	memcpy(datalen, input + ei->offset, sizeof(uint32_t));
7483 
7484 	if (ei->elem_size == sizeof(uint8_t)) {
7485 		p[0] = (*datalen & 0xff);
7486 	} else if (ei->elem_size == sizeof(uint16_t)) {
7487 		p[0] = (*datalen & 0xff);
7488 		p[1] = (*datalen >> 8) & 0xff;
7489 	} else {
7490 		printf("%s: bad element size\n", __func__);
7491 		return -1;
7492 	}
7493 
7494 	return 0;
7495 }
7496 
7497 int
7498 qwx_qmi_encode_byte(uint8_t *p, const struct qmi_elem_info *ei, void *input,
7499     int i)
7500 {
7501 	if (ei->elem_size != sizeof(uint8_t)) {
7502 		printf("%s: bad element size\n", __func__);
7503 		return -1;
7504 	}
7505 
7506 	if (p == NULL)
7507 		return 0;
7508 
7509 	memcpy(p, input + ei->offset + (i * ei->elem_size), ei->elem_size);
7510 	return 0;
7511 }
7512 
7513 int
7514 qwx_qmi_encode_word(uint8_t *p, const struct qmi_elem_info *ei, void *input,
7515     int i)
7516 {
7517 	uint16_t val;
7518 
7519 	if (ei->elem_size != sizeof(val)) {
7520 		printf("%s: bad element size\n", __func__);
7521 		return -1;
7522 	}
7523 
7524 	if (p == NULL)
7525 		return 0;
7526 
7527 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
7528 	val = htole16(val);
7529 	memcpy(p, &val, sizeof(val));
7530 	return 0;
7531 }
7532 
7533 int
7534 qwx_qmi_encode_dword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
7535     int i)
7536 {
7537 	uint32_t val;
7538 
7539 	if (ei->elem_size != sizeof(val)) {
7540 		printf("%s: bad element size\n", __func__);
7541 		return -1;
7542 	}
7543 
7544 	if (p == NULL)
7545 		return 0;
7546 
7547 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
7548 	val = htole32(val);
7549 	memcpy(p, &val, sizeof(val));
7550 	return 0;
7551 }
7552 
7553 int
7554 qwx_qmi_encode_qword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
7555     int i)
7556 {
7557 	uint64_t val;
7558 
7559 	if (ei->elem_size != sizeof(val)) {
7560 		printf("%s: bad element size\n", __func__);
7561 		return -1;
7562 	}
7563 
7564 	if (p == NULL)
7565 		return 0;
7566 
7567 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
7568 	val = htole64(val);
7569 	memcpy(p, &val, sizeof(val));
7570 	return 0;
7571 }
7572 
/*
 * Encode the members of a QMI_STRUCT element into p, following the
 * nested element-info array in struct_ei->ei_array. Struct members are
 * packed back to back without per-member TLV headers. With p == NULL
 * this performs a size-only pass; *encoded_len receives the number of
 * bytes (to be) emitted. Returns 0 on success, -1 on error.
 */
int
qwx_qmi_encode_struct(uint8_t *p, size_t *encoded_len,
    const struct qmi_elem_info *struct_ei, void *input, size_t input_len)
{
	const struct qmi_elem_info *ei = struct_ei->ei_array;
	size_t remain = input_len;

	*encoded_len = 0;

	while (ei->data_type != QMI_EOTI) {
		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional flag element\n",
				    __func__);
				return -1;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		if (ei->elem_size > remain) {
			printf("%s: QMI message buffer too short\n", __func__);
			return -1;
		}

		/* Only scalar member types may appear inside a struct. */
		switch (ei->data_type) {
		case QMI_UNSIGNED_1_BYTE:
			if (qwx_qmi_encode_byte(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_2_BYTE:
			if (qwx_qmi_encode_word(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_4_BYTE:
		case QMI_SIGNED_4_BYTE_ENUM:
			if (qwx_qmi_encode_dword(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_8_BYTE:
			if (qwx_qmi_encode_qword(p, ei, input, 0))
				return -1;
			break;
		default:
			printf("%s: unhandled QMI struct element type %d\n",
			    __func__, ei->data_type);
			return -1;
		}

		remain -= ei->elem_size;
		if (p != NULL)
			p += ei->elem_size;
		*encoded_len += ei->elem_size;
		ei++;
	}

	return 0;
}
7642 
7643 int
7644 qwx_qmi_encode_string(uint8_t *p, size_t *encoded_len,
7645     const struct qmi_elem_info *string_ei, void *input, size_t input_len)
7646 {
7647 	*encoded_len = strnlen(input, input_len);
7648 	if (*encoded_len > string_ei->elem_len) {
7649 		printf("%s: QMI message buffer too short\n", __func__);
7650 		return -1;
7651 	}
7652 
7653 	if (p)
7654 		memcpy(p, input, *encoded_len);
7655 
7656 	return 0;
7657 }
7658 
/*
 * Encode a QMI message of the given type and message ID into a newly
 * allocated buffer, prefixed with a struct qmi_header. The message
 * layout is driven by the element-info array 'ei' describing 'input'
 * (a C struct of at most input_len bytes). Two passes are made: the
 * first computes the encoded length, the second fills in the TLVs.
 * On success *encoded_msg/*encoded_len describe a buffer the caller
 * must free with free(9) and *txn_id is post-incremented. Returns 0
 * on success, ENOMEM on allocation failure, -1 on encoding errors.
 */
int
qwx_qmi_encode_msg(uint8_t **encoded_msg, size_t *encoded_len, int type,
    uint16_t *txn_id, uint16_t msg_id, size_t msg_len,
    const struct qmi_elem_info *ei, void *input, size_t input_len)
{
	const struct qmi_elem_info *ei0 = ei;
	struct qmi_header hdr;
	size_t remain;
	uint8_t *p, *op;

	*encoded_msg = NULL;
	*encoded_len = 0;

	/* First pass: Determine length of encoded message. */
	while (ei->data_type != QMI_EOTI) {
		int nelem = 1, i;

		if (ei->offset + ei->elem_size > input_len) {
			printf("%s: bad input buffer offset at element 0x%x "
			    "data type 0x%x\n",
			    __func__, ei->tlv_type, ei->data_type);
			goto err;
		}

		/*
		 * OPT_FLAG determines whether the next element
		 * should be considered for encoding.
		 */
		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional element\n", __func__);
				goto err;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		*encoded_len += 3; /* type, length */
		if (ei->data_type == QMI_DATA_LEN) {
			uint32_t datalen = 0;
			uint8_t dummy[2];

			if (qwx_qmi_encode_datalen(dummy, &datalen, ei, input))
				goto err;
			*encoded_len += ei->elem_size;
			ei++;
			if (ei->array_type != VAR_LEN_ARRAY) {
				printf("%s: data len not for a var array\n",
				    __func__);
				goto err;
			}
			nelem = datalen;
			if (ei->data_type == QMI_STRUCT) {
				/* Structs may encode shorter than elem_size. */
				for (i = 0; i < nelem; i++) {
					size_t encoded_struct_len = 0;
					size_t inoff = ei->offset + (i * ei->elem_size);

					if (qwx_qmi_encode_struct(NULL,
					    &encoded_struct_len, ei,
					    input + inoff, input_len - inoff))
						goto err;

					*encoded_len += encoded_struct_len;
				}
			} else
				*encoded_len += nelem * ei->elem_size;
			ei++;
		} else if (ei->data_type == QMI_STRING) {
			size_t encoded_string_len = 0;
			size_t inoff = ei->offset;

			if (qwx_qmi_encode_string(NULL,
			    &encoded_string_len, ei,
			    input + inoff, input_len - inoff))
				goto err;
			*encoded_len += encoded_string_len;
			ei++;
		} else {
			*encoded_len += ei->elem_size;
			ei++;
		}
	}

	*encoded_len += sizeof(hdr);
	*encoded_msg = malloc(*encoded_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*encoded_msg == NULL)
		return ENOMEM;

	/* Fill in the QMI header; msg_len excludes the header itself. */
	hdr.type = type;
	hdr.txn_id = htole16(*txn_id);
	hdr.msg_id = htole16(msg_id);
	hdr.msg_len = htole16(*encoded_len - sizeof(hdr));
	memcpy(*encoded_msg, &hdr, sizeof(hdr));

	/* Second pass: Encode the message. */
	ei = ei0;
	p = *encoded_msg + sizeof(hdr);
	remain = *encoded_len - sizeof(hdr);
	while (ei->data_type != QMI_EOTI) {
		uint32_t datalen = 0;
		int nelem = 1, i;

		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional flag element\n",
				    __func__);
				goto err;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		if (ei->elem_size + 3 > remain) {
			printf("%s: QMI message buffer too short\n", __func__);
			goto err;
		}

		/* 3 bytes of type-length-value header, remember for later */
		op = p;
		p += 3;

		if (ei->data_type == QMI_DATA_LEN) {
			if (qwx_qmi_encode_datalen(p, &datalen, ei, input))
				goto err;
			p += ei->elem_size;
			ei++;
			if (ei->array_type == VAR_LEN_ARRAY)
				nelem = datalen;
		}

		for (i = 0; i < nelem; i++) {
			size_t encoded_struct_len = 0;
			size_t encoded_string_len = 0;
			size_t inoff = ei->offset + (i * ei->elem_size);

			switch (ei->data_type) {
			case QMI_UNSIGNED_1_BYTE:
				if (qwx_qmi_encode_byte(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_2_BYTE:
			case QMI_SIGNED_2_BYTE_ENUM:
				if (qwx_qmi_encode_word(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_4_BYTE:
			case QMI_SIGNED_4_BYTE_ENUM:
				if (qwx_qmi_encode_dword(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_8_BYTE:
				if (qwx_qmi_encode_qword(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_STRUCT:
				if (qwx_qmi_encode_struct(p,
				    &encoded_struct_len, ei,
				    input + inoff, input_len - inoff))
					goto err;
				remain -= encoded_struct_len;
				p += encoded_struct_len;
				break;
			case QMI_STRING:
				if (qwx_qmi_encode_string(p,
				    &encoded_string_len, ei,
				    input + inoff, input_len - inoff))
					goto err;
				remain -= encoded_string_len;
				p += encoded_string_len;
				break;
			default:
				printf("%s: unhandled QMI message element type %d\n",
				    __func__, ei->data_type);
				goto err;
			}
		}

		/* Backfill the TLV header now that the value size is known. */
		op[0] = ei->tlv_type;
		op[1] = (p - (op + 3)) & 0xff;
		op[2] = ((p - (op + 3)) >> 8) & 0xff;

		ei++;
	}

	/* Hex dump of the encoded message; flip to if (1) when debugging. */
	if (0) {
		int i;
		DNPRINTF(QWX_D_QMI,
		   "%s: message type 0x%x txnid 0x%x msgid 0x%x "
		    "msglen %zu encoded:", __func__,
		    type, *txn_id, msg_id, *encoded_len - sizeof(hdr));
		for (i = 0; i < *encoded_len; i++) {
			DNPRINTF(QWX_D_QMI, "%s %.2x", i % 16 == 0 ? "\n" : "",
			    (*encoded_msg)[i]);
		}
		if (i % 16)
			DNPRINTF(QWX_D_QMI, "\n");
	}

	(*txn_id)++; /* wrap-around is fine */
	return 0;
err:
	free(*encoded_msg, M_DEVBUF, *encoded_len);
	*encoded_msg = NULL;
	*encoded_len = 0;
	return -1;
}
7895 
7896 int
7897 qwx_qmi_send_request(struct qwx_softc *sc, uint16_t msg_id, size_t msg_len,
7898     const struct qmi_elem_info *ei, void *req, size_t req_len)
7899 {
7900 	struct qrtr_hdr_v1 hdr;
7901 	struct mbuf *m;
7902 	uint8_t *encoded_msg;
7903 	size_t encoded_len;
7904 	size_t totlen, padlen;
7905 	int err;
7906 
7907 	if (qwx_qmi_encode_msg(&encoded_msg, &encoded_len, QMI_REQUEST,
7908 	    &sc->qmi_txn_id, msg_id, msg_len, ei, req, req_len))
7909 		return -1;
7910 
7911 	totlen = sizeof(hdr) + encoded_len;
7912 	padlen = roundup(totlen, 4);
7913 
7914 	m = m_gethdr(M_DONTWAIT, MT_DATA);
7915 	if (m == NULL) {
7916 		err = ENOBUFS;
7917 		goto done;
7918 	}
7919 
7920 	if (padlen <= MCLBYTES)
7921 		MCLGET(m, M_DONTWAIT);
7922 	else
7923 		MCLGETL(m, M_DONTWAIT, padlen);
7924 	if ((m->m_flags & M_EXT) == 0) {
7925 		err = ENOBUFS;
7926 		goto done;
7927 	}
7928 
7929 	m->m_len = m->m_pkthdr.len = padlen;
7930 
7931 	memset(&hdr, 0, sizeof(hdr));
7932 	hdr.version = htole32(QRTR_PROTO_VER_1);
7933 	hdr.type = htole32(QRTR_TYPE_DATA);
7934 	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
7935 	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
7936 	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
7937 	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
7938 	hdr.size = htole32(encoded_len);
7939 
7940 	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
7941 	if (err)
7942 		goto done;
7943 
7944 	err = m_copyback(m, sizeof(hdr), encoded_len, encoded_msg, M_NOWAIT);
7945 	if (err)
7946 		goto done;
7947 
7948 	/* Zero-pad the mbuf */
7949 	if (padlen != totlen) {
7950 		uint32_t pad = 0;
7951 		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
7952 		if (err)
7953 			goto done;
7954 	}
7955 
7956 	err = sc->ops.submit_xfer(sc, m);
7957 done:
7958 	if (err)
7959 		m_freem(m);
7960 	free(encoded_msg, M_DEVBUF, encoded_len);
7961 	return err;
7962 }
7963 
7964 int
7965 qwx_qmi_fw_ind_register_send(struct qwx_softc *sc)
7966 {
7967 	struct qmi_wlanfw_ind_register_req_msg_v01 req;
7968 	int ret;
7969 
7970 	memset(&req, 0, sizeof(req));
7971 
7972 	req.client_id_valid = 1;
7973 	req.client_id = QMI_WLANFW_CLIENT_ID;
7974 	req.fw_ready_enable_valid = 1;
7975 	req.fw_ready_enable = 1;
7976 	req.cal_done_enable_valid = 1;
7977 	req.cal_done_enable = 1;
7978 	req.fw_init_done_enable_valid = 1;
7979 	req.fw_init_done_enable = 1;
7980 
7981 	req.pin_connect_result_enable_valid = 0;
7982 	req.pin_connect_result_enable = 0;
7983 
7984 	/*
7985 	 * WCN6750 doesn't request for DDR memory via QMI,
7986 	 * instead it uses a fixed 12MB reserved memory region in DDR.
7987 	 */
7988 	if (!sc->hw_params.fixed_fw_mem) {
7989 		req.request_mem_enable_valid = 1;
7990 		req.request_mem_enable = 1;
7991 		req.fw_mem_ready_enable_valid = 1;
7992 		req.fw_mem_ready_enable = 1;
7993 	}
7994 
7995 	DNPRINTF(QWX_D_QMI, "%s: qmi indication register request\n", __func__);
7996 
7997 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_IND_REGISTER_REQ_V01,
7998 			       QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
7999 			       qmi_wlanfw_ind_register_req_msg_v01_ei,
8000 			       &req, sizeof(req));
8001 	if (ret) {
8002 		printf("%s: failed to send indication register request: %d\n",
8003 		    sc->sc_dev.dv_xname, ret);
8004 		return -1;
8005 	}
8006 
8007 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
8008 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
8009 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwind",
8010 		    SEC_TO_NSEC(1));
8011 		if (ret) {
8012 			printf("%s: fw indication register request timeout\n",
8013 			    sc->sc_dev.dv_xname);
8014 			return ret;
8015 		}
8016 	}
8017 
8018 	return 0;
8019 }
8020 
8021 int
8022 qwx_qmi_host_cap_send(struct qwx_softc *sc)
8023 {
8024 	struct qmi_wlanfw_host_cap_req_msg_v01 req;
8025 	int ret;
8026 
8027 	memset(&req, 0, sizeof(req));
8028 	req.num_clients_valid = 1;
8029 	req.num_clients = 1;
8030 	req.mem_cfg_mode = sc->hw_params.fw_mem_mode;
8031 	req.mem_cfg_mode_valid = 1;
8032 	req.bdf_support_valid = 1;
8033 	req.bdf_support = 1;
8034 
8035 	if (sc->hw_params.m3_fw_support) {
8036 		req.m3_support_valid = 1;
8037 		req.m3_support = 1;
8038 		req.m3_cache_support_valid = 1;
8039 		req.m3_cache_support = 1;
8040 	} else {
8041 		req.m3_support_valid = 0;
8042 		req.m3_support = 0;
8043 		req.m3_cache_support_valid = 0;
8044 		req.m3_cache_support = 0;
8045 	}
8046 
8047 	req.cal_done_valid = 1;
8048 	req.cal_done = sc->qmi_cal_done;
8049 
8050 	if (sc->hw_params.internal_sleep_clock) {
8051 		req.nm_modem_valid = 1;
8052 
8053 		/* Notify firmware that this is non-qualcomm platform. */
8054 		req.nm_modem |= QWX_HOST_CSTATE_BIT;
8055 
8056 		/* Notify firmware about the sleep clock selection,
8057 		 * nm_modem_bit[1] is used for this purpose. Host driver on
8058 		 * non-qualcomm platforms should select internal sleep
8059 		 * clock.
8060 		 */
8061 		req.nm_modem |= QWX_SLEEP_CLOCK_SELECT_INTERNAL_BIT;
8062 	}
8063 
8064 	if (sc->hw_params.global_reset)
8065 		req.nm_modem |= QWX_PLATFORM_CAP_PCIE_GLOBAL_RESET;
8066 
8067 	req.nm_modem |= QWX_PLATFORM_CAP_PCIE_PME_D3COLD;
8068 
8069 	DNPRINTF(QWX_D_QMI, "%s: qmi host cap request\n", __func__);
8070 
8071 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_HOST_CAP_REQ_V01,
8072 			       QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
8073 			       qmi_wlanfw_host_cap_req_msg_v01_ei,
8074 			       &req, sizeof(req));
8075 	if (ret) {
8076 		printf("%s: failed to send host cap request: %d\n",
8077 		    sc->sc_dev.dv_xname, ret);
8078 		return -1;
8079 	}
8080 
8081 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
8082 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
8083 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwhcap",
8084 		    SEC_TO_NSEC(1));
8085 		if (ret) {
8086 			printf("%s: fw host cap request timeout\n",
8087 			    sc->sc_dev.dv_xname);
8088 			return ret;
8089 		}
8090 	}
8091 
8092 	return 0;
8093 }
8094 
/*
 * Handle the firmware's memory request: wait for the request-memory
 * indication posted by the QMI receive path, allocate (or reuse) one
 * DMA block large enough for all requested segments, and answer with
 * a respond-memory request describing how the block is carved up.
 * Returns 0 on success, EBUSY when the caller should retry (we sent an
 * empty allocation), or -1 on failure.
 */
int
qwx_qmi_mem_seg_send(struct qwx_softc *sc)
{
	struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
	struct qmi_wlanfw_request_mem_ind_msg_v01 *ind;
	uint32_t mem_seg_len;
	const uint32_t mem_seg_len_max = 64; /* bump if needed by future fw */
	uint16_t expected_result;
	size_t total_size;
	int i, ret;

	sc->fwmem_ready = 0;

	/* Wait for the indication handler to post sc_req_mem_ind. */
	while (sc->sc_req_mem_ind == NULL) {
		ret = tsleep_nsec(&sc->sc_req_mem_ind, 0, "qwxfwmem",
		    SEC_TO_NSEC(10));
		if (ret) {
			printf("%s: fw memory request timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	sc->expect_fwmem_req = 0;

	ind = sc->sc_req_mem_ind;
	mem_seg_len = le32toh(ind->mem_seg_len);
	/* Sanity-check the number of requested segments. */
	if (mem_seg_len > mem_seg_len_max) {
		printf("%s: firmware requested too many memory segments: %u\n",
		    sc->sc_dev.dv_xname, mem_seg_len);
		free(sc->sc_req_mem_ind, M_DEVBUF, sizeof(*sc->sc_req_mem_ind));
		sc->sc_req_mem_ind = NULL;
		return -1;
	}

	/* Sum up the sizes of all requested segments. */
	total_size = 0;
	for (i = 0; i < mem_seg_len; i++) {
		if (ind->mem_seg[i].size == 0) {
			printf("%s: firmware requested zero-sized "
			    "memory segment %u\n", sc->sc_dev.dv_xname, i);
			free(sc->sc_req_mem_ind, M_DEVBUF,
			    sizeof(*sc->sc_req_mem_ind));
			sc->sc_req_mem_ind = NULL;
			return -1;
		}
		total_size += le32toh(ind->mem_seg[i].size);
	}

	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (req == NULL) {
		printf("%s: failed to allocate respond memory request\n",
		    sc->sc_dev.dv_xname);
		free(sc->sc_req_mem_ind, M_DEVBUF, sizeof(*sc->sc_req_mem_ind));
		sc->sc_req_mem_ind = NULL;
		return -1;
	}

	if (total_size == 0) {
		/* Should not happen. Send back an empty allocation. */
		printf("%s: firmware has requested no memory\n",
		    sc->sc_dev.dv_xname);
		mem_seg_len = 0;
	} else if (sc->fwmem == NULL || QWX_DMA_LEN(sc->fwmem) < total_size) {
		/* (Re-)allocate one DMA block covering all segments. */
		if (sc->fwmem != NULL)
			qwx_dmamem_free(sc->sc_dmat, sc->fwmem);
		sc->fwmem = qwx_dmamem_alloc(sc->sc_dmat, total_size, 65536);
		if (sc->fwmem == NULL) {
			printf("%s: failed to allocate %zu bytes of DMA "
			    "memory for firmware\n", sc->sc_dev.dv_xname,
			    total_size);
			/* Send back an empty allocation. */
			mem_seg_len = 0;
		} else
			DPRINTF("%s: allocated %zu bytes of DMA memory for "
			    "firmware\n", sc->sc_dev.dv_xname, total_size);
	}

	/* Chunk DMA memory block into segments as requested by firmware. */
	req->mem_seg_len = htole32(mem_seg_len);
	if (sc->fwmem) {
		uint64_t paddr = QWX_DMA_DVA(sc->fwmem);

		for (i = 0; i < mem_seg_len; i++) {
			DPRINTF("%s: mem seg[%d] addr=%llx size=%u type=%u\n",
			    __func__, i, paddr, le32toh(ind->mem_seg[i].size),
			    le32toh(ind->mem_seg[i].type));
			req->mem_seg[i].addr = htole64(paddr);
			paddr += le32toh(ind->mem_seg[i].size);

			/* Values in 'ind' are in little-endian format. */
			req->mem_seg[i].size = ind->mem_seg[i].size;
			req->mem_seg[i].type = ind->mem_seg[i].type;
		}
	}

	free(ind, M_DEVBUF, sizeof(*ind));
	sc->sc_req_mem_ind = NULL;

	ret = qwx_qmi_send_request(sc, QMI_WLANFW_RESPOND_MEM_REQ_V01,
			       QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
			       qmi_wlanfw_respond_mem_req_msg_v01_ei,
			       req, sizeof(*req));
	free(req, M_DEVBUF, sizeof(*req));
	if (ret) {
		printf("%s: failed to send respond memory request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return -1;
	}

	/*
	 * If we answered with an empty allocation the firmware is
	 * expected to reject our response.
	 */
	if (mem_seg_len == 0) {
		expected_result = QMI_RESULT_FAILURE_V01;
		sc->qmi_resp.result = QMI_RESULT_SUCCESS_V01;
	} else {
		expected_result = QMI_RESULT_SUCCESS_V01;
		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	}
	while (sc->qmi_resp.result != expected_result) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwrespmem",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: fw respond memory request timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	/* DMA allocation failed above; ask the caller to retry. */
	if (mem_seg_len == 0) {
		sc->expect_fwmem_req = 1;
		return EBUSY; /* retry */
	}

	if (!sc->hw_params.fixed_fw_mem) {
		/* Wait for the firmware-memory-ready event. */
		while (!sc->fwmem_ready) {
			ret = tsleep_nsec(&sc->fwmem_ready, 0, "qwxfwrdy",
			    SEC_TO_NSEC(10));
			if (ret) {
				printf("%s: fw memory ready timeout\n",
				    sc->sc_dev.dv_xname);
				return -1;
			}
		}
	}

	return 0;
}
8240 
/*
 * Look for a board data file variant in SMBIOS.
 * NOTE(review): stub; always reports success without setting a variant.
 */
int
qwx_core_check_smbios(struct qwx_softc *sc)
{
	return 0; /* TODO */
}
8246 
/*
 * On systems with a flattened device tree, read the calibration
 * variant property into qmi_target.bdf_ext, where it becomes part of
 * the board name. The property is optional; this never fails.
 */
int
qwx_core_check_dt(struct qwx_softc *sc)
{
#ifdef __HAVE_FDT
	if (sc->sc_node == 0)
		return 0;

	/*
	 * size - 1 leaves the final byte untouched as a NUL terminator;
	 * assumes bdf_ext was zeroed beforehand — TODO confirm.
	 */
	OF_getprop(sc->sc_node, "qcom,ath11k-calibration-variant",
	    sc->qmi_target.bdf_ext, sizeof(sc->qmi_target.bdf_ext) - 1);
#endif

	return 0;
}
8260 
8261 int
8262 qwx_qmi_request_target_cap(struct qwx_softc *sc)
8263 {
8264 	struct qmi_wlanfw_cap_req_msg_v01 req;
8265 	int ret = 0;
8266 	int r;
8267 	char *fw_build_id;
8268 	int fw_build_id_mask_len;
8269 
8270 	memset(&req, 0, sizeof(req));
8271 
8272 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_CAP_REQ_V01,
8273 	    QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
8274 	    qmi_wlanfw_cap_req_msg_v01_ei, &req, sizeof(req));
8275 	if (ret) {
8276 		printf("%s: failed to send qmi cap request: %d\n",
8277 		    sc->sc_dev.dv_xname, ret);
8278 		goto out;
8279 	}
8280 
8281 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
8282 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
8283 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwcap",
8284 		    SEC_TO_NSEC(1));
8285 		if (ret) {
8286 			printf("%s: qmi cap request failed\n",
8287 			    sc->sc_dev.dv_xname);
8288 			return ret;
8289 		}
8290 	}
8291 
8292 	fw_build_id = sc->qmi_target.fw_build_id;
8293 	fw_build_id_mask_len = strlen(QWX_FW_BUILD_ID_MASK);
8294 	if (!strncmp(fw_build_id, QWX_FW_BUILD_ID_MASK, fw_build_id_mask_len))
8295 		fw_build_id = fw_build_id + fw_build_id_mask_len;
8296 
8297 	DPRINTF("%s: chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
8298 	    sc->sc_dev.dv_xname,
8299 	    sc->qmi_target.chip_id, sc->qmi_target.chip_family,
8300 	    sc->qmi_target.board_id, sc->qmi_target.soc_id);
8301 
8302 	DPRINTF("%s: fw_version 0x%x fw_build_timestamp %s fw_build_id %s\n",
8303 	    sc->sc_dev.dv_xname, sc->qmi_target.fw_version,
8304 	    sc->qmi_target.fw_build_timestamp, fw_build_id);
8305 
8306 	r = qwx_core_check_smbios(sc);
8307 	if (r)
8308 		DPRINTF("%s: SMBIOS bdf variant name not set\n", __func__);
8309 
8310 	r = qwx_core_check_dt(sc);
8311 	if (r)
8312 		DPRINTF("%s: DT bdf variant name not set\n", __func__);
8313 
8314 out:
8315 	return ret;
8316 }
8317 
8318 int
8319 qwx_qmi_request_device_info(struct qwx_softc *sc)
8320 {
8321 	/* device info message req is only sent for hybrid bus devices */
8322 	if (!sc->hw_params.hybrid_bus_type)
8323 		return 0;
8324 
8325 	/* TODO */
8326 	return -1;
8327 }
8328 
8329 int
8330 _qwx_core_create_board_name(struct qwx_softc *sc, char *name,
8331     size_t name_len, int with_variant, int bus_type_mode)
8332 {
8333 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
8334 	char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
8335 
8336 	if (with_variant && sc->qmi_target.bdf_ext[0] != '\0')
8337 		snprintf(variant, sizeof(variant), ",variant=%s",
8338 		    sc->qmi_target.bdf_ext);
8339 
8340 	switch (sc->id.bdf_search) {
8341 	case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
8342 		if (bus_type_mode)
8343 			snprintf(name, name_len, "bus=%s", sc->sc_bus_str);
8344 		else
8345 			snprintf(name, name_len,
8346 			    "bus=%s,vendor=%04x,device=%04x,"
8347 			    "subsystem-vendor=%04x,subsystem-device=%04x,"
8348 			    "qmi-chip-id=%d,qmi-board-id=%d%s",
8349 			    sc->sc_bus_str, sc->id.vendor, sc->id.device,
8350 			    sc->id.subsystem_vendor, sc->id.subsystem_device,
8351 			    sc->qmi_target.chip_id, sc->qmi_target.board_id,
8352 			    variant);
8353 		break;
8354 	default:
8355 		snprintf(name, name_len,
8356 		    "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
8357 		    sc->sc_bus_str, sc->qmi_target.chip_id,
8358 		    sc->qmi_target.board_id, variant);
8359 		break;
8360 	}
8361 
8362 	DPRINTF("%s: using board name '%s'\n", __func__, name);
8363 
8364 	return 0;
8365 }
8366 
/*
 * Create the primary board name, including the calibration variant
 * suffix when one is set.
 */
int
qwx_core_create_board_name(struct qwx_softc *sc, char *name, size_t name_len)
{
	return _qwx_core_create_board_name(sc, name, name_len, 1, 0);
}
8372 
/*
 * Create a fallback board name without the variant suffix, for lookup
 * when the variant-qualified name is not found.
 */
int
qwx_core_create_fallback_board_name(struct qwx_softc *sc, char *name,
    size_t name_len)
{
	return _qwx_core_create_board_name(sc, name, name_len, 0, 0);
}
8379 
/*
 * Create a board name containing only the bus type ("bus=...").
 */
int
qwx_core_create_bus_type_board_name(struct qwx_softc *sc, char *name,
    size_t name_len)
{
	return _qwx_core_create_board_name(sc, name, name_len, 0, 1);
}
8386 
/*
 * TLV header used within firmware/board data image files.
 * 'id' and 'len' are stored little-endian; 'len' does not include the
 * padding which aligns each element to a 4-byte boundary.
 */
struct ath11k_fw_ie {
	uint32_t id;
	uint32_t len;
	uint8_t data[];
};
8392 
/* Sub-IE types found inside an ATH11K_BD_IE_BOARD container. */
enum ath11k_bd_ie_board_type {
	ATH11K_BD_IE_BOARD_NAME = 0,
	ATH11K_BD_IE_BOARD_DATA = 1,
};
8397 
/* Sub-IE types found inside an ATH11K_BD_IE_REGDB container. */
enum ath11k_bd_ie_regdb_type {
	ATH11K_BD_IE_REGDB_NAME = 0,
	ATH11K_BD_IE_REGDB_DATA = 1,
};
8402 
/* Top-level IE types found in board data image files. */
enum ath11k_bd_ie_type {
	/* contains sub IEs of enum ath11k_bd_ie_board_type */
	ATH11K_BD_IE_BOARD = 0,
	/* contains sub IEs of enum ath11k_bd_ie_regdb_type */
	ATH11K_BD_IE_REGDB = 1,
};
8409 
8410 static inline const char *
8411 qwx_bd_ie_type_str(enum ath11k_bd_ie_type type)
8412 {
8413 	switch (type) {
8414 	case ATH11K_BD_IE_BOARD:
8415 		return "board data";
8416 	case ATH11K_BD_IE_REGDB:
8417 		return "regdb data";
8418 	}
8419 
8420 	return "unknown";
8421 }
8422 
/*
 * Scan one BOARD/REGDB container for a name sub-element matching
 * 'boardname' followed by a data sub-element, and return a pointer to
 * that data (inside 'buf') via 'boardfw'/'boardfw_len'.
 * Returns 0 on success, ENOENT if no match was found, or EINVAL if the
 * container is malformed.
 */
int
qwx_core_parse_bd_ie_board(struct qwx_softc *sc,
    const u_char **boardfw, size_t *boardfw_len,
    const void *buf, size_t buf_len,
    const char *boardname, int ie_id, int name_id, int data_id)
{
	const struct ath11k_fw_ie *hdr;
	int name_match_found = 0;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	*boardfw = NULL;
	*boardfw_len = 0;

	/* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath11k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32toh(hdr->id);
		board_ie_len = le32toh(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* Guard against lengths running past the container. */
		if (buf_len < roundup(board_ie_len, 4)) {
			printf("%s: invalid %s length: %zu < %zu\n",
			    sc->sc_dev.dv_xname, qwx_bd_ie_type_str(ie_id),
			    buf_len, roundup(board_ie_len, 4));
			return EINVAL;
		}

		if (board_ie_id == name_id) {
			/* A data element only counts after its name matched. */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, board_ie_len);
			if (ret)
				goto next;

			name_match_found = 1;
			   DPRINTF("%s: found match %s for name '%s'", __func__,
			       qwx_bd_ie_type_str(ie_id), boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			DPRINTF("%s: found %s for '%s'", __func__,
			    qwx_bd_ie_type_str(ie_id), boardname);

			/* Data follows the matching name; done. */
			*boardfw = board_ie_data;
			*boardfw_len = board_ie_len;
			return 0;
		} else {
			printf("%s: unknown %s id found: %d\n", __func__,
			    qwx_bd_ie_type_str(ie_id), board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = roundup(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	return ENOENT;
}
8492 
/*
 * Parse a board-2.bin style image: verify the magic string, then walk
 * the top-level IEs looking for 'ie_id_match' containers and search
 * each for a board data blob matching 'boardname'.
 * On success 'boardfw'/'boardfw_len' point into 'fwdata'; the caller
 * must keep 'fwdata' alive while using them.
 */
int
qwx_core_fetch_board_data_api_n(struct qwx_softc *sc,
    const u_char **boardfw, size_t *boardfw_len,
    u_char *fwdata, size_t fwdata_len,
    const char *boardname, int ie_id_match, int name_id, int data_id)
{
	size_t len, magic_len;
	const uint8_t *data;
	char *filename;
	size_t ie_len;
	struct ath11k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH11K_BOARD_API2_FILE;

	*boardfw = NULL;
	*boardfw_len = 0;

	data = fwdata;
	len = fwdata_len;

	/* magic has extra null byte padded */
	magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		printf("%s: failed to find magic value in %s, "
		    "file too short: %zu\n",
		    sc->sc_dev.dv_xname, filename, len);
		return EINVAL;
	}

	if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
		DPRINTF("%s: found invalid board magic\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* magic is padded to 4 bytes */
	magic_len = roundup(magic_len, 4);
	if (len < magic_len) {
		printf("%s: %s too small to contain board data, len: %zu\n",
		    sc->sc_dev.dv_xname, filename, len);
		return EINVAL;
	}

	data += magic_len;
	len -= magic_len;

	/* Walk the top-level IE list. */
	while (len > sizeof(struct ath11k_fw_ie)) {
		hdr = (struct ath11k_fw_ie *)data;
		ie_id = le32toh(hdr->id);
		ie_len = le32toh(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		/* Guard against lengths running past the image. */
		if (len < roundup(ie_len, 4)) {
			printf("%s: invalid length for board ie_id %d "
			    "ie_len %zu len %zu\n",
			    sc->sc_dev.dv_xname, ie_id, ie_len, len);
			return EINVAL;
		}

		if (ie_id == ie_id_match) {
			ret = qwx_core_parse_bd_ie_board(sc,
			    boardfw, boardfw_len, data, ie_len,
			    boardname, ie_id_match, name_id, data_id);
			if (ret == ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				return ret;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = roundup(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	if (!*boardfw || !*boardfw_len) {
		printf("%s: failed to fetch %s for %s from %s\n",
		    __func__, qwx_bd_ie_type_str(ie_id_match),
		    boardname, filename);
		return ENOENT;
	}

	return 0;
}
8585 
8586 int
8587 qwx_core_fetch_bdf(struct qwx_softc *sc, u_char **data, size_t *len,
8588     const u_char **boardfw, size_t *boardfw_len, const char *filename)
8589 {
8590 	char path[PATH_MAX];
8591 	char boardname[200];
8592 	int ret;
8593 
8594 	ret = snprintf(path, sizeof(path), "%s-%s-%s",
8595 	    ATH11K_FW_DIR, sc->hw_params.fw.dir, filename);
8596 	if (ret < 0 || ret >= sizeof(path))
8597 		return ENOSPC;
8598 
8599 	ret = qwx_core_create_board_name(sc, boardname, sizeof(boardname));
8600 	if (ret) {
8601 		DPRINTF("%s: failed to create board name: %d",
8602 		    sc->sc_dev.dv_xname, ret);
8603 		return ret;
8604 	}
8605 
8606 	ret = loadfirmware(path, data, len);
8607 	if (ret) {
8608 		printf("%s: could not read %s (error %d)\n",
8609 		    sc->sc_dev.dv_xname, path, ret);
8610 		return ret;
8611 	}
8612 
8613 	ret = qwx_core_fetch_board_data_api_n(sc, boardfw, boardfw_len,
8614 	    *data, *len, boardname, ATH11K_BD_IE_BOARD,
8615 	    ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA);
8616 	if (ret) {
8617 		DPRINTF("%s: failed to fetch board data for %s from %s\n",
8618 		    sc->sc_dev.dv_xname, boardname, path);
8619 		return ret;
8620 	}
8621 
8622 	return 0;
8623 }
8624 
/*
 * Upload a file (board data, regdb, or caldata) to the firmware via
 * QMI BDF-download requests, splitting it into chunks of at most
 * QMI_WLANFW_MAX_DATA_SIZE_V01 bytes. Each chunk is acknowledged by
 * the firmware before the next one is sent.
 * Returns 0 on success or an error code; 'ret' starts out as EINVAL so
 * a zero-length image fails cleanly without sending anything.
 */
int
qwx_qmi_load_file_target_mem(struct qwx_softc *sc, const u_char *data,
    size_t len, int type)
{
	struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
	const uint8_t *p = data;
#ifdef notyet
	void *bdf_addr = NULL;
#endif
	int ret = EINVAL; /* empty fw image */
	uint32_t remaining = len;

	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!req) {
		printf("%s: failed to allocate bfd download request\n",
		    sc->sc_dev.dv_xname);
		return ENOMEM;
	}

	/* Devices with a fixed BDF address are not supported yet. */
	if (sc->hw_params.fixed_bdf_addr) {
#ifdef notyet
		bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
		if (!bdf_addr) {
			ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
			ret = -EIO;
			goto err_free_req;
		}
#else
		printf("%s: fixed bdf address not yet supported\n",
		    sc->sc_dev.dv_xname);
		ret = EIO;
		goto err_free_req;
#endif
	}

	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
		req->file_id = sc->qmi_target.board_id;
		req->total_size_valid = 1;
		req->total_size = remaining;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->bdf_type = type;
		req->bdf_type_valid = 1;
		req->end_valid = 1;
		req->end = 0;

		/* The final chunk carries the 'end' marker. */
		if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		/*
		 * For fixed-address or EEPROM transfers no payload is
		 * carried in the QMI message itself.
		 */
		if (sc->hw_params.fixed_bdf_addr ||
		    type == ATH11K_QMI_FILE_TYPE_EEPROM) {
			req->data_valid = 0;
			req->end = 1;
			req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
		} else {
			memcpy(req->data, p, req->data_len);
		}
#ifdef notyet
		if (ab->hw_params.fixed_bdf_addr) {
			if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
				bdf_addr += ab->hw_params.fw.cal_offset;

			memcpy_toio(bdf_addr, p, len);
		}
#endif
		DPRINTF("%s: bdf download req fixed addr type %d\n",
		    __func__, type);

		ret = qwx_qmi_send_request(sc,
		    QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
		    QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
		    qmi_wlanfw_bdf_download_req_msg_v01_ei,
		    req, sizeof(*req));
		if (ret) {
			printf("%s: failed to send bdf download request\n",
			    sc->sc_dev.dv_xname);
			goto err_iounmap;
		}

		/* Wait for the firmware to acknowledge this chunk. */
		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
		while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
			ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxbdf",
			    SEC_TO_NSEC(1));
			if (ret) {
				printf("%s: bdf download request timeout\n",
				    sc->sc_dev.dv_xname);
				goto err_iounmap;
			}
		}

		if (sc->hw_params.fixed_bdf_addr ||
		    type == ATH11K_QMI_FILE_TYPE_EEPROM) {
			remaining = 0;
		} else {
			remaining -= req->data_len;
			p += req->data_len;
			req->seg_id++;
			DPRINTF("%s: bdf download request remaining %i\n",
			    __func__, remaining);
		}
	}

err_iounmap:
#ifdef notyet
	if (ab->hw_params.fixed_bdf_addr)
		iounmap(bdf_addr);
#endif
err_free_req:
	free(req, M_DEVBUF, sizeof(*req));

	return ret;
}
8743 
/* ELF file magic, used to detect ELF-formatted board data files. */
#define QWX_ELFMAG	"\177ELF"
#define QWX_SELFMAG	4
8746 
/*
 * Download the board data file (or, with 'regdb' set, the regulatory
 * database) to the firmware. The blob is cached in sc->fw_img[] so
 * subsequent calls avoid re-reading it from disk.
 */
int
qwx_qmi_load_bdf_qmi(struct qwx_softc *sc, int regdb)
{
	u_char *data = NULL;
	const u_char *boardfw;
	size_t len = 0, boardfw_len;
	uint32_t fw_size;
	int ret = 0, bdf_type;
#ifdef notyet
	const uint8_t *tmp;
	uint32_t file_type;
#endif
	int fw_idx = regdb ? QWX_FW_REGDB : QWX_FW_BOARD;

	if (sc->fw_img[fw_idx].data) {
		/* Use the cached copy. */
		boardfw = sc->fw_img[fw_idx].data;
		boardfw_len = sc->fw_img[fw_idx].size;
	} else {
		ret = qwx_core_fetch_bdf(sc, &data, &len,
		    &boardfw, &boardfw_len,
		    regdb ? ATH11K_REGDB_FILE : ATH11K_BOARD_API2_FILE);
		if (ret)
			return ret;

		/* Cache the blob; failing to cache is not fatal. */
		sc->fw_img[fw_idx].data = malloc(boardfw_len, M_DEVBUF,
		    M_NOWAIT);
		if (sc->fw_img[fw_idx].data) {
			memcpy(sc->fw_img[fw_idx].data, boardfw, boardfw_len);
			sc->fw_img[fw_idx].size = boardfw_len;
		}
	}

	/* Classify the blob: regdb, ELF, or raw binary. */
	if (regdb)
		bdf_type = ATH11K_QMI_BDF_TYPE_REGDB;
	else if (boardfw_len >= QWX_SELFMAG &&
	    memcmp(boardfw, QWX_ELFMAG, QWX_SELFMAG) == 0)
		bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
	else
		bdf_type = ATH11K_QMI_BDF_TYPE_BIN;

	DPRINTF("%s: bdf_type %d\n", __func__, bdf_type);

	fw_size = MIN(sc->hw_params.fw.board_size, boardfw_len);

	ret = qwx_qmi_load_file_target_mem(sc, boardfw, fw_size, bdf_type);
	if (ret) {
		printf("%s: failed to load bdf file\n", __func__);
		goto out;
	}

	/* QCA6390/WCN6855 does not support cal data, skip it */
	if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB)
		goto out;
#ifdef notyet
	if (ab->qmi.target.eeprom_caldata) {
		file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
		tmp = filename;
		fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
	} else {
		file_type = ATH11K_QMI_FILE_TYPE_CALDATA;

		/* cal-<bus>-<id>.bin */
		snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
			 ath11k_bus_str(ab->hif.bus), dev_name(dev));
		fw_entry = ath11k_core_firmware_request(ab, filename);
		if (!IS_ERR(fw_entry))
			goto success;

		fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
		if (IS_ERR(fw_entry)) {
			/* Caldata may not be present during first time calibration in
			 * factory hence allow to boot without loading caldata in ftm mode
			 */
			if (ath11k_ftm_mode) {
				ath11k_info(ab,
					    "Booting without cal data file in factory test mode\n");
				return 0;
			}
			ret = PTR_ERR(fw_entry);
			ath11k_warn(ab,
				    "qmi failed to load CAL data file:%s\n",
				    filename);
			goto out;
		}
success:
		fw_size = MIN(ab->hw_params.fw.board_size, fw_entry->size);
		tmp = fw_entry->data;
	}

	ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
	if (ret < 0) {
		ath11k_warn(ab, "qmi failed to load caldata\n");
		goto out_qmi_cal;
	}

	ath11k_dbg(ab, ATH11K_DBG_QMI, "caldata type: %u\n", file_type);

out_qmi_cal:
	if (!ab->qmi.target.eeprom_caldata)
		release_firmware(fw_entry);
#endif
out:
	/* 'data' is only non-NULL when the blob was read from disk. */
	free(data, M_DEVBUF, len);
	if (ret == 0)
		DPRINTF("%s: BDF download sequence completed\n", __func__);

	return ret;
}
8855 
8856 int
8857 qwx_qmi_event_load_bdf(struct qwx_softc *sc)
8858 {
8859 	int ret;
8860 
8861 	ret = qwx_qmi_request_target_cap(sc);
8862 	if (ret < 0) {
8863 		printf("%s: failed to request qmi target capabilities: %d\n",
8864 		    sc->sc_dev.dv_xname, ret);
8865 		return ret;
8866 	}
8867 
8868 	ret = qwx_qmi_request_device_info(sc);
8869 	if (ret < 0) {
8870 		printf("%s: failed to request qmi device info: %d\n",
8871 		    sc->sc_dev.dv_xname, ret);
8872 		return ret;
8873 	}
8874 
8875 	if (sc->hw_params.supports_regdb)
8876 		qwx_qmi_load_bdf_qmi(sc, 1);
8877 
8878 	ret = qwx_qmi_load_bdf_qmi(sc, 0);
8879 	if (ret < 0) {
8880 		printf("%s: failed to load board data file: %d\n",
8881 		    sc->sc_dev.dv_xname, ret);
8882 		return ret;
8883 	}
8884 
8885 	return 0;
8886 }
8887 
/*
 * Load the M3 firmware image (from the in-memory cache or from disk)
 * into a DMA buffer for the device to fetch.
 * Returns 0 on success or an errno-style value on failure.
 */
int
qwx_qmi_m3_load(struct qwx_softc *sc)
{
	u_char *data;
	size_t len;
	char path[PATH_MAX];
	int ret;

	/* Use the cached image if we already read it once. */
	if (sc->fw_img[QWX_FW_M3].data) {
		data = sc->fw_img[QWX_FW_M3].data;
		len = sc->fw_img[QWX_FW_M3].size;
	} else {
		ret = snprintf(path, sizeof(path), "%s-%s-%s",
		    ATH11K_FW_DIR, sc->hw_params.fw.dir, ATH11K_M3_FILE);
		if (ret < 0 || ret >= sizeof(path))
			return ENOSPC;

		ret = loadfirmware(path, &data, &len);
		if (ret) {
			printf("%s: could not read %s (error %d)\n",
			    sc->sc_dev.dv_xname, path, ret);
			return ret;
		}

		/* Cache the image for subsequent calls. */
		sc->fw_img[QWX_FW_M3].data = data;
		sc->fw_img[QWX_FW_M3].size = len;
	}

	/* (Re-)allocate the DMA buffer if it is missing or too small. */
	if (sc->m3_mem == NULL || QWX_DMA_LEN(sc->m3_mem) < len) {
		if (sc->m3_mem)
			qwx_dmamem_free(sc->sc_dmat, sc->m3_mem);
		sc->m3_mem = qwx_dmamem_alloc(sc->sc_dmat, len, 65536);
		if (sc->m3_mem == NULL) {
			printf("%s: failed to allocate %zu bytes of DMA "
			    "memory for M3 firmware\n", sc->sc_dev.dv_xname,
			    len);
			return ENOMEM;
		}
	}

	memcpy(QWX_DMA_KVA(sc->m3_mem), data, len);
	return 0;
}
8931 
8932 int
8933 qwx_qmi_wlanfw_m3_info_send(struct qwx_softc *sc)
8934 {
8935 	struct qmi_wlanfw_m3_info_req_msg_v01 req;
8936 	int ret = 0;
8937 	uint64_t paddr;
8938 	uint32_t size;
8939 
8940 	memset(&req, 0, sizeof(req));
8941 
8942 	if (sc->hw_params.m3_fw_support) {
8943 		ret = qwx_qmi_m3_load(sc);
8944 		if (ret) {
8945 			printf("%s: failed to load m3 firmware: %d",
8946 			    sc->sc_dev.dv_xname, ret);
8947 			return ret;
8948 		}
8949 
8950 		paddr = QWX_DMA_DVA(sc->m3_mem);
8951 		size = QWX_DMA_LEN(sc->m3_mem);
8952 		req.addr = htole64(paddr);
8953 		req.size = htole32(size);
8954 	} else {
8955 		req.addr = 0;
8956 		req.size = 0;
8957 	}
8958 
8959 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_M3_INFO_REQ_V01,
8960 	    QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
8961 	    qmi_wlanfw_m3_info_req_msg_v01_ei, &req, sizeof(req));
8962 	if (ret) {
8963 		printf("%s: failed to send m3 information request: %d\n",
8964 		    sc->sc_dev.dv_xname, ret);
8965 		return ret;
8966 	}
8967 
8968 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
8969 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
8970 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwm3",
8971 		    SEC_TO_NSEC(1));
8972 		if (ret) {
8973 			printf("%s: m3 information request timeout\n",
8974 			    sc->sc_dev.dv_xname);
8975 			return ret;
8976 		}
8977 	}
8978 
8979 	return 0;
8980 }
8981 
/* Debug helper: dump SRNG statistics. Not implemented. */
void
qwx_hal_dump_srng_stats(struct qwx_softc *sc)
{
	DPRINTF("%s not implemented\n", __func__);
}
8987 
8988 uint16_t
8989 qwx_hal_srng_get_entrysize(struct qwx_softc *sc, uint32_t ring_type)
8990 {
8991 	struct hal_srng_config *srng_config;
8992 
8993 	KASSERT(ring_type < HAL_MAX_RING_TYPES);
8994 
8995 	srng_config = &sc->hal.srng_config[ring_type];
8996 	return (srng_config->entry_size << 2);
8997 }
8998 
8999 uint32_t
9000 qwx_hal_srng_get_max_entries(struct qwx_softc *sc, uint32_t ring_type)
9001 {
9002 	struct hal_srng_config *srng_config;
9003 
9004 	KASSERT(ring_type < HAL_MAX_RING_TYPES);
9005 
9006 	srng_config = &sc->hal.srng_config[ring_type];
9007 	return (srng_config->max_size / srng_config->entry_size);
9008 }
9009 
/*
 * Return the next unprocessed entry of a destination ring and advance
 * the ring's tail pointer, or NULL if the ring is empty (the tail has
 * caught up with the cached head pointer).
 */
uint32_t *
qwx_hal_srng_dst_get_next_entry(struct qwx_softc *sc, struct hal_srng *srng)
{
	uint32_t *desc;
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;

	srng->u.dst_ring.tp += srng->entry_size;

	/* wrap around to start of ring*/
	if (srng->u.dst_ring.tp == srng->ring_size)
		srng->u.dst_ring.tp = 0;
#ifdef notyet
	/* Try to prefetch the next descriptor in the ring */
	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
		ath11k_hal_srng_prefetch_desc(ab, srng);
#endif
	return desc;
}
9034 
9035 int
9036 qwx_hal_srng_dst_num_free(struct qwx_softc *sc, struct hal_srng *srng,
9037     int sync_hw_ptr)
9038 {
9039 	uint32_t tp, hp;
9040 #ifdef notyet
9041 	lockdep_assert_held(&srng->lock);
9042 #endif
9043 	tp = srng->u.dst_ring.tp;
9044 
9045 	if (sync_hw_ptr) {
9046 		hp = *srng->u.dst_ring.hp_addr;
9047 		srng->u.dst_ring.cached_hp = hp;
9048 	} else {
9049 		hp = srng->u.dst_ring.cached_hp;
9050 	}
9051 
9052 	if (hp >= tp)
9053 		return (hp - tp) / srng->entry_size;
9054 	else
9055 		return (srng->ring_size - tp + hp) / srng->entry_size;
9056 }
9057 
9058 uint32_t *
9059 qwx_hal_srng_src_get_next_reaped(struct qwx_softc *sc, struct hal_srng *srng)
9060 {
9061 	uint32_t *desc;
9062 #ifdef notyet
9063 	lockdep_assert_held(&srng->lock);
9064 #endif
9065 	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
9066 		return NULL;
9067 
9068 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
9069 	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
9070 			      srng->ring_size;
9071 
9072 	return desc;
9073 }
9074 
/*
 * Peek at the entry at a source ring's head pointer without advancing
 * it. Returns NULL if the ring is full, i.e. advancing the head would
 * collide with the cached tail pointer.
 */
uint32_t *
qwx_hal_srng_src_peek(struct qwx_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
	    srng->u.src_ring.cached_tp)
		return NULL;

	return srng->ring_base_vaddr + srng->u.src_ring.hp;
}
9087 
/*
 * Report the MSI address stored in the softc, split into low and high
 * 32-bit halves.
 */
void
qwx_get_msi_address(struct qwx_softc *sc, uint32_t *addr_lo,
    uint32_t *addr_hi)
{
	*addr_lo = sc->msi_addr_lo;
	*addr_hi = sc->msi_addr_hi;
}
9095 
9096 int
9097 qwx_dp_srng_find_ring_in_mask(int ring_num, const uint8_t *grp_mask)
9098 {
9099 	int ext_group_num;
9100 	uint8_t mask = 1 << ring_num;
9101 
9102 	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
9103 	     ext_group_num++) {
9104 		if (mask & grp_mask[ext_group_num])
9105 			return ext_group_num;
9106 	}
9107 
9108 	return -1;
9109 }
9110 
/*
 * Map a (ring type, ring number) pair to the external interrupt group
 * that services it, using the per-group ring masks from hw_params.
 * Returns the group number, or -1 for ring types that are not part of
 * any external interrupt group.
 */
int
qwx_dp_srng_calculate_msi_group(struct qwx_softc *sc, enum hal_ring_type type,
    int ring_num)
{
	const uint8_t *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		/* The RX release ring has its own mask; other WBM2SW
		 * release rings are indexed by the TX mask. */
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &sc->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &sc->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &sc->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &sc->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &sc->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &sc->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &sc->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &sc->hw_params.ring_mask->host2rxdma[0];
		break;
	/* These ring types have no external interrupt group. */
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -1;
	}

	return qwx_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
9162 
9163 void
9164 qwx_dp_srng_msi_setup(struct qwx_softc *sc, struct hal_srng_params *ring_params,
9165     enum hal_ring_type type, int ring_num)
9166 {
9167 	int msi_group_number;
9168 	uint32_t msi_data_start = 0;
9169 	uint32_t msi_data_count = 1;
9170 	uint32_t msi_irq_start = 0;
9171 	uint32_t addr_lo;
9172 	uint32_t addr_hi;
9173 	int ret;
9174 
9175 	ret = sc->ops.get_user_msi_vector(sc, "DP",
9176 	    &msi_data_count, &msi_data_start, &msi_irq_start);
9177 	if (ret)
9178 		return;
9179 
9180 	msi_group_number = qwx_dp_srng_calculate_msi_group(sc, type,
9181 	    ring_num);
9182 	if (msi_group_number < 0) {
9183 		DPRINTF("%s ring not part of an ext_group; ring_type %d,"
9184 		    "ring_num %d\n", __func__, type, ring_num);
9185 		ring_params->msi_addr = 0;
9186 		ring_params->msi_data = 0;
9187 		return;
9188 	}
9189 
9190 	qwx_get_msi_address(sc, &addr_lo, &addr_hi);
9191 
9192 	ring_params->msi_addr = addr_lo;
9193 	ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
9194 	ring_params->msi_data = (msi_group_number % msi_data_count) +
9195 	    msi_data_start;
9196 	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
9197 }
9198 
/*
 * Allocate DMA memory for a DP SRNG, configure its MSI target and
 * per-ring-type interrupt mitigation thresholds, and register the
 * ring with the HAL.  On success, returns 0 and stores the HAL ring
 * id in ring->ring_id; otherwise returns ENOMEM/EINVAL or a HAL
 * setup error.
 */
int
qwx_dp_srng_setup(struct qwx_softc *sc, struct dp_srng *ring,
    enum hal_ring_type type, int ring_num, int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	uint16_t entry_sz = qwx_hal_srng_get_entrysize(sc, type);
	uint32_t max_entries = qwx_hal_srng_get_max_entries(sc, type);
	int ret;
	int cached = 0;

	/* Clamp the requested size to the hardware limit for this type. */
	if (num_entries > max_entries)
		num_entries = max_entries;

	/* Extra bytes let the HAL align the ring base address. */
	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

#ifdef notyet
	if (sc->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
		}
		if (!ring->vaddr_unaligned)
			return -ENOMEM;
	}
#endif
	/* cached is always 0 until the disabled code above is enabled. */
	if (!cached) {
		ring->mem = qwx_dmamem_alloc(sc->sc_dmat, ring->size,
		    PAGE_SIZE);
		if (ring->mem == NULL) {
			printf("%s: could not allocate DP SRNG DMA memory\n",
			    sc->sc_dev.dv_xname);
			return ENOMEM;

		}
	}

	ring->vaddr = QWX_DMA_KVA(ring->mem);
	ring->paddr = QWX_DMA_DVA(ring->mem);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	qwx_dp_srng_msi_setup(sc, &params, type, ring_num + mac_id);

	/* Choose interrupt mitigation thresholds by ring type. */
	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
		    HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		/* Refill rings interrupt on low watermark, not batches. */
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
			    HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
			    HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* follow through when ring_num >= 3 */
		/* FALLTHROUGH */
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
		    HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		printf("%s: Not a valid ring type in dp :%d\n",
		    sc->sc_dev.dv_xname, type);
		return EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = qwx_hal_srng_setup(sc, type, ring_num, mac_id, &params);
	if (ret < 0) {
		printf("%s: failed to setup srng: %d ring_id %d\n",
		    sc->sc_dev.dv_xname, ret, ring_num);
		return ret;
	}

	/* On success the HAL returns the assigned ring id. */
	ring->ring_id = ret;
	return 0;
}
9316 
/*
 * Begin a ring access sequence: snapshot the hardware-owned pointer
 * (tail for source rings, head for destination rings) so subsequent
 * produce/consume operations work against a consistent view.
 */
void
qwx_hal_srng_access_begin(struct qwx_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)srng->u.src_ring.tp_addr;
	} else {
		/* NOTE(review): unlike the source-ring path, this read has
		 * no volatile cast; presumably hp_addr is declared volatile
		 * in the struct — confirm against the hal_srng definition. */
		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
	}
}
9330 
/*
 * End a ring access sequence: publish our updated pointer (head for
 * source rings, tail for destination rings) to the consumer.  LMAC
 * rings are updated through a shared-memory location read by firmware;
 * other rings are updated via a register write at the offset of the
 * pointer address within the mapped device memory.
 */
void
qwx_hal_srng_access_end(struct qwx_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
			/* Register offset = pointer address - mapping base. */
			sc->ops.write32(sc,
			    (unsigned long)srng->u.src_ring.hp_addr -
			    (unsigned long)sc->mem, srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			sc->ops.write32(sc,
			    (unsigned long)srng->u.dst_ring.tp_addr -
			    (unsigned long)sc->mem, srng->u.dst_ring.tp);
		}
	}
#ifdef notyet
	srng->timestamp = jiffies;
#endif
}
9368 
9369 int
9370 qwx_wbm_idle_ring_setup(struct qwx_softc *sc, uint32_t *n_link_desc)
9371 {
9372 	struct qwx_dp *dp = &sc->dp;
9373 	uint32_t n_mpdu_link_desc, n_mpdu_queue_desc;
9374 	uint32_t n_tx_msdu_link_desc, n_rx_msdu_link_desc;
9375 	int ret = 0;
9376 
9377 	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
9378 			   HAL_NUM_MPDUS_PER_LINK_DESC;
9379 
9380 	n_mpdu_queue_desc = n_mpdu_link_desc /
9381 			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
9382 
9383 	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
9384 			       DP_AVG_MSDUS_PER_FLOW) /
9385 			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
9386 
9387 	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
9388 			       DP_AVG_MSDUS_PER_MPDU) /
9389 			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
9390 
9391 	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
9392 		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
9393 
9394 	if (*n_link_desc & (*n_link_desc - 1))
9395 		*n_link_desc = 1 << fls(*n_link_desc);
9396 
9397 	ret = qwx_dp_srng_setup(sc, &dp->wbm_idle_ring,
9398 	    HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
9399 	if (ret) {
9400 		printf("%s: failed to setup wbm_idle_ring: %d\n",
9401 		    sc->sc_dev.dv_xname, ret);
9402 	}
9403 
9404 	return ret;
9405 }
9406 
9407 void
9408 qwx_dp_link_desc_bank_free(struct qwx_softc *sc,
9409     struct dp_link_desc_bank *link_desc_banks)
9410 {
9411 	int i;
9412 
9413 	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
9414 		if (link_desc_banks[i].mem) {
9415 			qwx_dmamem_free(sc->sc_dmat, link_desc_banks[i].mem);
9416 			link_desc_banks[i].mem = NULL;
9417 		}
9418 	}
9419 }
9420 
9421 int
9422 qwx_dp_link_desc_bank_alloc(struct qwx_softc *sc,
9423     struct dp_link_desc_bank *desc_bank, int n_link_desc_bank,
9424     int last_bank_sz)
9425 {
9426 	struct qwx_dp *dp = &sc->dp;
9427 	int i;
9428 	int ret = 0;
9429 	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
9430 
9431 	for (i = 0; i < n_link_desc_bank; i++) {
9432 		if (i == (n_link_desc_bank - 1) && last_bank_sz)
9433 			desc_sz = last_bank_sz;
9434 
9435 		desc_bank[i].mem = qwx_dmamem_alloc(sc->sc_dmat, desc_sz,
9436 		    PAGE_SIZE);
9437 		if (!desc_bank[i].mem) {
9438 			ret = ENOMEM;
9439 			goto err;
9440 		}
9441 
9442 		desc_bank[i].vaddr = QWX_DMA_KVA(desc_bank[i].mem);
9443 		desc_bank[i].paddr = QWX_DMA_DVA(desc_bank[i].mem);
9444 		desc_bank[i].size = desc_sz;
9445 	}
9446 
9447 	return 0;
9448 
9449 err:
9450 	qwx_dp_link_desc_bank_free(sc, dp->link_desc_banks);
9451 
9452 	return ret;
9453 }
9454 
/*
 * Program the WBM idle link descriptor list registers from a chain of
 * scatter buffers.  Each scatter buffer's tail slot is first pointed
 * at the next buffer's DMA address, then the WBM registers are written
 * with the list geometry, base address, and head/tail pointers, and
 * finally the SRNG is enabled.
 */
void
qwx_hal_setup_link_idle_list(struct qwx_softc *sc,
    struct hal_wbm_idle_scatter_list *sbuf,
    uint32_t nsbufs, uint32_t tot_link_desc, uint32_t end_offset)
{
	struct ath11k_buffer_addr *link_addr;
	int i;
	/* Register expresses the scatter buffer size in 64-byte units. */
	uint32_t reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;

	/* Chain slot sits immediately after each buffer's usable area. */
	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;

	for (i = 1; i < nsbufs; i++) {
		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
		link_addr->info1 = FIELD_PREP(
		    HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
		    (uint64_t)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
		    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
		    BASE_ADDR_MATCH_TAG_VAL);

		link_addr = (void *)sbuf[i].vaddr +
		    HAL_WBM_IDLE_SCATTER_BUF_SIZE;
	}

	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
	    FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
	    FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
	    FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
	    reg_scatter_buf_sz * nsbufs));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_LSB,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
	    sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_RING_BASE_MSB,
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    (uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
	    BASE_ADDR_MATCH_TAG_VAL));

	/* Setup head and tail pointers for the idle list */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[nsbufs - 1].paddr));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    ((uint64_t)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
	    (end_offset >> 2)));
	/* NOTE(review): HEAD_INFO_IX0 is deliberately written a second
	 * time with the first buffer's address; this mirrors the Linux
	 * ath11k ath11k_hal_setup_link_idle_list() sequence. */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));

	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    ((uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1, 0));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
	    2 * tot_link_desc);

	/* Enable the SRNG */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(sc),
	    0x40);
}
9530 
9531 void
9532 qwx_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, uint32_t cookie,
9533     bus_addr_t paddr)
9534 {
9535 	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
9536 	    (paddr & HAL_ADDR_LSB_REG_MASK));
9537 	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
9538 	    ((uint64_t)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
9539 	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
9540 	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
9541 }
9542 
9543 void
9544 qwx_dp_scatter_idle_link_desc_cleanup(struct qwx_softc *sc)
9545 {
9546 	struct qwx_dp *dp = &sc->dp;
9547 	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
9548 	int i;
9549 
9550 	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
9551 		if (slist[i].mem == NULL)
9552 			continue;
9553 
9554 		qwx_dmamem_free(sc->sc_dmat, slist[i].mem);
9555 		slist[i].mem = NULL;
9556 		slist[i].vaddr = NULL;
9557 		slist[i].paddr = 0L;
9558 	}
9559 }
9560 
/*
 * Build the scatter-buffer variant of the WBM idle link descriptor
 * list, used when the total descriptor memory exceeds a single
 * allocation threshold.  Allocates up to DP_IDLE_SCATTER_BUFS_MAX
 * scatter buffers, fills them with link descriptor addresses drawn
 * from the descriptor banks, and hands the list to the HAL.
 * Returns 0, EINVAL (too many buffers needed), or ENOMEM.
 */
int
qwx_dp_scatter_idle_link_desc_setup(struct qwx_softc *sc, int size,
    uint32_t n_link_desc_bank, uint32_t n_link_desc, uint32_t last_bank_sz)
{
	struct qwx_dp *dp = &sc->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	uint32_t n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int n_entries;
	bus_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	uint32_t end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
	    qwx_hal_srng_get_entrysize(sc, HAL_WBM_IDLE_LINK);
	num_scatter_buf = howmany(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].mem = qwx_dmamem_alloc(sc->sc_dmat,
		    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX, PAGE_SIZE);
		if (slist[i].mem == NULL) {
			ret = ENOMEM;
			goto err;
		}

		slist[i].vaddr = QWX_DMA_KVA(slist[i].mem);
		slist[i].paddr = QWX_DMA_DVA(slist[i].mem);
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	/* Walk every bank, writing one link descriptor address per entry
	 * and advancing to the next scatter buffer when one fills up. */
	for (i = 0; i < n_link_desc_bank; i++) {
		n_entries = DP_LINK_DESC_ALLOC_SIZE_THRESH / HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			qwx_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	/* Byte offset of the final entry within the last scatter buffer. */
	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
	    sizeof(struct hal_wbm_link_desc);
	qwx_hal_setup_link_idle_list(sc, slist, num_scatter_buf,
	    n_link_desc, end_offset);

	return 0;

err:
	qwx_dp_scatter_idle_link_desc_cleanup(sc);

	return ret;
}
9632 
/*
 * Return the next free descriptor of a source ring and advance the
 * software head pointer, or NULL when the ring is full (the advanced
 * head would meet the cached tail).  Also advances the reap pointer.
 */
uint32_t *
qwx_hal_srng_src_get_next_entry(struct qwx_softc *sc, struct hal_srng *srng)
{
	uint32_t *desc;
	uint32_t next_hp;
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif

	/* TODO: Using % is expensive, but we have to do this since size of some
	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
	 * if separate function is defined for rings having power of 2 ring size
	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
	 * overhead of % by using mask (with &).
	 */
	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;

	if (next_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = next_hp;

	/* TODO: Reap functionality is not used by all rings. If particular
	 * ring does not use reap functionality, we need not update reap_hp
	 * with next_hp pointer. Need to make sure a separate function is used
	 * before doing any optimization by removing below code updating
	 * reap_hp.
	 */
	srng->u.src_ring.reap_hp = next_hp;

	return desc;
}
9666 
9667 uint32_t *
9668 qwx_hal_srng_src_reap_next(struct qwx_softc *sc, struct hal_srng *srng)
9669 {
9670 	uint32_t *desc;
9671 	uint32_t next_reap_hp;
9672 #ifdef notyet
9673 	lockdep_assert_held(&srng->lock);
9674 #endif
9675 	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
9676 	    srng->ring_size;
9677 
9678 	if (next_reap_hp == srng->u.src_ring.cached_tp)
9679 		return NULL;
9680 
9681 	desc = srng->ring_base_vaddr + next_reap_hp;
9682 	srng->u.src_ring.reap_hp = next_reap_hp;
9683 
9684 	return desc;
9685 }
9686 
/*
 * Allocate the link descriptor banks and feed their descriptors to the
 * hardware's idle list.  When the idle list is too large for a single
 * allocation (and the ring type allows it), a scatter list is used;
 * otherwise the descriptors are pushed directly into the idle ring.
 * Returns 0 on success or an error from the allocation/setup steps.
 */
int
qwx_dp_link_desc_setup(struct qwx_softc *sc,
    struct dp_link_desc_bank *link_desc_banks, uint32_t ring_type,
    struct hal_srng *srng, uint32_t n_link_desc)
{
	uint32_t tot_mem_sz;
	uint32_t n_link_desc_bank, last_bank_sz;
	uint32_t entry_sz, n_entries;
	uint64_t paddr;
	uint32_t *desc;
	int i, ret;

	/* Slack for aligning each bank's base address. */
	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
		    (DP_LINK_DESC_ALLOC_SIZE_THRESH - HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz % (DP_LINK_DESC_ALLOC_SIZE_THRESH -
		    HAL_LINK_DESC_ALIGN);

		/* A non-zero remainder needs one extra, smaller bank. */
		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return EINVAL;

	ret = qwx_dp_link_desc_bank_alloc(sc, link_desc_banks,
	    n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = qwx_hal_srng_get_entrysize(sc, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = qwx_dp_scatter_idle_link_desc_setup(sc, tot_mem_sz,
		    n_link_desc_bank, n_link_desc, last_bank_sz);
		if (ret) {
			printf("%s: failed to setup scatting idle list "
			    "descriptor :%d\n",
			    sc->sc_dev.dv_xname, ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}
#if 0
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	/* Push every bank's descriptors into the idle ring directly. */
	for (i = 0; i < n_link_desc_bank; i++) {
		n_entries = (link_desc_banks[i].size) / HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		    (desc = qwx_hal_srng_src_get_next_entry(sc, srng))) {
			qwx_hal_set_link_desc_addr(
			    (struct hal_wbm_link_desc *) desc, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	qwx_hal_srng_access_end(sc, srng);
#if 0
	spin_unlock_bh(&srng->lock);
#endif

	return 0;

fail_desc_bank_free:
	qwx_dp_link_desc_bank_free(sc, link_desc_banks);

	return ret;
}
9770 
/*
 * Free a DP SRNG's DMA memory and clear its bookkeeping pointers.
 * Safe to call on a ring that was never set up (mem == NULL).
 */
void
qwx_dp_srng_cleanup(struct qwx_softc *sc, struct dp_srng *ring)
{
	if (ring->mem == NULL)
		return;

#if 0
	if (ring->cached)
		kfree(ring->vaddr_unaligned);
	else
#endif
		qwx_dmamem_free(sc->sc_dmat, ring->mem);

	ring->mem = NULL;
	ring->vaddr = NULL;
	ring->paddr = 0;
}
9788 
9789 void
9790 qwx_dp_shadow_stop_timer(struct qwx_softc *sc,
9791     struct qwx_hp_update_timer *update_timer)
9792 {
9793 	if (!sc->hw_params.supports_shadow_regs)
9794 		return;
9795 
9796 	timeout_del(&update_timer->timer);
9797 }
9798 
9799 void
9800 qwx_dp_shadow_start_timer(struct qwx_softc *sc, struct hal_srng *srng,
9801     struct qwx_hp_update_timer *update_timer)
9802 {
9803 #ifdef notyet
9804 	lockdep_assert_held(&srng->lock);
9805 #endif
9806 	if (!sc->hw_params.supports_shadow_regs)
9807 		return;
9808 
9809 	update_timer->tx_num++;
9810 	if (update_timer->started)
9811 		return;
9812 
9813 	update_timer->started = 1;
9814 	update_timer->timer_tx_num = update_timer->tx_num;
9815 
9816 	timeout_add_msec(&update_timer->timer, update_timer->interval);
9817 }
9818 
9819 void
9820 qwx_dp_shadow_timer_handler(void *arg)
9821 {
9822 	struct qwx_hp_update_timer *update_timer = arg;
9823 	struct qwx_softc *sc = update_timer->sc;
9824 	struct hal_srng	*srng = &sc->hal.srng_list[update_timer->ring_id];
9825 	int s;
9826 
9827 #ifdef notyet
9828 	spin_lock_bh(&srng->lock);
9829 #endif
9830 	s = splnet();
9831 
9832 	/*
9833 	 * Update HP if there were no TX operations during the timeout interval,
9834 	 * and stop the timer. Timer will be restarted if more TX happens.
9835 	 */
9836 	if (update_timer->timer_tx_num != update_timer->tx_num) {
9837 		update_timer->timer_tx_num = update_timer->tx_num;
9838 		timeout_add_msec(&update_timer->timer, update_timer->interval);
9839 	} else {
9840 		update_timer->started = 0;
9841 		qwx_hal_srng_shadow_update_hp_tp(sc, srng);
9842 	}
9843 #ifdef notyet
9844 	spin_unlock_bh(&srng->lock);
9845 #endif
9846 	splx(s);
9847 }
9848 
9849 void
9850 qwx_dp_stop_shadow_timers(struct qwx_softc *sc)
9851 {
9852 	int i;
9853 
9854 	for (i = 0; i < sc->hw_params.max_tx_ring; i++)
9855 		qwx_dp_shadow_stop_timer(sc, &sc->dp.tx_ring_timer[i]);
9856 
9857 	qwx_dp_shadow_stop_timer(sc, &sc->dp.reo_cmd_timer);
9858 }
9859 
/*
 * Tear down all common (non-per-pdev) DP rings and stop the shadow
 * register update timers first so they cannot touch freed rings.
 */
void
qwx_dp_srng_common_cleanup(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	int i;

	qwx_dp_stop_shadow_timers(sc);
	qwx_dp_srng_cleanup(sc, &dp->wbm_desc_rel_ring);
	qwx_dp_srng_cleanup(sc, &dp->tcl_cmd_ring);
	qwx_dp_srng_cleanup(sc, &dp->tcl_status_ring);
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		qwx_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_data_ring);
		qwx_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_comp_ring);
	}
	qwx_dp_srng_cleanup(sc, &dp->reo_reinject_ring);
	qwx_dp_srng_cleanup(sc, &dp->rx_rel_ring);
	qwx_dp_srng_cleanup(sc, &dp->reo_except_ring);
	qwx_dp_srng_cleanup(sc, &dp->reo_cmd_ring);
	qwx_dp_srng_cleanup(sc, &dp->reo_status_ring);
}
9880 
9881 void
9882 qwx_hal_srng_get_params(struct qwx_softc *sc, struct hal_srng *srng,
9883     struct hal_srng_params *params)
9884 {
9885 	params->ring_base_paddr = srng->ring_base_paddr;
9886 	params->ring_base_vaddr = srng->ring_base_vaddr;
9887 	params->num_entries = srng->num_entries;
9888 	params->intr_timer_thres_us = srng->intr_timer_thres_us;
9889 	params->intr_batch_cntr_thres_entries =
9890 		srng->intr_batch_cntr_thres_entries;
9891 	params->low_threshold = srng->u.src_ring.low_threshold;
9892 	params->msi_addr = srng->msi_addr;
9893 	params->msi_data = srng->msi_data;
9894 	params->flags = srng->flags;
9895 }
9896 
9897 void
9898 qwx_hal_tx_init_data_ring(struct qwx_softc *sc, struct hal_srng *srng)
9899 {
9900 	struct hal_srng_params params;
9901 	struct hal_tlv_hdr *tlv;
9902 	int i, entry_size;
9903 	uint8_t *desc;
9904 
9905 	memset(&params, 0, sizeof(params));
9906 
9907 	entry_size = qwx_hal_srng_get_entrysize(sc, HAL_TCL_DATA);
9908 	qwx_hal_srng_get_params(sc, srng, &params);
9909 	desc = (uint8_t *)params.ring_base_vaddr;
9910 
9911 	for (i = 0; i < params.num_entries; i++) {
9912 		tlv = (struct hal_tlv_hdr *)desc;
9913 		tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
9914 		    FIELD_PREP(HAL_TLV_HDR_LEN,
9915 		    sizeof(struct hal_tcl_data_cmd));
9916 		desc += entry_size;
9917 	}
9918 }
9919 
#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64

/* dscp_tid_map - Default DSCP-TID mapping
 *
 * Each of the 64 DSCP values maps to TID = DSCP >> 3 (i.e. the three
 * most significant DSCP bits select the TID):
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static const uint8_t dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
9944 
/*
 * Program DSCP-to-TID map table `id' in the TCL block from the default
 * dscp_tid_map table.  Map programming access is enabled around the
 * writes and disabled again afterwards.
 */
void
qwx_hal_tx_set_dscp_tid_map(struct qwx_softc *sc, int id)
{
	uint32_t ctrl_reg_val;
	uint32_t addr;
	uint8_t hw_map_val[HAL_DSCP_TID_TBL_SIZE];
	int i;
	uint32_t value;
	int cnt = 0;

	ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG);

	/* Enable read/write access */
	ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);

	addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
	       (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));

	/* Each DSCP-TID mapping takes three bits, so every iteration
	 * packs eight mappings into three bytes of the table image.
	 */
	for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
		value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
				   dscp_tid_map[i]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
				   dscp_tid_map[i + 1]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
				   dscp_tid_map[i + 2]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
				   dscp_tid_map[i + 3]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
				   dscp_tid_map[i + 4]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
				   dscp_tid_map[i + 5]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
				   dscp_tid_map[i + 6]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
				   dscp_tid_map[i + 7]);
		memcpy(&hw_map_val[cnt], (uint8_t *)&value, 3);
		cnt += 3;
	}

	/* NOTE(review): the uint32_t load below assumes hw_map_val is
	 * 4-byte aligned; stack arrays usually are, but this is not
	 * guaranteed by the type — matches upstream ath11k. */
	for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
		sc->ops.write32(sc, addr, *(uint32_t *)&hw_map_val[i]);
		addr += 4;
	}

	/* Disable read/write access */
	ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG);
	ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
}
10002 
10003 void
10004 qwx_dp_shadow_init_timer(struct qwx_softc *sc,
10005     struct qwx_hp_update_timer *update_timer,
10006     uint32_t interval, uint32_t ring_id)
10007 {
10008 	if (!sc->hw_params.supports_shadow_regs)
10009 		return;
10010 
10011 	update_timer->tx_num = 0;
10012 	update_timer->timer_tx_num = 0;
10013 	update_timer->sc = sc;
10014 	update_timer->ring_id = ring_id;
10015 	update_timer->interval = interval;
10016 	update_timer->init = 1;
10017 	timeout_set(&update_timer->timer, qwx_dp_shadow_timer_handler,
10018 	    update_timer);
10019 }
10020 
10021 void
10022 qwx_hal_reo_init_cmd_ring(struct qwx_softc *sc, struct hal_srng *srng)
10023 {
10024 	struct hal_srng_params params;
10025 	struct hal_tlv_hdr *tlv;
10026 	struct hal_reo_get_queue_stats *desc;
10027 	int i, cmd_num = 1;
10028 	int entry_size;
10029 	uint8_t *entry;
10030 
10031 	memset(&params, 0, sizeof(params));
10032 
10033 	entry_size = qwx_hal_srng_get_entrysize(sc, HAL_REO_CMD);
10034 	qwx_hal_srng_get_params(sc, srng, &params);
10035 	entry = (uint8_t *)params.ring_base_vaddr;
10036 
10037 	for (i = 0; i < params.num_entries; i++) {
10038 		tlv = (struct hal_tlv_hdr *)entry;
10039 		desc = (struct hal_reo_get_queue_stats *)tlv->value;
10040 		desc->cmd.info0 = FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER,
10041 		    cmd_num++);
10042 		entry += entry_size;
10043 	}
10044 }
10045 
10046 int
10047 qwx_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv, struct ath11k_hal_reo_cmd *cmd)
10048 {
10049 	struct hal_reo_get_queue_stats *desc;
10050 
10051 	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
10052 	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
10053 
10054 	desc = (struct hal_reo_get_queue_stats *)tlv->value;
10055 
10056 	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
10057 	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
10058 		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
10059 
10060 	desc->queue_addr_lo = cmd->addr_lo;
10061 	desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
10062 	    cmd->addr_hi);
10063 	if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
10064 		desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
10065 
10066 	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
10067 }
10068 
/*
 * Encode a FLUSH_CACHE REO command into the given TLV slot.  When
 * block-later is requested, a free blocking resource slot is claimed
 * first; ENOSPC is returned if none is available.  Otherwise returns
 * the slot's pre-assigned command number.
 */
int
qwx_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
    struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_reo_flush_cache *desc;
	/* First clear bit = first free blocking resource slot. */
	uint8_t avail_slot = ffz(hal->avail_blk_resource);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
			return ENOSPC;

		hal->current_blk_index = avail_slot;
	}

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_flush_cache *)tlv->value;

	/* Preserve the command number; only update the status flag. */
	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	desc->cache_addr_lo = cmd->addr_lo;
	desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
	    cmd->addr_hi);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
		desc->info0 |=
		    FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
		    avail_slot);
	}

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
10114 
/*
 * Encode a REO "update rx queue" command into the caller-provided TLV
 * slot. cmd->upd0 selects which queue fields are to be updated;
 * cmd->upd1 and cmd->upd2 carry the new values. Returns the command
 * number read back from the descriptor header.
 *
 * NOTE(review): cmd->pn_size and cmd->ba_window_size are rewritten in
 * place below, a side effect visible to the caller -- confirm no
 * caller reuses the same cmd expecting the original values.
 */
int
qwx_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
    struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_reo_update_rx_queue *desc;

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_update_rx_queue *)tlv->value;

	/* Request a completion entry on the REO status ring if asked to. */
	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	desc->queue_addr_lo = cmd->addr_lo;
	/* info0: one "update" enable bit per queue field, from upd0. */
	desc->info0 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
		    cmd->addr_hi) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));

	/* info1: new values for the single-bit queue fields, from upd1. */
	desc->info1 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
		    cmd->rx_queue_num) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
		    FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
		    FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));

	/* Translate the PN size in bits to the hardware encoding. */
	if (cmd->pn_size == 24)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
	else if (cmd->pn_size == 48)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
	else if (cmd->pn_size == 128)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;

	/* Clamp the BA window to at least 2; hardware stores size - 1. */
	if (cmd->ba_window_size < 1)
		cmd->ba_window_size = 1;

	if (cmd->ba_window_size == 1)
		cmd->ba_window_size++;

	/* info2: BA window, PN size, and sequence/PN state, from upd2. */
	desc->info2 = FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
	    cmd->ba_window_size - 1) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
	        FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
10238 
/*
 * Grab the next free entry on the REO command ring, encode the given
 * command into it, and start the shadow timer so the updated head
 * pointer reaches the hardware. Returns the command number of the ring
 * entry on success, ENOBUFS if the ring is full, ENOTSUP for command
 * types this driver does not encode, or EINVAL for unknown types.
 *
 * NOTE(review): command numbers and positive errnos share the same
 * return channel -- confirm callers can tell them apart. The shadow
 * timer is also started when encoding failed; confirm this is intended.
 */
int
qwx_hal_reo_cmd_send(struct qwx_softc *sc, struct hal_srng *srng,
    enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_tlv_hdr *reo_desc;
	int ret;
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);
	reo_desc = (struct hal_tlv_hdr *)qwx_hal_srng_src_get_next_entry(sc, srng);
	if (!reo_desc) {
		ret = ENOBUFS;
		goto out;
	}

	switch (type) {
	case HAL_REO_CMD_GET_QUEUE_STATS:
		ret = qwx_hal_reo_cmd_queue_stats(reo_desc, cmd);
		break;
	case HAL_REO_CMD_FLUSH_CACHE:
		ret = qwx_hal_reo_cmd_flush_cache(&sc->hal, reo_desc, cmd);
		break;
	case HAL_REO_CMD_UPDATE_RX_QUEUE:
		ret = qwx_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
		break;
	case HAL_REO_CMD_FLUSH_QUEUE:
	case HAL_REO_CMD_UNBLOCK_CACHE:
	case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
		printf("%s: unsupported reo command %d\n",
		   sc->sc_dev.dv_xname, type);
		ret = ENOTSUP;
		break;
	default:
		printf("%s: unknown reo command %d\n",
		    sc->sc_dev.dv_xname, type);
		ret = EINVAL;
		break;
	}

	qwx_dp_shadow_start_timer(sc, srng, &sc->dp.reo_cmd_timer);
out:
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
/*
 * Set up all data-path SRNGs shared between driver and firmware: the
 * WBM release ring, the TCL command/status rings, one TCL data and one
 * WBM completion ring per hardware TX ring, and the REO reinject,
 * release, exception, command and status rings. On error every ring
 * set up so far is torn down again via qwx_dp_srng_common_cleanup().
 * Returns 0 on success or the errno of the failed setup.
 */
int
qwx_dp_srng_common_setup(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct hal_srng *srng;
	int i, ret;
	uint8_t tcl_num, wbm_num;

	ret = qwx_dp_srng_setup(sc, &dp->wbm_desc_rel_ring, HAL_SW2WBM_RELEASE,
	    0, 0, DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up wbm2sw_release ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->tcl_cmd_ring, HAL_TCL_CMD,
	    0, 0, DP_TCL_CMD_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up tcl_cmd ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->tcl_status_ring, HAL_TCL_STATUS,
	    0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up tcl_status ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* One TCL data + WBM completion ring pair per hardware TX ring;
	 * the ring numbers come from the per-chip tcl2wbm map. */
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		const struct ath11k_hw_hal_params *hal_params;

		hal_params = sc->hw_params.hal_params;
		tcl_num = hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = qwx_dp_srng_setup(sc, &dp->tx_ring[i].tcl_data_ring,
		    HAL_TCL_DATA, tcl_num, 0, sc->hw_params.tx_ring_size);
		if (ret) {
			printf("%s: failed to set up tcl_data ring (%d) :%d\n",
			    sc->sc_dev.dv_xname, i, ret);
			goto err;
		}

		ret = qwx_dp_srng_setup(sc, &dp->tx_ring[i].tcl_comp_ring,
		    HAL_WBM2SW_RELEASE, wbm_num, 0, DP_TX_COMP_RING_SIZE);
		if (ret) {
			printf("%s: failed to set up tcl_comp ring (%d) :%d\n",
			    sc->sc_dev.dv_xname, i, ret);
			goto err;
		}

		srng = &sc->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		qwx_hal_tx_init_data_ring(sc, srng);

		qwx_dp_shadow_init_timer(sc, &dp->tx_ring_timer[i],
		    ATH11K_SHADOW_DP_TIMER_INTERVAL,
		    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = qwx_dp_srng_setup(sc, &dp->reo_reinject_ring, HAL_REO_REINJECT,
	    0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_reinject ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
	    DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up rx_rel ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->reo_except_ring, HAL_REO_EXCEPTION,
	    0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_exception ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->reo_cmd_ring, HAL_REO_CMD, 0, 0,
	    DP_REO_CMD_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_cmd ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	srng = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
	qwx_hal_reo_init_cmd_ring(sc, srng);

	qwx_dp_shadow_init_timer(sc, &dp->reo_cmd_timer,
	     ATH11K_SHADOW_CTRL_TIMER_INTERVAL, dp->reo_cmd_ring.ring_id);

	ret = qwx_dp_srng_setup(sc, &dp->reo_status_ring, HAL_REO_STATUS,
	    0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_status ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured.
	 */
	sc->hw_params.hw_ops->reo_setup(sc);
	return 0;

err:
	qwx_dp_srng_common_cleanup(sc);

	return ret;
}
10407 
10408 void
10409 qwx_dp_link_desc_cleanup(struct qwx_softc *sc,
10410     struct dp_link_desc_bank *desc_bank, uint32_t ring_type,
10411     struct dp_srng *ring)
10412 {
10413 	qwx_dp_link_desc_bank_free(sc, desc_bank);
10414 
10415 	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
10416 		qwx_dp_srng_cleanup(sc, ring);
10417 		qwx_dp_scatter_idle_link_desc_cleanup(sc);
10418 	}
10419 }
10420 
10421 void
10422 qwx_dp_tx_ring_free_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
10423 {
10424 	int i;
10425 
10426 	if (tx_ring->data == NULL)
10427 		return;
10428 
10429 	for (i = 0; i < sc->hw_params.tx_ring_size; i++) {
10430 		struct qwx_tx_data *tx_data = &tx_ring->data[i];
10431 
10432 		if (tx_data->map) {
10433 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
10434 			bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
10435 		}
10436 
10437 		m_freem(tx_data->m);
10438 	}
10439 
10440 	free(tx_ring->data, M_DEVBUF,
10441 	    sc->hw_params.tx_ring_size * sizeof(struct qwx_tx_data));
10442 	tx_ring->data = NULL;
10443 }
10444 
10445 int
10446 qwx_dp_tx_ring_alloc_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
10447 {
10448 	int i, ret;
10449 
10450 	tx_ring->data = mallocarray(sc->hw_params.tx_ring_size,
10451 	   sizeof(struct qwx_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO);
10452 	if (tx_ring->data == NULL)
10453 		return ENOMEM;
10454 
10455 	for (i = 0; i < sc->hw_params.tx_ring_size; i++) {
10456 		struct qwx_tx_data *tx_data = &tx_ring->data[i];
10457 
10458 		ret = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
10459 		    BUS_DMA_NOWAIT, &tx_data->map);
10460 		if (ret)
10461 			return ret;
10462 	}
10463 
10464 	return 0;
10465 }
10466 
/*
 * Allocate and initialize all software data-path state: the REO
 * command lists, the WBM idle ring and its link descriptors, the
 * common SRNGs, and per-TX-ring buffer bookkeeping plus completion
 * status storage. Returns 0 on success or an errno.
 *
 * NOTE(review): if a TX-ring allocation fails partway through the
 * loop, the data/tx_status arrays of rings set up earlier are not
 * freed on the error path below -- confirm qwx_dp_free() runs in that
 * case to release them.
 */
int
qwx_dp_alloc(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	uint32_t n_link_desc = 0;
	int ret;
	int i;

	dp->sc = sc;

	TAILQ_INIT(&dp->reo_cmd_list);
	TAILQ_INIT(&dp->reo_cmd_cache_flush_list);
#if 0
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);
#endif

	dp->reo_cmd_cache_flush_count = 0;

	ret = qwx_wbm_idle_ring_setup(sc, &n_link_desc);
	if (ret) {
		printf("%s: failed to setup wbm_idle_ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	srng = &sc->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = qwx_dp_link_desc_setup(sc, dp->link_desc_banks,
	    HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		printf("%s: failed to setup link desc: %d\n",
		   sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_dp_srng_common_setup(sc);
	if (ret)
		goto fail_link_desc_cleanup;

	/* Completion status storage, one slot per completion ring entry. */
	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
#if 0
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
#endif
		ret = qwx_dp_tx_ring_alloc_tx_data(sc, &dp->tx_ring[i]);
		if (ret)
			goto fail_cmn_srng_cleanup;

		dp->tx_ring[i].cur = 0;
		dp->tx_ring[i].queued = 0;
		dp->tx_ring[i].tcl_data_ring_id = i;
		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = malloc(size, M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (!dp->tx_ring[i].tx_status) {
			ret = ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		qwx_hal_tx_set_dscp_tid_map(sc, i);

	/* Init any SOC level resource for DP */

	return 0;
fail_cmn_srng_cleanup:
	qwx_dp_srng_common_cleanup(sc);
fail_link_desc_cleanup:
	qwx_dp_link_desc_cleanup(sc, dp->link_desc_banks, HAL_WBM_IDLE_LINK,
	    &dp->wbm_idle_ring);

	return ret;
}
10547 
/*
 * Drain the list of pending REO commands and the queue of cache-flush
 * elements, releasing the DMA memory of any rx_tid still attached to
 * each entry before freeing it. Used during data-path teardown.
 */
void
qwx_dp_reo_cmd_list_cleanup(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
	struct dp_rx_tid *rx_tid;
#ifdef notyet
	spin_lock_bh(&dp->reo_cmd_lock);
#endif
	TAILQ_FOREACH_SAFE(cmd, &dp->reo_cmd_list, entry, tmp) {
		TAILQ_REMOVE(&dp->reo_cmd_list, cmd, entry);
		rx_tid = &cmd->data;
		if (rx_tid->mem) {
			qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
			rx_tid->mem = NULL;
			rx_tid->vaddr = NULL;
			rx_tid->paddr = 0ULL;
			rx_tid->size = 0;
		}
		free(cmd, M_DEVBUF, sizeof(*cmd));
	}

	TAILQ_FOREACH_SAFE(cmd_cache, &dp->reo_cmd_cache_flush_list,
	    entry, tmp_cache) {
		TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, cmd_cache, entry);
		/* Keep the element counter in sync with the list. */
		dp->reo_cmd_cache_flush_count--;
		rx_tid = &cmd_cache->data;
		if (rx_tid->mem) {
			qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
			rx_tid->mem = NULL;
			rx_tid->vaddr = NULL;
			rx_tid->paddr = 0ULL;
			rx_tid->size = 0;
		}
		free(cmd_cache, M_DEVBUF, sizeof(*cmd_cache));
	}
#ifdef notyet
	spin_unlock_bh(&dp->reo_cmd_lock);
#endif
}
10589 
/*
 * Release all data-path resources allocated by qwx_dp_alloc(): link
 * descriptors, the common SRNGs, pending REO commands, and the
 * per-TX-ring buffer state and completion status storage.
 */
void
qwx_dp_free(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	int i;

	qwx_dp_link_desc_cleanup(sc, dp->link_desc_banks,
	    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	qwx_dp_srng_common_cleanup(sc);
	qwx_dp_reo_cmd_list_cleanup(sc);
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
#if 0
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
#endif
		qwx_dp_tx_ring_free_tx_data(sc, &dp->tx_ring[i]);
		free(dp->tx_ring[i].tx_status, M_DEVBUF,
		    sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE);
		dp->tx_ring[i].tx_status = NULL;
	}

	/* Deinit any SOC level resource */
}
10617 
/*
 * Placeholder: cold-boot calibration is not implemented in this port.
 */
void
qwx_qmi_process_coldboot_calibration(struct qwx_softc *sc)
{
	printf("%s not implemented\n", __func__);
}
10623 
10624 int
10625 qwx_qmi_wlanfw_wlan_ini_send(struct qwx_softc *sc, int enable)
10626 {
10627 	int ret;
10628 	struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
10629 
10630 	req.enablefwlog_valid = 1;
10631 	req.enablefwlog = enable ? 1 : 0;
10632 
10633 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_WLAN_INI_REQ_V01,
10634 	    QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
10635 	    qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req, sizeof(req));
10636 	if (ret) {
10637 		printf("%s: failed to send wlan ini request, err = %d\n",
10638 		    sc->sc_dev.dv_xname, ret);
10639 		return ret;
10640 	}
10641 
10642 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
10643 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
10644 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxini",
10645 		    SEC_TO_NSEC(1));
10646 		if (ret) {
10647 			printf("%s: wlan ini request timeout\n",
10648 			    sc->sc_dev.dv_xname);
10649 			return ret;
10650 		}
10651 	}
10652 
10653 	return 0;
10654 }
10655 
/*
 * Send the QMI WLAN configuration request which tells firmware the
 * copy engine pipe layout, the service-to-pipe map, and (when the
 * hardware supports it) the shadow register v2 table, then wait up to
 * one second for firmware to acknowledge. Returns 0 on success or an
 * errno (e.g. ENOMEM, or a tsleep timeout).
 */
int
qwx_qmi_wlanfw_wlan_cfg_send(struct qwx_softc *sc)
{
	struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
	const struct ce_pipe_config *ce_cfg;
	const struct service_to_pipe *svc_cfg;
	int ret = 0, pipe_num;

	ce_cfg	= sc->hw_params.target_ce_config;
	svc_cfg	= sc->hw_params.svc_to_ce_map;

	/* The request is too large for the stack; allocate it. */
	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!req)
		return ENOMEM;

	req->host_version_valid = 1;
	strlcpy(req->host_version, ATH11K_HOST_VERSION_STRING,
	    sizeof(req->host_version));

	req->tgt_cfg_valid = 1;
	/* This is number of CE configs */
	req->tgt_cfg_len = sc->hw_params.target_ce_count;
	for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
		req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
		req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
		req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
		req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
		req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
	}

	req->svc_cfg_valid = 1;
	/* This is number of Service/CE configs */
	req->svc_cfg_len = sc->hw_params.svc_to_ce_map_len;
	for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
		req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
		req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
		req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
	}
	req->shadow_reg_valid = 0;

	/* set shadow v2 configuration */
	if (sc->hw_params.supports_shadow_regs) {
		req->shadow_reg_v2_valid = 1;
		req->shadow_reg_v2_len = MIN(sc->qmi_ce_cfg.shadow_reg_v2_len,
		    QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
		memcpy(&req->shadow_reg_v2, sc->qmi_ce_cfg.shadow_reg_v2,
		       sizeof(uint32_t) * req->shadow_reg_v2_len);
	} else {
		req->shadow_reg_v2_valid = 0;
	}

	DNPRINTF(QWX_D_QMI, "%s: wlan cfg req\n", __func__);

	ret = qwx_qmi_send_request(sc, QMI_WLANFW_WLAN_CFG_REQ_V01,
	    QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
	    qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req, sizeof(*req));
	if (ret) {
		printf("%s: failed to send wlan config request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/* Block until the QMI response handler flips the result. */
	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxwlancfg",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: wlan config request failed\n",
			    sc->sc_dev.dv_xname);
			goto out;
		}
	}
out:
	free(req, M_DEVBUF, sizeof(*req));
	return ret;
}
10732 
10733 int
10734 qwx_qmi_wlanfw_mode_send(struct qwx_softc *sc, enum ath11k_firmware_mode mode)
10735 {
10736 	int ret;
10737 	struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {};
10738 
10739 	req.mode = mode;
10740 	req.hw_debug_valid = 1;
10741 	req.hw_debug = 0;
10742 
10743 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_WLAN_MODE_REQ_V01,
10744 	    QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
10745 	    qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req, sizeof(req));
10746 	if (ret) {
10747 		printf("%s: failed to send wlan mode request, err = %d\n",
10748 		    sc->sc_dev.dv_xname, ret);
10749 		return ret;
10750 	}
10751 
10752 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
10753 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
10754 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwmode",
10755 		    SEC_TO_NSEC(1));
10756 		if (ret) {
10757 			if (mode == ATH11K_FIRMWARE_MODE_OFF)
10758 				return 0;
10759 			printf("%s: wlan mode request timeout\n",
10760 			    sc->sc_dev.dv_xname);
10761 			return ret;
10762 		}
10763 	}
10764 
10765 	return 0;
10766 }
10767 
10768 int
10769 qwx_qmi_firmware_start(struct qwx_softc *sc, enum ath11k_firmware_mode mode)
10770 {
10771 	int ret;
10772 
10773 	DPRINTF("%s: firmware start\n", sc->sc_dev.dv_xname);
10774 
10775 	if (sc->hw_params.fw_wmi_diag_event) {
10776 		ret = qwx_qmi_wlanfw_wlan_ini_send(sc, 1);
10777 		if (ret < 0) {
10778 			printf("%s: qmi failed to send wlan fw ini: %d\n",
10779 			    sc->sc_dev.dv_xname, ret);
10780 			return ret;
10781 		}
10782 	}
10783 
10784 	ret = qwx_qmi_wlanfw_wlan_cfg_send(sc);
10785 	if (ret) {
10786 		printf("%s: qmi failed to send wlan cfg: %d\n",
10787 		    sc->sc_dev.dv_xname, ret);
10788 		return ret;
10789 	}
10790 
10791 	ret = qwx_qmi_wlanfw_mode_send(sc, mode);
10792 	if (ret) {
10793 		printf("%s: qmi failed to send wlan fw mode: %d\n",
10794 		    sc->sc_dev.dv_xname, ret);
10795 		return ret;
10796 	}
10797 
10798 	return 0;
10799 }
10800 
10801 void
10802 qwx_qmi_firmware_stop(struct qwx_softc *sc)
10803 {
10804 	int ret;
10805 
10806 	ret = qwx_qmi_wlanfw_mode_send(sc, ATH11K_FIRMWARE_MODE_OFF);
10807 	if (ret) {
10808 		printf("%s: qmi failed to send wlan mode off: %d\n",
10809 		    sc->sc_dev.dv_xname, ret);
10810 	}
10811 }
10812 
10813 int
10814 qwx_core_start_firmware(struct qwx_softc *sc, enum ath11k_firmware_mode mode)
10815 {
10816 	int ret;
10817 
10818 	qwx_ce_get_shadow_config(sc, &sc->qmi_ce_cfg.shadow_reg_v2,
10819 	    &sc->qmi_ce_cfg.shadow_reg_v2_len);
10820 
10821 	ret = qwx_qmi_firmware_start(sc, mode);
10822 	if (ret) {
10823 		printf("%s: failed to send firmware start: %d\n",
10824 		    sc->sc_dev.dv_xname, ret);
10825 		return ret;
10826 	}
10827 
10828 	return ret;
10829 }
10830 
10831 int
10832 qwx_wmi_pdev_attach(struct qwx_softc *sc, uint8_t pdev_id)
10833 {
10834 	struct qwx_pdev_wmi *wmi_handle;
10835 
10836 	if (pdev_id >= sc->hw_params.max_radios)
10837 		return EINVAL;
10838 
10839 	wmi_handle = &sc->wmi.wmi[pdev_id];
10840 	wmi_handle->wmi = &sc->wmi;
10841 
10842 	wmi_handle->tx_ce_desc = 1;
10843 
10844 	return 0;
10845 }
10846 
/*
 * Tear down WMI state; currently only the cached DB ring capabilities
 * need to be released.
 */
void
qwx_wmi_detach(struct qwx_softc *sc)
{
	qwx_wmi_free_dbring_caps(sc);
}
10852 
10853 int
10854 qwx_wmi_attach(struct qwx_softc *sc)
10855 {
10856 	int ret;
10857 
10858 	ret = qwx_wmi_pdev_attach(sc, 0);
10859 	if (ret)
10860 		return ret;
10861 
10862 	sc->wmi.sc = sc;
10863 	sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
10864 	sc->wmi.tx_credits = 1;
10865 
10866 	/* It's overwritten when service_ext_ready is handled */
10867 	if (sc->hw_params.single_pdev_only &&
10868 	    sc->hw_params.num_rxmda_per_pdev > 1)
10869 		sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
10870 
10871 	return 0;
10872 }
10873 
10874 void
10875 qwx_wmi_htc_tx_complete(struct qwx_softc *sc, struct mbuf *m)
10876 {
10877 	struct qwx_pdev_wmi *wmi = NULL;
10878 	uint32_t i;
10879 	uint8_t wmi_ep_count;
10880 	uint8_t eid;
10881 
10882 	eid = (uintptr_t)m->m_pkthdr.ph_cookie;
10883 	m_freem(m);
10884 
10885 	if (eid >= ATH11K_HTC_EP_COUNT)
10886 		return;
10887 
10888 	wmi_ep_count = sc->htc.wmi_ep_count;
10889 	if (wmi_ep_count > sc->hw_params.max_radios)
10890 		return;
10891 
10892 	for (i = 0; i < sc->htc.wmi_ep_count; i++) {
10893 		if (sc->wmi.wmi[i].eid == eid) {
10894 			wmi = &sc->wmi.wmi[i];
10895 			break;
10896 		}
10897 	}
10898 
10899 	if (wmi)
10900 		wakeup(&wmi->tx_ce_desc);
10901 }
10902 
/*
 * TLV iterator callback collecting the WMI service bitmaps advertised
 * by firmware into sc->wmi.svc_map. The SERVICE_AVAILABLE event maps
 * service ids [WMI_MAX_SERVICE, WMI_MAX_EXT_SERVICE); the uint32-array
 * extension maps [WMI_MAX_EXT_SERVICE, WMI_MAX_EXT2_SERVICE). Other
 * tags are ignored. Always returns 0 so iteration continues.
 */
int
qwx_wmi_tlv_services_parser(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	const struct wmi_service_available_event *ev;
	uint32_t *wmi_ext2_service_bitmap;
	int i, j;

	switch (tag) {
	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
		ev = (struct wmi_service_available_event *)ptr;
		/* Walk each 32-bit segment bit by bit; i indexes the
		 * segment, j is the global service id. */
		for (i = 0, j = WMI_MAX_SERVICE;
		    i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
		    j < WMI_MAX_EXT_SERVICE;
		    i++) {
			do {
				if (ev->wmi_service_segment_bitmap[i] &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					setbit(sc->wmi.svc_map, j);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		DNPRINTF(QWX_D_WMI,
		    "%s: wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, "
		    "2:0x%04x, 3:0x%04x\n", __func__,
		    ev->wmi_service_segment_bitmap[0],
		    ev->wmi_service_segment_bitmap[1],
		    ev->wmi_service_segment_bitmap[2],
		    ev->wmi_service_segment_bitmap[3]);
		break;
	case WMI_TAG_ARRAY_UINT32:
		wmi_ext2_service_bitmap = (uint32_t *)ptr;
		/* Same walk as above for the ext2 service id range. */
		for (i = 0, j = WMI_MAX_EXT_SERVICE;
		    i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
		    j < WMI_MAX_EXT2_SERVICE;
		    i++) {
			do {
				if (wmi_ext2_service_bitmap[i] &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					setbit(sc->wmi.svc_map, j);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		DNPRINTF(QWX_D_WMI,
		    "%s: wmi_ext2_service__bitmap  0:0x%04x, 1:0x%04x, "
		    "2:0x%04x, 3:0x%04x\n", __func__,
		    wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
		    wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
		break;
	}

	return 0;
}
10956 
/*
 * Minimum payload length per WMI event TLV tag. qwx_wmi_tlv_iter()
 * rejects a TLV whose length is below its entry here; a zero (or
 * absent) entry disables the length check for that tag.
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE]
		= { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32]
		= { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT]
		= { .min_len =  sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
		= { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT]
		= { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT]
		= { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT]
		= { .min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct wmi_ready_event_min) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT]
		= {.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT]
		= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_STATS_EVENT]
		= { .min_len = sizeof(struct wmi_stats_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
		.min_len = sizeof(struct wmi_obss_color_collision_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_ev) },
	[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
		.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
	[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
11023 
/*
 * Walk a buffer of WMI TLVs, validating each header and enforcing the
 * per-tag minimum payload length from wmi_tlv_policies, and hand each
 * TLV's payload to the iterator callback. Returns 0 when the whole
 * buffer was consumed, EINVAL on a malformed buffer, or the first
 * non-zero value returned by the callback.
 */
int
qwx_wmi_tlv_iter(struct qwx_softc *sc, const void *ptr, size_t len,
    int (*iter)(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data), void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	uint16_t tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* A TLV header must fit in the remaining bytes. */
		if (len < sizeof(*tlv)) {
			printf("%s: wmi tlv parse failure at byte %zd "
			    "(%zu bytes left, %zu expected)\n", __func__,
			    ptr - begin, len, sizeof(*tlv));
			return EINVAL;
		}

		tlv = ptr;
		tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* The advertised payload must fit in the remaining bytes. */
		if (tlv_len > len) {
			printf("%s: wmi tlv parse failure of tag %u "
			    "at byte %zd (%zu bytes left, %u expected)\n",
			    __func__, tlv_tag, ptr - begin, len, tlv_len);
			return EINVAL;
		}

		/* Enforce the per-tag minimum length, if one is defined. */
		if (tlv_tag < nitems(wmi_tlv_policies) &&
		    wmi_tlv_policies[tlv_tag].min_len &&
		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			printf("%s: wmi tlv parse failure of tag %u "
			    "at byte %zd (%u bytes is less than "
			    "min length %zu)\n", __func__,
			    tlv_tag, ptr - begin, tlv_len,
			    wmi_tlv_policies[tlv_tag].min_len);
			return EINVAL;
		}

		ret = iter(sc, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
11076 
11077 int
11078 qwx_pull_service_ready_tlv(struct qwx_softc *sc, const void *evt_buf,
11079     struct ath11k_targ_cap *cap)
11080 {
11081 	const struct wmi_service_ready_event *ev = evt_buf;
11082 
11083 	if (!ev)
11084 		return EINVAL;
11085 
11086 	cap->phy_capability = ev->phy_capability;
11087 	cap->max_frag_entry = ev->max_frag_entry;
11088 	cap->num_rf_chains = ev->num_rf_chains;
11089 	cap->ht_cap_info = ev->ht_cap_info;
11090 	cap->vht_cap_info = ev->vht_cap_info;
11091 	cap->vht_supp_mcs = ev->vht_supp_mcs;
11092 	cap->hw_min_tx_power = ev->hw_min_tx_power;
11093 	cap->hw_max_tx_power = ev->hw_max_tx_power;
11094 	cap->sys_cap_info = ev->sys_cap_info;
11095 	cap->min_pkt_size_enable = ev->min_pkt_size_enable;
11096 	cap->max_bcn_ie_size = ev->max_bcn_ie_size;
11097 	cap->max_num_scan_channels = ev->max_num_scan_channels;
11098 	cap->max_supported_macs = ev->max_supported_macs;
11099 	cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
11100 	cap->txrx_chainmask = ev->txrx_chainmask;
11101 	cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
11102 	cap->num_msdu_desc = ev->num_msdu_desc;
11103 
11104 	return 0;
11105 }
11106 
11107 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
11108  * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
11109  * 4-byte word.
11110  */
11111 void
11112 qwx_wmi_service_bitmap_copy(struct qwx_pdev_wmi *wmi,
11113     const uint32_t *wmi_svc_bm)
11114 {
11115 	int i, j = 0;
11116 
11117 	for (i = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
11118 		do {
11119 			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
11120 				setbit(wmi->wmi->svc_map, j);
11121 		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
11122 	}
11123 }
11124 
11125 int
11126 qwx_wmi_tlv_svc_rdy_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
11127     const void *ptr, void *data)
11128 {
11129 	struct wmi_tlv_svc_ready_parse *svc_ready = data;
11130 	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
11131 	uint16_t expect_len;
11132 
11133 	switch (tag) {
11134 	case WMI_TAG_SERVICE_READY_EVENT:
11135 		if (qwx_pull_service_ready_tlv(sc, ptr, &sc->target_caps))
11136 			return EINVAL;
11137 		break;
11138 
11139 	case WMI_TAG_ARRAY_UINT32:
11140 		if (!svc_ready->wmi_svc_bitmap_done) {
11141 			expect_len = WMI_SERVICE_BM_SIZE * sizeof(uint32_t);
11142 			if (len < expect_len) {
11143 				printf("%s: invalid len %d for the tag 0x%x\n",
11144 				    __func__, len, tag);
11145 				return EINVAL;
11146 			}
11147 
11148 			qwx_wmi_service_bitmap_copy(wmi_handle, ptr);
11149 
11150 			svc_ready->wmi_svc_bitmap_done = 1;
11151 		}
11152 		break;
11153 	default:
11154 		break;
11155 	}
11156 
11157 	return 0;
11158 }
11159 
11160 void
11161 qwx_service_ready_event(struct qwx_softc *sc, struct mbuf *m)
11162 {
11163 	struct wmi_tlv_svc_ready_parse svc_ready = { };
11164 	int ret;
11165 
11166 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11167 	    qwx_wmi_tlv_svc_rdy_parse, &svc_ready);
11168 	if (ret) {
11169 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11170 		return;
11171 	}
11172 
11173 	DNPRINTF(QWX_D_WMI, "%s: event service ready\n", __func__);
11174 }
11175 
11176 int
11177 qwx_pull_svc_ready_ext(struct qwx_pdev_wmi *wmi_handle, const void *ptr,
11178     struct ath11k_service_ext_param *param)
11179 {
11180 	const struct wmi_service_ready_ext_event *ev = ptr;
11181 
11182 	if (!ev)
11183 		return EINVAL;
11184 
11185 	/* Move this to host based bitmap */
11186 	param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
11187 	param->default_fw_config_bits =	ev->default_fw_config_bits;
11188 	param->he_cap_info = ev->he_cap_info;
11189 	param->mpdu_density = ev->mpdu_density;
11190 	param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
11191 	memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
11192 
11193 	return 0;
11194 }
11195 
11196 int
11197 qwx_pull_mac_phy_cap_svc_ready_ext(struct qwx_pdev_wmi *wmi_handle,
11198     struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
11199     struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
11200     struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
11201     struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
11202     uint8_t hw_mode_id, uint8_t phy_id, struct qwx_pdev *pdev)
11203 {
11204 	struct wmi_mac_phy_capabilities *mac_phy_caps;
11205 	struct qwx_softc *sc = wmi_handle->wmi->sc;
11206 	struct ath11k_band_cap *cap_band;
11207 	struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
11208 	uint32_t phy_map;
11209 	uint32_t hw_idx, phy_idx = 0;
11210 
11211 	if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
11212 		return EINVAL;
11213 
11214 	for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
11215 		if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
11216 			break;
11217 
11218 		phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
11219 		while (phy_map) {
11220 			phy_map >>= 1;
11221 			phy_idx++;
11222 		}
11223 	}
11224 
11225 	if (hw_idx == hw_caps->num_hw_modes)
11226 		return EINVAL;
11227 
11228 	phy_idx += phy_id;
11229 	if (phy_id >= hal_reg_caps->num_phy)
11230 		return EINVAL;
11231 
11232 	mac_phy_caps = wmi_mac_phy_caps + phy_idx;
11233 
11234 	pdev->pdev_id = mac_phy_caps->pdev_id;
11235 	pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
11236 	pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
11237 	sc->target_pdev_ids[sc->target_pdev_count].supported_bands =
11238 	    mac_phy_caps->supported_bands;
11239 	sc->target_pdev_ids[sc->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
11240 	sc->target_pdev_count++;
11241 
11242 	if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
11243 	    !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
11244 		return EINVAL;
11245 
11246 	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
11247 	 * band to band for a single radio, need to see how this should be
11248 	 * handled.
11249 	 */
11250 	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
11251 		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
11252 		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
11253 	}
11254 
11255 	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
11256 		pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
11257 		pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
11258 		pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
11259 		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
11260 		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
11261 		pdev_cap->nss_ratio_enabled =
11262 		    WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
11263 		pdev_cap->nss_ratio_info =
11264 		    WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
11265 	}
11266 
11267 	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
11268 	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
11269 	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
11270 	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
11271 	 * will be advertised for second mac or vice-versa. Compute the shift value
11272 	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
11273 	 * mac80211.
11274 	 */
11275 	pdev_cap->tx_chain_mask_shift = ffs(pdev_cap->tx_chain_mask);
11276 	pdev_cap->rx_chain_mask_shift = ffs(pdev_cap->rx_chain_mask);
11277 
11278 	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
11279 		cap_band = &pdev_cap->band[0];
11280 		cap_band->phy_id = mac_phy_caps->phy_id;
11281 		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
11282 		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
11283 		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
11284 		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
11285 		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
11286 		memcpy(cap_band->he_cap_phy_info,
11287 		    &mac_phy_caps->he_cap_phy_info_2g,
11288 		    sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
11289 		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
11290 		    sizeof(struct ath11k_ppe_threshold));
11291 	}
11292 
11293 	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
11294 		cap_band = &pdev_cap->band[1];
11295 		cap_band->phy_id = mac_phy_caps->phy_id;
11296 		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
11297 		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
11298 		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
11299 		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
11300 		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
11301 		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
11302 		    sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
11303 		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
11304 		    sizeof(struct ath11k_ppe_threshold));
11305 #if 0
11306 		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
11307 		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
11308 		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
11309 		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
11310 		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
11311 		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
11312 		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
11313 		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
11314 		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
11315 		       sizeof(struct ath11k_ppe_threshold));
11316 #endif
11317 	}
11318 
11319 	return 0;
11320 }
11321 
11322 int
11323 qwx_wmi_tlv_ext_soc_hal_reg_caps_parse(struct qwx_softc *sc, uint16_t len,
11324     const void *ptr, void *data)
11325 {
11326 	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
11327 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
11328 	uint8_t hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
11329 	uint32_t phy_id_map;
11330 	int pdev_index = 0;
11331 	int ret;
11332 
11333 	svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
11334 	svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
11335 
11336 	sc->num_radios = 0;
11337 	sc->target_pdev_count = 0;
11338 	phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
11339 
11340 	while (phy_id_map && sc->num_radios < MAX_RADIOS) {
11341 		ret = qwx_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
11342 		    svc_rdy_ext->hw_caps,
11343 		    svc_rdy_ext->hw_mode_caps,
11344 		    svc_rdy_ext->soc_hal_reg_caps,
11345 		    svc_rdy_ext->mac_phy_caps,
11346 		    hw_mode_id, sc->num_radios, &sc->pdevs[pdev_index]);
11347 		if (ret) {
11348 			printf("%s: failed to extract mac caps, idx: %d\n",
11349 			    __func__, sc->num_radios);
11350 			return ret;
11351 		}
11352 
11353 		sc->num_radios++;
11354 
11355 		/* For QCA6390, save mac_phy capability in the same pdev */
11356 		if (sc->hw_params.single_pdev_only)
11357 			pdev_index = 0;
11358 		else
11359 			pdev_index = sc->num_radios;
11360 
11361 		/* TODO: mac_phy_cap prints */
11362 		phy_id_map >>= 1;
11363 	}
11364 
11365 	/* For QCA6390, set num_radios to 1 because host manages
11366 	 * both 2G and 5G radio in one pdev.
11367 	 * Set pdev_id = 0 and 0 means soc level.
11368 	 */
11369 	if (sc->hw_params.single_pdev_only) {
11370 		sc->num_radios = 1;
11371 		sc->pdevs[0].pdev_id = 0;
11372 	}
11373 
11374 	return 0;
11375 }
11376 
11377 int
11378 qwx_wmi_tlv_hw_mode_caps_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
11379     const void *ptr, void *data)
11380 {
11381 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
11382 	struct wmi_hw_mode_capabilities *hw_mode_cap;
11383 	uint32_t phy_map = 0;
11384 
11385 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
11386 		return EPROTO;
11387 
11388 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
11389 		return ENOBUFS;
11390 
11391 	hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
11392 	    hw_mode_id);
11393 	svc_rdy_ext->n_hw_mode_caps++;
11394 
11395 	phy_map = hw_mode_cap->phy_id_map;
11396 	while (phy_map) {
11397 		svc_rdy_ext->tot_phy_id++;
11398 		phy_map = phy_map >> 1;
11399 	}
11400 
11401 	return 0;
11402 }
11403 
/* Map a WMI hardware mode constant to its priority constant. */
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

/*
 * Relative priority of each hardware mode; a mode with a smaller
 * priority value is preferred (see qwx_wmi_tlv_hw_mode_caps()).
 */
static const int qwx_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
11417 
11418 int
11419 qwx_wmi_tlv_hw_mode_caps(struct qwx_softc *sc, uint16_t len,
11420     const void *ptr, void *data)
11421 {
11422 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
11423 	struct wmi_hw_mode_capabilities *hw_mode_caps;
11424 	enum wmi_host_hw_mode_config_type mode, pref;
11425 	uint32_t i;
11426 	int ret;
11427 
11428 	svc_rdy_ext->n_hw_mode_caps = 0;
11429 	svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
11430 
11431 	ret = qwx_wmi_tlv_iter(sc, ptr, len,
11432 	    qwx_wmi_tlv_hw_mode_caps_parse, svc_rdy_ext);
11433 	if (ret) {
11434 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11435 		return ret;
11436 	}
11437 
11438 	i = 0;
11439 	while (i < svc_rdy_ext->n_hw_mode_caps) {
11440 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
11441 		mode = hw_mode_caps->hw_mode_id;
11442 		pref = sc->wmi.preferred_hw_mode;
11443 
11444 		if (qwx_hw_mode_pri_map[mode] < qwx_hw_mode_pri_map[pref]) {
11445 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
11446 			sc->wmi.preferred_hw_mode = mode;
11447 		}
11448 		i++;
11449 	}
11450 
11451 	DNPRINTF(QWX_D_WMI, "%s: preferred_hw_mode: %d\n", __func__,
11452 	    sc->wmi.preferred_hw_mode);
11453 	if (sc->wmi.preferred_hw_mode >= WMI_HOST_HW_MODE_MAX)
11454 		return EINVAL;
11455 
11456 	return 0;
11457 }
11458 
11459 int
11460 qwx_wmi_tlv_mac_phy_caps_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
11461     const void *ptr, void *data)
11462 {
11463 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
11464 
11465 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
11466 		return EPROTO;
11467 
11468 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
11469 		return ENOBUFS;
11470 
11471 	len = MIN(len, sizeof(struct wmi_mac_phy_capabilities));
11472 	if (!svc_rdy_ext->n_mac_phy_caps) {
11473 		svc_rdy_ext->mac_phy_caps = mallocarray(
11474 		    svc_rdy_ext->tot_phy_id,
11475 		    sizeof(struct wmi_mac_phy_capabilities),
11476 		    M_DEVBUF, M_NOWAIT | M_ZERO);
11477 		if (!svc_rdy_ext->mac_phy_caps)
11478 			return ENOMEM;
11479 		svc_rdy_ext->mac_phy_caps_size = len * svc_rdy_ext->tot_phy_id;
11480 	}
11481 
11482 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps,
11483 	    ptr, len);
11484 	svc_rdy_ext->n_mac_phy_caps++;
11485 	return 0;
11486 }
11487 
/*
 * TLV iterator callback which counts extended HAL register capability
 * elements, bounded by the number of PHYs reported by firmware; any
 * other tag is a protocol error.
 */
int
qwx_wmi_tlv_ext_hal_reg_caps_parse(struct qwx_softc *sc,
    uint16_t tag, uint16_t len, const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
		return EPROTO;

	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
		return ENOBUFS;

	svc_rdy_ext->n_ext_hal_reg_caps++;
	return 0;
}
11503 
11504 int
11505 qwx_pull_reg_cap_svc_rdy_ext(struct qwx_pdev_wmi *wmi_handle,
11506     struct wmi_soc_hal_reg_capabilities *reg_caps,
11507     struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
11508     uint8_t phy_idx, struct ath11k_hal_reg_capabilities_ext *param)
11509 {
11510 	struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
11511 
11512 	if (!reg_caps || !wmi_ext_reg_cap)
11513 		return EINVAL;
11514 
11515 	if (phy_idx >= reg_caps->num_phy)
11516 		return EINVAL;
11517 
11518 	ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
11519 
11520 	param->phy_id = ext_reg_cap->phy_id;
11521 	param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
11522 	param->eeprom_reg_domain_ext = ext_reg_cap->eeprom_reg_domain_ext;
11523 	param->regcap1 = ext_reg_cap->regcap1;
11524 	param->regcap2 = ext_reg_cap->regcap2;
11525 	/* check if param->wireless_mode is needed */
11526 	param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
11527 	param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
11528 	param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
11529 	param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
11530 
11531 	return 0;
11532 }
11533 
11534 int
11535 qwx_wmi_tlv_ext_hal_reg_caps(struct qwx_softc *sc, uint16_t len,
11536     const void *ptr, void *data)
11537 {
11538 	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
11539 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
11540 	struct ath11k_hal_reg_capabilities_ext reg_cap;
11541 	int ret;
11542 	uint32_t i;
11543 
11544 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
11545 	svc_rdy_ext->ext_hal_reg_caps =
11546 	    (struct wmi_hal_reg_capabilities_ext *)ptr;
11547 	ret = qwx_wmi_tlv_iter(sc, ptr, len,
11548 	    qwx_wmi_tlv_ext_hal_reg_caps_parse, svc_rdy_ext);
11549 	if (ret) {
11550 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11551 		return ret;
11552 	}
11553 
11554 	for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
11555 		ret = qwx_pull_reg_cap_svc_rdy_ext(wmi_handle,
11556 		    svc_rdy_ext->soc_hal_reg_caps,
11557 		    svc_rdy_ext->ext_hal_reg_caps, i, &reg_cap);
11558 		if (ret) {
11559 			printf("%s: failed to extract reg cap %d\n",
11560 			    __func__, i);
11561 			return ret;
11562 		}
11563 
11564 		memcpy(&sc->hal_reg_cap[reg_cap.phy_id], &reg_cap,
11565 		    sizeof(sc->hal_reg_cap[0]));
11566 	}
11567 
11568 	return 0;
11569 }
11570 
/*
 * TLV iterator callback which counts WMI_TAG_DMA_RING_CAPABILITIES
 * elements; any other tag is a protocol error.
 */
int
qwx_wmi_tlv_dma_ring_caps_parse(struct qwx_softc *sc, uint16_t tag,
    uint16_t len, const void *ptr, void *data)
{
	struct wmi_tlv_dma_ring_caps_parse *parse = data;

	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
		return EPROTO;

	parse->n_dma_ring_caps++;
	return 0;
}
11583 
11584 int
11585 qwx_wmi_alloc_dbring_caps(struct qwx_softc *sc, uint32_t num_cap)
11586 {
11587 	void *ptr;
11588 
11589 	ptr = mallocarray(num_cap, sizeof(struct qwx_dbring_cap),
11590 	    M_DEVBUF, M_NOWAIT | M_ZERO);
11591 	if (!ptr)
11592 		return ENOMEM;
11593 
11594 	sc->db_caps = ptr;
11595 	sc->num_db_cap = num_cap;
11596 
11597 	return 0;
11598 }
11599 
/*
 * Release the direct-buffer ring capabilities allocated by
 * qwx_wmi_alloc_dbring_caps() and reset the bookkeeping.
 */
void
qwx_wmi_free_dbring_caps(struct qwx_softc *sc)
{
	/* free(9) requires the size originally given to mallocarray(). */
	free(sc->db_caps, M_DEVBUF,
	    sc->num_db_cap * sizeof(struct qwx_dbring_cap));
	sc->db_caps = NULL;
	sc->num_db_cap = 0;
}
11608 
11609 int
11610 qwx_wmi_tlv_dma_ring_caps(struct qwx_softc *sc, uint16_t len,
11611     const void *ptr, void *data)
11612 {
11613 	struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
11614 	struct wmi_dma_ring_capabilities *dma_caps;
11615 	struct qwx_dbring_cap *dir_buff_caps;
11616 	int ret;
11617 	uint32_t i;
11618 
11619 	dma_caps_parse->n_dma_ring_caps = 0;
11620 	dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
11621 	ret = qwx_wmi_tlv_iter(sc, ptr, len,
11622 	    qwx_wmi_tlv_dma_ring_caps_parse, dma_caps_parse);
11623 	if (ret) {
11624 		printf("%s: failed to parse dma ring caps tlv %d\n",
11625 		    __func__, ret);
11626 		return ret;
11627 	}
11628 
11629 	if (!dma_caps_parse->n_dma_ring_caps)
11630 		return 0;
11631 
11632 	if (sc->num_db_cap) {
11633 		DNPRINTF(QWX_D_WMI,
11634 		    "%s: Already processed, so ignoring dma ring caps\n",
11635 		    __func__);
11636 		return 0;
11637 	}
11638 
11639 	ret = qwx_wmi_alloc_dbring_caps(sc, dma_caps_parse->n_dma_ring_caps);
11640 	if (ret)
11641 		return ret;
11642 
11643 	dir_buff_caps = sc->db_caps;
11644 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
11645 		if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
11646 			printf("%s: Invalid module id %d\n", __func__,
11647 			    dma_caps[i].module_id);
11648 			ret = EINVAL;
11649 			goto free_dir_buff;
11650 		}
11651 
11652 		dir_buff_caps[i].id = dma_caps[i].module_id;
11653 		dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
11654 		dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
11655 		dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
11656 		dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
11657 	}
11658 
11659 	return 0;
11660 
11661 free_dir_buff:
11662 	qwx_wmi_free_dbring_caps(sc);
11663 	return ret;
11664 }
11665 
/*
 * Top-level TLV iterator callback for the WMI "service ready ext" event.
 *
 * Several capability arrays arrive under the same WMI_TAG_ARRAY_STRUCT
 * tag; firmware emits them in a fixed order, so the svc_rdy_ext->*_done
 * flags track which arrays were already consumed and thereby select the
 * handler for the next one.
 */
int
qwx_wmi_tlv_svc_rdy_ext_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		/* Fixed part of the event: extended service parameters. */
		ret = qwx_pull_svc_ready_ext(wmi_handle, ptr,
		    &svc_rdy_ext->param);
		if (ret) {
			printf("%s: unable to extract ext params\n", __func__);
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		/* Remember where the hw mode caps live for later parsing. */
		svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
		svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = qwx_wmi_tlv_ext_soc_hal_reg_caps_parse(sc, len, ptr,
		    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			/* 1st array: hardware mode capabilities. */
			ret = qwx_wmi_tlv_hw_mode_caps(sc, len, ptr,
			    svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = 1;
		} else if (!svc_rdy_ext->mac_phy_done) {
			/* 2nd array: per-PHY MAC/PHY capabilities. */
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = qwx_wmi_tlv_iter(sc, ptr, len,
			    qwx_wmi_tlv_mac_phy_caps_parse, svc_rdy_ext);
			if (ret) {
				printf("%s: failed to parse tlv %d\n",
				    __func__, ret);
				return ret;
			}

			svc_rdy_ext->mac_phy_done = 1;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			/* 3rd array: extended HAL register capabilities. */
			ret = qwx_wmi_tlv_ext_hal_reg_caps(sc, len, ptr,
			    svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = 1;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			/* 4th array: chainmask combos (unused; skipped). */
			svc_rdy_ext->mac_phy_chainmask_combo_done = 1;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			/* 5th array: chainmask capabilities (skipped). */
			svc_rdy_ext->mac_phy_chainmask_cap_done = 1;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			/* 6th array: OEM DMA ring capabilities (skipped). */
			svc_rdy_ext->oem_dma_ring_cap_done = 1;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			/* 7th array: DMA ring capabilities. */
			ret = qwx_wmi_tlv_dma_ring_caps(sc, len, ptr,
			    &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = 1;
		}
		break;

	default:
		break;
	}

	return 0;
}
11744 
11745 void
11746 qwx_service_ready_ext_event(struct qwx_softc *sc, struct mbuf *m)
11747 {
11748 	struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
11749 	int ret;
11750 
11751 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11752 	    qwx_wmi_tlv_svc_rdy_ext_parse, &svc_rdy_ext);
11753 	if (ret) {
11754 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11755 		qwx_wmi_free_dbring_caps(sc);
11756 		return;
11757 	}
11758 
11759 	DNPRINTF(QWX_D_WMI, "%s: event service ready ext\n", __func__);
11760 
11761 	if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_EXT2_MSG))
11762 		wakeup(&sc->wmi.service_ready);
11763 
11764 	free(svc_rdy_ext.mac_phy_caps, M_DEVBUF,
11765 	    svc_rdy_ext.mac_phy_caps_size);
11766 }
11767 
11768 int
11769 qwx_wmi_tlv_svc_rdy_ext2_parse(struct qwx_softc *sc,
11770     uint16_t tag, uint16_t len, const void *ptr, void *data)
11771 {
11772 	struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
11773 	int ret;
11774 
11775 	switch (tag) {
11776 	case WMI_TAG_ARRAY_STRUCT:
11777 		if (!parse->dma_ring_cap_done) {
11778 			ret = qwx_wmi_tlv_dma_ring_caps(sc, len, ptr,
11779 			    &parse->dma_caps_parse);
11780 			if (ret)
11781 				return ret;
11782 
11783 			parse->dma_ring_cap_done = 1;
11784 		}
11785 		break;
11786 	default:
11787 		break;
11788 	}
11789 
11790 	return 0;
11791 }
11792 
11793 void
11794 qwx_service_ready_ext2_event(struct qwx_softc *sc, struct mbuf *m)
11795 {
11796 	struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
11797 	int ret;
11798 
11799 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11800 	    qwx_wmi_tlv_svc_rdy_ext2_parse, &svc_rdy_ext2);
11801 	if (ret) {
11802 		printf("%s: failed to parse ext2 event tlv %d\n",
11803 		    __func__, ret);
11804 		qwx_wmi_free_dbring_caps(sc);
11805 		return;
11806 	}
11807 
11808 	DNPRINTF(QWX_D_WMI, "%s: event service ready ext2\n", __func__);
11809 
11810 	sc->wmi.service_ready = 1;
11811 	wakeup(&sc->wmi.service_ready);
11812 }
11813 
11814 void
11815 qwx_service_available_event(struct qwx_softc *sc, struct mbuf *m)
11816 {
11817 	int ret;
11818 
11819 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11820 	    qwx_wmi_tlv_services_parser, NULL);
11821 	if (ret)
11822 		printf("%s: failed to parse services available tlv %d\n",
11823 		    sc->sc_dev.dv_xname, ret);
11824 
11825 	DNPRINTF(QWX_D_WMI, "%s: event service available\n", __func__);
11826 }
11827 
11828 int
11829 qwx_pull_peer_assoc_conf_ev(struct qwx_softc *sc, struct mbuf *m,
11830     struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
11831 {
11832 	const void **tb;
11833 	const struct wmi_peer_assoc_conf_event *ev;
11834 	int ret;
11835 
11836 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11837 	if (tb == NULL) {
11838 		ret = ENOMEM;
11839 		printf("%s: failed to parse tlv: %d\n",
11840 		    sc->sc_dev.dv_xname, ret);
11841 		return ret;
11842 	}
11843 
11844 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
11845 	if (!ev) {
11846 		printf("%s: failed to fetch peer assoc conf ev\n",
11847 		    sc->sc_dev.dv_xname);
11848 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11849 		return EPROTO;
11850 	}
11851 
11852 	peer_assoc_conf->vdev_id = ev->vdev_id;
11853 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
11854 
11855 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11856 	return 0;
11857 }
11858 
11859 void
11860 qwx_peer_assoc_conf_event(struct qwx_softc *sc, struct mbuf *m)
11861 {
11862 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
11863 
11864 	if (qwx_pull_peer_assoc_conf_ev(sc, m, &peer_assoc_conf) != 0) {
11865 		printf("%s: failed to extract peer assoc conf event\n",
11866 		   sc->sc_dev.dv_xname);
11867 		return;
11868 	}
11869 
11870 	DNPRINTF(QWX_D_WMI, "%s: event peer assoc conf ev vdev id %d "
11871 	    "macaddr %s\n", __func__, peer_assoc_conf.vdev_id,
11872 	    ether_sprintf((u_char *)peer_assoc_conf.macaddr));
11873 
11874 	sc->peer_assoc_done = 1;
11875 	wakeup(&sc->peer_assoc_done);
11876 }
11877 
11878 int
11879 qwx_wmi_tlv_rdy_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
11880     const void *ptr, void *data)
11881 {
11882 	struct wmi_tlv_rdy_parse *rdy_parse = data;
11883 	struct wmi_ready_event fixed_param;
11884 	struct wmi_mac_addr *addr_list;
11885 	struct qwx_pdev *pdev;
11886 	uint32_t num_mac_addr;
11887 	int i;
11888 
11889 	switch (tag) {
11890 	case WMI_TAG_READY_EVENT:
11891 		memset(&fixed_param, 0, sizeof(fixed_param));
11892 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
11893 		       MIN(sizeof(fixed_param), len));
11894 		sc->wlan_init_status = fixed_param.ready_event_min.status;
11895 		rdy_parse->num_extra_mac_addr =
11896 			fixed_param.ready_event_min.num_extra_mac_addr;
11897 
11898 		IEEE80211_ADDR_COPY(sc->mac_addr,
11899 		    fixed_param.ready_event_min.mac_addr.addr);
11900 		sc->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
11901 		sc->wmi_ready = 1;
11902 		break;
11903 	case WMI_TAG_ARRAY_FIXED_STRUCT:
11904 		addr_list = (struct wmi_mac_addr *)ptr;
11905 		num_mac_addr = rdy_parse->num_extra_mac_addr;
11906 
11907 		if (!(sc->num_radios > 1 && num_mac_addr >= sc->num_radios))
11908 			break;
11909 
11910 		for (i = 0; i < sc->num_radios; i++) {
11911 			pdev = &sc->pdevs[i];
11912 			IEEE80211_ADDR_COPY(pdev->mac_addr, addr_list[i].addr);
11913 		}
11914 		sc->pdevs_macaddr_valid = 1;
11915 		break;
11916 	default:
11917 		break;
11918 	}
11919 
11920 	return 0;
11921 }
11922 
11923 void
11924 qwx_ready_event(struct qwx_softc *sc, struct mbuf *m)
11925 {
11926 	struct wmi_tlv_rdy_parse rdy_parse = { };
11927 	int ret;
11928 
11929 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11930 	    qwx_wmi_tlv_rdy_parse, &rdy_parse);
11931 	if (ret) {
11932 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11933 		return;
11934 	}
11935 
11936 	DNPRINTF(QWX_D_WMI, "%s: event ready", __func__);
11937 
11938 	sc->wmi.unified_ready = 1;
11939 	wakeup(&sc->wmi.unified_ready);
11940 }
11941 
11942 int
11943 qwx_pull_peer_del_resp_ev(struct qwx_softc *sc, struct mbuf *m,
11944     struct wmi_peer_delete_resp_event *peer_del_resp)
11945 {
11946 	const void **tb;
11947 	const struct wmi_peer_delete_resp_event *ev;
11948 	int ret;
11949 
11950 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11951 	if (tb == NULL) {
11952 		ret = ENOMEM;
11953 		printf("%s: failed to parse tlv: %d\n",
11954 		    sc->sc_dev.dv_xname, ret);
11955 		return ret;
11956 	}
11957 
11958 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
11959 	if (!ev) {
11960 		printf("%s: failed to fetch peer delete resp ev\n",
11961 		    sc->sc_dev.dv_xname);
11962 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11963 		return EPROTO;
11964 	}
11965 
11966 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
11967 
11968 	peer_del_resp->vdev_id = ev->vdev_id;
11969 	IEEE80211_ADDR_COPY(peer_del_resp->peer_macaddr.addr,
11970 	    ev->peer_macaddr.addr);
11971 
11972 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11973 	return 0;
11974 }
11975 
11976 void
11977 qwx_peer_delete_resp_event(struct qwx_softc *sc, struct mbuf *m)
11978 {
11979 	struct wmi_peer_delete_resp_event peer_del_resp;
11980 
11981 	if (qwx_pull_peer_del_resp_ev(sc, m, &peer_del_resp) != 0) {
11982 		printf("%s: failed to extract peer delete resp",
11983 		    sc->sc_dev.dv_xname);
11984 		return;
11985 	}
11986 
11987 	sc->peer_delete_done = 1;
11988 	wakeup(&sc->peer_delete_done);
11989 
11990 	DNPRINTF(QWX_D_WMI, "%s: peer delete resp for vdev id %d addr %s\n",
11991 	    __func__, peer_del_resp.vdev_id,
11992 	    ether_sprintf(peer_del_resp.peer_macaddr.addr));
11993 }
11994 
11995 const char *
11996 qwx_wmi_vdev_resp_print(uint32_t vdev_resp_status)
11997 {
11998 	switch (vdev_resp_status) {
11999 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
12000 		return "invalid vdev id";
12001 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
12002 		return "not supported";
12003 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
12004 		return "dfs violation";
12005 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
12006 		return "invalid regdomain";
12007 	default:
12008 		return "unknown";
12009 	}
12010 }
12011 
12012 int
12013 qwx_pull_vdev_start_resp_tlv(struct qwx_softc *sc, struct mbuf *m,
12014     struct wmi_vdev_start_resp_event *vdev_rsp)
12015 {
12016 	const void **tb;
12017 	const struct wmi_vdev_start_resp_event *ev;
12018 	int ret;
12019 
12020 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12021 	if (tb == NULL) {
12022 		ret = ENOMEM;
12023 		printf("%s: failed to parse tlv: %d\n",
12024 		    sc->sc_dev.dv_xname, ret);
12025 		return ret;
12026 	}
12027 
12028 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
12029 	if (!ev) {
12030 		printf("%s: failed to fetch vdev start resp ev\n",
12031 		    sc->sc_dev.dv_xname);
12032 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12033 		return EPROTO;
12034 	}
12035 
12036 	memset(vdev_rsp, 0, sizeof(*vdev_rsp));
12037 
12038 	vdev_rsp->vdev_id = ev->vdev_id;
12039 	vdev_rsp->requestor_id = ev->requestor_id;
12040 	vdev_rsp->resp_type = ev->resp_type;
12041 	vdev_rsp->status = ev->status;
12042 	vdev_rsp->chain_mask = ev->chain_mask;
12043 	vdev_rsp->smps_mode = ev->smps_mode;
12044 	vdev_rsp->mac_id = ev->mac_id;
12045 	vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
12046 	vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
12047 
12048 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12049 	return 0;
12050 }
12051 
12052 void
12053 qwx_vdev_start_resp_event(struct qwx_softc *sc, struct mbuf *m)
12054 {
12055 	struct wmi_vdev_start_resp_event vdev_start_resp;
12056 	uint32_t status;
12057 
12058 	if (qwx_pull_vdev_start_resp_tlv(sc, m, &vdev_start_resp) != 0) {
12059 		printf("%s: failed to extract vdev start resp",
12060 		    sc->sc_dev.dv_xname);
12061 		return;
12062 	}
12063 
12064 	status = vdev_start_resp.status;
12065 	if (status) {
12066 		printf("%s: vdev start resp error status %d (%s)\n",
12067 		    sc->sc_dev.dv_xname, status,
12068 		   qwx_wmi_vdev_resp_print(status));
12069 	}
12070 
12071 	sc->vdev_setup_done = 1;
12072 	wakeup(&sc->vdev_setup_done);
12073 
12074 	DNPRINTF(QWX_D_WMI, "%s: vdev start resp for vdev id %d", __func__,
12075 	    vdev_start_resp.vdev_id);
12076 }
12077 
12078 int
12079 qwx_pull_vdev_stopped_param_tlv(struct qwx_softc *sc, struct mbuf *m,
12080     uint32_t *vdev_id)
12081 {
12082 	const void **tb;
12083 	const struct wmi_vdev_stopped_event *ev;
12084 	int ret;
12085 
12086 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12087 	if (tb == NULL) {
12088 		ret = ENOMEM;
12089 		printf("%s: failed to parse tlv: %d\n",
12090 		    sc->sc_dev.dv_xname, ret);
12091 		return ret;
12092 	}
12093 
12094 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
12095 	if (!ev) {
12096 		printf("%s: failed to fetch vdev stop ev\n",
12097 		    sc->sc_dev.dv_xname);
12098 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12099 		return EPROTO;
12100 	}
12101 
12102 	*vdev_id = ev->vdev_id;
12103 
12104 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12105 	return 0;
12106 }
12107 
12108 void
12109 qwx_vdev_stopped_event(struct qwx_softc *sc, struct mbuf *m)
12110 {
12111 	uint32_t vdev_id = 0;
12112 
12113 	if (qwx_pull_vdev_stopped_param_tlv(sc, m, &vdev_id) != 0) {
12114 		printf("%s: failed to extract vdev stopped event\n",
12115 		    sc->sc_dev.dv_xname);
12116 		return;
12117 	}
12118 
12119 	sc->vdev_setup_done = 1;
12120 	wakeup(&sc->vdev_setup_done);
12121 
12122 	DNPRINTF(QWX_D_WMI, "%s: vdev stopped for vdev id %d", __func__,
12123 	    vdev_id);
12124 }
12125 
12126 int
12127 qwx_wmi_tlv_iter_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
12128     const void *ptr, void *data)
12129 {
12130 	const void **tb = data;
12131 
12132 	if (tag < WMI_TAG_MAX)
12133 		tb[tag] = ptr;
12134 
12135 	return 0;
12136 }
12137 
12138 int
12139 qwx_wmi_tlv_parse(struct qwx_softc *sc, const void **tb,
12140     const void *ptr, size_t len)
12141 {
12142 	return qwx_wmi_tlv_iter(sc, ptr, len, qwx_wmi_tlv_iter_parse,
12143 	    (void *)tb);
12144 }
12145 
12146 const void **
12147 qwx_wmi_tlv_parse_alloc(struct qwx_softc *sc, const void *ptr, size_t len)
12148 {
12149 	const void **tb;
12150 	int ret;
12151 
12152 	tb = mallocarray(WMI_TAG_MAX, sizeof(*tb), M_DEVBUF, M_NOWAIT | M_ZERO);
12153 	if (!tb)
12154 		return NULL;
12155 
12156 	ret = qwx_wmi_tlv_parse(sc, tb, ptr, len);
12157 	if (ret) {
12158 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12159 		return NULL;
12160 	}
12161 
12162 	return tb;
12163 }
12164 
12165 static void
12166 qwx_print_reg_rule(struct qwx_softc *sc, const char *band,
12167     uint32_t num_reg_rules, struct cur_reg_rule *reg_rule_ptr)
12168 {
12169 	struct cur_reg_rule *reg_rule = reg_rule_ptr;
12170 	uint32_t count;
12171 
12172 	DNPRINTF(QWX_D_WMI, "%s: number of reg rules in %s band: %d\n",
12173 	    __func__, band, num_reg_rules);
12174 
12175 	for (count = 0; count < num_reg_rules; count++) {
12176 		DNPRINTF(QWX_D_WMI,
12177 		    "%s: reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n",
12178 		    __func__, count + 1, reg_rule->start_freq,
12179 		    reg_rule->end_freq, reg_rule->max_bw, reg_rule->ant_gain,
12180 		    reg_rule->reg_power, reg_rule->flags);
12181 		reg_rule++;
12182 	}
12183 }
12184 
12185 struct cur_reg_rule *
12186 qwx_create_reg_rules_from_wmi(uint32_t num_reg_rules,
12187     struct wmi_regulatory_rule_struct *wmi_reg_rule)
12188 {
12189 	struct cur_reg_rule *reg_rule_ptr;
12190 	uint32_t count;
12191 
12192 	reg_rule_ptr = mallocarray(num_reg_rules, sizeof(*reg_rule_ptr),
12193 	    M_DEVBUF, M_NOWAIT | M_ZERO);
12194 	if (!reg_rule_ptr)
12195 		return NULL;
12196 
12197 	for (count = 0; count < num_reg_rules; count++) {
12198 		reg_rule_ptr[count].start_freq = FIELD_GET(REG_RULE_START_FREQ,
12199 		    wmi_reg_rule[count].freq_info);
12200 		reg_rule_ptr[count].end_freq = FIELD_GET(REG_RULE_END_FREQ,
12201 		    wmi_reg_rule[count].freq_info);
12202 		reg_rule_ptr[count].max_bw = FIELD_GET(REG_RULE_MAX_BW,
12203 		    wmi_reg_rule[count].bw_pwr_info);
12204 		reg_rule_ptr[count].reg_power = FIELD_GET(REG_RULE_REG_PWR,
12205 		    wmi_reg_rule[count].bw_pwr_info);
12206 		reg_rule_ptr[count].ant_gain = FIELD_GET(REG_RULE_ANT_GAIN,
12207 		    wmi_reg_rule[count].bw_pwr_info);
12208 		reg_rule_ptr[count].flags = FIELD_GET(REG_RULE_FLAGS,
12209 		    wmi_reg_rule[count].flag_info);
12210 	}
12211 
12212 	return reg_rule_ptr;
12213 }
12214 
12215 int
12216 qwx_pull_reg_chan_list_update_ev(struct qwx_softc *sc, struct mbuf *m,
12217     struct cur_regulatory_info *reg_info)
12218 {
12219 	const void **tb;
12220 	const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
12221 	struct wmi_regulatory_rule_struct *wmi_reg_rule;
12222 	uint32_t num_2ghz_reg_rules, num_5ghz_reg_rules;
12223 	int ret;
12224 
12225 	DNPRINTF(QWX_D_WMI, "%s: processing regulatory channel list\n",
12226 	    __func__);
12227 
12228 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12229 	if (tb == NULL) {
12230 		ret = ENOMEM; /* XXX allocation failure or parsing failure? */
12231 		printf("%s: failed to parse tlv: %d\n", __func__, ret);
12232 		return ENOMEM;
12233 	}
12234 
12235 	chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
12236 	if (!chan_list_event_hdr) {
12237 		printf("%s: failed to fetch reg chan list update ev\n",
12238 		    __func__);
12239 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12240 		return EPROTO;
12241 	}
12242 
12243 	reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules;
12244 	reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules;
12245 
12246 	if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) {
12247 		printf("%s: No regulatory rules available in the event info\n",
12248 		    __func__);
12249 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12250 		return EINVAL;
12251 	}
12252 
12253 	memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2, REG_ALPHA2_LEN);
12254 	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
12255 	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
12256 	reg_info->num_phy = chan_list_event_hdr->num_phy;
12257 	reg_info->phy_id = chan_list_event_hdr->phy_id;
12258 	reg_info->ctry_code = chan_list_event_hdr->country_id;
12259 	reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
12260 
12261 	DNPRINTF(QWX_D_WMI, "%s: CC status_code %s\n", __func__,
12262 	    qwx_cc_status_to_str(reg_info->status_code));
12263 
12264 	reg_info->status_code =
12265 		qwx_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code);
12266 
12267 	reg_info->is_ext_reg_event = false;
12268 
12269 	reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz;
12270 	reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz;
12271 	reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz;
12272 	reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz;
12273 
12274 	num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
12275 	num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
12276 
12277 	DNPRINTF(QWX_D_WMI,
12278 	    "%s: cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d "
12279 	    "max_5ghz %d\n", __func__, reg_info->alpha2, reg_info->dfs_region,
12280 	    reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
12281 	    reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
12282 
12283 	DNPRINTF(QWX_D_WMI,
12284 	    "%s: num_2ghz_reg_rules %d num_5ghz_reg_rules %d\n", __func__,
12285 	    num_2ghz_reg_rules, num_5ghz_reg_rules);
12286 
12287 	wmi_reg_rule = (struct wmi_regulatory_rule_struct *)
12288 	    ((uint8_t *)chan_list_event_hdr + sizeof(*chan_list_event_hdr)
12289 	    + sizeof(struct wmi_tlv));
12290 
12291 	if (num_2ghz_reg_rules) {
12292 		reg_info->reg_rules_2ghz_ptr = qwx_create_reg_rules_from_wmi(
12293 		    num_2ghz_reg_rules, wmi_reg_rule);
12294 		if (!reg_info->reg_rules_2ghz_ptr) {
12295 			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12296 			printf("%s: Unable to allocate memory for "
12297 			    "2 GHz rules\n", __func__);
12298 			return ENOMEM;
12299 		}
12300 
12301 		qwx_print_reg_rule(sc, "2 GHz", num_2ghz_reg_rules,
12302 		    reg_info->reg_rules_2ghz_ptr);
12303 	}
12304 
12305 	if (num_5ghz_reg_rules) {
12306 		wmi_reg_rule += num_2ghz_reg_rules;
12307 		reg_info->reg_rules_5ghz_ptr = qwx_create_reg_rules_from_wmi(
12308 		    num_5ghz_reg_rules, wmi_reg_rule);
12309 		if (!reg_info->reg_rules_5ghz_ptr) {
12310 			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12311 			printf("%s: Unable to allocate memory for "
12312 			    "5 GHz rules\n", __func__);
12313 			return ENOMEM;
12314 		}
12315 
12316 		qwx_print_reg_rule(sc, "5 GHz", num_5ghz_reg_rules,
12317 		    reg_info->reg_rules_5ghz_ptr);
12318 	}
12319 
12320 	DNPRINTF(QWX_D_WMI, "%s: processed regulatory channel list\n",
12321 	    __func__);
12322 
12323 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12324 	return 0;
12325 }
12326 
int
qwx_pull_reg_chan_list_ext_update_ev(struct qwx_softc *sc, struct mbuf *m,
    struct cur_regulatory_info *reg_info)
{
	/*
	 * Stub for the extended regulatory channel list event.
	 * Always logs and returns ENOTSUP; reg_info is left untouched.
	 */
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
12334 
/*
 * Apply regulatory rules received from firmware to the net80211
 * channel table: enable channels covered by a rule, clear channels
 * that a rule marks disabled, and mark passive-scan-only channels.
 */
void
qwx_init_channels(struct qwx_softc *sc, struct cur_regulatory_info *reg_info)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *chan;
	struct cur_reg_rule *rule;
	int i, chnum;
	uint16_t freq;

	/* 2 GHz band: walk each rule and fill in channels 1-14. */
	for (i = 0; i < reg_info->num_2ghz_reg_rules; i++) {
		rule = &reg_info->reg_rules_2ghz_ptr[i];
		/* Sanity-check the rule's frequency span. */
		if (rule->start_freq < 2402 ||
		    rule->start_freq > 2500 ||
		    rule->start_freq > rule->end_freq) {
			DPRINTF("%s: bad regulatory rule: start freq %u, "
			    "end freq %u\n", __func__, rule->start_freq,
			    rule->end_freq);
			continue;
		}

		/*
		 * NOTE(review): start_freq appears to be a band edge;
		 * +10 MHz lands on the first channel's center frequency
		 * (2 GHz channels are 20 MHz wide) -- TODO confirm.
		 */
		freq = rule->start_freq + 10;
		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_2GHZ);
		if (chnum < 1 || chnum > 14) {
			DPRINTF("%s: bad regulatory rule: freq %u, "
			    "channel %u\n", __func__, freq, chnum);
			continue;
		}
		/* Step one channel at a time until past the rule's end. */
		while (freq <= rule->end_freq && chnum <= 14) {
			chan = &ic->ic_channels[chnum];
			if (rule->flags & REGULATORY_CHAN_DISABLED) {
				chan->ic_freq = 0;
				chan->ic_flags = 0;
			} else {
				chan->ic_freq = freq;
				chan->ic_flags = IEEE80211_CHAN_CCK |
				    IEEE80211_CHAN_OFDM |
				    IEEE80211_CHAN_DYN |
				    IEEE80211_CHAN_2GHZ;
			}
			chnum++;
			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_2GHZ);
		}
	}

	/* 5 GHz band: same idea, channels advance in steps of 4. */
	for (i = 0; i < reg_info->num_5ghz_reg_rules; i++) {
		rule = &reg_info->reg_rules_5ghz_ptr[i];
		if (rule->start_freq < 5170 ||
		    rule->start_freq > 6000 ||
		    rule->start_freq > rule->end_freq) {
			DPRINTF("%s: bad regulatory rule: start freq %u, "
			    "end freq %u\n", __func__, rule->start_freq,
			    rule->end_freq);
			continue;
		}

		freq = rule->start_freq + 10;
		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_5GHZ);
		if (chnum < 36 || chnum > IEEE80211_CHAN_MAX) {
			DPRINTF("%s: bad regulatory rule: freq %u, "
			    "channel %u\n", __func__, freq, chnum);
			continue;
		}
		/* 5885 MHz caps the scan at the top of the 5 GHz band. */
		while (freq <= rule->end_freq && freq <= 5885 &&
		    chnum <= IEEE80211_CHAN_MAX) {
			chan = &ic->ic_channels[chnum];
			if (rule->flags & (REGULATORY_CHAN_DISABLED |
			    REGULATORY_CHAN_NO_OFDM)) {
				chan->ic_freq = 0;
				chan->ic_flags = 0;
			} else {
				chan->ic_freq = freq;
				chan->ic_flags = IEEE80211_CHAN_A;
				/* Radar/no-IR/indoor rules force passive scan. */
				if (rule->flags & (REGULATORY_CHAN_RADAR |
				    REGULATORY_CHAN_NO_IR |
				    REGULATORY_CHAN_INDOOR_ONLY)) {
					chan->ic_flags |=
					    IEEE80211_CHAN_PASSIVE;
				}
			}
			chnum += 4;
			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_5GHZ);
		}
	}
}
12419 
/*
 * Handle a WMI regulatory channel list event (plain or extended).
 * Extracts the regulatory info, applies it to the channel table on
 * success, and frees all rule arrays before returning.
 */
int
qwx_reg_chan_list_event(struct qwx_softc *sc, struct mbuf *m,
    enum wmi_reg_chan_list_cmd_type id)
{
	struct cur_regulatory_info *reg_info = NULL;
	int ret = 0;
#if 0
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int pdev_idx, i, j;
	struct ath11k *ar;
#endif

	reg_info = malloc(sizeof(*reg_info), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!reg_info) {
		ret = ENOMEM;
		goto fallback;
	}

	/* The extended variant is currently a stub returning ENOTSUP. */
	if (id == WMI_REG_CHAN_LIST_CC_ID)
		ret = qwx_pull_reg_chan_list_update_ev(sc, m, reg_info);
	else
		ret = qwx_pull_reg_chan_list_ext_update_ev(sc, m, reg_info);

	if (ret) {
		printf("%s: failed to extract regulatory info from "
		    "received event\n", sc->sc_dev.dv_xname);
		goto fallback;
	}

	DNPRINTF(QWX_D_WMI, "%s: event reg chan list id %d\n", __func__, id);

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested ctry,
		 * fw retains the current regd. We print a failure info
		 * and return from here.
		 */
		printf("%s: Failed to set the requested Country "
		    "regulatory setting\n", __func__);
		goto mem_free;
	}

	/* Rules look good; update the net80211 channel table. */
	qwx_init_channels(sc, reg_info);
#if 0
	pdev_idx = reg_info->phy_id;

	/* Avoid default reg rule updates sent during FW recovery if
	 * it is already available
	 */
	spin_lock(&ab->base_lock);
	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
	    ab->default_regd[pdev_idx]) {
		spin_unlock(&ab->base_lock);
		goto mem_free;
	}
	spin_unlock(&ab->base_lock);

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, it goes to fallback.
		 */
		if (ab->hw_params.single_pdev_only &&
		    pdev_idx < ab->hw_params.num_rxmda_per_pdev)
			goto mem_free;
		else
			goto fallback;
	}

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
		    (char *)reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with default regd if a new country setting was
	 * requested, i.e a default regd was already set during initialization
	 * and the regd coming from this event has a valid country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath11k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;

	regd = ath11k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath11k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	spin_lock(&ab->base_lock);
	if (ab->default_regd[pdev_idx]) {
		/* The initial rules from FW after WMI Init is to build
		 * the default regd. From then on, any rules updated for
		 * the pdev could be due to user reg changes.
		 * Free previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling will be
		 * taken care by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		queue_work(ab->workqueue, &ar->regd_update_work);
	} else {
		/* This regd would be applied during mac registration and is
		 * held constant throughout for regd intersection purpose
		 */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock(&ab->base_lock);
#endif
	goto mem_free;

fallback:
	/* Fallback to older reg (by sending previous country setting
	 * again if fw has succeeded and we failed to process here.
	 * The Regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
mem_free:
	/* Common exit: release rule arrays and the reg_info itself. */
	if (reg_info) {
		free(reg_info->reg_rules_2ghz_ptr, M_DEVBUF,
		    reg_info->num_2ghz_reg_rules *
		    sizeof(*reg_info->reg_rules_2ghz_ptr));
		free(reg_info->reg_rules_5ghz_ptr, M_DEVBUF,
		    reg_info->num_5ghz_reg_rules *
		    sizeof(*reg_info->reg_rules_5ghz_ptr));
#if 0
		if (reg_info->is_ext_reg_event) {
			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
				kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);

			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
					kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
		}
#endif
		free(reg_info, M_DEVBUF, sizeof(*reg_info));
	}
	return ret;
}
12568 
12569 const char *
12570 qwx_wmi_event_scan_type_str(enum wmi_scan_event_type type,
12571     enum wmi_scan_completion_reason reason)
12572 {
12573 	switch (type) {
12574 	case WMI_SCAN_EVENT_STARTED:
12575 		return "started";
12576 	case WMI_SCAN_EVENT_COMPLETED:
12577 		switch (reason) {
12578 		case WMI_SCAN_REASON_COMPLETED:
12579 			return "completed";
12580 		case WMI_SCAN_REASON_CANCELLED:
12581 			return "completed [cancelled]";
12582 		case WMI_SCAN_REASON_PREEMPTED:
12583 			return "completed [preempted]";
12584 		case WMI_SCAN_REASON_TIMEDOUT:
12585 			return "completed [timedout]";
12586 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
12587 			return "completed [internal err]";
12588 		case WMI_SCAN_REASON_MAX:
12589 			break;
12590 		}
12591 		return "completed [unknown]";
12592 	case WMI_SCAN_EVENT_BSS_CHANNEL:
12593 		return "bss channel";
12594 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
12595 		return "foreign channel";
12596 	case WMI_SCAN_EVENT_DEQUEUED:
12597 		return "dequeued";
12598 	case WMI_SCAN_EVENT_PREEMPTED:
12599 		return "preempted";
12600 	case WMI_SCAN_EVENT_START_FAILED:
12601 		return "start failed";
12602 	case WMI_SCAN_EVENT_RESTARTED:
12603 		return "restarted";
12604 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
12605 		return "foreign channel exit";
12606 	default:
12607 		return "unknown";
12608 	}
12609 }
12610 
12611 const char *
12612 qwx_scan_state_str(enum ath11k_scan_state state)
12613 {
12614 	switch (state) {
12615 	case ATH11K_SCAN_IDLE:
12616 		return "idle";
12617 	case ATH11K_SCAN_STARTING:
12618 		return "starting";
12619 	case ATH11K_SCAN_RUNNING:
12620 		return "running";
12621 	case ATH11K_SCAN_ABORTING:
12622 		return "aborting";
12623 	}
12624 
12625 	return "unknown";
12626 }
12627 
12628 int
12629 qwx_pull_scan_ev(struct qwx_softc *sc, struct mbuf *m,
12630     struct wmi_scan_event *scan_evt_param)
12631 {
12632 	const void **tb;
12633 	const struct wmi_scan_event *ev;
12634 
12635 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12636 	if (tb == NULL) {
12637 		DPRINTF("%s: failed to parse tlv\n", __func__);
12638 		return EINVAL;
12639 	}
12640 
12641 	ev = tb[WMI_TAG_SCAN_EVENT];
12642 	if (!ev) {
12643 		DPRINTF("%s: failed to fetch scan ev\n", __func__);
12644 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12645 		return EPROTO;
12646 	}
12647 
12648 	scan_evt_param->event_type = ev->event_type;
12649 	scan_evt_param->reason = ev->reason;
12650 	scan_evt_param->channel_freq = ev->channel_freq;
12651 	scan_evt_param->scan_req_id = ev->scan_req_id;
12652 	scan_evt_param->scan_id = ev->scan_id;
12653 	scan_evt_param->vdev_id = ev->vdev_id;
12654 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
12655 
12656 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12657 	return 0;
12658 }
12659 
12660 void
12661 qwx_wmi_event_scan_started(struct qwx_softc *sc)
12662 {
12663 #ifdef notyet
12664 	lockdep_assert_held(&ar->data_lock);
12665 #endif
12666 	switch (sc->scan.state) {
12667 	case ATH11K_SCAN_IDLE:
12668 	case ATH11K_SCAN_RUNNING:
12669 	case ATH11K_SCAN_ABORTING:
12670 		printf("%s: received scan started event in an invalid "
12671 		"scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12672 		qwx_scan_state_str(sc->scan.state), sc->scan.state);
12673 		break;
12674 	case ATH11K_SCAN_STARTING:
12675 		sc->scan.state = ATH11K_SCAN_RUNNING;
12676 #if 0
12677 		if (ar->scan.is_roc)
12678 			ieee80211_ready_on_channel(ar->hw);
12679 #endif
12680 		wakeup(&sc->scan.state);
12681 		break;
12682 	}
12683 }
12684 
12685 void
12686 qwx_wmi_event_scan_completed(struct qwx_softc *sc)
12687 {
12688 #ifdef notyet
12689 	lockdep_assert_held(&ar->data_lock);
12690 #endif
12691 	switch (sc->scan.state) {
12692 	case ATH11K_SCAN_IDLE:
12693 	case ATH11K_SCAN_STARTING:
12694 		/* One suspected reason scan can be completed while starting is
12695 		 * if firmware fails to deliver all scan events to the host,
12696 		 * e.g. when transport pipe is full. This has been observed
12697 		 * with spectral scan phyerr events starving wmi transport
12698 		 * pipe. In such case the "scan completed" event should be (and
12699 		 * is) ignored by the host as it may be just firmware's scan
12700 		 * state machine recovering.
12701 		 */
12702 		printf("%s: received scan completed event in an invalid "
12703 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12704 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12705 		break;
12706 	case ATH11K_SCAN_RUNNING:
12707 	case ATH11K_SCAN_ABORTING:
12708 		qwx_mac_scan_finish(sc);
12709 		break;
12710 	}
12711 }
12712 
12713 void
12714 qwx_wmi_event_scan_bss_chan(struct qwx_softc *sc)
12715 {
12716 #ifdef notyet
12717 	lockdep_assert_held(&ar->data_lock);
12718 #endif
12719 	switch (sc->scan.state) {
12720 	case ATH11K_SCAN_IDLE:
12721 	case ATH11K_SCAN_STARTING:
12722 		printf("%s: received scan bss chan event in an invalid "
12723 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12724 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12725 		break;
12726 	case ATH11K_SCAN_RUNNING:
12727 	case ATH11K_SCAN_ABORTING:
12728 		sc->scan_channel = 0;
12729 		break;
12730 	}
12731 }
12732 
12733 void
12734 qwx_wmi_event_scan_foreign_chan(struct qwx_softc *sc, uint32_t freq)
12735 {
12736 #ifdef notyet
12737 	lockdep_assert_held(&ar->data_lock);
12738 #endif
12739 	switch (sc->scan.state) {
12740 	case ATH11K_SCAN_IDLE:
12741 	case ATH11K_SCAN_STARTING:
12742 		printf("%s: received scan foreign chan event in an invalid "
12743 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12744 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12745 		break;
12746 	case ATH11K_SCAN_RUNNING:
12747 	case ATH11K_SCAN_ABORTING:
12748 		sc->scan_channel = ieee80211_mhz2ieee(freq, 0);
12749 #if 0
12750 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
12751 			complete(&ar->scan.on_channel);
12752 #endif
12753 		break;
12754 	}
12755 }
12756 
12757 void
12758 qwx_wmi_event_scan_start_failed(struct qwx_softc *sc)
12759 {
12760 #ifdef notyet
12761 	lockdep_assert_held(&ar->data_lock);
12762 #endif
12763 	switch (sc->scan.state) {
12764 	case ATH11K_SCAN_IDLE:
12765 	case ATH11K_SCAN_RUNNING:
12766 	case ATH11K_SCAN_ABORTING:
12767 		printf("%s: received scan start failed event in an invalid "
12768 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12769 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12770 		break;
12771 	case ATH11K_SCAN_STARTING:
12772 		wakeup(&sc->scan.state);
12773 		qwx_mac_scan_finish(sc);
12774 		break;
12775 	}
12776 }
12777 
12778 
12779 void
12780 qwx_scan_event(struct qwx_softc *sc, struct mbuf *m)
12781 {
12782 	struct wmi_scan_event scan_ev = { 0 };
12783 	struct qwx_vif *arvif;
12784 
12785 	if (qwx_pull_scan_ev(sc, m, &scan_ev) != 0) {
12786 		printf("%s: failed to extract scan event",
12787 		    sc->sc_dev.dv_xname);
12788 		return;
12789 	}
12790 #ifdef notyet
12791 	rcu_read_lock();
12792 #endif
12793 	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
12794 		if (arvif->vdev_id == scan_ev.vdev_id)
12795 			break;
12796 	}
12797 
12798 	if (!arvif) {
12799 		printf("%s: received scan event for unknown vdev\n",
12800 		    sc->sc_dev.dv_xname);
12801 #if 0
12802 		rcu_read_unlock();
12803 #endif
12804 		return;
12805 	}
12806 #if 0
12807 	spin_lock_bh(&ar->data_lock);
12808 #endif
12809 	DNPRINTF(QWX_D_WMI,
12810 	    "%s: event scan %s type %d reason %d freq %d req_id %d scan_id %d "
12811 	    "vdev_id %d state %s (%d)\n", __func__,
12812 	    qwx_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
12813 	    scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
12814 	    scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
12815 	    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12816 
12817 	switch (scan_ev.event_type) {
12818 	case WMI_SCAN_EVENT_STARTED:
12819 		qwx_wmi_event_scan_started(sc);
12820 		break;
12821 	case WMI_SCAN_EVENT_COMPLETED:
12822 		qwx_wmi_event_scan_completed(sc);
12823 		break;
12824 	case WMI_SCAN_EVENT_BSS_CHANNEL:
12825 		qwx_wmi_event_scan_bss_chan(sc);
12826 		break;
12827 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
12828 		qwx_wmi_event_scan_foreign_chan(sc, scan_ev.channel_freq);
12829 		break;
12830 	case WMI_SCAN_EVENT_START_FAILED:
12831 		printf("%s: received scan start failure event\n",
12832 		    sc->sc_dev.dv_xname);
12833 		qwx_wmi_event_scan_start_failed(sc);
12834 		break;
12835 	case WMI_SCAN_EVENT_DEQUEUED:
12836 		qwx_mac_scan_finish(sc);
12837 		break;
12838 	case WMI_SCAN_EVENT_PREEMPTED:
12839 	case WMI_SCAN_EVENT_RESTARTED:
12840 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
12841 	default:
12842 		break;
12843 	}
12844 #if 0
12845 	spin_unlock_bh(&ar->data_lock);
12846 
12847 	rcu_read_unlock();
12848 #endif
12849 }
12850 
12851 int
12852 qwx_pull_chan_info_ev(struct qwx_softc *sc, uint8_t *evt_buf, uint32_t len,
12853     struct wmi_chan_info_event *ch_info_ev)
12854 {
12855 	const void **tb;
12856 	const struct wmi_chan_info_event *ev;
12857 
12858 	tb = qwx_wmi_tlv_parse_alloc(sc, evt_buf, len);
12859 	if (tb == NULL) {
12860 		printf("%s: failed to parse tlv\n", sc->sc_dev.dv_xname);
12861 		return EINVAL;
12862 	}
12863 
12864 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
12865 	if (!ev) {
12866 		printf("%s: failed to fetch chan info ev\n",
12867 		    sc->sc_dev.dv_xname);
12868 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12869 		return EPROTO;
12870 	}
12871 
12872 	ch_info_ev->err_code = ev->err_code;
12873 	ch_info_ev->freq = ev->freq;
12874 	ch_info_ev->cmd_flags = ev->cmd_flags;
12875 	ch_info_ev->noise_floor = ev->noise_floor;
12876 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
12877 	ch_info_ev->cycle_count = ev->cycle_count;
12878 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
12879 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
12880 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
12881 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
12882 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
12883 	ch_info_ev->vdev_id = ev->vdev_id;
12884 
12885 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12886 	return 0;
12887 }
12888 
/*
 * Handle a WMI channel info event: record per-channel survey data
 * (noise floor, busy/occupancy counters) while a scan is running.
 */
void
qwx_chan_info_event(struct qwx_softc *sc, struct mbuf *m)
{
	struct qwx_vif *arvif;
	struct wmi_chan_info_event ch_info_ev = {0};
	struct qwx_survey_info *survey;
	int idx;
	/* HW channel counters frequency value in hertz */
	uint32_t cc_freq_hz = sc->cc_freq_hz;

	if (qwx_pull_chan_info_ev(sc, mtod(m, void *), m->m_pkthdr.len,
	    &ch_info_ev) != 0) {
		printf("%s: failed to extract chan info event\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	DNPRINTF(QWX_D_WMI, "%s: event chan info vdev_id %d err_code %d "
	    "freq %d cmd_flags %d noise_floor %d rx_clear_count %d "
	    "cycle_count %d mac_clk_mhz %d\n", __func__,
	    ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
	    ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
	    ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
	    ch_info_ev.mac_clk_mhz);

	/* END_RESP just marks the end of a report; nothing to record. */
	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
		DNPRINTF(QWX_D_WMI, "chan info report completed\n");
		return;
	}
#ifdef notyet
	rcu_read_lock();
#endif
	/* Find the vif this event belongs to. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->vdev_id == ch_info_ev.vdev_id)
			break;
	}
	if (!arvif) {
		printf("%s: invalid vdev id in chan info ev %d\n",
		   sc->sc_dev.dv_xname, ch_info_ev.vdev_id);
#ifdef notyet
		rcu_read_unlock();
#endif
		return;
	}
#ifdef notyet
	spin_lock_bh(&ar->data_lock);
#endif
	/* Channel info only makes sense while a scan is in progress. */
	switch (sc->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		printf("%s: received chan info event without a scan request, "
		    "ignoring\n", sc->sc_dev.dv_xname);
		goto exit;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		break;
	}

	/* Survey records are indexed by IEEE channel number. */
	idx = ieee80211_mhz2ieee(ch_info_ev.freq, 0);
	if (idx >= nitems(sc->survey)) {
		printf("%s: invalid frequency %d (idx %d out of bounds)\n",
		    sc->sc_dev.dv_xname, ch_info_ev.freq, idx);
		goto exit;
	}

	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
	 * HW channel counters frequency value
	 */
	/* NOTE(review): mhz * 1000 yields kHz, not Hz -- matches the
	 * original ath11k code, but verify the intended units. */
	if (ch_info_ev.mac_clk_mhz)
		cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);

	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
		survey = &sc->survey[idx];
		memset(survey, 0, sizeof(*survey));
		survey->noise = ch_info_ev.noise_floor;
		survey->time = ch_info_ev.cycle_count / cc_freq_hz;
		survey->time_busy = ch_info_ev.rx_clear_count / cc_freq_hz;
	}
exit:
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
#else
	return;
#endif
}
12975 
12976 int
12977 qwx_wmi_tlv_mgmt_rx_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
12978     const void *ptr, void *data)
12979 {
12980 	struct wmi_tlv_mgmt_rx_parse *parse = data;
12981 
12982 	switch (tag) {
12983 	case WMI_TAG_MGMT_RX_HDR:
12984 		parse->fixed = ptr;
12985 		break;
12986 	case WMI_TAG_ARRAY_BYTE:
12987 		if (!parse->frame_buf_done) {
12988 			parse->frame_buf = ptr;
12989 			parse->frame_buf_done = 1;
12990 		}
12991 		break;
12992 	}
12993 	return 0;
12994 }
12995 
/*
 * Extract management RX parameters from a WMI TLV message and trim the
 * mbuf so that it begins at the 802.11 frame and ends after its payload.
 *
 * On success, `hdr` holds the decoded metadata and `m` points at the raw
 * frame of hdr->buf_len bytes.  Returns 0 on success or EPROTO if the
 * TLVs are malformed or describe lengths outside the mbuf.
 */
int
qwx_pull_mgmt_rx_params_tlv(struct qwx_softc *sc, struct mbuf *m,
    struct mgmt_rx_event_params *hdr)
{
	struct wmi_tlv_mgmt_rx_parse parse = { 0 };
	const struct wmi_mgmt_rx_hdr *ev;
	const uint8_t *frame;
	int ret;
	size_t totlen, hdrlen;

	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
	    qwx_wmi_tlv_mgmt_rx_parse, &parse);
	if (ret) {
		printf("%s: failed to parse mgmt rx tlv %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ev = parse.fixed;
	frame = parse.frame_buf;

	/* Both the fixed header and the frame-body TLV must be present. */
	if (!ev || !frame) {
		printf("%s: failed to fetch mgmt rx hdr\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}

	hdr->pdev_id =  ev->pdev_id;
	hdr->chan_freq = le32toh(ev->chan_freq);
	hdr->channel = le32toh(ev->channel);
	hdr->snr = le32toh(ev->snr);
	hdr->rate = le32toh(ev->rate);
	hdr->phy_mode = le32toh(ev->phy_mode);
	hdr->buf_len = le32toh(ev->buf_len);
	hdr->status = le32toh(ev->status);
	hdr->flags = le32toh(ev->flags);
	hdr->rssi = le32toh(ev->rssi);
	hdr->tsf_delta = le32toh(ev->tsf_delta);
	memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));

	/* The frame pointer must lie within the mbuf data. */
	if (frame < mtod(m, uint8_t *) ||
	    frame >= mtod(m, uint8_t *) + m->m_pkthdr.len) {
		printf("%s: invalid mgmt rx frame pointer\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}
	hdrlen = frame - mtod(m, uint8_t *);

	/* Guard against wraparound before computing the total length. */
	if (hdrlen + hdr->buf_len < hdr->buf_len) {
		printf("%s: length overflow in mgmt rx hdr ev\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}
	totlen = hdrlen + hdr->buf_len;
	if (m->m_pkthdr.len < totlen) {
		printf("%s: invalid length in mgmt rx hdr ev\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}

	/* shift the mbuf to point at `frame` */
	m->m_len = m->m_pkthdr.len = totlen;
	m_adj(m, hdrlen);

#if 0 /* Not needed on OpenBSD? */
	ath11k_ce_byte_swap(skb->data, hdr->buf_len);
#endif
	return 0;
}
13065 
/*
 * Handle WMI_MGMT_RX_EVENTID: deliver a firmware-received management
 * frame to the net80211 stack.
 *
 * Unlike other WMI handlers, this function takes ownership of the mbuf:
 * it is either consumed by ieee80211_input() or freed here on error
 * (the dispatch switch in qwx_wmi_tlv_op_rx returns early for this ID).
 */
void
qwx_mgmt_rx_event(struct qwx_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct mgmt_rx_event_params rx_ev = {0};
	struct ieee80211_rxinfo rxi;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	if (qwx_pull_mgmt_rx_params_tlv(sc, m, &rx_ev) != 0) {
		printf("%s: failed to extract mgmt rx event\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return;
	}

	memset(&rxi, 0, sizeof(rxi));

	DNPRINTF(QWX_D_MGMT, "%s: event mgmt rx status %08x\n", __func__,
	    rx_ev.status);
#ifdef notyet
	rcu_read_lock();
#endif
	if (rx_ev.pdev_id >= nitems(sc->pdevs)) {
		printf("%s: invalid pdev_id %d in mgmt_rx_event\n",
		    sc->sc_dev.dv_xname, rx_ev.pdev_id);
		m_freem(m);
		goto exit;
	}

	/* Drop frames during CAC and frames flagged with RX errors. */
	if ((test_bit(ATH11K_CAC_RUNNING, sc->sc_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
		m_freem(m);
		goto exit;
	}

	/* MIC failures are counted before dropping the frame. */
	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) {
		ic->ic_stats.is_ccmp_dec_errs++;
		m_freem(m);
		goto exit;
	}

	rxi.rxi_chan = rx_ev.channel;
	/* Firmware reports SNR; convert to RSSI using the default noise floor. */
	rxi.rxi_rssi = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
#if 0
	status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
#endif

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);
#if 0
	/* In case of PMF, FW delivers decrypted frames with Protected Bit set.
	 * Don't clear that. Also, FW delivers broadcast management frames
	 * (ex: group privacy action frames in mesh) as encrypted payload.
	 */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	if (ieee80211_is_beacon(hdr->frame_control))
		ath11k_mac_handle_beacon(ar, skb);
#endif

	DNPRINTF(QWX_D_MGMT,
	    "%s: event mgmt rx skb %p len %d ftype %02x stype %02x\n",
	    __func__, m, m->m_pkthdr.len,
	    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK,
	    wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);

	DNPRINTF(QWX_D_MGMT, "%s: event mgmt rx freq %d chan %d snr %d\n",
	    __func__, rx_ev.chan_freq, rx_ev.channel, rx_ev.snr);

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct qwx_rx_radiotap_header *tap = &sc->sc_rxtap;

		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
		    m, BPF_DIRECTION_IN);
	}
#endif
	/* ieee80211_input() consumes the mbuf. */
	ieee80211_input(ifp, m, ni, &rxi);
	ieee80211_release_node(ic, ni);
exit:
#ifdef notyet
	rcu_read_unlock();
#else
	return;
#endif
}
13165 
13166 int
13167 qwx_pull_mgmt_tx_compl_param_tlv(struct qwx_softc *sc, struct mbuf *m,
13168     struct wmi_mgmt_tx_compl_event *param)
13169 {
13170 	const void **tb;
13171 	const struct wmi_mgmt_tx_compl_event *ev;
13172 	int ret = 0;
13173 
13174 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
13175 	if (tb == NULL) {
13176 		ret = ENOMEM;
13177 		printf("%s: failed to parse tlv: %d\n",
13178 		    sc->sc_dev.dv_xname, ret);
13179 		return ENOMEM;
13180 	}
13181 
13182 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
13183 	if (!ev) {
13184 		printf("%s: failed to fetch mgmt tx compl ev\n",
13185 		    sc->sc_dev.dv_xname);
13186 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13187 		return EPROTO;
13188 	}
13189 
13190 	param->pdev_id = ev->pdev_id;
13191 	param->desc_id = ev->desc_id;
13192 	param->status = ev->status;
13193 	param->ack_rssi = ev->ack_rssi;
13194 
13195 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13196 	return 0;
13197 }
13198 
/*
 * Complete a management frame transmission: release the DMA map, mbuf
 * and node reference held by the tx slot identified by desc_id, update
 * the queued counter, and restart the interface output queue if it had
 * been marked active-full.
 */
void
qwx_wmi_process_mgmt_tx_comp(struct qwx_softc *sc,
    struct wmi_mgmt_tx_compl_event *tx_compl_param)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	struct ifnet *ifp = &ic->ic_if;
	struct qwx_tx_data *tx_data;

	if (tx_compl_param->desc_id >= nitems(arvif->txmgmt.data)) {
		printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
		    sc->sc_dev.dv_xname, tx_compl_param->desc_id);
		return;
	}

	/* A NULL mbuf means this slot is not in flight; stale completion. */
	tx_data = &arvif->txmgmt.data[tx_compl_param->desc_id];
	if (tx_data->m == NULL) {
		printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
		    sc->sc_dev.dv_xname, tx_compl_param->desc_id);
		return;
	}

	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
	m_freem(tx_data->m);
	tx_data->m = NULL;

	ieee80211_release_node(ic, tx_data->ni);
	tx_data->ni = NULL;

	if (arvif->txmgmt.queued > 0)
		arvif->txmgmt.queued--;

	/* Non-zero firmware status counts as an output error. */
	if (tx_compl_param->status != 0)
		ifp->if_oerrors++;

	/* With room freed up, clear our slot in qfullmsk and kick tx. */
	if (arvif->txmgmt.queued < nitems(arvif->txmgmt.data) - 1) {
		sc->qfullmsk &= ~(1U << QWX_MGMT_QUEUE_ID);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			(*ifp->if_start)(ifp);
		}
	}
}
13242 
13243 void
13244 qwx_mgmt_tx_compl_event(struct qwx_softc *sc, struct mbuf *m)
13245 {
13246 	struct wmi_mgmt_tx_compl_event tx_compl_param = { 0 };
13247 
13248 	if (qwx_pull_mgmt_tx_compl_param_tlv(sc, m, &tx_compl_param) != 0) {
13249 		printf("%s: failed to extract mgmt tx compl event\n",
13250 		    sc->sc_dev.dv_xname);
13251 		return;
13252 	}
13253 
13254 	qwx_wmi_process_mgmt_tx_comp(sc, &tx_compl_param);
13255 
13256 	DNPRINTF(QWX_D_MGMT, "%s: event mgmt tx compl ev pdev_id %d, "
13257 	    "desc_id %d, status %d ack_rssi %d", __func__,
13258 	    tx_compl_param.pdev_id, tx_compl_param.desc_id,
13259 	    tx_compl_param.status, tx_compl_param.ack_rssi);
13260 }
13261 
13262 int
13263 qwx_pull_roam_ev(struct qwx_softc *sc, struct mbuf *m,
13264     struct wmi_roam_event *roam_ev)
13265 {
13266 	const void **tb;
13267 	const struct wmi_roam_event *ev;
13268 	int ret;
13269 
13270 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
13271 	if (tb == NULL) {
13272 		ret = ENOMEM;
13273 		printf("%s: failed to parse tlv: %d\n",
13274 		    sc->sc_dev.dv_xname, ret);
13275 		return ret;
13276 	}
13277 
13278 	ev = tb[WMI_TAG_ROAM_EVENT];
13279 	if (!ev) {
13280 		printf("%s: failed to fetch roam ev\n",
13281 		    sc->sc_dev.dv_xname);
13282 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13283 		return EPROTO;
13284 	}
13285 
13286 	roam_ev->vdev_id = ev->vdev_id;
13287 	roam_ev->reason = ev->reason;
13288 	roam_ev->rssi = ev->rssi;
13289 
13290 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13291 	return 0;
13292 }
13293 
13294 void
13295 qwx_mac_handle_beacon_miss(struct qwx_softc *sc, uint32_t vdev_id)
13296 {
13297 	struct ieee80211com *ic = &sc->sc_ic;
13298 
13299 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
13300 	    (ic->ic_state != IEEE80211_S_RUN))
13301 		return;
13302 
13303 	if (ic->ic_mgt_timer == 0) {
13304 		if (ic->ic_if.if_flags & IFF_DEBUG)
13305 			printf("%s: receiving no beacons from %s; checking if "
13306 			    "this AP is still responding to probe requests\n",
13307 			    sc->sc_dev.dv_xname,
13308 			    ether_sprintf(ic->ic_bss->ni_macaddr));
13309 		/*
13310 		 * Rather than go directly to scan state, try to send a
13311 		 * directed probe request first. If that fails then the
13312 		 * state machine will drop us into scanning after timing
13313 		 * out waiting for a probe response.
13314 		 */
13315 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
13316 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
13317 	}
13318 }
13319 
13320 void
13321 qwx_roam_event(struct qwx_softc *sc, struct mbuf *m)
13322 {
13323 	struct wmi_roam_event roam_ev = {};
13324 
13325 	if (qwx_pull_roam_ev(sc, m, &roam_ev) != 0) {
13326 		printf("%s: failed to extract roam event\n",
13327 		    sc->sc_dev.dv_xname);
13328 		return;
13329 	}
13330 
13331 	DNPRINTF(QWX_D_WMI, "%s: event roam vdev %u reason 0x%08x rssi %d\n",
13332 	    __func__, roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
13333 
13334 	if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
13335 		return;
13336 
13337 	switch (roam_ev.reason) {
13338 	case WMI_ROAM_REASON_BEACON_MISS:
13339 		qwx_mac_handle_beacon_miss(sc, roam_ev.vdev_id);
13340 		break;
13341 	case WMI_ROAM_REASON_BETTER_AP:
13342 	case WMI_ROAM_REASON_LOW_RSSI:
13343 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
13344 	case WMI_ROAM_REASON_HO_FAILED:
13345 		break;
13346 	}
13347 }
13348 
13349 int
13350 qwx_pull_vdev_install_key_compl_ev(struct qwx_softc *sc, struct mbuf *m,
13351     struct wmi_vdev_install_key_complete_arg *arg)
13352 {
13353 	const void **tb;
13354 	const struct wmi_vdev_install_key_compl_event *ev;
13355 	int ret;
13356 
13357 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
13358 	if (tb == NULL) {
13359 		ret = ENOMEM;
13360 		printf("%s: failed to parse tlv: %d\n",
13361 		    sc->sc_dev.dv_xname, ret);
13362 		return ret;
13363 	}
13364 
13365 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
13366 	if (!ev) {
13367 		printf("%s: failed to fetch vdev install key compl ev\n",
13368 		    sc->sc_dev.dv_xname);
13369 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13370 		return EPROTO;
13371 	}
13372 
13373 	arg->vdev_id = ev->vdev_id;
13374 	arg->macaddr = ev->peer_macaddr.addr;
13375 	arg->key_idx = ev->key_idx;
13376 	arg->key_flags = ev->key_flags;
13377 	arg->status = ev->status;
13378 
13379 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13380 	return 0;
13381 }
13382 
/*
 * Handle WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID.
 *
 * Records the completion status in sc->install_key_status and wakes up
 * the thread sleeping on sc->install_key_done in the key-install path.
 * The mbuf is owned and freed by the caller.
 */
void
qwx_vdev_install_key_compl_event(struct qwx_softc *sc, struct mbuf *m)
{
	struct wmi_vdev_install_key_complete_arg install_key_compl = { 0 };
	struct qwx_vif *arvif;

	if (qwx_pull_vdev_install_key_compl_ev(sc, m,
	    &install_key_compl) != 0) {
		printf("%s: failed to extract install key compl event\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	DNPRINTF(QWX_D_WMI, "%s: event vdev install key ev idx %d flags %08x "
	    "macaddr %s status %d\n", __func__, install_key_compl.key_idx,
	    install_key_compl.key_flags,
	    ether_sprintf((u_char *)install_key_compl.macaddr),
	    install_key_compl.status);

	/* Validate the vdev id before signalling completion. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->vdev_id == install_key_compl.vdev_id)
			break;
	}
	if (!arvif) {
		printf("%s: invalid vdev id in install key compl ev %d\n",
		    sc->sc_dev.dv_xname, install_key_compl.vdev_id);
		return;
	}

	sc->install_key_status = 0;

	if (install_key_compl.status !=
	    WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
		printf("%s: install key failed for %s status %d\n",
		    sc->sc_dev.dv_xname,
		    ether_sprintf((u_char *)install_key_compl.macaddr),
		    install_key_compl.status);
		sc->install_key_status = install_key_compl.status;
	}

	/* Wake the thread waiting in the key-install code path. */
	sc->install_key_done = 1;
	wakeup(&sc->install_key_done);
}
13426 
/*
 * Top-level WMI event dispatcher, installed as the HTC RX completion
 * handler for the WMI control service endpoints.
 *
 * Strips the WMI command header from the mbuf and dispatches by event
 * id.  All handlers borrow the mbuf and it is freed here afterwards,
 * with one exception: WMI_MGMT_RX_EVENTID transfers mbuf ownership to
 * qwx_mgmt_rx_event(), hence the early return.  The #if 0 cases mirror
 * events handled by the Linux ath11k driver but not (yet) ported.
 */
void
qwx_wmi_tlv_op_rx(struct qwx_softc *sc, struct mbuf *m)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = mtod(m, struct wmi_cmd_hdr *);
	id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));

	/* Strip the command header; handlers see only the event payload. */
	m_adj(m, sizeof(struct wmi_cmd_hdr));

	switch (id) {
		/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		qwx_service_ready_event(sc, m);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		qwx_service_ready_ext_event(sc, m);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		qwx_service_ready_ext2_event(sc, m);
		break;
	case WMI_REG_CHAN_LIST_CC_EVENTID:
		qwx_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_ID);
		break;
	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
		qwx_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_EXT_ID);
		break;
	case WMI_READY_EVENTID:
		qwx_ready_event(sc, m);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		qwx_peer_delete_resp_event(sc, m);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		qwx_vdev_start_resp_event(sc, m);
		break;
#if 0
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath11k_bcn_tx_status_event(ab, skb);
		break;
#endif
	case WMI_VDEV_STOPPED_EVENTID:
		qwx_vdev_stopped_event(sc, m);
		break;
	case WMI_MGMT_RX_EVENTID:
		qwx_mgmt_rx_event(sc, m);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		qwx_mgmt_tx_compl_event(sc, m);
		break;
	case WMI_SCAN_EVENTID:
		qwx_scan_event(sc, m);
		break;
#if 0
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath11k_peer_sta_kickout_event(ab, skb);
		break;
#endif
	case WMI_ROAM_EVENTID:
		qwx_roam_event(sc, m);
		break;
	case WMI_CHAN_INFO_EVENTID:
		qwx_chan_info_event(sc, m);
		break;
#if 0
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath11k_pdev_bss_chan_info_event(ab, skb);
		break;
#endif
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		qwx_vdev_install_key_compl_event(sc, m);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		qwx_service_available_event(sc, m);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		qwx_peer_assoc_conf_event(sc, m);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		/* ignore */
		break;
#if 0
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath11k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_UTF_EVENTID:
		ath11k_tm_wmi_event(ab, id, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath11k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath11k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath11k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
		ath11k_wmi_obss_color_collision_event(ab, skb);
		break;
	case WMI_TWT_ADD_DIALOG_EVENTID:
		ath11k_wmi_twt_add_dialog_event(ab, skb);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath11k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath11k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	case WMI_11D_NEW_COUNTRY_EVENTID:
		ath11k_reg_11d_new_cc_event(ab, skb);
		break;
#endif
	case WMI_DIAG_EVENTID:
		/* Ignore. These events trigger tracepoints in Linux. */
		break;
#if 0
	case WMI_PEER_STA_PS_STATECHG_EVENTID:
		ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath11k_wmi_gtk_offload_status_event(ab, skb);
		break;
#endif
	case WMI_UPDATE_FW_MEM_DUMP_EVENTID:
		DPRINTF("%s: 0x%x: update fw mem dump\n", __func__, id);
		break;
	case WMI_PDEV_SET_HW_MODE_RESP_EVENTID:
		DPRINTF("%s: 0x%x: set HW mode response event\n", __func__, id);
		break;
	case WMI_WLAN_FREQ_AVOID_EVENTID:
		DPRINTF("%s: 0x%x: wlan freq avoid event\n", __func__, id);
		break;
	default:
		DPRINTF("%s: unsupported event id 0x%x\n", __func__, id);
		break;
	}

	m_freem(m);
}
13578 
13579 void
13580 qwx_wmi_op_ep_tx_credits(struct qwx_softc *sc)
13581 {
13582 	struct qwx_htc *htc = &sc->htc;
13583 	int i;
13584 
13585 	/* try to send pending beacons first. they take priority */
13586 	sc->wmi.tx_credits = 1;
13587 	wakeup(&sc->wmi.tx_credits);
13588 
13589 	if (!sc->hw_params.credit_flow)
13590 		return;
13591 
13592 	for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
13593 		struct qwx_htc_ep *ep = &htc->endpoint[i];
13594 		if (ep->tx_credit_flow_enabled && ep->tx_credits > 0)
13595 			wakeup(&ep->tx_credits);
13596 	}
13597 }
13598 
/*
 * Connect the HTC control service endpoint for one pdev.
 *
 * All WMI control endpoints share the same callbacks; only the service
 * id differs per pdev (MAC0/MAC1/MAC2).  On success the assigned
 * endpoint id and maximum message length are recorded in sc->wmi.
 *
 * NOTE(review): pdev_idx indexes svc_id[] (3 entries) unchecked;
 * qwx_wmi_connect() bounds the count by hw_params.max_radios — confirm
 * max_radios can never exceed 3.
 */
int
qwx_connect_pdev_htc_service(struct qwx_softc *sc, uint32_t pdev_idx)
{
	int status;
	uint32_t svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
	    ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
	    ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
	struct qwx_htc_svc_conn_req conn_req;
	struct qwx_htc_svc_conn_resp conn_resp;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = qwx_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = qwx_wmi_tlv_op_rx;
	conn_req.ep_ops.ep_tx_credits = qwx_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = svc_id[pdev_idx];

	status = qwx_htc_connect_service(&sc->htc, &conn_req, &conn_resp);
	if (status) {
		printf("%s: failed to connect to WMI CONTROL service "
		    "status: %d\n", sc->sc_dev.dv_xname, status);
		return status;
	}

	sc->wmi.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
	sc->wmi.wmi[pdev_idx].eid = conn_resp.eid;
	sc->wmi.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
	sc->wmi.wmi[pdev_idx].tx_ce_desc = 0;

	return 0;
}
13634 
13635 int
13636 qwx_wmi_connect(struct qwx_softc *sc)
13637 {
13638 	uint32_t i;
13639 	uint8_t wmi_ep_count;
13640 
13641 	wmi_ep_count = sc->htc.wmi_ep_count;
13642 	if (wmi_ep_count > sc->hw_params.max_radios)
13643 		return -1;
13644 
13645 	for (i = 0; i < wmi_ep_count; i++)
13646 		qwx_connect_pdev_htc_service(sc, i);
13647 
13648 	return 0;
13649 }
13650 
13651 void
13652 qwx_htc_reset_endpoint_states(struct qwx_htc *htc)
13653 {
13654 	struct qwx_htc_ep *ep;
13655 	int i;
13656 
13657 	for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
13658 		ep = &htc->endpoint[i];
13659 		ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
13660 		ep->max_ep_message_len = 0;
13661 		ep->max_tx_queue_depth = 0;
13662 		ep->eid = i;
13663 		ep->htc = htc;
13664 		ep->tx_credit_flow_enabled = 1;
13665 	}
13666 }
13667 
/*
 * TX completion stub for the HTC pseudo control service.
 * No completion handling is implemented; the mbuf is simply dropped.
 */
void
qwx_htc_control_tx_complete(struct qwx_softc *sc, struct mbuf *m)
{
	m_freem(m);
	printf("%s: not implemented\n", __func__);
}
13675 
/*
 * RX completion stub for the HTC pseudo control service.
 * No completion handling is implemented; the mbuf is simply dropped.
 */
void
qwx_htc_control_rx_complete(struct qwx_softc *sc, struct mbuf *m)
{
	m_freem(m);
	printf("%s: not implemented\n", __func__);
}
13683 
13684 uint8_t
13685 qwx_htc_get_credit_allocation(struct qwx_htc *htc, uint16_t service_id)
13686 {
13687 	uint8_t i, allocation = 0;
13688 
13689 	for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
13690 		if (htc->service_alloc_table[i].service_id == service_id) {
13691 			allocation =
13692 			    htc->service_alloc_table[i].credit_allocation;
13693 		}
13694 	}
13695 
13696 	return allocation;
13697 }
13698 
13699 const char *
13700 qwx_htc_service_name(enum ath11k_htc_svc_id id)
13701 {
13702 	switch (id) {
13703 	case ATH11K_HTC_SVC_ID_RESERVED:
13704 		return "Reserved";
13705 	case ATH11K_HTC_SVC_ID_RSVD_CTRL:
13706 		return "Control";
13707 	case ATH11K_HTC_SVC_ID_WMI_CONTROL:
13708 		return "WMI";
13709 	case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
13710 		return "DATA BE";
13711 	case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
13712 		return "DATA BK";
13713 	case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
13714 		return "DATA VI";
13715 	case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
13716 		return "DATA VO";
13717 	case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
13718 		return "WMI MAC1";
13719 	case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
13720 		return "WMI MAC2";
13721 	case ATH11K_HTC_SVC_ID_NMI_CONTROL:
13722 		return "NMI Control";
13723 	case ATH11K_HTC_SVC_ID_NMI_DATA:
13724 		return "NMI Data";
13725 	case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
13726 		return "HTT Data";
13727 	case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
13728 		return "RAW";
13729 	case ATH11K_HTC_SVC_ID_IPA_TX:
13730 		return "IPA TX";
13731 	case ATH11K_HTC_SVC_ID_PKT_LOG:
13732 		return "PKT LOG";
13733 	}
13734 
13735 	return "Unknown";
13736 }
13737 
13738 struct mbuf *
13739 qwx_htc_alloc_mbuf(size_t payload_size)
13740 {
13741 	struct mbuf *m;
13742 	size_t size = sizeof(struct ath11k_htc_hdr) + payload_size;
13743 
13744 	m = m_gethdr(M_DONTWAIT, MT_DATA);
13745 	if (m == NULL)
13746 		return NULL;
13747 
13748 	if (size <= MCLBYTES)
13749 		MCLGET(m, M_DONTWAIT);
13750 	else
13751 		MCLGETL(m, M_DONTWAIT, size);
13752 	if ((m->m_flags & M_EXT) == 0) {
13753 		m_freem(m);
13754 		return NULL;
13755 	}
13756 
13757 	m->m_len = m->m_pkthdr.len = size;
13758 	memset(mtod(m, void *), 0, size);
13759 
13760 	return m;
13761 }
13762 
13763 struct mbuf *
13764 qwx_htc_build_tx_ctrl_mbuf(void)
13765 {
13766 	size_t size;
13767 
13768 	size = ATH11K_HTC_CONTROL_BUFFER_SIZE - sizeof(struct ath11k_htc_hdr);
13769 
13770 	return qwx_htc_alloc_mbuf(size);
13771 }
13772 
/*
 * Fill in the HTC header at the front of an outgoing mbuf: endpoint id,
 * payload length, the need-credit-update flag when credit flow is
 * enabled for this endpoint, and the per-endpoint sequence number.
 */
void
qwx_htc_prepare_tx_mbuf(struct qwx_htc_ep *ep, struct mbuf *m)
{
	struct ath11k_htc_hdr *hdr;

	hdr = mtod(m, struct ath11k_htc_hdr *);

	memset(hdr, 0, sizeof(*hdr));
	/* Payload length excludes the HTC header itself. */
	hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
	    FIELD_PREP(HTC_HDR_PAYLOADLEN, (m->m_pkthdr.len - sizeof(*hdr)));

	if (ep->tx_credit_flow_enabled)
		hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS,
		    ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
#ifdef notyet
	spin_lock_bh(&ep->htc->tx_lock);
#endif
	/* Sequence number increments per message on this endpoint. */
	hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
#ifdef notyet
	spin_unlock_bh(&ep->htc->tx_lock);
#endif
}
13795 
/*
 * Send an mbuf on an HTC endpoint.
 *
 * With credit flow enabled, the required credits are reserved up front
 * and returned on failure (err_credits), waking any credit waiters via
 * the endpoint's ep_tx_credits callback.  The mbuf is handed to the CE
 * layer on success; on DMA mapping failure it is freed here unless the
 * error is ENOBUFS, in which case the caller retains ownership.
 *
 * Returns 0 on success, ENOENT for a bad endpoint id, EAGAIN when
 * credits are exhausted, or an error from DMA mapping / CE submission.
 */
int
qwx_htc_send(struct qwx_htc *htc, enum ath11k_htc_ep_id eid, struct mbuf *m)
{
	struct qwx_htc_ep *ep = &htc->endpoint[eid];
	struct qwx_softc *sc = htc->sc;
	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ep->ul_pipe_id];
	void *ctx;
	struct qwx_tx_data *tx_data;
	int credits = 0;
	int ret;
	int credit_flow_enabled = (sc->hw_params.credit_flow &&
	    ep->tx_credit_flow_enabled);

	if (eid >= ATH11K_HTC_EP_COUNT) {
		printf("%s: Invalid endpoint id: %d\n", __func__, eid);
		return ENOENT;
	}

	if (credit_flow_enabled) {
		/* One credit per target_credit_size chunk of the message. */
		credits = howmany(m->m_pkthdr.len, htc->target_credit_size);
#ifdef notyet
		spin_lock_bh(&htc->tx_lock);
#endif
		if (ep->tx_credits < credits) {
			DNPRINTF(QWX_D_HTC,
			    "%s: ep %d insufficient credits required %d "
			    "total %d\n", __func__, eid, credits,
			    ep->tx_credits);
#ifdef notyet
			spin_unlock_bh(&htc->tx_lock);
#endif
			return EAGAIN;
		}
		ep->tx_credits -= credits;
		DNPRINTF(QWX_D_HTC, "%s: ep %d credits consumed %d total %d\n",
		    __func__, eid, credits, ep->tx_credits);
#ifdef notyet
		spin_unlock_bh(&htc->tx_lock);
#endif
	}

	/* Prepend the HTC header before handing the frame to the CE. */
	qwx_htc_prepare_tx_mbuf(ep, m);

	ctx = pipe->src_ring->per_transfer_context[pipe->src_ring->write_index];
	tx_data = (struct qwx_tx_data *)ctx;

	tx_data->eid = eid;
	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (ret) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, ret);
		/* On ENOBUFS the caller keeps ownership and may retry. */
		if (ret != ENOBUFS)
			m_freem(m);
		goto err_credits;
	}

	DNPRINTF(QWX_D_HTC, "%s: tx mbuf %p eid %d paddr %lx\n",
	    __func__, m, tx_data->eid, tx_data->map->dm_segs[0].ds_addr);
#ifdef QWX_DEBUG
	{
		int i;
		uint8_t *p = mtod(m, uint8_t *);
		DNPRINTF(QWX_D_HTC, "%s message buffer:", __func__);
		for (i = 0; i < m->m_pkthdr.len; i++) {
			DNPRINTF(QWX_D_HTC, "%s %.2x",
			    i % 16 == 0 ? "\n" : "", p[i]);
		}
		if (i % 16)
			DNPRINTF(QWX_D_HTC, "\n");
	}
#endif
	ret = qwx_ce_send(htc->sc, m, ep->ul_pipe_id, ep->eid);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
err_credits:
	if (credit_flow_enabled) {
#ifdef notyet
		spin_lock_bh(&htc->tx_lock);
#endif
		/* Give the reserved credits back and wake any waiters. */
		ep->tx_credits += credits;
		DNPRINTF(QWX_D_HTC, "%s: ep %d credits reverted %d total %d\n",
		    __func__, eid, credits, ep->tx_credits);
#ifdef notyet
		spin_unlock_bh(&htc->tx_lock);
#endif

		if (ep->ep_ops.ep_tx_credits)
			ep->ep_ops.ep_tx_credits(htc->sc);
	}
	return ret;
}
13893 
13894 int
13895 qwx_htc_connect_service(struct qwx_htc *htc,
13896     struct qwx_htc_svc_conn_req *conn_req,
13897     struct qwx_htc_svc_conn_resp *conn_resp)
13898 {
13899 	struct qwx_softc *sc = htc->sc;
13900 	struct ath11k_htc_conn_svc *req_msg;
13901 	struct ath11k_htc_conn_svc_resp resp_msg_dummy;
13902 	struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
13903 	enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
13904 	struct qwx_htc_ep *ep;
13905 	struct mbuf *m;
13906 	unsigned int max_msg_size = 0;
13907 	int length, status = 0;
13908 	int disable_credit_flow_ctrl = 0;
13909 	uint16_t flags = 0;
13910 	uint16_t message_id, service_id;
13911 	uint8_t tx_alloc = 0;
13912 
13913 	/* special case for HTC pseudo control service */
13914 	if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
13915 		disable_credit_flow_ctrl = 1;
13916 		assigned_eid = ATH11K_HTC_EP_0;
13917 		max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
13918 		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
13919 		goto setup;
13920 	}
13921 
13922 	tx_alloc = qwx_htc_get_credit_allocation(htc, conn_req->service_id);
13923 	if (!tx_alloc)
13924 		DNPRINTF(QWX_D_HTC,
13925 		    "%s: htc service %s does not allocate target credits\n",
13926 		    sc->sc_dev.dv_xname,
13927 		    qwx_htc_service_name(conn_req->service_id));
13928 
13929 	m = qwx_htc_build_tx_ctrl_mbuf();
13930 	if (!m) {
13931 		printf("%s: Failed to allocate HTC packet\n",
13932 		    sc->sc_dev.dv_xname);
13933 		return ENOMEM;
13934 	}
13935 
13936 	length = sizeof(*req_msg);
13937 	m->m_len = m->m_pkthdr.len = sizeof(struct ath11k_htc_hdr) + length;
13938 
13939 	req_msg = (struct ath11k_htc_conn_svc *)(mtod(m, uint8_t *) +
13940 	    sizeof(struct ath11k_htc_hdr));
13941 	memset(req_msg, 0, length);
13942 	req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
13943 	    ATH11K_HTC_MSG_CONNECT_SERVICE_ID);
13944 
13945 	flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
13946 
13947 	/* Only enable credit flow control for WMI ctrl service */
13948 	if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
13949 	      conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
13950 	      conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
13951 		flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
13952 		disable_credit_flow_ctrl = 1;
13953 	}
13954 
13955 	if (!sc->hw_params.credit_flow) {
13956 		flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
13957 		disable_credit_flow_ctrl = 1;
13958 	}
13959 
13960 	req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
13961 	req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
13962 	    conn_req->service_id);
13963 
13964 	sc->ctl_resp = 0;
13965 
13966 	status = qwx_htc_send(htc, ATH11K_HTC_EP_0, m);
13967 	if (status) {
13968 		if (status != ENOBUFS)
13969 			m_freem(m);
13970 		return status;
13971 	}
13972 
13973 	while (!sc->ctl_resp) {
13974 		int ret = tsleep_nsec(&sc->ctl_resp, 0, "qwxhtcinit",
13975 		    SEC_TO_NSEC(1));
13976 		if (ret) {
13977 			printf("%s: Service connect timeout\n",
13978 			    sc->sc_dev.dv_xname);
13979 			return ret;
13980 		}
13981 	}
13982 
13983 	/* we controlled the buffer creation, it's aligned */
13984 	resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
13985 	message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
13986 	service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
13987 			       resp_msg->msg_svc_id);
13988 	if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
13989 	    (htc->control_resp_len < sizeof(*resp_msg))) {
13990 		printf("%s: Invalid resp message ID 0x%x", __func__,
13991 		    message_id);
13992 		return EPROTO;
13993 	}
13994 
13995 	DNPRINTF(QWX_D_HTC, "%s: service %s connect response status 0x%lx "
13996 	    "assigned ep 0x%lx\n", __func__, qwx_htc_service_name(service_id),
13997 	    FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
13998 	    FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
13999 
14000 	conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
14001 	    resp_msg->flags_len);
14002 
14003 	/* check response status */
14004 	if (conn_resp->connect_resp_code !=
14005 	    ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
14006 		printf("%s: HTC Service %s connect request failed: 0x%x)\n",
14007 		    __func__, qwx_htc_service_name(service_id),
14008 		    conn_resp->connect_resp_code);
14009 		return EPROTO;
14010 	}
14011 
14012 	assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
14013 	    HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len);
14014 
14015 	max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
14016 	    resp_msg->flags_len);
14017 setup:
14018 	if (assigned_eid >= ATH11K_HTC_EP_COUNT)
14019 		return EPROTO;
14020 
14021 	if (max_msg_size == 0)
14022 		return EPROTO;
14023 
14024 	ep = &htc->endpoint[assigned_eid];
14025 	ep->eid = assigned_eid;
14026 
14027 	if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
14028 		return EPROTO;
14029 
14030 	/* return assigned endpoint to caller */
14031 	conn_resp->eid = assigned_eid;
14032 	conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
14033 	    resp_msg->flags_len);
14034 
14035 	/* setup the endpoint */
14036 	ep->service_id = conn_req->service_id;
14037 	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
14038 	ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
14039 	    resp_msg->flags_len);
14040 	ep->tx_credits = tx_alloc;
14041 
14042 	/* copy all the callbacks */
14043 	ep->ep_ops = conn_req->ep_ops;
14044 
14045 	status = sc->ops.map_service_to_pipe(htc->sc, ep->service_id,
14046 	    &ep->ul_pipe_id, &ep->dl_pipe_id);
14047 	if (status)
14048 		return status;
14049 
14050 	DNPRINTF(QWX_D_HTC,
14051 	    "%s: htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
14052 	    __func__, qwx_htc_service_name(ep->service_id), ep->ul_pipe_id,
14053 	    ep->dl_pipe_id, ep->eid);
14054 
14055 	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
14056 		ep->tx_credit_flow_enabled = 0;
14057 		DNPRINTF(QWX_D_HTC,
14058 		    "%s: htc service '%s' eid %d tx flow control disabled\n",
14059 		    __func__, qwx_htc_service_name(ep->service_id),
14060 		    assigned_eid);
14061 	}
14062 
14063 	return status;
14064 }
14065 
14066 int
14067 qwx_htc_start(struct qwx_htc *htc)
14068 {
14069 	struct mbuf *m;
14070 	int status = 0;
14071 	struct qwx_softc *sc = htc->sc;
14072 	struct ath11k_htc_setup_complete_extended *msg;
14073 
14074 	m = qwx_htc_build_tx_ctrl_mbuf();
14075 	if (!m)
14076 		return ENOMEM;
14077 
14078 	m->m_len = m->m_pkthdr.len = sizeof(struct ath11k_htc_hdr) +
14079 	    sizeof(*msg);
14080 
14081 	msg = (struct ath11k_htc_setup_complete_extended *)(mtod(m, uint8_t *) +
14082 	    sizeof(struct ath11k_htc_hdr));
14083 	msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
14084 	    ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
14085 
14086 	if (sc->hw_params.credit_flow)
14087 		DNPRINTF(QWX_D_HTC, "%s: using tx credit flow control\n",
14088 		    __func__);
14089 	else
14090 		msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW;
14091 
14092 	status = qwx_htc_send(htc, ATH11K_HTC_EP_0, m);
14093 	if (status) {
14094 		m_freem(m);
14095 		return status;
14096 	}
14097 
14098 	return 0;
14099 }
14100 
14101 int
14102 qwx_htc_init(struct qwx_softc *sc)
14103 {
14104 	struct qwx_htc *htc = &sc->htc;
14105 	struct qwx_htc_svc_conn_req conn_req;
14106 	struct qwx_htc_svc_conn_resp conn_resp;
14107 	int ret;
14108 #ifdef notyet
14109 	spin_lock_init(&htc->tx_lock);
14110 #endif
14111 	qwx_htc_reset_endpoint_states(htc);
14112 
14113 	htc->sc = sc;
14114 
14115 	switch (sc->wmi.preferred_hw_mode) {
14116 	case WMI_HOST_HW_MODE_SINGLE:
14117 		htc->wmi_ep_count = 1;
14118 		break;
14119 	case WMI_HOST_HW_MODE_DBS:
14120 	case WMI_HOST_HW_MODE_DBS_OR_SBS:
14121 		htc->wmi_ep_count = 2;
14122 		break;
14123 	case WMI_HOST_HW_MODE_DBS_SBS:
14124 		htc->wmi_ep_count = 3;
14125 		break;
14126 	default:
14127 		htc->wmi_ep_count = sc->hw_params.max_radios;
14128 		break;
14129 	}
14130 
14131 	/* setup our pseudo HTC control endpoint connection */
14132 	memset(&conn_req, 0, sizeof(conn_req));
14133 	memset(&conn_resp, 0, sizeof(conn_resp));
14134 	conn_req.ep_ops.ep_tx_complete = qwx_htc_control_tx_complete;
14135 	conn_req.ep_ops.ep_rx_complete = qwx_htc_control_rx_complete;
14136 	conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
14137 	conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
14138 
14139 	/* connect fake service */
14140 	ret = qwx_htc_connect_service(htc, &conn_req, &conn_resp);
14141 	if (ret) {
14142 		printf("%s: could not connect to htc service (%d)\n",
14143 		    sc->sc_dev.dv_xname, ret);
14144 		return ret;
14145 	}
14146 
14147 	return 0;
14148 }
14149 
14150 int
14151 qwx_htc_setup_target_buffer_assignments(struct qwx_htc *htc)
14152 {
14153 	struct qwx_htc_svc_tx_credits *serv_entry;
14154 	uint32_t svc_id[] = {
14155 		ATH11K_HTC_SVC_ID_WMI_CONTROL,
14156 		ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
14157 		ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
14158 	};
14159 	int i, credits;
14160 
14161 	credits =  htc->total_transmit_credits;
14162 	serv_entry = htc->service_alloc_table;
14163 
14164 	if ((htc->wmi_ep_count == 0) ||
14165 	    (htc->wmi_ep_count > nitems(svc_id)))
14166 		return EINVAL;
14167 
14168 	/* Divide credits among number of endpoints for WMI */
14169 	credits = credits / htc->wmi_ep_count;
14170 	for (i = 0; i < htc->wmi_ep_count; i++) {
14171 		serv_entry[i].service_id = svc_id[i];
14172 		serv_entry[i].credit_allocation = credits;
14173 	}
14174 
14175 	return 0;
14176 }
14177 
/*
 * Wait for the target to deliver its HTC "ready" message and record the
 * advertised transmit credit count and credit size.
 *
 * The ready message is presumably deposited in htc->control_resp_buffer
 * by the control-endpoint RX handler, which sets sc->ctl_resp and wakes
 * us — TODO confirm against qwx_htc_control_rx_complete().
 * Returns 0 on success or an errno value.
 */
int
qwx_htc_wait_target(struct qwx_softc *sc)
{
	struct qwx_htc *htc = &sc->htc;
	int polling = 0, ret;
	uint16_t i;
	struct ath11k_htc_ready *ready;
	uint16_t message_id;
	uint16_t credit_count;
	uint16_t credit_size;

	sc->ctl_resp = 0;
	while (!sc->ctl_resp) {
		/* Sleep up to one second per attempt. */
		ret = tsleep_nsec(&sc->ctl_resp, 0, "qwxhtcinit",
		    SEC_TO_NSEC(1));
		if (ret) {
			/* Anything but a timeout is a hard error. */
			if (ret != EWOULDBLOCK)
				return ret;

			/* Second timeout: polling did not help, give up. */
			if (polling) {
				printf("%s: failed to receive control response "
				    "completion\n", sc->sc_dev.dv_xname);
				return ret;
			}

			/*
			 * Timed out once. In case an interrupt was missed,
			 * service all copy engines by hand and wait again.
			 */
			printf("%s: failed to receive control response "
			    "completion, polling...\n", sc->sc_dev.dv_xname);
			polling = 1;

			for (i = 0; i < sc->hw_params.ce_count; i++)
				qwx_ce_per_engine_service(sc, i);
		}
	}

	/* A truncated ready message cannot be parsed. */
	if (htc->control_resp_len < sizeof(*ready)) {
		printf("%s: Invalid HTC ready msg len:%d\n", __func__,
		    htc->control_resp_len);
		return EINVAL;
	}

	ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
	message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
	credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
	    ready->id_credit_count);
	credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);

	if (message_id != ATH11K_HTC_MSG_READY_ID) {
		printf("%s: Invalid HTC ready msg: 0x%x\n", __func__,
		    message_id);
		return EINVAL;
	}

	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	DNPRINTF(QWX_D_HTC, "%s: target ready total_transmit_credits %d "
	    "target_credit_size %d\n", __func__,
	    htc->total_transmit_credits, htc->target_credit_size);

	/* A target advertising zero credits or zero-sized ones is broken. */
	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		printf("%s: Invalid credit size received\n", __func__);
		return EINVAL;
	}

	/* For QCA6390, wmi endpoint uses 1 credit to avoid
	 * back-to-back write.
	 */
	if (sc->hw_params.supports_shadow_regs)
		htc->total_transmit_credits = 1;

	qwx_htc_setup_target_buffer_assignments(htc);

	return 0;
}
14253 
/*
 * HTC transmit-completion callback for the HTT data endpoint.
 * The message has been handed to the target; release the mbuf.
 */
void
qwx_dp_htt_htc_tx_complete(struct qwx_softc *sc, struct mbuf *m)
{
	m_freem(m);
}
14260 
14261 static inline void
14262 qwx_dp_get_mac_addr(uint32_t addr_l32, uint16_t addr_h16, uint8_t *addr)
14263 {
14264 #if 0 /* Not needed on OpenBSD? We do swapping in software... */
14265 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
14266 		addr_l32 = swab32(addr_l32);
14267 		addr_h16 = swab16(addr_h16);
14268 	}
14269 #endif
14270 	uint32_t val32;
14271 	uint16_t val16;
14272 
14273 	val32 = le32toh(addr_l32);
14274 	memcpy(addr, &val32, 4);
14275 	val16 = le16toh(addr_h16);
14276 	memcpy(addr + 4, &val16, IEEE80211_ADDR_LEN - 4);
14277 }
14278 
14279 void
14280 qwx_peer_map_event(struct qwx_softc *sc, uint8_t vdev_id, uint16_t peer_id,
14281     uint8_t *mac_addr, uint16_t ast_hash, uint16_t hw_peer_id)
14282 {
14283 	struct ieee80211com *ic = &sc->sc_ic;
14284 	struct ieee80211_node *ni;
14285 	struct qwx_node *nq;
14286 	struct ath11k_peer *peer;
14287 #ifdef notyet
14288 	spin_lock_bh(&ab->base_lock);
14289 #endif
14290 	ni = ieee80211_find_node(ic, mac_addr);
14291 	if (ni == NULL)
14292 		return;
14293 	nq = (struct qwx_node *)ni;
14294 	peer = &nq->peer;
14295 
14296 	peer->vdev_id = vdev_id;
14297 	peer->peer_id = peer_id;
14298 	peer->ast_hash = ast_hash;
14299 	peer->hw_peer_id = hw_peer_id;
14300 #if 0
14301 	ether_addr_copy(peer->addr, mac_addr);
14302 	list_add(&peer->list, &ab->peers);
14303 #endif
14304 	sc->peer_mapped = 1;
14305 	wakeup(&sc->peer_mapped);
14306 
14307 	DNPRINTF(QWX_D_HTT, "%s: peer map vdev %d peer %s id %d\n",
14308 	    __func__, vdev_id, ether_sprintf(mac_addr), peer_id);
14309 #ifdef notyet
14310 	spin_unlock_bh(&ab->base_lock);
14311 #endif
14312 }
14313 
14314 struct ieee80211_node *
14315 qwx_peer_find_by_id(struct qwx_softc *sc, uint16_t peer_id)
14316 {
14317 	struct ieee80211com *ic = &sc->sc_ic;
14318 	struct ieee80211_node *ni = NULL;
14319 	int s;
14320 
14321 	s = splnet();
14322 	RBT_FOREACH(ni, ieee80211_tree, &ic->ic_tree) {
14323 		struct qwx_node *nq = (struct qwx_node *)ni;
14324 		if (nq->peer.peer_id == peer_id)
14325 			break;
14326 	}
14327 	splx(s);
14328 
14329 	return ni;
14330 }
14331 
/*
 * Handle an HTT "peer unmap" event: the firmware has released the given
 * peer id. Wakes any thread sleeping on sc->peer_mapped.
 * Note: nothing is woken when the peer id is unknown.
 */
void
qwx_peer_unmap_event(struct qwx_softc *sc, uint16_t peer_id)
{
	struct ieee80211_node *ni;
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
	ni = qwx_peer_find_by_id(sc, peer_id);
	if (!ni) {
		printf("%s: peer-unmap-event: unknown peer id %d\n",
		    sc->sc_dev.dv_xname, peer_id);
		goto exit;
	}

	DNPRINTF(QWX_D_HTT, "%s: peer unmap peer %s id %d\n",
	    __func__, ether_sprintf(ni->ni_macaddr), peer_id);
#if 0
	list_del(&peer->list);
	kfree(peer);
#endif
	sc->peer_mapped = 1;
	wakeup(&sc->peer_mapped);
exit:
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
	return;
}
14360 
/*
 * HTC RX completion handler for the HTT data endpoint.
 * Decode one target-to-host (T2H) HTT message, dispatch on its type,
 * and free the mbuf.
 */
void
qwx_dp_htt_htc_t2h_msg_handler(struct qwx_softc *sc, struct mbuf *m)
{
	struct qwx_dp *dp = &sc->dp;
	struct htt_resp_msg *resp = mtod(m, struct htt_resp_msg *);
	/* Every T2H message carries its type in the first 32-bit word. */
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE,
	    *(uint32_t *)resp);
	uint16_t peer_id;
	uint8_t vdev_id;
	uint8_t mac_addr[IEEE80211_ADDR_LEN];
	uint16_t peer_mac_h16;
	uint16_t ast_hash;
	uint16_t hw_peer_id;

	DPRINTF("%s: dp_htt rx msg type: 0x%0x\n", __func__, type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		/* Record the target's HTT version and wake the waiter. */
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
		    resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
		    resp->version_msg.version);
		dp->htt_tgt_version_received = 1;
		wakeup(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		/* Peer map without AST hash / hw peer id; pass zeroes. */
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
		    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
		    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
		    resp->peer_map_ev.info1);
		qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
		    peer_mac_h16, mac_addr);
		qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		/* Extended peer map including AST hash and hw peer id. */
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
		    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
		    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
		    resp->peer_map_ev.info1);
		qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
		    peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
		    resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, ast_hash,
		    hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
		    resp->peer_unmap_ev.info);
		qwx_peer_unmap_event(sc, peer_id);
		break;
#if 0
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
#endif
	default:
		printf("%s: htt event %d not handled\n", __func__, type);
		break;
	}

	m_freem(m);
}
14440 
14441 int
14442 qwx_dp_htt_connect(struct qwx_dp *dp)
14443 {
14444 	struct qwx_htc_svc_conn_req conn_req;
14445 	struct qwx_htc_svc_conn_resp conn_resp;
14446 	int status;
14447 
14448 	memset(&conn_req, 0, sizeof(conn_req));
14449 	memset(&conn_resp, 0, sizeof(conn_resp));
14450 
14451 	conn_req.ep_ops.ep_tx_complete = qwx_dp_htt_htc_tx_complete;
14452 	conn_req.ep_ops.ep_rx_complete = qwx_dp_htt_htc_t2h_msg_handler;
14453 
14454 	/* connect to control service */
14455 	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
14456 
14457 	status = qwx_htc_connect_service(&dp->sc->htc, &conn_req, &conn_resp);
14458 
14459 	if (status)
14460 		return status;
14461 
14462 	dp->eid = conn_resp.eid;
14463 
14464 	return 0;
14465 }
14466 
14467 void
14468 qwx_dp_pdev_reo_cleanup(struct qwx_softc *sc)
14469 {
14470 	struct qwx_dp *dp = &sc->dp;
14471 	int i;
14472 
14473 	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
14474 		qwx_dp_srng_cleanup(sc, &dp->reo_dst_ring[i]);
14475 }
14476 
14477 int
14478 qwx_dp_pdev_reo_setup(struct qwx_softc *sc)
14479 {
14480 	struct qwx_dp *dp = &sc->dp;
14481 	int ret;
14482 	int i;
14483 
14484 	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
14485 		ret = qwx_dp_srng_setup(sc, &dp->reo_dst_ring[i],
14486 		    HAL_REO_DST, i, 0, DP_REO_DST_RING_SIZE);
14487 		if (ret) {
14488 			printf("%s: failed to setup reo_dst_ring\n", __func__);
14489 			qwx_dp_pdev_reo_cleanup(sc);
14490 			return ret;
14491 		}
14492 	}
14493 
14494 	return 0;
14495 }
14496 
14497 void
14498 qwx_dp_rx_pdev_srng_free(struct qwx_softc *sc, int mac_id)
14499 {
14500 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
14501 	int i;
14502 
14503 	qwx_dp_srng_cleanup(sc, &dp->rx_refill_buf_ring.refill_buf_ring);
14504 
14505 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
14506 		if (sc->hw_params.rx_mac_buf_ring)
14507 			qwx_dp_srng_cleanup(sc, &dp->rx_mac_buf_ring[i]);
14508 
14509 		qwx_dp_srng_cleanup(sc, &dp->rxdma_err_dst_ring[i]);
14510 		qwx_dp_srng_cleanup(sc,
14511 		    &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
14512 	}
14513 
14514 	qwx_dp_srng_cleanup(sc, &dp->rxdma_mon_buf_ring.refill_buf_ring);
14515 }
14516 
/*
 * Allocate the per-pdev receive SRNGs: the RXDMA refill ring, optional
 * per-rxdma MAC buffer rings, and the error destination rings. Monitor
 * mode rings are not yet ported (see the #if 0 blocks carried over from
 * Linux ath11k). Returns 0 on success or an errno value.
 */
int
qwx_dp_rx_pdev_srng_alloc(struct qwx_softc *sc)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
#if 0
	struct dp_srng *srng = NULL;
#endif
	int i;
	int ret;

	ret = qwx_dp_srng_setup(sc, &dp->rx_refill_buf_ring.refill_buf_ring,
	    HAL_RXDMA_BUF, 0, dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		printf("%s: failed to setup rx_refill_buf_ring\n",
		    sc->sc_dev.dv_xname);
		return ret;
	}

	/* Per-rxdma MAC buffer rings, only on chips which use them. */
	if (sc->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
			ret = qwx_dp_srng_setup(sc, &dp->rx_mac_buf_ring[i],
			    HAL_RXDMA_BUF, 1, dp->mac_id + i, 1024);
			if (ret) {
				printf("%s: failed to setup "
				    "rx_mac_buf_ring %d\n",
				    sc->sc_dev.dv_xname, i);
				return ret;
			}
		}
	}

	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ret = qwx_dp_srng_setup(sc, &dp->rxdma_err_dst_ring[i],
		    HAL_RXDMA_DST, 0, dp->mac_id + i,
		    DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			printf("%s: failed to setup rxdma_err_dst_ring %d\n",
			   sc->sc_dev.dv_xname, i);
			return ret;
		}
	}
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = qwx_dp_srng_setup(sc, srng, HAL_RXDMA_MONITOR_STATUS, 0,
		    dp->mac_id + i, DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			printf("%s: failed to setup "
			    "rx_mon_status_refill_ring %d\n",
			    sc->sc_dev.dv_xname, i);
			return ret;
		}
	}
#endif
	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdma_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!sc->hw_params.rxdma1_enable) {
		timeout_set(&sc->mon_reap_timer, qwx_dp_service_mon_ring, sc);
		return 0;
	}
#if 0
	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}
#endif
	return 0;
}
14611 
14612 void
14613 qwx_dp_rxdma_buf_ring_free(struct qwx_softc *sc, struct dp_rxdma_ring *rx_ring)
14614 {
14615 	int i;
14616 
14617 	for (i = 0; i < rx_ring->bufs_max; i++) {
14618 		struct qwx_rx_data *rx_data = &rx_ring->rx_data[i];
14619 
14620 		if (rx_data->map == NULL)
14621 			continue;
14622 
14623 		if (rx_data->m) {
14624 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
14625 			m_free(rx_data->m);
14626 			rx_data->m = NULL;
14627 		}
14628 
14629 		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
14630 		rx_data->map = NULL;
14631 	}
14632 
14633 	free(rx_ring->rx_data, M_DEVBUF,
14634 	    sizeof(rx_ring->rx_data[0]) * rx_ring->bufs_max);
14635 	rx_ring->rx_data = NULL;
14636 	rx_ring->bufs_max = 0;
14637 	memset(rx_ring->freemap, 0xff, sizeof(rx_ring->freemap));
14638 }
14639 
14640 void
14641 qwx_dp_rxdma_pdev_buf_free(struct qwx_softc *sc, int mac_id)
14642 {
14643 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
14644 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
14645 	int i;
14646 
14647 	qwx_dp_rxdma_buf_ring_free(sc, rx_ring);
14648 
14649 	rx_ring = &dp->rxdma_mon_buf_ring;
14650 	qwx_dp_rxdma_buf_ring_free(sc, rx_ring);
14651 
14652 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
14653 		rx_ring = &dp->rx_mon_status_refill_ring[i];
14654 		qwx_dp_rxdma_buf_ring_free(sc, rx_ring);
14655 	}
14656 }
14657 
14658 void
14659 qwx_hal_rx_buf_addr_info_set(void *desc, uint64_t paddr, uint32_t cookie,
14660     uint8_t manager)
14661 {
14662 	struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
14663 	uint32_t paddr_lo, paddr_hi;
14664 
14665 	paddr_lo = paddr & 0xffffffff;
14666 	paddr_hi = paddr >> 32;
14667 	binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
14668 	binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
14669 	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
14670 	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
14671 }
14672 
14673 void
14674 qwx_hal_rx_buf_addr_info_get(void *desc, uint64_t *paddr, uint32_t *cookie,
14675     uint8_t *rbm)
14676 {
14677 	struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
14678 
14679 	*paddr = (((uint64_t)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
14680 	    binfo->info1)) << 32) |
14681 	    FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
14682 	*cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
14683 	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
14684 }
14685 
14686 int
14687 qwx_next_free_rxbuf_idx(struct dp_rxdma_ring *rx_ring)
14688 {
14689 	int i, idx;
14690 
14691 	for (i = 0; i < nitems(rx_ring->freemap); i++) {
14692 		idx = ffs(rx_ring->freemap[i]);
14693 		if (idx > 0)
14694 			return ((idx - 1) + (i * 8));
14695 	}
14696 
14697 	return -1;
14698 }
14699 
/*
 * Refill an RXDMA ring with up to req_entries receive buffers.
 * For each buffer: allocate a cluster mbuf, DMA-map it, claim a free
 * slot in the ring's bitmap, and publish its DMA address and cookie
 * through a source-ring descriptor. A req_entries of 0 means "top up
 * if the ring is more than 3/4 empty". Returns 0 on success or ENOBUFS
 * when allocation, mapping, or descriptor space runs out.
 */
int
qwx_dp_rxbufs_replenish(struct qwx_softc *sc, int mac_id,
    struct dp_rxdma_ring *rx_ring, int req_entries,
    enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	uint32_t *desc;
	struct mbuf *m;
	int num_free;
	int num_remain;
	int ret, idx;
	uint32_t cookie;
	uint64_t paddr;
	struct qwx_rx_data *rx_data;

	req_entries = MIN(req_entries, rx_ring->bufs_max);

	srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	num_free = qwx_hal_srng_src_num_free(sc, srng, 1);
	/* With no explicit request, refill only a mostly-empty ring. */
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	/* Never queue more buffers than the ring has descriptors free. */
	req_entries = MIN(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		const size_t size = DP_RX_BUFFER_SIZE;

		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			goto fail_free_mbuf;

		/* Attach cluster storage large enough for one RX buffer. */
		if (size <= MCLBYTES)
			MCLGET(m, M_DONTWAIT);
		else
			MCLGETL(m, M_DONTWAIT, size);
		if ((m->m_flags & M_EXT) == 0)
			goto fail_free_mbuf;

		m->m_len = m->m_pkthdr.len = size;

		idx = qwx_next_free_rxbuf_idx(rx_ring);
		if (idx == -1)
			goto fail_free_mbuf;

		rx_data = &rx_ring->rx_data[idx];
		/* DMA maps are created lazily and reused across refills. */
		if (rx_data->map == NULL) {
			ret = bus_dmamap_create(sc->sc_dmat, size, 1,
			    size, 0, BUS_DMA_NOWAIT, &rx_data->map);
			if (ret)
				goto fail_free_mbuf;
		}

		ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (ret) {
			printf("%s: can't map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, ret);
			goto fail_free_mbuf;
		}

		desc = qwx_hal_srng_src_get_next_entry(sc, srng);
		if (!desc)
			goto fail_dma_unmap;

		/* Ownership of the mbuf passes to the ring slot. */
		rx_data->m = m;
		m = NULL;

		/* The cookie identifies pdev and slot on RX completion. */
		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
		    FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, idx);

		clrbit(rx_ring->freemap, idx);
		num_remain--;

		paddr = rx_data->map->dm_segs[0].ds_addr;
		qwx_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return 0;

fail_dma_unmap:
	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
fail_free_mbuf:
	/* m may be NULL here (e.g. no free slot); m_free handles that. */
	m_free(m);

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ENOBUFS;
}
14800 
14801 int
14802 qwx_dp_rxdma_ring_buf_setup(struct qwx_softc *sc,
14803     struct dp_rxdma_ring *rx_ring, uint32_t ringtype)
14804 {
14805 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
14806 	int num_entries;
14807 
14808 	num_entries = rx_ring->refill_buf_ring.size /
14809 	    qwx_hal_srng_get_entrysize(sc, ringtype);
14810 
14811 	KASSERT(rx_ring->rx_data == NULL);
14812 	rx_ring->rx_data = mallocarray(num_entries, sizeof(rx_ring->rx_data[0]),
14813 	    M_DEVBUF, M_NOWAIT | M_ZERO);
14814 	if (rx_ring->rx_data == NULL)
14815 		return ENOMEM;
14816 
14817 	rx_ring->bufs_max = num_entries;
14818 	memset(rx_ring->freemap, 0xff, sizeof(rx_ring->freemap));
14819 
14820 	return qwx_dp_rxbufs_replenish(sc, dp->mac_id, rx_ring, num_entries,
14821 	    sc->hw_params.hal_params->rx_buf_rbm);
14822 }
14823 
14824 int
14825 qwx_dp_rxdma_pdev_buf_setup(struct qwx_softc *sc)
14826 {
14827 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
14828 	struct dp_rxdma_ring *rx_ring;
14829 	int ret;
14830 #if 0
14831 	int i;
14832 #endif
14833 
14834 	rx_ring = &dp->rx_refill_buf_ring;
14835 	ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring, HAL_RXDMA_BUF);
14836 	if (ret)
14837 		return ret;
14838 
14839 	if (sc->hw_params.rxdma1_enable) {
14840 		rx_ring = &dp->rxdma_mon_buf_ring;
14841 		ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring,
14842 		    HAL_RXDMA_MONITOR_BUF);
14843 		if (ret)
14844 			return ret;
14845 	}
14846 #if 0
14847 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
14848 		rx_ring = &dp->rx_mon_status_refill_ring[i];
14849 		ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring,
14850 		    HAL_RXDMA_MONITOR_STATUS);
14851 		if (ret)
14852 			return ret;
14853 	}
14854 #endif
14855 	return 0;
14856 }
14857 
/*
 * Release all per-pdev RX data path state: first the SRNG rings, then
 * the receive buffers and DMA maps that were attached to them.
 */
void
qwx_dp_rx_pdev_free(struct qwx_softc *sc, int mac_id)
{
	qwx_dp_rx_pdev_srng_free(sc, mac_id);
	qwx_dp_rxdma_pdev_buf_free(sc, mac_id);
}
14864 
14865 bus_addr_t
14866 qwx_hal_srng_get_hp_addr(struct qwx_softc *sc, struct hal_srng *srng)
14867 {
14868 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
14869 		return 0;
14870 
14871 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
14872 		return sc->hal.wrp.paddr +
14873 		    ((unsigned long)srng->u.src_ring.hp_addr -
14874 		    (unsigned long)sc->hal.wrp.vaddr);
14875 	} else {
14876 		return sc->hal.rdp.paddr +
14877 		    ((unsigned long)srng->u.dst_ring.hp_addr -
14878 		    (unsigned long)sc->hal.rdp.vaddr);
14879 	}
14880 }
14881 
14882 bus_addr_t
14883 qwx_hal_srng_get_tp_addr(struct qwx_softc *sc, struct hal_srng *srng)
14884 {
14885 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
14886 		return 0;
14887 
14888 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
14889 		return sc->hal.rdp.paddr +
14890 		    ((unsigned long)srng->u.src_ring.tp_addr -
14891 		    (unsigned long)sc->hal.rdp.vaddr);
14892 	} else {
14893 		return sc->hal.wrp.paddr +
14894 		    ((unsigned long)srng->u.dst_ring.tp_addr -
14895 		    (unsigned long)sc->hal.wrp.vaddr);
14896 	}
14897 }
14898 
/*
 * Translate a HAL ring type and ring id into the HTT ring type and
 * ring id which firmware expects in SRING_SETUP commands.
 * Returns 0 on success, or EINVAL for rings HTT cannot address.
 */
int
qwx_dp_tx_get_ring_id_type(struct qwx_softc *sc, int mac_id, uint32_t ring_id,
    enum hal_ring_type ring_type, enum htt_srng_ring_type *htt_ring_type,
    enum htt_srng_ring_id *htt_ring_id)
{
	int lmac_ring_id_offset = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;

		/* for QCA6390, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!sc->hw_params.rx_mac_buf_ring) {
			/* Only this MAC's SW2RXDMA buffer rings are valid. */
			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
			    lmac_ring_id_offset) ||
			    ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
			    lmac_ring_id_offset)))
				return EINVAL;
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		printf("%s: Unsupported ring type in DP :%d\n",
		    sc->sc_dev.dv_xname, ring_type);
		return EINVAL;
	}

	return 0;
}
14959 
/*
 * Describe one host SRNG to the firmware by sending an
 * HTT_H2T_MSG_TYPE_SRING_SETUP command, so that firmware and host agree
 * on the ring's base address, entry size, head/tail pointer locations,
 * and MSI/interrupt moderation parameters.
 *
 * Returns 0 on success or an errno; the command mbuf is freed on error.
 */
int
qwx_dp_tx_htt_srng_setup(struct qwx_softc *sc, uint32_t ring_id, int mac_id,
    enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &sc->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct mbuf *m;
	uint32_t ring_entry_sz;
	uint64_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	m = qwx_htc_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	/* Snapshot the ring's HAL parameters (base paddr, size, flags...). */
	memset(&params, 0, sizeof(params));
	qwx_hal_srng_get_params(sc, srng, &params);

	/* Physical addresses of the head/tail pointers shared with hw. */
	hp_addr = qwx_hal_srng_get_hp_addr(sc, srng);
	tp_addr = qwx_hal_srng_get_tp_addr(sc, srng);

	/* Translate the HAL ring identity into HTT message terms. */
	ret = qwx_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
	    ring_type, &htt_ring_type, &htt_ring_id);
	if (ret)
		goto err_free;

	/* The HTT command payload follows the HTC header in the mbuf. */
	cmd = (struct htt_srng_setup_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
	    HTT_H2T_MSG_TYPE_SRING_SETUP);
	/* Rings shared with hardware use the firmware's pdev id encoding. */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
		    DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
		    mac_id);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
	    htt_ring_type);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);

	/* 64-bit ring base address is split across two 32-bit fields. */
	cmd->ring_base_addr_lo = params.ring_base_paddr & HAL_ADDR_LSB_REG_MASK;

	cmd->ring_base_addr_hi = (uint64_t)params.ring_base_paddr >>
	    HAL_ADDR_MSB_REG_SHIFT;

	ring_entry_sz = qwx_hal_srng_get_entrysize(sc, ring_type);

	/* HTT expects entry size and ring size in 32-bit words. */
	ring_entry_sz >>= 2;
	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
	    ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
	    params.num_entries * ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
	cmd->info1 |= FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;

	/* Tell firmware where the shared head/tail pointers live. */
	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_head_off32_remote_addr_hi = hp_addr >> HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_tail_off32_remote_addr_hi = tp_addr >> HAL_ADDR_MSB_REG_SHIFT;

	/* NOTE(review): only the low 32 bits of the MSI address are
	 * programmed; hi is hardwired to 0 here — presumably MSI addresses
	 * are below 4G on supported platforms. */
	cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
	cmd->ring_msi_addr_hi = 0;
	cmd->msi_data = params.msi_data;

	/* Interrupt moderation: batch counter and timer thresholds. */
	cmd->intr_info = FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
	    params.intr_batch_cntr_thres_entries * ring_entry_sz);
	cmd->intr_info |= FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
	    params.intr_timer_thres_us >> 3);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = FIELD_PREP(
		    HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
		    params.low_threshold);
	}

	DNPRINTF(QWX_D_HTT, "%s: htt srng setup msi_addr_lo 0x%x "
	    "msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d "
	    "intr_info 0x%x flags 0x%x\n", __func__, cmd->ring_msi_addr_lo,
	    cmd->ring_msi_addr_hi, cmd->msi_data, ring_id, ring_type,
	    cmd->intr_info, cmd->info2);

	/* On success the HTC layer owns the mbuf. */
	ret = qwx_htc_send(&sc->htc, sc->dp.eid, m);
	if (ret)
		goto err_free;

	return 0;

err_free:
	m_freem(m);

	return ret;
}
15067 
15068 int
15069 qwx_dp_tx_htt_h2t_ppdu_stats_req(struct qwx_softc *sc, uint32_t mask,
15070     uint8_t pdev_id)
15071 {
15072 	struct qwx_dp *dp = &sc->dp;
15073 	struct mbuf *m;
15074 	struct htt_ppdu_stats_cfg_cmd *cmd;
15075 	int len = sizeof(*cmd);
15076 	uint8_t pdev_mask;
15077 	int ret;
15078 	int i;
15079 
15080 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
15081 		m = qwx_htc_alloc_mbuf(len);
15082 		if (!m)
15083 			return ENOMEM;
15084 
15085 		cmd = (struct htt_ppdu_stats_cfg_cmd *)(mtod(m, uint8_t *) +
15086 		    sizeof(struct ath11k_htc_hdr));
15087 		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
15088 				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
15089 
15090 		pdev_mask = 1 << (pdev_id + i);
15091 		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
15092 		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK,
15093 		    mask);
15094 
15095 		ret = qwx_htc_send(&sc->htc, dp->eid, m);
15096 		if (ret) {
15097 			m_freem(m);
15098 			return ret;
15099 		}
15100 	}
15101 
15102 	return 0;
15103 }
15104 
/*
 * Program firmware's RX packet filter for one receive ring by sending an
 * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG command. 'tlv_filter' selects
 * which packet types and TLVs firmware delivers into the ring, and
 * 'rx_buf_size' tells firmware how large the host's RX buffers are.
 * Returns 0 or an errno; the command mbuf is freed on error.
 */
int
qwx_dp_tx_htt_rx_filter_setup(struct qwx_softc *sc, uint32_t ring_id,
    int mac_id, enum hal_ring_type ring_type, size_t rx_buf_size,
    struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &sc->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct mbuf *m;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	m = qwx_htc_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	/* Only the ring's flags are consumed below (swap settings). */
	memset(&params, 0, sizeof(params));
	qwx_hal_srng_get_params(sc, srng, &params);

	/* Translate the HAL ring identity into HTT message terms. */
	ret = qwx_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
	    ring_type, &htt_ring_type, &htt_ring_id);
	if (ret)
		goto err_free;

	/* Command payload follows the HTC header in the mbuf. */
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
	    HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
	/* Rings shared with hardware use the firmware's pdev id encoding. */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING) {
		cmd->info0 |=
		    FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
		    DP_SW2HW_MACID(mac_id));
	} else {
		cmd->info0 |=
		    FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
		    mac_id);
	}
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
	    htt_ring_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
	    !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
	    !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));

	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
	    rx_buf_size);
	/* Copy the caller's packet-type and TLV filter configuration. */
	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
	cmd->rx_filter_tlv = tlv_filter->rx_filter;

	/* On success the HTC layer owns the mbuf. */
	ret = qwx_htc_send(&sc->htc, sc->dp.eid, m);
	if (ret)
		goto err_free;

	return 0;

err_free:
	m_freem(m);

	return ret;
}
15171 
/*
 * Allocate and configure per-pdev RX data-path resources: allocate the
 * pdev's RX SRNGs, fill the RXDMA buffer ring, and register each ring
 * with firmware via HTT SRING_SETUP commands. Returns 0 or an errno.
 * Monitor-mode ring setup is not yet ported (see #if 0 sections kept
 * from the Linux ath11k driver).
 */
int
qwx_dp_rx_pdev_alloc(struct qwx_softc *sc, int mac_id)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	uint32_t ring_id;
	int i;
	int ret;

	ret = qwx_dp_rx_pdev_srng_alloc(sc);
	if (ret) {
		printf("%s: failed to setup rx srngs: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_dp_rxdma_pdev_buf_setup(sc);
	if (ret) {
		printf("%s: failed to setup rxdma ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Announce the RX refill buffer ring to firmware. */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = qwx_dp_tx_htt_srng_setup(sc, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		printf("%s: failed to configure rx_refill_buf_ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Chips with per-MAC RX buffer rings need one setup per rxdma. */
	if (sc->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = qwx_dp_tx_htt_srng_setup(sc, ring_id,
			    mac_id + i, HAL_RXDMA_BUF);
			if (ret) {
				printf("%s: failed to configure "
				    "rx_mac_buf_ring%d: %d\n",
				    sc->sc_dev.dv_xname, i, ret);
				return ret;
			}
		}
	}

	/* One error destination ring per rxdma instance. */
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = qwx_dp_tx_htt_srng_setup(sc, ring_id, mac_id + i,
		    HAL_RXDMA_DST);
		if (ret) {
			printf("%s: failed to configure "
			    "rxdma_err_dest_ring%d %d\n",
			    sc->sc_dev.dv_xname, i, ret);
			return ret;
		}
	}

	if (!sc->hw_params.rxdma1_enable)
		goto config_refill_ring;
#if 0
	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
#endif
config_refill_ring:
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ret = qwx_dp_tx_htt_srng_setup(sc,
		    dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id,
		    mac_id + i, HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			printf("%s: failed to configure "
			    "mon_status_refill_ring%d %d\n",
			    sc->sc_dev.dv_xname, i, ret);
			return ret;
		}
	}
#endif
	return 0;
}
15272 
15273 void
15274 qwx_dp_pdev_free(struct qwx_softc *sc)
15275 {
15276 	int i;
15277 
15278 	timeout_del(&sc->mon_reap_timer);
15279 
15280 	for (i = 0; i < sc->num_radios; i++)
15281 		qwx_dp_rx_pdev_free(sc, i);
15282 }
15283 
15284 int
15285 qwx_dp_pdev_alloc(struct qwx_softc *sc)
15286 {
15287 	int ret;
15288 	int i;
15289 
15290 	for (i = 0; i < sc->num_radios; i++) {
15291 		ret = qwx_dp_rx_pdev_alloc(sc, i);
15292 		if (ret) {
15293 			printf("%s: failed to allocate pdev rx "
15294 			    "for pdev_id %d\n", sc->sc_dev.dv_xname, i);
15295 			goto err;
15296 		}
15297 	}
15298 
15299 	return 0;
15300 
15301 err:
15302 	qwx_dp_pdev_free(sc);
15303 
15304 	return ret;
15305 }
15306 
/*
 * Query the firmware's HTT interface version and wait for the reply.
 * The version-reply event handler sets dp->htt_tgt_version_received and
 * wakes us up; we sleep up to 3 seconds for it. Returns 0 on success,
 * ENOMEM/ETIMEDOUT/ENOTSUP or an HTC send error otherwise.
 * Must be called from process context (sleeps).
 */
int
qwx_dp_tx_htt_h2t_ver_req_msg(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct mbuf *m;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	/* Reset the flag before sending; the reply handler sets it. */
	dp->htt_tgt_version_received = 0;

	m = qwx_htc_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	/* Command payload follows the HTC header in the mbuf. */
	cmd = (struct htt_ver_req_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
	    HTT_H2T_MSG_TYPE_VERSION_REQ);

	ret = qwx_htc_send(&sc->htc, dp->eid, m);
	if (ret) {
		m_freem(m);
		return ret;
	}

	/* Wait for the version reply; any sleep error maps to a timeout. */
	while (!dp->htt_tgt_version_received) {
		ret = tsleep_nsec(&dp->htt_tgt_version_received, 0,
		    "qwxtgtver", SEC_TO_NSEC(3));
		if (ret)
			return ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		printf("%s: unsupported htt major version %d "
		    "supported version is %d\n", __func__,
		    dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return ENOTSUP;
	}

	return 0;
}
15349 
15350 void
15351 qwx_dp_update_vdev_search(struct qwx_softc *sc, struct qwx_vif *arvif)
15352 {
15353 	 /* When v2_map_support is true:for STA mode, enable address
15354 	  * search index, tcl uses ast_hash value in the descriptor.
15355 	  * When v2_map_support is false: for STA mode, don't enable
15356 	  * address search index.
15357 	  */
15358 	switch (arvif->vdev_type) {
15359 	case WMI_VDEV_TYPE_STA:
15360 		if (sc->hw_params.htt_peer_map_v2) {
15361 			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
15362 			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
15363 		} else {
15364 			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
15365 			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
15366 		}
15367 		break;
15368 	case WMI_VDEV_TYPE_AP:
15369 	case WMI_VDEV_TYPE_IBSS:
15370 		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
15371 		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
15372 		break;
15373 	case WMI_VDEV_TYPE_MONITOR:
15374 	default:
15375 		return;
15376 	}
15377 }
15378 
15379 void
15380 qwx_dp_vdev_tx_attach(struct qwx_softc *sc, struct qwx_pdev *pdev,
15381     struct qwx_vif *arvif)
15382 {
15383 	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
15384 	    FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID, arvif->vdev_id) |
15385 	    FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID, pdev->pdev_id);
15386 
15387 	/* set HTT extension valid bit to 0 by default */
15388 	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
15389 
15390 	qwx_dp_update_vdev_search(sc, arvif);
15391 }
15392 
15393 void
15394 qwx_dp_tx_status_parse(struct qwx_softc *sc, struct hal_wbm_release_ring *desc,
15395     struct hal_tx_status *ts)
15396 {
15397 	ts->buf_rel_source = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
15398 	    desc->info0);
15399 	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
15400 	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
15401 		return;
15402 
15403 	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
15404 		return;
15405 
15406 	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
15407 	    desc->info0);
15408 	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
15409 	    desc->info1);
15410 	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
15411 	    desc->info1);
15412 	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
15413 	    desc->info2);
15414 	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
15415 	    ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
15416 	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
15417 	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
15418 	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
15419 		ts->rate_stats = desc->rate_stats.info0;
15420 	else
15421 		ts->rate_stats = 0;
15422 }
15423 
15424 void
15425 qwx_dp_tx_free_txbuf(struct qwx_softc *sc, int msdu_id,
15426     struct dp_tx_ring *tx_ring)
15427 {
15428 	struct qwx_tx_data *tx_data;
15429 
15430 	if (msdu_id >= sc->hw_params.tx_ring_size)
15431 		return;
15432 
15433 	tx_data = &tx_ring->data[msdu_id];
15434 
15435 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
15436 	m_freem(tx_data->m);
15437 	tx_data->m = NULL;
15438 
15439 	if (tx_ring->queued > 0)
15440 		tx_ring->queued--;
15441 }
15442 
15443 void
15444 qwx_dp_tx_htt_tx_complete_buf(struct qwx_softc *sc, struct dp_tx_ring *tx_ring,
15445     struct qwx_dp_htt_wbm_tx_status *ts)
15446 {
15447 	/* Not using Tx status info for now. Just free the buffer. */
15448 	qwx_dp_tx_free_txbuf(sc, ts->msdu_id, tx_ring);
15449 }
15450 
15451 void
15452 qwx_dp_tx_process_htt_tx_complete(struct qwx_softc *sc, void *desc,
15453     uint8_t mac_id, uint32_t msdu_id, struct dp_tx_ring *tx_ring)
15454 {
15455 	struct htt_tx_wbm_completion *status_desc;
15456 	struct qwx_dp_htt_wbm_tx_status ts = {0};
15457 	enum hal_wbm_htt_tx_comp_status wbm_status;
15458 
15459 	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
15460 
15461 	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
15462 	    status_desc->info0);
15463 
15464 	switch (wbm_status) {
15465 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
15466 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
15467 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
15468 		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
15469 		ts.msdu_id = msdu_id;
15470 		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
15471 		    status_desc->info1);
15472 
15473 		if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
15474 			ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
15475 			    status_desc->info2);
15476 		else
15477 			ts.peer_id = HTT_INVALID_PEER_ID;
15478 
15479 		qwx_dp_tx_htt_tx_complete_buf(sc, tx_ring, &ts);
15480 		break;
15481 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
15482 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
15483 		qwx_dp_tx_free_txbuf(sc, msdu_id, tx_ring);
15484 		break;
15485 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
15486 		/* This event is to be handled only when the driver decides to
15487 		 * use WDS offload functionality.
15488 		 */
15489 		break;
15490 	default:
15491 		printf("%s: Unknown htt tx status %d\n",
15492 		    sc->sc_dev.dv_xname, wbm_status);
15493 		break;
15494 	}
15495 }
15496 
15497 int
15498 qwx_mac_hw_ratecode_to_legacy_rate(struct ieee80211_node *ni, uint8_t hw_rc,
15499     uint8_t preamble, uint8_t *rateidx, uint16_t *rate)
15500 {
15501 	struct ieee80211_rateset *rs = &ni->ni_rates;
15502 	int i;
15503 
15504 	if (preamble == WMI_RATE_PREAMBLE_CCK) {
15505 		hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
15506 		switch (hw_rc) {
15507 			case ATH11K_HW_RATE_CCK_LP_1M:
15508 				*rate = 2;
15509 				break;
15510 			case ATH11K_HW_RATE_CCK_LP_2M:
15511 			case ATH11K_HW_RATE_CCK_SP_2M:
15512 				*rate = 4;
15513 				break;
15514 			case ATH11K_HW_RATE_CCK_LP_5_5M:
15515 			case ATH11K_HW_RATE_CCK_SP_5_5M:
15516 				*rate = 11;
15517 				break;
15518 			case ATH11K_HW_RATE_CCK_LP_11M:
15519 			case ATH11K_HW_RATE_CCK_SP_11M:
15520 				*rate = 22;
15521 				break;
15522 			default:
15523 				return EINVAL;
15524 		}
15525 	} else {
15526 		switch (hw_rc) {
15527 			case ATH11K_HW_RATE_OFDM_6M:
15528 				*rate = 12;
15529 				break;
15530 			case ATH11K_HW_RATE_OFDM_9M:
15531 				*rate = 18;
15532 				break;
15533 			case ATH11K_HW_RATE_OFDM_12M:
15534 				*rate = 24;
15535 				break;
15536 			case ATH11K_HW_RATE_OFDM_18M:
15537 				*rate = 36;
15538 				break;
15539 			case ATH11K_HW_RATE_OFDM_24M:
15540 				*rate = 48;
15541 				break;
15542 			case ATH11K_HW_RATE_OFDM_36M:
15543 				*rate = 72;
15544 				break;
15545 			case ATH11K_HW_RATE_OFDM_48M:
15546 				*rate = 96;
15547 				break;
15548 			case ATH11K_HW_RATE_OFDM_54M:
15549 				*rate = 104;
15550 				break;
15551 			default:
15552 				return EINVAL;
15553 		}
15554 	}
15555 
15556 	for (i = 0; i < rs->rs_nrates; i++) {
15557 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
15558 		if (rval == *rate) {
15559 			*rateidx = i;
15560 			return 0;
15561 		}
15562 	}
15563 
15564 	return EINVAL;
15565 }
15566 
/*
 * Finish a TQM-released TX completion: unmap and free the transmitted
 * frame, feed the reported TX rate back into the node's rate selection,
 * release the node reference held by the TX path, and account for the
 * frame leaving the ring.
 */
void
qwx_dp_tx_complete_msdu(struct qwx_softc *sc, struct dp_tx_ring *tx_ring,
    uint32_t msdu_id, struct hal_tx_status *ts)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_tx_data *tx_data = &tx_ring->data[msdu_id];
	uint8_t pkt_type, mcs, rateidx;
	uint16_t rate;

	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM) {
		/* Must not happen */
		return;
	}

	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
	m_freem(tx_data->m);
	tx_data->m = NULL;

	/* Feed the firmware-reported rate back to net80211.
	 * NOTE(review): assumes tx_data->ni is non-NULL for every queued
	 * frame — presumably guaranteed by the TX enqueue path; verify. */
	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, ts->rate_stats);
	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, ts->rate_stats);
	if (qwx_mac_hw_ratecode_to_legacy_rate(tx_data->ni, mcs, pkt_type,
	    &rateidx, &rate) == 0)
		tx_data->ni->ni_txrate = rateidx;

	/* Drop the node reference taken when the frame was queued. */
	ieee80211_release_node(ic, tx_data->ni);
	tx_data->ni = NULL;

	if (tx_ring->queued > 0)
		tx_ring->queued--;
}
15597 
15598 #define QWX_TX_COMPL_NEXT(x)	(((x) + 1) % DP_TX_COMP_RING_SIZE)
15599 
/*
 * Drain one TX completion (WBM) ring. Completion descriptors are first
 * copied from the hardware ring into the tx_status[] circular buffer
 * (bounded by DP_TX_COMP_RING_SIZE), then parsed: firmware-released
 * entries go to the HTT completion path, TQM-released entries complete
 * normal MSDUs. Finally, if the TX ring has drained below capacity,
 * the interface output queue is restarted. Always returns 0.
 */
int
qwx_dp_tx_completion_handler(struct qwx_softc *sc, int ring_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct qwx_dp *dp = &sc->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &sc->hal.srng_list[hal_ring_id];
	struct hal_tx_status ts = { 0 };
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	uint32_t *desc;
	uint32_t msdu_id;
	uint8_t mac_id;
#ifdef notyet
	spin_lock_bh(&status_ring->lock);
#endif
	qwx_hal_srng_access_begin(sc, status_ring);

	/* Stage 1: copy descriptors out of the hw ring while space
	 * remains in the local tx_status[] circular buffer. */
	while ((QWX_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
		tx_ring->tx_status_tail) &&
	       (desc = qwx_hal_srng_dst_get_next_entry(sc, status_ring))) {
		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head], desc,
		    sizeof(struct hal_wbm_release_ring));
		tx_ring->tx_status_head =
		    QWX_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}
#if 0
	if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
		     (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
		      tx_ring->tx_status_tail))) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}
#endif
	qwx_hal_srng_access_end(sc, status_ring);
#ifdef notyet
	spin_unlock_bh(&status_ring->lock);
#endif
	/* Stage 2: parse and complete the buffered descriptors. */
	while (QWX_TX_COMPL_NEXT(tx_ring->tx_status_tail) !=
	    tx_ring->tx_status_head) {
		struct hal_wbm_release_ring *tx_status;
		uint32_t desc_id;

		tx_ring->tx_status_tail =
		   QWX_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		qwx_dp_tx_status_parse(sc, tx_status, &ts);

		/* The SW cookie encodes which mac and msdu this was. */
		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
		    tx_status->buf_addr_info.info1);
		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
		if (mac_id >= MAX_RADIOS)
			continue;
		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
		if (msdu_id >= sc->hw_params.tx_ring_size)
			continue;

		/* Firmware-released buffers take the HTT completion path. */
		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
			qwx_dp_tx_process_htt_tx_complete(sc,
			    (void *)tx_status, mac_id, msdu_id, tx_ring);
			continue;
		}
#if 0
		spin_lock(&tx_ring->tx_idr_lock);
		msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
		if (unlikely(!msdu)) {
			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
				    msdu_id);
			spin_unlock(&tx_ring->tx_idr_lock);
			continue;
		}

		spin_unlock(&tx_ring->tx_idr_lock);
		ar = ab->pdevs[mac_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);
#endif
		qwx_dp_tx_complete_msdu(sc, tx_ring, msdu_id, &ts);
	}

	/* Restart the output queue once the ring has room again. */
	if (tx_ring->queued < sc->hw_params.tx_ring_size - 1) {
		sc->qfullmsk &= ~(1 << ring_id);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			(*ifp->if_start)(ifp);
		}
	}

	return 0;
}
15691 
15692 void
15693 qwx_hal_rx_reo_ent_paddr_get(struct qwx_softc *sc, void *desc, uint64_t *paddr,
15694     uint32_t *desc_bank)
15695 {
15696 	struct ath11k_buffer_addr *buff_addr = desc;
15697 
15698 	*paddr = ((uint64_t)(FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
15699 	    buff_addr->info1)) << 32) |
15700 	    FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);
15701 
15702 	*desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
15703 }
15704 
15705 int
15706 qwx_hal_desc_reo_parse_err(struct qwx_softc *sc, uint32_t *rx_desc,
15707     uint64_t *paddr, uint32_t *desc_bank)
15708 {
15709 	struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
15710 	enum hal_reo_dest_ring_push_reason push_reason;
15711 	enum hal_reo_dest_ring_error_code err_code;
15712 
15713 	push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
15714 	    desc->info0);
15715 	err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
15716 	    desc->info0);
15717 #if 0
15718 	ab->soc_stats.reo_error[err_code]++;
15719 #endif
15720 	if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
15721 	    push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
15722 		printf("%s: expected error push reason code, received %d\n",
15723 		    sc->sc_dev.dv_xname, push_reason);
15724 		return EINVAL;
15725 	}
15726 
15727 	if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
15728 	    HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
15729 		printf("%s: expected buffer type link_desc",
15730 		    sc->sc_dev.dv_xname);
15731 		return EINVAL;
15732 	}
15733 
15734 	qwx_hal_rx_reo_ent_paddr_get(sc, rx_desc, paddr, desc_bank);
15735 
15736 	return 0;
15737 }
15738 
15739 void
15740 qwx_hal_rx_msdu_link_info_get(void *link_desc, uint32_t *num_msdus,
15741     uint32_t *msdu_cookies, enum hal_rx_buf_return_buf_manager *rbm)
15742 {
15743 	struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc;
15744 	struct hal_rx_msdu_details *msdu;
15745 	int i;
15746 
15747 	*num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
15748 
15749 	msdu = &link->msdu_link[0];
15750 	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
15751 	    msdu->buf_addr_info.info1);
15752 
15753 	for (i = 0; i < *num_msdus; i++) {
15754 		msdu = &link->msdu_link[i];
15755 
15756 		if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
15757 		    msdu->buf_addr_info.info0)) {
15758 			*num_msdus = i;
15759 			break;
15760 		}
15761 		*msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
15762 		    msdu->buf_addr_info.info1);
15763 		msdu_cookies++;
15764 	}
15765 }
15766 
15767 void
15768 qwx_hal_rx_msdu_link_desc_set(struct qwx_softc *sc, void *desc,
15769     void *link_desc, enum hal_wbm_rel_bm_act action)
15770 {
15771 	struct hal_wbm_release_ring *dst_desc = desc;
15772 	struct hal_wbm_release_ring *src_desc = link_desc;
15773 
15774 	dst_desc->buf_addr_info = src_desc->buf_addr_info;
15775 	dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
15776 	    HAL_WBM_REL_SRC_MODULE_SW) |
15777 	    FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
15778 	    FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
15779 	    HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
15780 }
15781 
15782 int
15783 qwx_dp_rx_link_desc_return(struct qwx_softc *sc, uint32_t *link_desc,
15784     enum hal_wbm_rel_bm_act action)
15785 {
15786 	struct qwx_dp *dp = &sc->dp;
15787 	struct hal_srng *srng;
15788 	uint32_t *desc;
15789 	int ret = 0;
15790 
15791 	srng = &sc->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
15792 #ifdef notyet
15793 	spin_lock_bh(&srng->lock);
15794 #endif
15795 	qwx_hal_srng_access_begin(sc, srng);
15796 
15797 	desc = qwx_hal_srng_src_get_next_entry(sc, srng);
15798 	if (!desc) {
15799 		ret = ENOBUFS;
15800 		goto exit;
15801 	}
15802 
15803 	qwx_hal_rx_msdu_link_desc_set(sc, (void *)desc, (void *)link_desc,
15804 	    action);
15805 
15806 exit:
15807 	qwx_hal_srng_access_end(sc, srng);
15808 #ifdef notyet
15809 	spin_unlock_bh(&srng->lock);
15810 #endif
15811 	return ret;
15812 }
15813 
/*
 * Rx fragment reassembly; not yet implemented. Callers treat the
 * ENOTSUP return as "drop the fragment and recycle its link descriptor"
 * (see qwx_dp_process_rx_err_buf()).
 */
int
qwx_dp_rx_frag_h_mpdu(struct qwx_softc *sc, struct mbuf *m,
    uint32_t *ring_desc)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
15821 
15822 static inline uint16_t
15823 qwx_dp_rx_h_msdu_start_msdu_len(struct qwx_softc *sc, struct hal_rx_desc *desc)
15824 {
15825 	return sc->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
15826 }
15827 
/*
 * Reclaim one RX buffer referenced by an error descriptor: unmap its
 * DMA map, mark its slot free for replenishing, and either drop the
 * frame or attempt fragment reassembly. The mbuf is always freed here
 * (reassembly is currently unimplemented and returns an error).
 */
void
qwx_dp_process_rx_err_buf(struct qwx_softc *sc, uint32_t *ring_desc,
    int buf_id, int drop)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	struct hal_rx_desc *rx_desc;
	uint16_t msdu_len;
	uint32_t hal_rx_desc_sz = sc->hw_params.hal_desc_sz;

	/* Ignore invalid ids and slots that were already reclaimed. */
	if (buf_id >= rx_ring->bufs_max || isset(rx_ring->freemap, buf_id))
		return;

	/* Take ownership of the buffer and free its ring slot. */
	rx_data = &rx_ring->rx_data[buf_id];
	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
	m = rx_data->m;
	rx_data->m = NULL;
	setbit(rx_ring->freemap, buf_id);

	if (drop) {
		m_freem(m);
		return;
	}

	/* Sanity-check the length reported in the RX descriptor. */
	rx_desc = mtod(m, struct hal_rx_desc *);
	msdu_len = qwx_dp_rx_h_msdu_start_msdu_len(sc, rx_desc);
	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
#if 0
		uint8_t *hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
		ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
				sizeof(struct ieee80211_hdr));
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
				sizeof(struct hal_rx_desc));
#endif
		m_freem(m);
		return;
	}

	/* Reassembly failure: put the link descriptor back on the idle
	 * list so the hardware can reuse it. */
	if (qwx_dp_rx_frag_h_mpdu(sc, m, ring_desc)) {
		qwx_dp_rx_link_desc_return(sc, ring_desc,
		    HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	m_freem(m);
}
15876 
/*
 * Drain the REO exception ring: for each error descriptor, look up the
 * referenced MSDU link descriptor, reclaim the RX buffers it points at,
 * and replenish the refill ring with as many buffers as were reaped.
 * Non-fragment errors (and multi-MSDU link descriptors) are dropped.
 * Returns the total number of buffers reaped; also counted as input
 * errors on the interface.
 */
int
qwx_dp_process_rx_err(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	uint32_t msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	uint32_t desc_bank, num_msdus;
	struct hal_srng *srng;
	struct qwx_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	uint64_t paddr;
	uint32_t *desc;
	int is_frag;
	uint8_t drop = 0;

	tot_n_bufs_reaped = 0;

	dp = &sc->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &sc->hal.srng_list[reo_except->ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	while ((desc = qwx_hal_srng_dst_get_next_entry(sc, srng))) {
		struct hal_reo_dest_ring *reo_desc =
		    (struct hal_reo_dest_ring *)desc;
#if 0
		ab->soc_stats.err_ring_pkts++;
#endif
		ret = qwx_hal_desc_reo_parse_err(sc, desc, &paddr, &desc_bank);
		if (ret) {
			printf("%s: failed to parse error reo desc %d\n",
			    sc->sc_dev.dv_xname, ret);
			continue;
		}
		/* Translate the link descriptor's physical address into
		 * its kernel virtual address within the descriptor bank. */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
		    (paddr - link_desc_banks[desc_bank].paddr);
		qwx_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
		    msdu_cookies, &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
#if 0
			ab->soc_stats.invalid_rbm++;
#endif
			printf("%s: invalid return buffer manager %d\n",
			    sc->sc_dev.dv_xname, rbm);
			qwx_dp_rx_link_desc_return(sc, desc,
			    HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 &
		    RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc below,
		 * and drop msdu's indicated due to error reasons.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = 1;
			/* Return the link desc back to wbm idle list */
			qwx_dp_rx_link_desc_return(sc, desc,
			   HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
			    msdu_cookies[i]);

			/* NOTE(review): mac_id from the cookie indexes
			 * n_bufs_reaped[MAX_RADIOS] without a range check —
			 * presumably firmware never produces an out-of-range
			 * pdev id here; verify. */
			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
			    msdu_cookies[i]);

			qwx_dp_process_rx_err_buf(sc, desc, buf_id, drop);
			n_bufs_reaped[mac_id]++;
			tot_n_bufs_reaped++;
		}
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	/* Refill the RX buffer ring with as many buffers as were reaped. */
	for (i = 0; i < sc->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

		qwx_dp_rxbufs_replenish(sc, i, rx_ring, n_bufs_reaped[i],
		    sc->hw_params.hal_params->rx_buf_rbm);
	}

	ifp->if_ierrors += tot_n_bufs_reaped;

	return tot_n_bufs_reaped;
}
15983 
15984 int
15985 qwx_hal_wbm_desc_parse_err(void *desc, struct hal_rx_wbm_rel_info *rel_info)
15986 {
15987 	struct hal_wbm_release_ring *wbm_desc = desc;
15988 	enum hal_wbm_rel_desc_type type;
15989 	enum hal_wbm_rel_src_module rel_src;
15990 	enum hal_rx_buf_return_buf_manager ret_buf_mgr;
15991 
15992 	type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE, wbm_desc->info0);
15993 
15994 	/* We expect only WBM_REL buffer type */
15995 	if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU)
15996 		return -EINVAL;
15997 
15998 	rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
15999 	    wbm_desc->info0);
16000 	if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
16001 	    rel_src != HAL_WBM_REL_SRC_MODULE_REO)
16002 		return EINVAL;
16003 
16004 	ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
16005 	    wbm_desc->buf_addr_info.info1);
16006 	if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
16007 #if 0
16008 		ab->soc_stats.invalid_rbm++;
16009 #endif
16010 		return EINVAL;
16011 	}
16012 
16013 	rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
16014 	    wbm_desc->buf_addr_info.info1);
16015 	rel_info->err_rel_src = rel_src;
16016 	if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
16017 		rel_info->push_reason = FIELD_GET(
16018 		    HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON, wbm_desc->info0);
16019 		rel_info->err_code = FIELD_GET(
16020 		    HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE, wbm_desc->info0);
16021 	} else {
16022 		rel_info->push_reason = FIELD_GET(
16023 		    HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON, wbm_desc->info0);
16024 		rel_info->err_code = FIELD_GET(
16025 		    HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE, wbm_desc->info0);
16026 	}
16027 
16028 	rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
16029 	    wbm_desc->info2);
16030 	rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
16031 	    wbm_desc->info2);
16032 
16033 	return 0;
16034 }
16035 
16036 int
16037 qwx_dp_rx_h_null_q_desc(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
16038     struct qwx_rx_msdu_list *msdu_list)
16039 {
16040 	printf("%s: not implemented\n", __func__);
16041 	return ENOTSUP;
16042 }
16043 
16044 int
16045 qwx_dp_rx_h_reo_err(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
16046     struct qwx_rx_msdu_list *msdu_list)
16047 {
16048 	int drop = 0;
16049 #if 0
16050 	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
16051 #endif
16052 	switch (msdu->err_code) {
16053 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
16054 		if (qwx_dp_rx_h_null_q_desc(sc, msdu, msdu_list))
16055 			drop = 1;
16056 		break;
16057 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
16058 		/* TODO: Do not drop PN failed packets in the driver;
16059 		 * instead, it is good to drop such packets in mac80211
16060 		 * after incrementing the replay counters.
16061 		 */
16062 		/* fallthrough */
16063 	default:
16064 		/* TODO: Review other errors and process them to mac80211
16065 		 * as appropriate.
16066 		 */
16067 		drop = 1;
16068 		break;
16069 	}
16070 
16071 	return drop;
16072 }
16073 
16074 int
16075 qwx_dp_rx_h_rxdma_err(struct qwx_softc *sc, struct qwx_rx_msdu *msdu)
16076 {
16077 	struct ieee80211com *ic = &sc->sc_ic;
16078 	int drop = 0;
16079 #if 0
16080 	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
16081 #endif
16082 	switch (msdu->err_code) {
16083 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
16084 		ic->ic_stats.is_rx_locmicfail++;
16085 		drop = 1;
16086 		break;
16087 	default:
16088 		/* TODO: Review other rxdma error code to check if anything is
16089 		 * worth reporting to mac80211
16090 		 */
16091 		drop = 1;
16092 		break;
16093 	}
16094 
16095 	return drop;
16096 }
16097 
16098 void
16099 qwx_dp_rx_wbm_err(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
16100     struct qwx_rx_msdu_list *msdu_list)
16101 {
16102 	int drop = 1;
16103 
16104 	switch (msdu->err_rel_src) {
16105 	case HAL_WBM_REL_SRC_MODULE_REO:
16106 		drop = qwx_dp_rx_h_reo_err(sc, msdu, msdu_list);
16107 		break;
16108 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
16109 		drop = qwx_dp_rx_h_rxdma_err(sc, msdu);
16110 		break;
16111 	default:
16112 		/* msdu will get freed */
16113 		break;
16114 	}
16115 
16116 	if (drop) {
16117 		m_freem(msdu->m);
16118 		msdu->m = NULL;
16119 		return;
16120 	}
16121 
16122 	qwx_dp_rx_deliver_msdu(sc, msdu);
16123 }
16124 
16125 int
16126 qwx_dp_rx_process_wbm_err(struct qwx_softc *sc)
16127 {
16128 	struct ieee80211com *ic = &sc->sc_ic;
16129 	struct ifnet *ifp = &ic->ic_if;
16130 	struct qwx_dp *dp = &sc->dp;
16131 	struct dp_rxdma_ring *rx_ring;
16132 	struct hal_rx_wbm_rel_info err_info;
16133 	struct hal_srng *srng;
16134 	struct qwx_rx_msdu_list msdu_list[MAX_RADIOS];
16135 	struct qwx_rx_msdu *msdu;
16136 	struct mbuf *m;
16137 	struct qwx_rx_data *rx_data;
16138 	uint32_t *rx_desc;
16139 	int idx, mac_id;
16140 	int num_buffs_reaped[MAX_RADIOS] = {0};
16141 	int total_num_buffs_reaped = 0;
16142 	int ret, i;
16143 
16144 	for (i = 0; i < sc->num_radios; i++)
16145 		TAILQ_INIT(&msdu_list[i]);
16146 
16147 	srng = &sc->hal.srng_list[dp->rx_rel_ring.ring_id];
16148 #ifdef notyet
16149 	spin_lock_bh(&srng->lock);
16150 #endif
16151 	qwx_hal_srng_access_begin(sc, srng);
16152 
16153 	while ((rx_desc = qwx_hal_srng_dst_get_next_entry(sc, srng))) {
16154 		ret = qwx_hal_wbm_desc_parse_err(rx_desc, &err_info);
16155 		if (ret) {
16156 			printf("%s: failed to parse rx error in wbm_rel "
16157 			    "ring desc %d\n", sc->sc_dev.dv_xname, ret);
16158 			continue;
16159 		}
16160 
16161 		idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
16162 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
16163 
16164 		if (mac_id >= MAX_RADIOS)
16165 			continue;
16166 
16167 		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
16168 		if (idx >= rx_ring->bufs_max || isset(rx_ring->freemap, idx))
16169 			continue;
16170 
16171 		rx_data = &rx_ring->rx_data[idx];
16172 		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
16173 		m = rx_data->m;
16174 		rx_data->m = NULL;
16175 		setbit(rx_ring->freemap, idx);
16176 
16177 		num_buffs_reaped[mac_id]++;
16178 		total_num_buffs_reaped++;
16179 
16180 		if (err_info.push_reason !=
16181 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
16182 			m_freem(m);
16183 			continue;
16184 		}
16185 
16186 		msdu = &rx_data->rx_msdu;
16187 		memset(&msdu->rxi, 0, sizeof(msdu->rxi));
16188 		msdu->m = m;
16189 		msdu->err_rel_src = err_info.err_rel_src;
16190 		msdu->err_code = err_info.err_code;
16191 		msdu->rx_desc = mtod(m, struct hal_rx_desc *);
16192 		TAILQ_INSERT_TAIL(&msdu_list[mac_id], msdu, entry);
16193 	}
16194 
16195 	qwx_hal_srng_access_end(sc, srng);
16196 #ifdef notyet
16197 	spin_unlock_bh(&srng->lock);
16198 #endif
16199 	if (!total_num_buffs_reaped)
16200 		goto done;
16201 
16202 	for (i = 0; i < sc->num_radios; i++) {
16203 		if (!num_buffs_reaped[i])
16204 			continue;
16205 
16206 		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
16207 		qwx_dp_rxbufs_replenish(sc, i, rx_ring, num_buffs_reaped[i],
16208 		    sc->hw_params.hal_params->rx_buf_rbm);
16209 	}
16210 
16211 	for (i = 0; i < sc->num_radios; i++) {
16212 		while ((msdu = TAILQ_FIRST(msdu_list))) {
16213 			TAILQ_REMOVE(msdu_list, msdu, entry);
16214 			if (test_bit(ATH11K_CAC_RUNNING, sc->sc_flags)) {
16215 				m_freem(msdu->m);
16216 				msdu->m = NULL;
16217 				continue;
16218 			}
16219 			qwx_dp_rx_wbm_err(sc, msdu, &msdu_list[i]);
16220 			msdu->m = NULL;
16221 		}
16222 	}
16223 done:
16224 	ifp->if_ierrors += total_num_buffs_reaped;
16225 
16226 	return total_num_buffs_reaped;
16227 }
16228 
16229 struct qwx_rx_msdu *
16230 qwx_dp_rx_get_msdu_last_buf(struct qwx_rx_msdu_list *msdu_list,
16231     struct qwx_rx_msdu *first)
16232 {
16233 	struct qwx_rx_msdu *msdu;
16234 
16235 	if (!first->is_continuation)
16236 		return first;
16237 
16238 	TAILQ_FOREACH(msdu, msdu_list, entry) {
16239 		if (!msdu->is_continuation)
16240 			return msdu;
16241 	}
16242 
16243 	return NULL;
16244 }
16245 
/*
 * Return a pointer to the rx_attention structure embedded in a
 * hardware rx descriptor; its layout is chip-specific, hence the
 * indirection through hw_ops.
 */
static inline void *
qwx_dp_rx_get_attention(struct qwx_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_attention(desc);
}
16251 
16252 int
16253 qwx_dp_rx_h_attn_is_mcbc(struct qwx_softc *sc, struct hal_rx_desc *desc)
16254 {
16255 	struct rx_attention *attn = qwx_dp_rx_get_attention(sc, desc);
16256 
16257 	return qwx_dp_rx_h_msdu_end_first_msdu(sc, desc) &&
16258 		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
16259 		 le32toh(attn->info1)));
16260 }
16261 
/*
 * Return the number of L3 padding bytes the hardware inserted before
 * the payload, as recorded in the chip-specific rx descriptor.
 */
static inline uint8_t
qwx_dp_rx_h_msdu_end_l3pad(struct qwx_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}
16267 
16268 static inline int
16269 qwx_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
16270 {
16271 	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, le32toh(attn->info2));
16272 }
16273 
/*
 * Return the frequency meta data word from the chip-specific rx
 * descriptor (its low byte carries the channel number, see
 * qwx_dp_rx_h_ppdu()).
 */
static inline uint32_t
qwx_dp_rx_h_msdu_start_freq(struct qwx_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}
16279 
16280 uint32_t
16281 qwx_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
16282 {
16283 	uint32_t info = le32toh(attn->info1);
16284 	uint32_t errmap = 0;
16285 
16286 	if (info & RX_ATTENTION_INFO1_FCS_ERR)
16287 		errmap |= DP_RX_MPDU_ERR_FCS;
16288 
16289 	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
16290 		errmap |= DP_RX_MPDU_ERR_DECRYPT;
16291 
16292 	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
16293 		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
16294 
16295 	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
16296 		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
16297 
16298 	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
16299 		errmap |= DP_RX_MPDU_ERR_OVERFLOW;
16300 
16301 	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
16302 		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
16303 
16304 	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
16305 		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
16306 
16307 	return errmap;
16308 }
16309 
16310 int
16311 qwx_dp_rx_h_attn_msdu_len_err(struct qwx_softc *sc, struct hal_rx_desc *desc)
16312 {
16313 	struct rx_attention *rx_attention;
16314 	uint32_t errmap;
16315 
16316 	rx_attention = qwx_dp_rx_get_attention(sc, desc);
16317 	errmap = qwx_dp_rx_h_attn_mpdu_err(rx_attention);
16318 
16319 	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
16320 }
16321 
16322 int
16323 qwx_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
16324 {
16325 	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
16326 	    le32toh(attn->info2)) == RX_DESC_DECRYPT_STATUS_CODE_OK);
16327 }
16328 
16329 int
16330 qwx_dp_rx_msdu_coalesce(struct qwx_softc *sc, struct qwx_rx_msdu_list *msdu_list,
16331     struct qwx_rx_msdu *first, struct qwx_rx_msdu *last, uint8_t l3pad_bytes,
16332     int msdu_len)
16333 {
16334 	printf("%s: not implemented\n", __func__);
16335 	return ENOTSUP;
16336 }
16337 
/*
 * Fill in rate information in the rx info structure based on the
 * hardware rx descriptor.  Not yet implemented; rxi is left untouched.
 */
void
qwx_dp_rx_h_rate(struct qwx_softc *sc, struct hal_rx_desc *rx_desc,
    struct ieee80211_rxinfo *rxi)
{
	/* TODO */
}
16344 
16345 void
16346 qwx_dp_rx_h_ppdu(struct qwx_softc *sc, struct hal_rx_desc *rx_desc,
16347     struct ieee80211_rxinfo *rxi)
16348 {
16349 	uint8_t channel_num;
16350 	uint32_t meta_data;
16351 
16352 	meta_data = qwx_dp_rx_h_msdu_start_freq(sc, rx_desc);
16353 	channel_num = meta_data & 0xff;
16354 
16355 	rxi->rxi_chan = channel_num;
16356 
16357 	qwx_dp_rx_h_rate(sc, rx_desc, rxi);
16358 }
16359 
16360 void
16361 qwx_dp_rx_h_undecap_nwifi(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
16362     uint8_t *first_hdr, enum hal_encrypt_type enctype)
16363 {
16364 	/*
16365 	* This function will need to do some work once we are receiving
16366 	* aggregated frames. For now, it needs to do nothing.
16367 	*/
16368 
16369 	if (!msdu->is_first_msdu)
16370 		printf("%s: not implemented\n", __func__);
16371 }
16372 
/*
 * Undo raw-mode decapsulation: strip the FCS from an unfragmented,
 * single-buffer MSDU.  Multi-buffer and non-first MSDUs are left
 * untouched.  The disabled code below is the Linux ath11k logic for
 * stripping MIC/ICV/IV from decrypted frames, kept for reference.
 */
void
qwx_dp_rx_h_undecap_raw(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
    enum hal_encrypt_type enctype, int decrypted)
{
#if 0
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;
#endif

	/* Only act on an MSDU that is both first and last (one buffer). */
	if (!msdu->is_first_msdu ||
	    !(msdu->is_first_msdu && msdu->is_last_msdu))
		return;

	/* Trim the FCS from the tail of the frame. */
	m_adj(msdu->m, -IEEE80211_CRC_LEN);
#if 0
	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
#endif
}
16430 
/*
 * Return a pointer to the 802.11 header status area of the
 * chip-specific rx descriptor.
 */
static inline uint8_t *
qwx_dp_rx_h_80211_hdr(struct qwx_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}
16436 
16437 static inline enum hal_encrypt_type
16438 qwx_dp_rx_h_mpdu_start_enctype(struct qwx_softc *sc, struct hal_rx_desc *desc)
16439 {
16440 	if (!sc->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
16441 		return HAL_ENCRYPT_TYPE_OPEN;
16442 
16443 	return sc->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
16444 }
16445 
/*
 * Return the decapsulation type (DP_RX_DECAP_TYPE_*) recorded in the
 * chip-specific rx descriptor.
 */
static inline uint8_t
qwx_dp_rx_h_msdu_start_decap_type(struct qwx_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}
16451 
/*
 * Undo the hardware's decapsulation of an rx frame, dispatching on
 * the decap type recorded in the rx descriptor.  Only native-wifi
 * and raw decapsulation are handled; the Ethernet and 802.3 cases
 * below are disabled Linux ath11k reference code.
 */
void
qwx_dp_rx_h_undecap(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
    struct hal_rx_desc *rx_desc, enum hal_encrypt_type enctype,
    int decrypted)
{
	uint8_t *first_hdr;
	uint8_t decap;

	first_hdr = qwx_dp_rx_h_80211_hdr(sc, rx_desc);
	decap = qwx_dp_rx_h_msdu_start_decap_type(sc, rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		qwx_dp_rx_h_undecap_nwifi(sc, msdu, first_hdr, enctype);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		qwx_dp_rx_h_undecap_raw(sc, msdu, enctype, decrypted);
		break;
#if 0
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		ehdr = (struct ethhdr *)msdu->data;

		/* mac80211 allows fast path only for authorized STA */
		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
			ATH11K_SKB_RXCB(msdu)->is_eapol = true;
			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
			break;
		}

		/* PN for mcast packets will be validated in mac80211;
		 * remove eth header and add 802.11 header.
		 */
		if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
#endif
	}
}
16495 
/*
 * Per-MPDU rx processing: determine multicast/broadcast and
 * encryption status from the rx descriptor, update net80211 error
 * counters for FCS/TKIP-MIC/decrypt failures, undo hardware
 * decapsulation, and clear the PROTECTED bit when the hardware has
 * already stripped the IV.
 *
 * Returns 0 on success or EIO if the hardware flagged any MPDU-level
 * error.  The #if 0 sections preserve the Linux ath11k peer/rx_status
 * logic for reference.
 */
int
qwx_dp_rx_h_mpdu(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
    struct hal_rx_desc *rx_desc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int fill_crypto_hdr = 0;
	enum hal_encrypt_type enctype;
	int is_decrypted = 0;
#if 0
	struct ath11k_skb_rxcb *rxcb;
#endif
	struct ieee80211_frame *wh;
#if 0
	struct ath11k_peer *peer;
#endif
	struct rx_attention *rx_attention;
	uint32_t err_bitmap;

	/* PN for multicast packets will be checked in net80211 */
	fill_crypto_hdr = qwx_dp_rx_h_attn_is_mcbc(sc, rx_desc);
	msdu->is_mcbc = fill_crypto_hdr;
#if 0
	if (rxcb->is_mcbc) {
		rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
		rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
	}

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer) {
		if (rxcb->is_mcbc)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
#endif
		enctype = qwx_dp_rx_h_mpdu_start_enctype(sc, rx_desc);
#if 0
	}
	spin_unlock_bh(&ar->ab->base_lock);
#endif
	/* Only consult the decrypt status for encrypted, error-free frames. */
	rx_attention = qwx_dp_rx_get_attention(sc, rx_desc);
	err_bitmap = qwx_dp_rx_h_attn_mpdu_err(rx_attention);
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = qwx_dp_rx_h_attn_is_decrypted(rx_attention);
#if 0
	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

#endif
	if (err_bitmap & DP_RX_MPDU_ERR_FCS) {
		if (ic->ic_flags & IEEE80211_F_RSNON)
			ic->ic_stats.is_rx_decryptcrc++;
		else
			ic->ic_stats.is_rx_decap++;
	}

	/* XXX Trusting firmware to handle Michael MIC counter-measures... */
	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		ic->ic_stats.is_rx_locmicfail++;

	if (err_bitmap & DP_RX_MPDU_ERR_DECRYPT)
		ic->ic_stats.is_rx_wepfail++;

	if (is_decrypted) {
#if 0
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
#endif
		/* Tell net80211 the frame was already decrypted in hardware. */
		msdu->rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
	}
#if 0
	ath11k_dp_rx_h_csum_offload(ar, msdu);
#endif
	qwx_dp_rx_h_undecap(sc, msdu, rx_desc, enctype, is_decrypted);

	if (is_decrypted && !fill_crypto_hdr &&
	    qwx_dp_rx_h_msdu_start_decap_type(sc, rx_desc) !=
	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
		/* Hardware has stripped the IV. */
		wh = mtod(msdu->m, struct ieee80211_frame *);
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
	}

	return err_bitmap ? EIO : 0;
}
16592 
/*
 * Validate and prepare one reaped MSDU for delivery: check the
 * length-error and msdu-done bits, strip the hardware rx descriptor
 * (and L3 padding) from the front of the mbuf, set the mbuf length
 * from the descriptor, then run MPDU-level processing.
 *
 * Returns 0 on success, or an errno if the MSDU is invalid or could
 * not be assembled; in that case the caller frees the mbuf.
 */
int
qwx_dp_rx_process_msdu(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
    struct qwx_rx_msdu_list *msdu_list)
{
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct rx_attention *rx_attention;
	struct qwx_rx_msdu *last_buf;
	uint8_t l3_pad_bytes;
	uint16_t msdu_len;
	int ret;
	uint32_t hal_rx_desc_sz = sc->hw_params.hal_desc_sz;

	/* The end-of-MPDU TLVs live in the buffer holding the last MSDU. */
	last_buf = qwx_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		DPRINTF("%s: No valid Rx buffer to access "
		    "Atten/MSDU_END/MPDU_END tlvs\n", __func__);
		return EIO;
	}

	rx_desc = mtod(msdu->m, struct hal_rx_desc *);
	if (qwx_dp_rx_h_attn_msdu_len_err(sc, rx_desc)) {
		DPRINTF("%s: msdu len not valid\n", __func__);
		return EIO;
	}

	lrx_desc = mtod(last_buf->m, struct hal_rx_desc *);
	rx_attention = qwx_dp_rx_get_attention(sc, lrx_desc);
	if (!qwx_dp_rx_h_attn_msdu_done(rx_attention)) {
		DPRINTF("%s: msdu_done bit in attention is not set\n",
		    __func__);
		return EIO;
	}

	msdu->rx_desc = rx_desc;
	msdu_len = qwx_dp_rx_h_msdu_start_msdu_len(sc, rx_desc);
	l3_pad_bytes = qwx_dp_rx_h_msdu_end_l3pad(sc, lrx_desc);

	if (msdu->is_frag) {
		/* Fragment: strip only the rx descriptor. */
		m_adj(msdu->m, hal_rx_desc_sz);
		msdu->m->m_len = msdu->m->m_pkthdr.len = msdu_len;
	} else if (!msdu->is_continuation) {
		/* Single-buffer MSDU: length must fit in one rx buffer. */
		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
#if 0
			uint8_t *hdr_status;

			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
#endif
			DPRINTF("%s: invalid msdu len %u\n",
			    __func__, msdu_len);
#if 0
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
					sizeof(struct ieee80211_hdr));
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
					sizeof(struct hal_rx_desc));
#endif
			return EINVAL;
		}
		m_adj(msdu->m, hal_rx_desc_sz + l3_pad_bytes);
		msdu->m->m_len = msdu->m->m_pkthdr.len = msdu_len;
	} else {
		/* MSDU spans multiple buffers; coalesce (not yet supported). */
		ret = qwx_dp_rx_msdu_coalesce(sc, msdu_list, msdu, last_buf,
		    l3_pad_bytes, msdu_len);
		if (ret) {
			DPRINTF("%s: failed to coalesce msdu rx buffer%d\n",
			    __func__, ret);
			return ret;
		}
	}

	memset(&msdu->rxi, 0, sizeof(msdu->rxi));
	qwx_dp_rx_h_ppdu(sc, rx_desc, &msdu->rxi);

	return qwx_dp_rx_h_mpdu(sc, msdu, rx_desc);
}
16667 
/*
 * Hand a fully processed MSDU to net80211.  Looks up the receiving
 * node from the 802.11 header, taps the frame to BPF listeners if
 * attached, and passes the mbuf (consumed by ieee80211_input) up
 * the stack.
 */
void
qwx_dp_rx_deliver_msdu(struct qwx_softc *sc, struct qwx_rx_msdu *msdu)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	wh = mtod(msdu->m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct qwx_rx_radiotap_header *tap = &sc->sc_rxtap;

		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
		    msdu->m, BPF_DIRECTION_IN);
	}
#endif
	ieee80211_input(ifp, msdu->m, ni, &msdu->rxi);
	ieee80211_release_node(ic, ni);
}
16690 
16691 void
16692 qwx_dp_rx_process_received_packets(struct qwx_softc *sc,
16693     struct qwx_rx_msdu_list *msdu_list, int mac_id)
16694 {
16695 	struct qwx_rx_msdu *msdu;
16696 	int ret;
16697 
16698 	while ((msdu = TAILQ_FIRST(msdu_list))) {
16699 		TAILQ_REMOVE(msdu_list, msdu, entry);
16700 		ret = qwx_dp_rx_process_msdu(sc, msdu, msdu_list);
16701 		if (ret) {
16702 			DNPRINTF(QWX_D_MAC, "Unable to process msdu: %d", ret);
16703 			m_freem(msdu->m);
16704 			msdu->m = NULL;
16705 			continue;
16706 		}
16707 
16708 		qwx_dp_rx_deliver_msdu(sc, msdu);
16709 		msdu->m = NULL;
16710 	}
16711 }
16712 
/*
 * Reap completed rx frames from a REO destination ring, sort them
 * into per-radio MSDU lists, then process/deliver each list and
 * replenish the reaped buffers.
 *
 * Returns the number of complete MSDUs reaped (continuation buffers
 * do not count until their final buffer arrives).
 */
int
qwx_dp_process_rx(struct qwx_softc *sc, int ring_id)
{
	struct qwx_dp *dp = &sc->dp;
	struct qwx_pdev_dp *pdev_dp = &sc->pdev_dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct qwx_rx_msdu_list msdu_list[MAX_RADIOS];
	struct qwx_rx_msdu *msdu;
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	int done = 0;
	int idx;
	unsigned int mac_id;
	struct hal_reo_dest_ring *desc;
	enum hal_reo_dest_ring_push_reason push_reason;
	uint32_t cookie;
	int i;

	for (i = 0; i < MAX_RADIOS; i++)
		TAILQ_INIT(&msdu_list[i]);

	srng = &sc->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
try_again:
	qwx_hal_srng_access_begin(sc, srng);

	while ((desc = (struct hal_reo_dest_ring *)
	    qwx_hal_srng_dst_get_next_entry(sc, srng))) {
		/* The cookie encodes the buffer index and the radio id. */
		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
		    desc->buf_addr_info.info1);
		idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		if (mac_id >= MAX_RADIOS)
			continue;

		/* Ignore stale cookies referring to already-freed slots. */
		rx_ring = &pdev_dp->rx_refill_buf_ring;
		if (idx >= rx_ring->bufs_max || isset(rx_ring->freemap, idx))
			continue;

		/* Take ownership of the mbuf and mark the slot free. */
		rx_data = &rx_ring->rx_data[idx];
		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
		m = rx_data->m;
		rx_data->m = NULL;
		setbit(rx_ring->freemap, idx);

		num_buffs_reaped[mac_id]++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
		    desc->info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			m_freem(m);
#if 0
			sc->soc_stats.hal_reo_error[
			    dp->reo_dst_ring[ring_id].ring_id]++;
#endif
			continue;
		}

		/* Record per-MSDU metadata from the ring descriptor. */
		msdu = &rx_data->rx_msdu;
		msdu->m = m;
		msdu->is_first_msdu = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		msdu->is_last_msdu = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		msdu->is_continuation = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		msdu->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
		    desc->rx_mpdu_info.meta_data);
		msdu->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
		    desc->rx_mpdu_info.info0);
		msdu->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
		    desc->info0);

		msdu->mac_id = mac_id;
		TAILQ_INSERT_TAIL(&msdu_list[mac_id], msdu, entry);

		/* An MSDU is only complete once its last buffer is seen. */
		if (msdu->is_continuation) {
			done = 0;
		} else {
			total_msdu_reaped++;
			done = 1;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && qwx_hal_srng_dst_num_free(sc, srng, 1)) {
		qwx_hal_srng_access_end(sc, srng);
		goto try_again;
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	if (!total_msdu_reaped)
		goto exit;

	/* Deliver the per-radio lists and refill the reaped slots. */
	for (i = 0; i < sc->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		qwx_dp_rx_process_received_packets(sc, &msdu_list[i], i);

		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

		qwx_dp_rxbufs_replenish(sc, i, rx_ring, num_buffs_reaped[i],
		    sc->hw_params.hal_params->rx_buf_rbm);
	}
exit:
	return total_msdu_reaped;
}
16836 
/*
 * Allocate and DMA-map a fresh mbuf for the monitor status ring.
 * On success returns the mbuf, stores it in the rx ring slot whose
 * index is written to *buf_idx, and marks that slot in use.
 * Returns NULL on any failure (mbuf/cluster allocation, no free
 * slot, or DMA mapping error); no ring state is modified in that
 * case.
 */
struct mbuf *
qwx_dp_rx_alloc_mon_status_buf(struct qwx_softc *sc,
    struct dp_rxdma_ring *rx_ring, int *buf_idx)
{
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	const size_t size = DP_RX_BUFFER_SIZE;
	int ret, idx;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;

	/* Attach cluster storage large enough for a status buffer. */
	if (size <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, size);
	if ((m->m_flags & M_EXT) == 0)
		goto fail_free_mbuf;

	m->m_len = m->m_pkthdr.len = size;
	idx = qwx_next_free_rxbuf_idx(rx_ring);
	if (idx == -1)
		goto fail_free_mbuf;

	rx_data = &rx_ring->rx_data[idx];
	if (rx_data->m != NULL)
		goto fail_free_mbuf;

	/* DMA maps are created lazily and kept for the ring's lifetime. */
	if (rx_data->map == NULL) {
		ret = bus_dmamap_create(sc->sc_dmat, size, 1,
		    size, 0, BUS_DMA_NOWAIT, &rx_data->map);
		if (ret)
			goto fail_free_mbuf;
	}

	ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (ret) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail_free_mbuf;
	}

	*buf_idx = idx;
	rx_data->m = m;
	clrbit(rx_ring->freemap, idx);
	return m;

fail_free_mbuf:
	m_freem(m);
	return NULL;
}
16890 
/*
 * Reap completed monitor status buffers from the rx monitor status
 * refill ring of the given MAC and enqueue them on 'ml' for parsing.
 * Each reaped buffer slot is immediately refilled with a fresh mbuf so
 * the hardware always has buffers to DMA into.
 * Returns the number of ring entries processed.
 */
int
qwx_dp_rx_reap_mon_status_ring(struct qwx_softc *sc, int mac_id,
    struct mbuf_list *ml)
{
	const struct ath11k_hw_hal_params *hal_params;
	struct qwx_pdev_dp *dp;
	struct dp_rxdma_ring *rx_ring;
	struct qwx_mon_data *pmon;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	struct hal_tlv_hdr *tlv;
	uint32_t cookie;
	int buf_idx, srng_id;
	uint64_t paddr;
	uint8_t rbm;
	int num_buffs_reaped = 0;

	dp = &sc->pdev_dp;
	pmon = &dp->mon_data;

	srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params,
	    mac_id);
	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];

	srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);
	while (1) {
		/*
		 * Peek without advancing the ring pointer; if the buffer's
		 * DMA is not complete we must revisit this entry later.
		 */
		rx_mon_status_desc = qwx_hal_srng_src_peek(sc, srng);
		if (!rx_mon_status_desc) {
			pmon->buf_state = DP_MON_STATUS_REPLINISH;
			break;
		}

		qwx_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
		    &cookie, &rbm);
		if (paddr) {
			buf_idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
			/* Ignore cookies naming invalid or free slots. */
			if (buf_idx >= rx_ring->bufs_max ||
			    isset(rx_ring->freemap, buf_idx)) {
				pmon->buf_state = DP_MON_STATUS_REPLINISH;
				goto move_next;
			}

			rx_data = &rx_ring->rx_data[buf_idx];

			bus_dmamap_sync(sc->sc_dmat, rx_data->map, 0,
			    rx_data->m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);

			tlv = mtod(rx_data->m, struct hal_tlv_hdr *);
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
			    HAL_RX_STATUS_BUFFER_DONE) {
				/* If done status is missing, hold onto status
				 * ring until status is done for this status
				 * ring buffer.
				 * Keep HP in mon_status_ring unchanged,
				 * and break from here.
				 * Check status for same buffer for next time
				 */
				pmon->buf_state = DP_MON_STATUS_NO_DMA;
				break;
			}

			/* Take ownership of the mbuf; mark the slot free. */
			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m = rx_data->m;
			rx_data->m = NULL;
			setbit(rx_ring->freemap, buf_idx);
#if 0
			if (ab->hw_params.full_monitor_mode) {
				ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
				if (paddr == pmon->mon_status_paddr)
					pmon->buf_state = DP_MON_STATUS_MATCH;
			}
#endif
			ml_enqueue(ml, m);
		} else {
			pmon->buf_state = DP_MON_STATUS_REPLINISH;
		}
move_next:
		/* Refill the slot we just consumed. */
		m = qwx_dp_rx_alloc_mon_status_buf(sc, rx_ring, &buf_idx);
		if (!m) {
			/* Out of buffers; clear the descriptor's address. */
			hal_params = sc->hw_params.hal_params;
			qwx_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
			    hal_params->rx_buf_rbm);
			num_buffs_reaped++;
			break;
		}
		rx_data = &rx_ring->rx_data[buf_idx];

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
		    FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_idx);

		paddr = rx_data->map->dm_segs[0].ds_addr;
		qwx_hal_rx_buf_addr_info_set(rx_mon_status_desc, paddr,
		    cookie, sc->hw_params.hal_params->rx_buf_rbm);
		qwx_hal_srng_src_get_next_entry(sc, srng);
		num_buffs_reaped++;
	}
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return num_buffs_reaped;
}
16999 
/*
 * Parse a monitor status buffer into 'ppdu_info'.
 * Not yet implemented; always reports the PPDU as incomplete, so
 * callers currently free every status buffer without using it.
 */
enum hal_rx_mon_status
qwx_hal_rx_parse_mon_status(struct qwx_softc *sc,
    struct hal_rx_mon_ppdu_info *ppdu_info, struct mbuf *m)
{
	/* TODO */
	return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
17007 
/*
 * Drain the monitor status ring for 'mac_id' and parse each reaped
 * status buffer.  Buffers that do not complete a PPDU (currently all
 * of them, since qwx_hal_rx_parse_mon_status() is a stub) are freed.
 * Returns the number of ring entries reaped.
 */
int
qwx_dp_rx_process_mon_status(struct qwx_softc *sc, int mac_id)
{
	enum hal_rx_mon_status hal_status;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
#if 0
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
#endif
	int num_buffs_reaped = 0;
#if 0
	uint32_t rx_buf_sz;
	uint16_t log_type;
#endif
	struct qwx_mon_data *pmon = (struct qwx_mon_data *)&sc->pdev_dp.mon_data;
#if  0
	struct qwx_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
#endif
	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;

	num_buffs_reaped = qwx_dp_rx_reap_mon_status_ring(sc, mac_id, &ml);
	if (!num_buffs_reaped)
		goto exit;

	memset(ppdu_info, 0, sizeof(*ppdu_info));
	ppdu_info->peer_id = HAL_INVALID_PEERID;

	while ((m = ml_dequeue(&ml))) {
#if 0
		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
			rx_buf_sz = DP_RX_BUFFER_SIZE;
		} else {
			log_type = ATH11K_PKTLOG_TYPE_INVALID;
			rx_buf_sz = 0;
		}

		if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
#endif

		/* Start each buffer with a clean parse state. */
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
		hal_status = qwx_hal_rx_parse_mon_status(sc, ppdu_info, m);
#if 0
		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
#endif
		/* Drop buffers that do not yield a complete PPDU. */
		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			m_freem(m);
			continue;
		}
#if 0
		rcu_read_lock();
		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info->peer_id);
			goto next_skb;
		}

		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);

		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);

next_skb:
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
#endif
	}
exit:
	return num_buffs_reaped;
}
17101 
/*
 * Process monitor rings for one MAC.  Full-monitor mode is not yet
 * ported (see the disabled branch), so this currently always goes
 * through the status-ring path.  Returns nonzero if work was done.
 */
int
qwx_dp_rx_process_mon_rings(struct qwx_softc *sc, int mac_id)
{
	int ret = 0;
#if 0
	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
	    ab->hw_params.full_monitor_mode)
		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
	else
#endif
		ret = qwx_dp_rx_process_mon_status(sc, mac_id);

	return ret;
}
17116 
17117 void
17118 qwx_dp_service_mon_ring(void *arg)
17119 {
17120 	struct qwx_softc *sc = arg;
17121 	int i;
17122 
17123 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++)
17124 		qwx_dp_rx_process_mon_rings(sc, i);
17125 
17126 	timeout_add(&sc->mon_reap_timer, ATH11K_MON_TIMER_INTERVAL);
17127 }
17128 
/*
 * Handle the RXDMA error destination ring for 'mac_id': walk each
 * error descriptor, free the rx buffers referenced by its MSDU link
 * descriptor, return the link descriptor to the idle list, and
 * replenish the rx refill ring with as many buffers as were freed.
 * Freed buffers are counted as input errors on the interface.
 * Returns the number of buffers freed.
 */
int
qwx_dp_process_rxdma_err(struct qwx_softc *sc, int mac_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct dp_srng *err_ring;
	struct dp_rxdma_ring *rx_ring;
	struct dp_link_desc_bank *link_desc_banks = sc->dp.link_desc_banks;
	struct hal_srng *srng;
	uint32_t msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct qwx_rx_data *rx_data;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	uint64_t paddr;
	uint32_t desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i, idx, srng_id;

	srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params,
	    mac_id);
	err_ring = &sc->pdev_dp.rxdma_err_dst_ring[srng_id];
	rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

	srng = &sc->hal.srng_list[err_ring->ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	while ((desc = qwx_hal_srng_dst_get_next_entry(sc, srng))) {
		qwx_hal_rx_reo_ent_paddr_get(sc, desc, &paddr, &desc_bank);

		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code = FIELD_GET(
		    HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
		    entr_ring->info1);
#if 0
		ab->soc_stats.rxdma_error[rxdma_err_code]++;
#endif
		/* Translate the link descriptor's bus address to a VA. */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
		     (paddr - link_desc_banks[desc_bank].paddr);
		qwx_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
		    msdu_cookies, &rbm);

		for (i = 0; i < num_msdus; i++) {
			idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
			    msdu_cookies[i]);
			/* Skip cookies naming invalid or free slots. */
			if (idx >= rx_ring->bufs_max ||
			    isset(rx_ring->freemap, idx))
				continue;

			rx_data = &rx_ring->rx_data[idx];

			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
			setbit(rx_ring->freemap, idx);

			num_buf_freed++;
		}

		qwx_dp_rx_link_desc_return(sc, desc,
		    HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	if (num_buf_freed)
		qwx_dp_rxbufs_replenish(sc, mac_id, rx_ring, num_buf_freed,
		    sc->hw_params.hal_params->rx_buf_rbm);

	ifp->if_ierrors += num_buf_freed;

	return num_buf_freed;
}
17210 
/*
 * Parse a REO GET_QUEUE_STATS status descriptor into the generic
 * hal_reo_status form.  Only the common header fields are extracted;
 * the detailed per-queue statistics are available in the descriptor
 * but currently only used by the disabled debug printout below.
 */
void
qwx_hal_reo_status_queue_stats(struct qwx_softc *sc, uint32_t *reo_desc,
    struct hal_reo_status *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_get_queue_stats_status *desc =
	    (struct hal_reo_get_queue_stats_status *)tlv->value;

	status->uniform_hdr.cmd_num =
	    FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
	status->uniform_hdr.cmd_status =
	    FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
#if 0
	ath11k_dbg(ab, ATH11K_DBG_HAL, "Queue stats status:\n");
	ath11k_dbg(ab, ATH11K_DBG_HAL, "header: cmd_num %d status %d\n",
		   status->uniform_hdr.cmd_num,
		   status->uniform_hdr.cmd_status);
	ath11k_dbg(ab, ATH11K_DBG_HAL, "ssn %ld cur_idx %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
			     desc->info0),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
			     desc->info0));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
		   desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
		   desc->last_rx_enqueue_timestamp,
		   desc->last_rx_dequeue_timestamp);
	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
		   desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
		   desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
		   desc->rx_bitmap[6], desc->rx_bitmap[7]);
	ath11k_dbg(ab, ATH11K_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
			     desc->info1),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
			     desc->info1));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
			     desc->info2),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
			     desc->info2),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
			     desc->info2));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
			     desc->info3),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
			     desc->info3));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
		   desc->num_mpdu_frames, desc->num_msdu_frames,
		   desc->total_bytes);
	ath11k_dbg(ab, ATH11K_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
			     desc->info4),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
			     desc->info4),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
			     desc->info4));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "looping count %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
			     desc->info5));
#endif
}
17276 
17277 void
17278 qwx_hal_reo_flush_queue_status(struct qwx_softc *sc, uint32_t *reo_desc,
17279     struct hal_reo_status *status)
17280 {
17281 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17282 	struct hal_reo_flush_queue_status *desc =
17283 	    (struct hal_reo_flush_queue_status *)tlv->value;
17284 
17285 	status->uniform_hdr.cmd_num = FIELD_GET(
17286 	   HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17287 	status->uniform_hdr.cmd_status = FIELD_GET(
17288 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17289 	status->u.flush_queue.err_detected = FIELD_GET(
17290 	    HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED, desc->info0);
17291 }
17292 
17293 void
17294 qwx_hal_reo_flush_cache_status(struct qwx_softc *sc, uint32_t *reo_desc,
17295     struct hal_reo_status *status)
17296 {
17297 	struct ath11k_hal *hal = &sc->hal;
17298 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17299 	struct hal_reo_flush_cache_status *desc =
17300 	    (struct hal_reo_flush_cache_status *)tlv->value;
17301 
17302 	status->uniform_hdr.cmd_num = FIELD_GET(
17303 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17304 	status->uniform_hdr.cmd_status = FIELD_GET(
17305 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17306 
17307 	status->u.flush_cache.err_detected = FIELD_GET(
17308 	    HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR, desc->info0);
17309 	status->u.flush_cache.err_code = FIELD_GET(
17310 	    HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE, desc->info0);
17311 	if (!status->u.flush_cache.err_code)
17312 		hal->avail_blk_resource |= BIT(hal->current_blk_index);
17313 
17314 	status->u.flush_cache.cache_controller_flush_status_hit = FIELD_GET(
17315 	    HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT, desc->info0);
17316 
17317 	status->u.flush_cache.cache_controller_flush_status_desc_type =
17318 	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
17319 	    desc->info0);
17320 	status->u.flush_cache.cache_controller_flush_status_client_id =
17321 	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
17322 	    desc->info0);
17323 	status->u.flush_cache.cache_controller_flush_status_err =
17324 	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR,
17325 	    desc->info0);
17326 	status->u.flush_cache.cache_controller_flush_status_cnt =
17327 	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT,
17328 	    desc->info0);
17329 }
17330 
17331 void
17332 qwx_hal_reo_unblk_cache_status(struct qwx_softc *sc, uint32_t *reo_desc,
17333     struct hal_reo_status *status)
17334 {
17335 	struct ath11k_hal *hal = &sc->hal;
17336 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17337 	struct hal_reo_unblock_cache_status *desc =
17338 	   (struct hal_reo_unblock_cache_status *)tlv->value;
17339 
17340 	status->uniform_hdr.cmd_num = FIELD_GET(
17341 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17342 	status->uniform_hdr.cmd_status = FIELD_GET(
17343 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17344 
17345 	status->u.unblock_cache.err_detected = FIELD_GET(
17346 	    HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR, desc->info0);
17347 	status->u.unblock_cache.unblock_type = FIELD_GET(
17348 	    HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE, desc->info0);
17349 
17350 	if (!status->u.unblock_cache.err_detected &&
17351 	    status->u.unblock_cache.unblock_type ==
17352 	    HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
17353 		hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
17354 }
17355 
17356 void
17357 qwx_hal_reo_flush_timeout_list_status(struct qwx_softc *ab, uint32_t *reo_desc,
17358     struct hal_reo_status *status)
17359 {
17360 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17361 	struct hal_reo_flush_timeout_list_status *desc =
17362 	    (struct hal_reo_flush_timeout_list_status *)tlv->value;
17363 
17364 	status->uniform_hdr.cmd_num = FIELD_GET(
17365 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17366 	status->uniform_hdr.cmd_status = FIELD_GET(
17367 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17368 
17369 	status->u.timeout_list.err_detected = FIELD_GET(
17370 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR, desc->info0);
17371 	status->u.timeout_list.list_empty = FIELD_GET(
17372 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY, desc->info0);
17373 
17374 	status->u.timeout_list.release_desc_cnt = FIELD_GET(
17375 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT, desc->info1);
17376 	status->u.timeout_list.fwd_buf_cnt = FIELD_GET(
17377 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT, desc->info1);
17378 }
17379 
17380 void
17381 qwx_hal_reo_desc_thresh_reached_status(struct qwx_softc *sc, uint32_t *reo_desc,
17382     struct hal_reo_status *status)
17383 {
17384 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17385 	struct hal_reo_desc_thresh_reached_status *desc =
17386 	    (struct hal_reo_desc_thresh_reached_status *)tlv->value;
17387 
17388 	status->uniform_hdr.cmd_num = FIELD_GET(
17389 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17390 	status->uniform_hdr.cmd_status = FIELD_GET(
17391 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17392 
17393 	status->u.desc_thresh_reached.threshold_idx = FIELD_GET(
17394 	    HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX, desc->info0);
17395 
17396 	status->u.desc_thresh_reached.link_desc_counter0 = FIELD_GET(
17397 	    HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0, desc->info1);
17398 
17399 	status->u.desc_thresh_reached.link_desc_counter1 = FIELD_GET(
17400 	    HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1, desc->info2);
17401 
17402 	status->u.desc_thresh_reached.link_desc_counter2 = FIELD_GET(
17403 	    HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2, desc->info3);
17404 
17405 	status->u.desc_thresh_reached.link_desc_counter_sum = FIELD_GET(
17406 	    HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
17407 	    desc->info4);
17408 }
17409 
17410 void
17411 qwx_hal_reo_update_rx_reo_queue_status(struct qwx_softc *ab, uint32_t *reo_desc,
17412     struct hal_reo_status *status)
17413 {
17414 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17415 	struct hal_reo_status_hdr *desc =
17416 	    (struct hal_reo_status_hdr *)tlv->value;
17417 
17418 	status->uniform_hdr.cmd_num = FIELD_GET(
17419 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->info0);
17420 	status->uniform_hdr.cmd_status = FIELD_GET(
17421 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->info0);
17422 }
17423 
/*
 * Drain the REO status ring: parse each status descriptor by its TLV
 * tag, match it against the pending command list by command number,
 * and invoke the matching command's completion handler.
 * Returns nonzero if any descriptor was processed.
 */
int
qwx_dp_process_reo_status(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	int found = 0, ret = 0;
	uint32_t *reo_desc;
	uint16_t tag;
	struct hal_reo_status reo_status;

	srng = &sc->hal.srng_list[dp->reo_status_ring.ring_id];
	memset(&reo_status, 0, sizeof(reo_status));
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	while ((reo_desc = qwx_hal_srng_dst_get_next_entry(sc, srng))) {
		ret = 1;

		/* Dispatch on the TLV tag of the status descriptor. */
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			qwx_hal_reo_status_queue_stats(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			qwx_hal_reo_flush_queue_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			qwx_hal_reo_flush_cache_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			qwx_hal_reo_unblk_cache_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			qwx_hal_reo_flush_timeout_list_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			qwx_hal_reo_desc_thresh_reached_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			qwx_hal_reo_update_rx_reo_queue_status(sc, reo_desc,
			    &reo_status);
			break;
		default:
			printf("%s: Unknown reo status type %d\n",
			    sc->sc_dev.dv_xname, tag);
			continue;
		}
#ifdef notyet
		spin_lock_bh(&dp->reo_cmd_lock);
#endif
		/* Find and unlink the pending command this status answers. */
		TAILQ_FOREACH_SAFE(cmd, &dp->reo_cmd_list, entry, tmp) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = 1;
				TAILQ_REMOVE(&dp->reo_cmd_list, cmd, entry);
				break;
			}
		}
#ifdef notyet
		spin_unlock_bh(&dp->reo_cmd_lock);
#endif
		if (found) {
			/* Completion handler; cmd is consumed here. */
			cmd->handler(dp, (void *)&cmd->data,
			    reo_status.uniform_hdr.cmd_status);
			free(cmd, M_DEVBUF, sizeof(*cmd));
		}
		found = 0;
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
17507 
/*
 * Service all datapath SRNGs assigned to interrupt group 'grp_id':
 * tx completions, rx error rings, WBM error ring, rx rings, monitor
 * rings, the REO status ring, and rxdma error/refill rings.
 * Returns nonzero if any ring had work.
 */
int
qwx_dp_service_srng(struct qwx_softc *sc, int grp_id)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	int i, j, ret = 0;

	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		const struct ath11k_hw_tcl2wbm_rbm_map *map;

		/* Only service tx rings whose WBM ring is in this group. */
		map = &sc->hw_params.hal_params->tcl2wbm_rbm_map[i];
		if ((sc->hw_params.ring_mask->tx[grp_id]) &
		    (1 << (map->wbm_ring_num)) &&
		    qwx_dp_tx_completion_handler(sc, i))
			ret = 1;
	}

	if (sc->hw_params.ring_mask->rx_err[grp_id] &&
	    qwx_dp_process_rx_err(sc))
		ret = 1;

	if (sc->hw_params.ring_mask->rx_wbm_rel[grp_id] &&
	    qwx_dp_rx_process_wbm_err(sc))
		ret = 1;

	if (sc->hw_params.ring_mask->rx[grp_id]) {
		/* Highest set bit selects the rx ring to service. */
		i = fls(sc->hw_params.ring_mask->rx[grp_id]) - 1;
		if (qwx_dp_process_rx(sc, i))
			ret = 1;
	}

	for (i = 0; i < sc->num_radios; i++) {
		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * sc->hw_params.num_rxmda_per_pdev + j;

			if ((sc->hw_params.ring_mask->rx_mon_status[grp_id] &
			   (1 << id)) == 0)
				continue;

			if (qwx_dp_rx_process_mon_rings(sc, id))
				ret = 1;
		}
	}

	if (sc->hw_params.ring_mask->reo_status[grp_id] &&
	    qwx_dp_process_reo_status(sc))
		ret = 1;

	for (i = 0; i < sc->num_radios; i++) {
		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * sc->hw_params.num_rxmda_per_pdev + j;

			if (sc->hw_params.ring_mask->rxdma2host[grp_id] &
			   (1 << (id))) {
				if (qwx_dp_process_rxdma_err(sc, id))
					ret = 1;
			}

			/* Top up the host2rxdma refill ring if needed. */
			if (sc->hw_params.ring_mask->host2rxdma[grp_id] &
			    (1 << id)) {
				qwx_dp_rxbufs_replenish(sc, id,
				    &dp->rx_refill_buf_ring, 0,
				    sc->hw_params.hal_params->rx_buf_rbm);
			}
		}
	}

	return ret;
}
17576 
17577 int
17578 qwx_wmi_wait_for_service_ready(struct qwx_softc *sc)
17579 {
17580 	int ret;
17581 
17582 	while (!sc->wmi.service_ready) {
17583 		ret = tsleep_nsec(&sc->wmi.service_ready, 0, "qwxwmirdy",
17584 		    SEC_TO_NSEC(5));
17585 		if (ret)
17586 			return -1;
17587 	}
17588 
17589 	return 0;
17590 }
17591 
/*
 * Fill one wmi_host_pdev_band_to_mac entry per radio, mapping each
 * pdev to the frequency range it supports according to its band
 * capabilities and the HAL's regulatory limits.
 */
void
qwx_fill_band_to_mac_param(struct qwx_softc *sc,
    struct wmi_host_pdev_band_to_mac *band_to_mac)
{
	uint8_t i;
	struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
	struct qwx_pdev *pdev;

	for (i = 0; i < sc->num_radios; i++) {
		pdev = &sc->pdevs[i];
		hal_reg_cap = &sc->hal_reg_cap[i];
		band_to_mac[i].pdev_id = pdev->pdev_id;

		switch (pdev->cap.supported_bands) {
		case WMI_HOST_WLAN_2G_5G_CAP:
			/* Dual-band: span from 2 GHz low to 5 GHz high. */
			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		case WMI_HOST_WLAN_2G_CAP:
			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
			band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
			break;
		case WMI_HOST_WLAN_5G_CAP:
			band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		default:
			/* Unknown band capability: leave freqs unset. */
			break;
		}
	}
}
17623 
17624 struct mbuf *
17625 qwx_wmi_alloc_mbuf(size_t len)
17626 {
17627 	struct mbuf *m;
17628 	uint32_t round_len = roundup(len, 4);
17629 
17630 	m = qwx_htc_alloc_mbuf(sizeof(struct wmi_cmd_hdr) + round_len);
17631 	if (!m)
17632 		return NULL;
17633 
17634 	return m;
17635 }
17636 
17637 int
17638 qwx_wmi_cmd_send_nowait(struct qwx_pdev_wmi *wmi, struct mbuf *m,
17639     uint32_t cmd_id)
17640 {
17641 	struct qwx_softc *sc = wmi->wmi->sc;
17642 	struct wmi_cmd_hdr *cmd_hdr;
17643 	uint32_t cmd = 0;
17644 
17645 	cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
17646 
17647 	cmd_hdr = (struct wmi_cmd_hdr *)(mtod(m, uint8_t *) +
17648 	    sizeof(struct ath11k_htc_hdr));
17649 	cmd_hdr->cmd_id = htole32(cmd);
17650 
17651 	DNPRINTF(QWX_D_WMI, "%s: sending WMI command 0x%u\n", __func__, cmd);
17652 	return qwx_htc_send(&sc->htc, wmi->eid, m);
17653 }
17654 
/*
 * Send a WMI command, sleeping until transmit resources are available:
 * HTC tx credits when the hardware uses credit-based flow control,
 * otherwise a free copy-engine descriptor.  Each wait attempt times
 * out after 3 seconds.  Returns 0 on success or an errno-style value;
 * the caller still owns (and must free) 'm' on failure.
 */
int
qwx_wmi_cmd_send(struct qwx_pdev_wmi *wmi, struct mbuf *m, uint32_t cmd_id)
{
	struct qwx_wmi_base *wmi_sc = wmi->wmi;
	int ret = EOPNOTSUPP;
	struct qwx_softc *sc = wmi_sc->sc;
#ifdef notyet
	might_sleep();
#endif
	if (sc->hw_params.credit_flow) {
		struct qwx_htc *htc = &sc->htc;
		struct qwx_htc_ep *ep = &htc->endpoint[wmi->eid];

		/* Wait for HTC tx credits to become available. */
		while (!ep->tx_credits) {
			ret = tsleep_nsec(&ep->tx_credits, 0, "qwxtxcrd",
			    SEC_TO_NSEC(3));
			if (ret) {
				printf("%s: tx credits timeout\n",
				    sc->sc_dev.dv_xname);
				if (test_bit(ATH11K_FLAG_CRASH_FLUSH,
				    sc->sc_flags))
					return ESHUTDOWN;
				else
					return EAGAIN;
			}
		}
	} else {
		/* Wait for a free copy-engine descriptor instead. */
		while (!wmi->tx_ce_desc) {
			ret = tsleep_nsec(&wmi->tx_ce_desc, 0, "qwxtxce",
			    SEC_TO_NSEC(3));
			if (ret) {
				printf("%s: tx ce desc timeout\n",
				    sc->sc_dev.dv_xname);
				if (test_bit(ATH11K_FLAG_CRASH_FLUSH,
				    sc->sc_flags))
					return ESHUTDOWN;
				else
					return EAGAIN;
			}
		}
	}

	ret = qwx_wmi_cmd_send_nowait(wmi, m, cmd_id);

	if (ret == EAGAIN)
		printf("%s: wmi command %d timeout\n",
		    sc->sc_dev.dv_xname, cmd_id);

	if (ret == ENOBUFS)
		printf("%s: ce desc not available for wmi command %d\n",
		    sc->sc_dev.dv_xname, cmd_id);

	return ret;
}
17709 
/*
 * Send a WMI_PDEV_SET_PARAM command setting 'param_id' to 'param_value'
 * on the given pdev.  Returns 0 on success or an errno-style value.
 */
int
qwx_wmi_pdev_set_param(struct qwx_softc *sc, uint32_t param_id,
    uint32_t param_value, uint8_t pdev_id)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_pdev_set_param_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	/* Command TLV lives after the HTC and WMI headers. */
	cmd = (struct wmi_pdev_set_param_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = pdev_id;
	cmd->param_id = param_id;
	cmd->param_value = param_value;

	ret = qwx_wmi_cmd_send(wmi, m, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send WMI_PDEV_SET_PARAM cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd pdev set param %d pdev id %d value %d\n",
	    __func__, param_id, pdev_id, param_value);

	return 0;
}
17746 
/*
 * Send a WMI_LRO_CONFIG command configuring large-receive-offload with
 * randomized IPv4/IPv6 Toeplitz hash seeds for the given pdev.
 * Returns 0 on success or an errno-style value.
 */
int
qwx_wmi_pdev_lro_cfg(struct qwx_softc *sc, uint8_t pdev_id)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct ath11k_wmi_pdev_lro_config_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	/* Command TLV lives after the HTC and WMI headers. */
	cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	/* Random per-boot hash seeds. */
	arc4random_buf(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE);
	arc4random_buf(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE);

	cmd->pdev_id = pdev_id;

	ret = qwx_wmi_cmd_send(wmi, m, WMI_LRO_CONFIG_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send lro cfg req wmi cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd lro config pdev_id 0x%x\n",
	    __func__, pdev_id);

	return 0;
}
17784 
17785 int
17786 qwx_wmi_pdev_set_ps_mode(struct qwx_softc *sc, int vdev_id, uint8_t pdev_id,
17787     enum wmi_sta_ps_mode psmode)
17788 {
17789 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17790 	struct wmi_pdev_set_ps_mode_cmd *cmd;
17791 	struct mbuf *m;
17792 	int ret;
17793 
17794 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
17795 	if (!m)
17796 		return ENOMEM;
17797 
17798 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)(mtod(m, uint8_t *) +
17799 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17800 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
17801 	    WMI_TAG_STA_POWERSAVE_MODE_CMD) |
17802 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17803 	cmd->vdev_id = vdev_id;
17804 	cmd->sta_ps_mode = psmode;
17805 
17806 	ret = qwx_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_MODE_CMDID);
17807 	if (ret) {
17808 		if (ret != ESHUTDOWN) {
17809 			printf("%s: failed to send WMI_PDEV_SET_PARAM cmd\n",
17810 			    sc->sc_dev.dv_xname);
17811 		}
17812 		m_freem(m);
17813 		return ret;
17814 	}
17815 
17816 	DNPRINTF(QWX_D_WMI, "%s: cmd sta powersave mode psmode %d vdev id %d\n",
17817 	    __func__, psmode, vdev_id);
17818 
17819 	return 0;
17820 }
17821 
/*
 * Send a WMI_SCAN_PROB_REQ_OUI command telling firmware which OUI
 * (the first three bytes of 'mac_addr') to use in the source address
 * of probe request frames (supports MAC address randomization).
 * Returns 0 on success or an errno-style value.
 */
int
qwx_wmi_scan_prob_req_oui(struct qwx_softc *sc, const uint8_t *mac_addr,
    uint8_t pdev_id)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct mbuf *m;
	struct wmi_scan_prob_req_oui_cmd *cmd;
	uint32_t prob_req_oui;
	int len, ret;

	/* Pack the 3-byte OUI into a 24-bit integer, MSB first. */
	prob_req_oui = (((uint32_t)mac_addr[0]) << 16) |
		       (((uint32_t)mac_addr[1]) << 8) | mac_addr[2];

	len = sizeof(*cmd);
	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	/* Command TLV lives after the HTC and WMI headers. */
	cmd = (struct wmi_scan_prob_req_oui_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->prob_req_oui = prob_req_oui;

	DNPRINTF(QWX_D_WMI, "%s: scan prob req oui %d\n", __func__,
	    prob_req_oui);

	ret = qwx_wmi_cmd_send(wmi, m, WMI_SCAN_PROB_REQ_OUI_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send WMI_SCAN_PROB_REQ_OUI cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	return 0;
}
17862 
17863 int
17864 qwx_wmi_send_dfs_phyerr_offload_enable_cmd(struct qwx_softc *sc, uint32_t pdev_id)
17865 {
17866 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17867 	struct wmi_dfs_phyerr_offload_cmd *cmd;
17868 	struct mbuf *m;
17869 	int ret;
17870 
17871 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
17872 	if (!m)
17873 		return ENOMEM;
17874 
17875 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)(mtod(m, uint8_t *) +
17876 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17877 
17878 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
17879 	    WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
17880 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17881 
17882 	cmd->pdev_id = pdev_id;
17883 
17884 	ret = qwx_wmi_cmd_send(wmi, m,
17885 	    WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
17886 	if (ret) {
17887 		if (ret != ESHUTDOWN) {
17888 			printf("%s: failed to send "
17889 			    "WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n",
17890 			    sc->sc_dev.dv_xname);
17891 		}
17892 		m_free(m);
17893 		return ret;
17894 	}
17895 
17896 	DNPRINTF(QWX_D_WMI, "%s: cmd pdev dfs phyerr offload enable "
17897 	    "pdev id %d\n", __func__, pdev_id);
17898 
17899 	return 0;
17900 }
17901 
/*
 * Upload the scan channel list to firmware.  The list is split into as
 * many WMI_SCAN_CHAN_LIST commands as needed to respect the per-pdev
 * maximum WMI message length; chunks after the first carry the
 * "append" flag so firmware extends rather than replaces the list.
 * Returns 0 on success or an errno value on failure.
 */
int
qwx_wmi_send_scan_chan_list_cmd(struct qwx_softc *sc, uint8_t pdev_id,
    struct scan_chan_list_params *chan_list)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_scan_chan_list_cmd *cmd;
	struct mbuf *m;
	struct wmi_channel *chan_info;
	struct channel_param *tchan_info;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	uint16_t num_send_chans, num_sends = 0, max_chan_limit = 0;
	uint32_t *reg1, *reg2;

	tchan_info = chan_list->ch_param;
	/* Loop until every channel has been sent; nallchans counts down. */
	while (chan_list->nallchans) {
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		/* How many channel structs fit in one WMI message. */
		max_chan_limit = (wmi->wmi->max_msg_len[pdev_id] - len) /
		    sizeof(*chan_info);

		if (chan_list->nallchans > max_chan_limit)
			num_send_chans = max_chan_limit;
		else
			num_send_chans = chan_list->nallchans;

		chan_list->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		m = qwx_wmi_alloc_mbuf(len);
		if (!m)
			return ENOMEM;

		/* Fixed command header follows the HTC and WMI headers. */
		cmd = (struct wmi_scan_chan_list_cmd *)(mtod(m, uint8_t *) +
		    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
		cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
		    WMI_TAG_SCAN_CHAN_LIST_CMD) |
		    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
		cmd->pdev_id = chan_list->pdev_id;
		cmd->num_scan_chans = num_send_chans;
		/* Subsequent chunks append to the list sent so far. */
		if (num_sends)
			cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;

		DNPRINTF(QWX_D_WMI, "%s: no.of chan = %d len = %d "
		    "pdev_id = %d num_sends = %d\n", __func__, num_send_chans,
		    len, cmd->pdev_id, num_sends);

		ptr = (void *)(mtod(m, uint8_t *) +
		    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
		    sizeof(*cmd));

		/* Array-of-struct TLV holding the channel entries. */
		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
			    WMI_TAG_CHANNEL) |
			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = tchan_info->mhz;
			chan_info->band_center_freq1 = tchan_info->cfreq1;
			chan_info->band_center_freq2 = tchan_info->cfreq2;

			/* Translate per-channel attributes into info bits. */
			if (tchan_info->is_chan_passive)
				chan_info->info |= WMI_CHAN_INFO_PASSIVE;
			if (tchan_info->allow_he)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
			else if (tchan_info->allow_vht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
			else if (tchan_info->allow_ht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
			if (tchan_info->half_rate)
				chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
			if (tchan_info->quarter_rate)
				chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
			if (tchan_info->psc_channel)
				chan_info->info |= WMI_CHAN_INFO_PSC;
			if (tchan_info->dfs_set)
				chan_info->info |= WMI_CHAN_INFO_DFS;

			chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
			    tchan_info->phy_mode);
			/* Regulatory power limits and class. */
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
			    tchan_info->minpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
			    tchan_info->maxpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
			    tchan_info->maxregpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
			    tchan_info->reg_class_id);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
			    tchan_info->antennamax);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
			    tchan_info->maxregpower);

			DNPRINTF(QWX_D_WMI, "%s: chan scan list "
			    "chan[%d] = %u, chan_info->info %8x\n",
			    __func__, i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			tchan_info++;
		}

		ret = qwx_wmi_cmd_send(wmi, m, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			if (ret != ESHUTDOWN) {
				printf("%s: failed to send WMI_SCAN_CHAN_LIST "
				    "cmd\n", sc->sc_dev.dv_xname);
			}
			m_freem(m);
			return ret;
		}

		DNPRINTF(QWX_D_WMI, "%s: cmd scan chan list channels %d\n",
		    __func__, num_send_chans);

		num_sends++;
	}

	return 0;
}
18032 
18033 int
18034 qwx_wmi_send_11d_scan_start_cmd(struct qwx_softc *sc,
18035     struct wmi_11d_scan_start_params *param, uint8_t pdev_id)
18036 {
18037 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18038 	struct wmi_11d_scan_start_cmd *cmd;
18039 	struct mbuf *m;
18040 	int ret;
18041 
18042 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
18043 	if (!m)
18044 		return ENOMEM;
18045 
18046 	cmd = (struct wmi_11d_scan_start_cmd *)(mtod(m, uint8_t *) +
18047 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18048 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
18049 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18050 
18051 	cmd->vdev_id = param->vdev_id;
18052 	cmd->scan_period_msec = param->scan_period_msec;
18053 	cmd->start_interval_msec = param->start_interval_msec;
18054 
18055 	ret = qwx_wmi_cmd_send(wmi, m, WMI_11D_SCAN_START_CMDID);
18056 	if (ret) {
18057 		if (ret != ESHUTDOWN) {
18058 			printf("%s: failed to send WMI_11D_SCAN_START_CMDID: "
18059 			    "%d\n", sc->sc_dev.dv_xname, ret);
18060 		}
18061 		m_freem(m);
18062 		return ret;
18063 	}
18064 
18065 	DNPRINTF(QWX_D_WMI, "%s: cmd 11d scan start vdev id %d period %d "
18066 	    "ms internal %d ms\n", __func__, cmd->vdev_id,
18067 	    cmd->scan_period_msec, cmd->start_interval_msec);
18068 
18069 	return 0;
18070 }
18071 
/*
 * Translate scan request parameters into the scan-event subscription
 * mask (notify_scan_events) and the scan control flags of a WMI
 * start-scan command.  scan_ctrl_flags is rebuilt from scratch;
 * notify_scan_events bits are OR'ed into whatever is already set.
 */
static inline void
qwx_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
    struct scan_req_params *param)
{
	/* Scan events subscription */
	if (param->scan_ev_started)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_STARTED;
	if (param->scan_ev_completed)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_COMPLETED;
	if (param->scan_ev_bss_chan)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_BSS_CHANNEL;
	if (param->scan_ev_foreign_chan)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN;
	if (param->scan_ev_dequeued)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_DEQUEUED;
	if (param->scan_ev_preempted)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_PREEMPTED;
	if (param->scan_ev_start_failed)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_START_FAILED;
	if (param->scan_ev_restarted)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESTARTED;
	if (param->scan_ev_foreign_chn_exit)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
	if (param->scan_ev_suspended)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_SUSPENDED;
	if (param->scan_ev_resumed)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESUMED;

	/** Set scan control flags */
	cmd->scan_ctrl_flags = 0;
	if (param->scan_f_passive)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_PASSIVE;
	if (param->scan_f_strict_passive_pch)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
	if (param->scan_f_promisc_mode)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROMISCUOS;
	if (param->scan_f_capture_phy_err)
		cmd->scan_ctrl_flags |=  WMI_SCAN_CAPTURE_PHY_ERROR;
	if (param->scan_f_half_rate)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
	if (param->scan_f_quarter_rate)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
	if (param->scan_f_cck_rates)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_CCK_RATES;
	if (param->scan_f_ofdm_rates)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_OFDM_RATES;
	if (param->scan_f_chan_stat_evnt)
		cmd->scan_ctrl_flags |=  WMI_SCAN_CHAN_STAT_EVENT;
	if (param->scan_f_filter_prb_req)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROBE_REQ;
	if (param->scan_f_bcast_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_BCAST_PROBE_REQ;
	if (param->scan_f_offchan_mgmt_tx)
		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_MGMT_TX;
	if (param->scan_f_offchan_data_tx)
		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_DATA_TX;
	if (param->scan_f_force_active_dfs_chn)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
	if (param->scan_f_add_tpc_ie_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
	if (param->scan_f_add_ds_ie_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
	if (param->scan_f_add_spoofed_mac_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
	if (param->scan_f_add_rand_seq_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
	if (param->scan_f_en_ie_whitelist_in_probe)
		cmd->scan_ctrl_flags |=
			 WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;

	/* for adaptive scan mode using 3 bits (21 - 23 bits) */
	WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
	    param->adaptive_dwell_time_mode);

	cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext;
}
18148 
18149 int
18150 qwx_wmi_send_scan_start_cmd(struct qwx_softc *sc,
18151     struct scan_req_params *params)
18152 {
18153 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[params->pdev_id];
18154 	struct wmi_start_scan_cmd *cmd;
18155 	struct wmi_ssid *ssid = NULL;
18156 	struct wmi_mac_addr *bssid;
18157 	struct mbuf *m;
18158 	struct wmi_tlv *tlv;
18159 	void *ptr;
18160 	int i, ret, len;
18161 	uint32_t *tmp_ptr;
18162 	uint16_t extraie_len_with_pad = 0;
18163 	struct hint_short_ssid *s_ssid = NULL;
18164 	struct hint_bssid *hint_bssid = NULL;
18165 
18166 	len = sizeof(*cmd);
18167 
18168 	len += TLV_HDR_SIZE;
18169 	if (params->num_chan)
18170 		len += params->num_chan * sizeof(uint32_t);
18171 
18172 	len += TLV_HDR_SIZE;
18173 	if (params->num_ssids)
18174 		len += params->num_ssids * sizeof(*ssid);
18175 
18176 	len += TLV_HDR_SIZE;
18177 	if (params->num_bssid)
18178 		len += sizeof(*bssid) * params->num_bssid;
18179 
18180 	len += TLV_HDR_SIZE;
18181 	if (params->extraie.len && params->extraie.len <= 0xFFFF) {
18182 		extraie_len_with_pad = roundup(params->extraie.len,
18183 		    sizeof(uint32_t));
18184 	}
18185 	len += extraie_len_with_pad;
18186 
18187 	if (params->num_hint_bssid) {
18188 		len += TLV_HDR_SIZE +
18189 		    params->num_hint_bssid * sizeof(struct hint_bssid);
18190 	}
18191 
18192 	if (params->num_hint_s_ssid) {
18193 		len += TLV_HDR_SIZE +
18194 		    params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
18195 	}
18196 
18197 	m = qwx_wmi_alloc_mbuf(len);
18198 	if (!m)
18199 		return ENOMEM;
18200 
18201 	ptr = (void *)(mtod(m, uint8_t *) + sizeof(struct ath11k_htc_hdr) +
18202 	    sizeof(struct wmi_cmd_hdr));
18203 
18204 	cmd = ptr;
18205 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
18206 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18207 
18208 	cmd->scan_id = params->scan_id;
18209 	cmd->scan_req_id = params->scan_req_id;
18210 	cmd->vdev_id = params->vdev_id;
18211 	cmd->scan_priority = params->scan_priority;
18212 	cmd->notify_scan_events = params->notify_scan_events;
18213 
18214 	qwx_wmi_copy_scan_event_cntrl_flags(cmd, params);
18215 
18216 	cmd->dwell_time_active = params->dwell_time_active;
18217 	cmd->dwell_time_active_2g = params->dwell_time_active_2g;
18218 	cmd->dwell_time_passive = params->dwell_time_passive;
18219 	cmd->dwell_time_active_6g = params->dwell_time_active_6g;
18220 	cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
18221 	cmd->min_rest_time = params->min_rest_time;
18222 	cmd->max_rest_time = params->max_rest_time;
18223 	cmd->repeat_probe_time = params->repeat_probe_time;
18224 	cmd->probe_spacing_time = params->probe_spacing_time;
18225 	cmd->idle_time = params->idle_time;
18226 	cmd->max_scan_time = params->max_scan_time;
18227 	cmd->probe_delay = params->probe_delay;
18228 	cmd->burst_duration = params->burst_duration;
18229 	cmd->num_chan = params->num_chan;
18230 	cmd->num_bssid = params->num_bssid;
18231 	cmd->num_ssids = params->num_ssids;
18232 	cmd->ie_len = params->extraie.len;
18233 	cmd->n_probes = params->n_probes;
18234 	IEEE80211_ADDR_COPY(cmd->mac_addr.addr, params->mac_addr.addr);
18235 	IEEE80211_ADDR_COPY(cmd->mac_mask.addr, params->mac_mask.addr);
18236 
18237 	ptr += sizeof(*cmd);
18238 
18239 	len = params->num_chan * sizeof(uint32_t);
18240 
18241 	tlv = ptr;
18242 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
18243 	    FIELD_PREP(WMI_TLV_LEN, len);
18244 	ptr += TLV_HDR_SIZE;
18245 	tmp_ptr = (uint32_t *)ptr;
18246 
18247 	for (i = 0; i < params->num_chan; ++i)
18248 		tmp_ptr[i] = params->chan_list[i];
18249 
18250 	ptr += len;
18251 
18252 	len = params->num_ssids * sizeof(*ssid);
18253 	tlv = ptr;
18254 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
18255 	    FIELD_PREP(WMI_TLV_LEN, len);
18256 
18257 	ptr += TLV_HDR_SIZE;
18258 
18259 	if (params->num_ssids) {
18260 		ssid = ptr;
18261 		for (i = 0; i < params->num_ssids; ++i) {
18262 			ssid->ssid_len = params->ssid[i].length;
18263 			memcpy(ssid->ssid, params->ssid[i].ssid,
18264 			       params->ssid[i].length);
18265 			ssid++;
18266 		}
18267 	}
18268 
18269 	ptr += (params->num_ssids * sizeof(*ssid));
18270 	len = params->num_bssid * sizeof(*bssid);
18271 	tlv = ptr;
18272 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
18273 	    FIELD_PREP(WMI_TLV_LEN, len);
18274 
18275 	ptr += TLV_HDR_SIZE;
18276 	bssid = ptr;
18277 
18278 	if (params->num_bssid) {
18279 		for (i = 0; i < params->num_bssid; ++i) {
18280 			IEEE80211_ADDR_COPY(bssid->addr,
18281 			    params->bssid_list[i].addr);
18282 			bssid++;
18283 		}
18284 	}
18285 
18286 	ptr += params->num_bssid * sizeof(*bssid);
18287 
18288 	len = extraie_len_with_pad;
18289 	tlv = ptr;
18290 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
18291 	    FIELD_PREP(WMI_TLV_LEN, len);
18292 	ptr += TLV_HDR_SIZE;
18293 
18294 	if (extraie_len_with_pad)
18295 		memcpy(ptr, params->extraie.ptr, params->extraie.len);
18296 
18297 	ptr += extraie_len_with_pad;
18298 
18299 	if (params->num_hint_s_ssid) {
18300 		len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
18301 		tlv = ptr;
18302 		tlv->header = FIELD_PREP(WMI_TLV_TAG,
18303 		    WMI_TAG_ARRAY_FIXED_STRUCT) |
18304 		    FIELD_PREP(WMI_TLV_LEN, len);
18305 		ptr += TLV_HDR_SIZE;
18306 		s_ssid = ptr;
18307 		for (i = 0; i < params->num_hint_s_ssid; ++i) {
18308 			s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
18309 			s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
18310 			s_ssid++;
18311 		}
18312 		ptr += len;
18313 	}
18314 
18315 	if (params->num_hint_bssid) {
18316 		len = params->num_hint_bssid * sizeof(struct hint_bssid);
18317 		tlv = ptr;
18318 		tlv->header = FIELD_PREP(WMI_TLV_TAG,
18319 		    WMI_TAG_ARRAY_FIXED_STRUCT) |
18320 		    FIELD_PREP(WMI_TLV_LEN, len);
18321 		ptr += TLV_HDR_SIZE;
18322 		hint_bssid = ptr;
18323 		for (i = 0; i < params->num_hint_bssid; ++i) {
18324 			hint_bssid->freq_flags =
18325 				params->hint_bssid[i].freq_flags;
18326 			IEEE80211_ADDR_COPY(
18327 			    &params->hint_bssid[i].bssid.addr[0],
18328 			    &hint_bssid->bssid.addr[0]);
18329 			hint_bssid++;
18330 		}
18331 	}
18332 
18333 	ret = qwx_wmi_cmd_send(wmi, m, WMI_START_SCAN_CMDID);
18334 	if (ret) {
18335 		if (ret != ESHUTDOWN) {
18336 			printf("%s: failed to send WMI_START_SCAN_CMDID\n",
18337 			    sc->sc_dev.dv_xname);
18338 		}
18339 		m_freem(m);
18340 		return ret;
18341 	}
18342 
18343 	DNPRINTF(QWX_D_WMI, "%s: cmd start scan", __func__);
18344 
18345 	return 0;
18346 }
18347 
18348 int
18349 qwx_wmi_send_scan_stop_cmd(struct qwx_softc *sc,
18350     struct scan_cancel_param *param)
18351 {
18352 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
18353 	struct wmi_stop_scan_cmd *cmd;
18354 	struct mbuf *m;
18355 	int ret;
18356 
18357 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
18358 	if (!m)
18359 		return ENOMEM;
18360 
18361 	cmd = (struct wmi_stop_scan_cmd *)(mtod(m, uint8_t *) +
18362 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18363 
18364 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
18365 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18366 
18367 	cmd->vdev_id = param->vdev_id;
18368 	cmd->requestor = param->requester;
18369 	cmd->scan_id = param->scan_id;
18370 	cmd->pdev_id = param->pdev_id;
18371 	/* stop the scan with the corresponding scan_id */
18372 	if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
18373 		/* Cancelling all scans */
18374 		cmd->req_type =  WMI_SCAN_STOP_ALL;
18375 	} else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
18376 		/* Cancelling VAP scans */
18377 		cmd->req_type =  WMI_SCN_STOP_VAP_ALL;
18378 	} else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
18379 		/* Cancelling specific scan */
18380 		cmd->req_type =  WMI_SCAN_STOP_ONE;
18381 	} else {
18382 		printf("%s: invalid scan cancel param %d\n",
18383 		    sc->sc_dev.dv_xname, param->req_type);
18384 		m_freem(m);
18385 		return EINVAL;
18386 	}
18387 
18388 	ret = qwx_wmi_cmd_send(wmi, m, WMI_STOP_SCAN_CMDID);
18389 	if (ret) {
18390 		if (ret != ESHUTDOWN) {
18391 			printf("%s: failed to send WMI_STOP_SCAN_CMDID\n",
18392 			    sc->sc_dev.dv_xname);
18393 		}
18394 		m_freem(m);
18395 		return ret;
18396 	}
18397 
18398 	DNPRINTF(QWX_D_WMI, "%s: cmd stop scan\n", __func__);
18399 	return ret;
18400 }
18401 
18402 int
18403 qwx_wmi_send_peer_create_cmd(struct qwx_softc *sc, uint8_t pdev_id,
18404     struct peer_create_params *param)
18405 {
18406 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18407 	struct wmi_peer_create_cmd *cmd;
18408 	struct mbuf *m;
18409 	int ret;
18410 
18411 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
18412 	if (!m)
18413 		return ENOMEM;
18414 
18415 	cmd = (struct wmi_peer_create_cmd *)(mtod(m, uint8_t *) +
18416 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18417 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
18418 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18419 
18420 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_addr);
18421 	cmd->peer_type = param->peer_type;
18422 	cmd->vdev_id = param->vdev_id;
18423 
18424 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_CREATE_CMDID);
18425 	if (ret) {
18426 		if (ret != ESHUTDOWN) {
18427 			printf("%s: failed to submit WMI_PEER_CREATE cmd\n",
18428 			    sc->sc_dev.dv_xname);
18429 		}
18430 		m_freem(m);
18431 		return ret;
18432 	}
18433 
18434 	DNPRINTF(QWX_D_WMI, "%s: cmd peer create vdev_id %d peer_addr %s\n",
18435 	    __func__, param->vdev_id, ether_sprintf(param->peer_addr));
18436 
18437 	return ret;
18438 }
18439 
18440 int
18441 qwx_wmi_send_peer_delete_cmd(struct qwx_softc *sc, const uint8_t *peer_addr,
18442     uint8_t vdev_id, uint8_t pdev_id)
18443 {
18444 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18445 	struct wmi_peer_delete_cmd *cmd;
18446 	struct mbuf *m;
18447 	int ret;
18448 
18449 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
18450 	if (!m)
18451 		return ENOMEM;
18452 
18453 	cmd = (struct wmi_peer_delete_cmd *)(mtod(m, uint8_t *) +
18454 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18455 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
18456 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18457 
18458 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
18459 	cmd->vdev_id = vdev_id;
18460 
18461 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_DELETE_CMDID);
18462 	if (ret) {
18463 		if (ret != ESHUTDOWN) {
18464 			printf("%s: failed to send WMI_PEER_DELETE cmd\n",
18465 			    sc->sc_dev.dv_xname);
18466 		}
18467 		m_freem(m);
18468 		return ret;
18469 	}
18470 
18471 	DNPRINTF(QWX_D_WMI, "%s: cmd peer delete vdev_id %d peer_addr %pM\n",
18472 	    __func__, vdev_id, peer_addr);
18473 
18474 	return 0;
18475 }
18476 
18477 int
18478 qwx_wmi_vdev_install_key(struct qwx_softc *sc,
18479     struct wmi_vdev_install_key_arg *arg, uint8_t pdev_id)
18480 {
18481 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18482 	struct wmi_vdev_install_key_cmd *cmd;
18483 	struct wmi_tlv *tlv;
18484 	struct mbuf *m;
18485 	int ret, len;
18486 	int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
18487 
18488 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
18489 
18490 	m = qwx_wmi_alloc_mbuf(len);
18491 	if (m == NULL)
18492 		return -ENOMEM;
18493 
18494 	cmd = (struct wmi_vdev_install_key_cmd *)(mtod(m, uint8_t *) +
18495 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18496 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18497 	    WMI_TAG_VDEV_INSTALL_KEY_CMD) |
18498 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18499 	cmd->vdev_id = arg->vdev_id;
18500 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, arg->macaddr);
18501 	cmd->key_idx = arg->key_idx;
18502 	cmd->key_flags = arg->key_flags;
18503 	cmd->key_cipher = arg->key_cipher;
18504 	cmd->key_len = arg->key_len;
18505 	cmd->key_txmic_len = arg->key_txmic_len;
18506 	cmd->key_rxmic_len = arg->key_rxmic_len;
18507 
18508 	if (arg->key_rsc_counter)
18509 		memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
18510 		       sizeof(struct wmi_key_seq_counter));
18511 
18512 	tlv = (struct wmi_tlv *)(mtod(m, uint8_t *) +
18513 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
18514 	    sizeof(*cmd));
18515 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
18516 	    FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
18517 	if (arg->key_data)
18518 		memcpy(tlv->value, (uint8_t *)arg->key_data,
18519 		    key_len_aligned);
18520 
18521 	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_INSTALL_KEY_CMDID);
18522 	if (ret) {
18523 		printf("%s: failed to send WMI_VDEV_INSTALL_KEY cmd\n",
18524 		    sc->sc_dev.dv_xname);
18525 		m_freem(m);
18526 		return ret;
18527 	}
18528 
18529 	DNPRINTF(QWX_D_WMI,
18530 	    "%s: cmd vdev install key idx %d cipher %d len %d\n",
18531 	    __func__, arg->key_idx, arg->key_cipher, arg->key_len);
18532 
18533 	return ret;
18534 }
18535 
/*
 * Translate peer association parameters into the WMI peer_flags
 * bitmask of a peer-assoc command.  cmd->peer_flags is rebuilt from
 * scratch.  hw_crypto_disabled controls whether authorization is
 * suppressed until key installation for 4-way-handshake AUTH modes.
 */
void
qwx_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
    struct peer_assoc_params *param, int hw_crypto_disabled)
{
	cmd->peer_flags = 0;

	/* WME/QoS-related capabilities only apply when WME is set. */
	if (param->is_wme_set) {
		if (param->qos_flag)
			cmd->peer_flags |= WMI_PEER_QOS;
		if (param->apsd_flag)
			cmd->peer_flags |= WMI_PEER_APSD;
		if (param->ht_flag)
			cmd->peer_flags |= WMI_PEER_HT;
		if (param->bw_40)
			cmd->peer_flags |= WMI_PEER_40MHZ;
		if (param->bw_80)
			cmd->peer_flags |= WMI_PEER_80MHZ;
		if (param->bw_160)
			cmd->peer_flags |= WMI_PEER_160MHZ;

		/* Typically if STBC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (param->stbc_flag)
			cmd->peer_flags |= WMI_PEER_STBC;

		/* Typically if LDPC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (param->ldpc_flag)
			cmd->peer_flags |= WMI_PEER_LDPC;

		if (param->static_mimops_flag)
			cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
		if (param->dynamic_mimops_flag)
			cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
		if (param->spatial_mux_flag)
			cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
		if (param->vht_flag)
			cmd->peer_flags |= WMI_PEER_VHT;
		if (param->he_flag)
			cmd->peer_flags |= WMI_PEER_HE;
		if (param->twt_requester)
			cmd->peer_flags |= WMI_PEER_TWT_REQ;
		if (param->twt_responder)
			cmd->peer_flags |= WMI_PEER_TWT_RESP;
	}

	/* Suppress authorization for all AUTH modes that need 4-way handshake
	 * (during re-association).
	 * Authorization will be done for these modes on key installation.
	 */
	if (param->auth_flag)
		cmd->peer_flags |= WMI_PEER_AUTH;
	if (param->need_ptk_4_way) {
		cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
		/* Only clear AUTH when hardware crypto handles the keys. */
		if (!hw_crypto_disabled && param->is_assoc)
			cmd->peer_flags &= ~WMI_PEER_AUTH;
	}
	if (param->need_gtk_2_way)
		cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
	/* safe mode bypass the 4-way handshake */
	if (param->safe_mode_enabled)
		cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
				     WMI_PEER_NEED_GTK_2_WAY);

	if (param->is_pmf_enabled)
		cmd->peer_flags |= WMI_PEER_PMF;

	/* Disable AMSDU for station transmit, if user configures it */
	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
	 * it
	 * if (param->amsdu_disable) Add after FW support
	 **/

	/* Target asserts if node is marked HT and all MCS is set to 0.
	 * Mark the node as non-HT if all the mcs rates are disabled through
	 * iwpriv
	 **/
	if (param->peer_ht_rates.num_rates == 0)
		cmd->peer_flags &= ~WMI_PEER_HT;
}
18618 
/*
 * Send a WMI_PEER_ASSOC command describing an associated peer's
 * capabilities and rate sets to firmware.  The message layout is the
 * fixed command struct followed by TLVs for legacy rates, HT rates,
 * a VHT rate-set struct, and an array of HE rate-set structs.
 * Returns 0 on success or an errno value on failure.
 */
int
qwx_wmi_send_peer_assoc_cmd(struct qwx_softc *sc, uint8_t pdev_id,
    struct peer_assoc_params *param)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct wmi_vht_rate_set *mcs;
	struct wmi_he_rate_set *he_mcs;
	struct mbuf *m;
	struct wmi_tlv *tlv;
	void *ptr;
	uint32_t peer_legacy_rates_align;
	uint32_t peer_ht_rates_align;
	int i, ret, len;

	/* Rate arrays are padded to 32-bit alignment in the message. */
	peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
	    sizeof(uint32_t));
	peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
	    sizeof(uint32_t));

	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(uint8_t)) +
	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(uint8_t)) +
	      sizeof(*mcs) + TLV_HDR_SIZE +
	      (sizeof(*he_mcs) * param->peer_he_mcs_count);

	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	/* The command payload starts after the HTC and WMI headers. */
	ptr = (void *)(mtod(m, uint8_t *) + sizeof(struct ath11k_htc_hdr) +
	    sizeof(struct wmi_cmd_hdr));

	cmd = ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->vdev_id;

	cmd->peer_new_assoc = param->peer_new_assoc;
	cmd->peer_associd = param->peer_associd;

	qwx_wmi_copy_peer_flags(cmd, param,
	    test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags));

	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_mac);

	cmd->peer_rate_caps = param->peer_rate_caps;
	cmd->peer_caps = param->peer_caps;
	cmd->peer_listen_intval = param->peer_listen_intval;
	cmd->peer_ht_caps = param->peer_ht_caps;
	cmd->peer_max_mpdu = param->peer_max_mpdu;
	cmd->peer_mpdu_density = param->peer_mpdu_density;
	cmd->peer_vht_caps = param->peer_vht_caps;
	cmd->peer_phymode = param->peer_phymode;

	/* Update 11ax capabilities */
	cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
	cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
	cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
	cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
	cmd->peer_he_ops = param->peer_he_ops;
	memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
	       sizeof(param->peer_he_cap_phyinfo));
	memcpy(&cmd->peer_ppet, &param->peer_ppet,
	       sizeof(param->peer_ppet));

	/* Update peer legacy rate information */
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
	    FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);

	ptr += TLV_HDR_SIZE;

	cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
	memcpy(ptr, param->peer_legacy_rates.rates,
	    param->peer_legacy_rates.num_rates);

	/* Update peer HT rate information */
	ptr += peer_legacy_rates_align;

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
	    FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
	ptr += TLV_HDR_SIZE;
	cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
	memcpy(ptr, param->peer_ht_rates.rates,
	    param->peer_ht_rates.num_rates);

	/* VHT Rates */
	ptr += peer_ht_rates_align;

	mcs = ptr;

	mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);

	cmd->peer_nss = param->peer_nss;

	/* Update bandwidth-NSS mapping */
	cmd->peer_bw_rxnss_override = 0;
	cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;

	if (param->vht_capable) {
		mcs->rx_max_rate = param->rx_max_rate;
		mcs->rx_mcs_set = param->rx_mcs_set;
		mcs->tx_max_rate = param->tx_max_rate;
		mcs->tx_mcs_set = param->tx_mcs_set;
	}

	/* HE Rates */
	cmd->peer_he_mcs = param->peer_he_mcs_count;
	cmd->min_data_rate = param->min_data_rate;

	ptr += sizeof(*mcs);

	len = param->peer_he_mcs_count * sizeof(*he_mcs);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, len);
	ptr += TLV_HDR_SIZE;

	/* Loop through the HE rate set */
	for (i = 0; i < param->peer_he_mcs_count; i++) {
		he_mcs = ptr;
		he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
		    WMI_TAG_HE_RATE_SET) |
		    FIELD_PREP(WMI_TLV_LEN, sizeof(*he_mcs) - TLV_HDR_SIZE);

		/* NOTE(review): rx is filled from the tx set and vice
		 * versa; the Linux ath11k driver does the same — confirm
		 * this cross-assignment is intentional. */
		he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
		he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
		ptr += sizeof(*he_mcs);
	}

	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_ASSOC_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send WMI_PEER_ASSOC_CMDID\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd peer assoc vdev id %d assoc id %d "
	    "peer mac %s peer_flags %x rate_caps %x peer_caps %x "
	    "listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d "
	    "peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x "
	    "he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
	    __func__, cmd->vdev_id, cmd->peer_associd,
	    ether_sprintf(param->peer_mac),
	    cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
	    cmd->peer_listen_intval, cmd->peer_ht_caps,
	    cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
	    cmd->peer_mpdu_density, cmd->peer_vht_caps, cmd->peer_he_cap_info,
	    cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
	    cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
	    cmd->peer_he_cap_phy[2], cmd->peer_bw_rxnss_override);

	return 0;
}
18784 
/*
 * Copy the host-side target resource configuration into the on-wire
 * WMI resource config structure which is embedded in the WMI init
 * command (see qwx_init_cmd_send).  This is a plain field-by-field
 * copy; no values are translated or byte-swapped here.
 */
void
qwx_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
    struct target_resource_config *tg_cfg)
{
	wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
	wmi_cfg->num_peers = tg_cfg->num_peers;
	wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
	wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
	wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
	wmi_cfg->num_tids = tg_cfg->num_tids;
	wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
	wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
	wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
	wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
	wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
	wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
	wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
	wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
	wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
	wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
	wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
	wmi_cfg->roam_offload_max_ap_profiles =
	    tg_cfg->roam_offload_max_ap_profiles;
	wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
	wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
	wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
	wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
	wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
	wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
	wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
	    tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
	wmi_cfg->vow_config = tg_cfg->vow_config;
	wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
	wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
	wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
	wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
	wmi_cfg->num_tdls_conn_table_entries =
	    tg_cfg->num_tdls_conn_table_entries;
	wmi_cfg->beacon_tx_offload_max_vdev =
	    tg_cfg->beacon_tx_offload_max_vdev;
	wmi_cfg->num_multicast_filter_entries =
	    tg_cfg->num_multicast_filter_entries;
	wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
	wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
	wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
	wmi_cfg->max_tdls_concurrent_sleep_sta =
	    tg_cfg->max_tdls_concurrent_sleep_sta;
	wmi_cfg->max_tdls_concurrent_buffer_sta =
	    tg_cfg->max_tdls_concurrent_buffer_sta;
	wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
	wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
	wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
	wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
	wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
	wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
	wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
	wmi_cfg->flag1 = tg_cfg->flag1;
	wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
	wmi_cfg->sched_params = tg_cfg->sched_params;
	wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
	wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
#ifdef notyet /* 6 GHz support */
	wmi_cfg->host_service_flags &=
	    ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
	wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported <<
	    WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
	wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET;
	wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt;
	wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period;
#endif
}
18857 
18858 int
18859 qwx_init_cmd_send(struct qwx_pdev_wmi *wmi, struct wmi_init_cmd_param *param)
18860 {
18861 	struct mbuf *m;
18862 	struct wmi_init_cmd *cmd;
18863 	struct wmi_resource_config *cfg;
18864 	struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
18865 	struct wmi_pdev_band_to_mac *band_to_mac;
18866 	struct wlan_host_mem_chunk *host_mem_chunks;
18867 	struct wmi_tlv *tlv;
18868 	size_t ret, len;
18869 	void *ptr;
18870 	uint32_t hw_mode_len = 0;
18871 	uint16_t idx;
18872 
18873 	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
18874 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
18875 		    (param->num_band_to_mac * sizeof(*band_to_mac));
18876 
18877 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
18878 	    (param->num_mem_chunks ?
18879 	    (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
18880 
18881 	m = qwx_wmi_alloc_mbuf(len);
18882 	if (!m)
18883 		return ENOMEM;
18884 
18885 	cmd = (struct wmi_init_cmd *)(mtod(m, uint8_t *) +
18886 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18887 
18888 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
18889 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18890 
18891 	ptr = mtod(m, uint8_t *) + sizeof(struct ath11k_htc_hdr) +
18892 	   sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
18893 	cfg = ptr;
18894 
18895 	qwx_wmi_copy_resource_config(cfg, param->res_cfg);
18896 
18897 	cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
18898 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
18899 
18900 	ptr += sizeof(*cfg);
18901 	host_mem_chunks = ptr + TLV_HDR_SIZE;
18902 	len = sizeof(struct wlan_host_mem_chunk);
18903 
18904 	for (idx = 0; idx < param->num_mem_chunks; ++idx) {
18905 		host_mem_chunks[idx].tlv_header =
18906 		    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
18907 		    FIELD_PREP(WMI_TLV_LEN, len);
18908 
18909 		host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
18910 		host_mem_chunks[idx].size = param->mem_chunks[idx].len;
18911 		host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
18912 
18913 		DNPRINTF(QWX_D_WMI,
18914 		    "%s: host mem chunk req_id %d paddr 0x%llx len %d\n",
18915 		    __func__, param->mem_chunks[idx].req_id,
18916 		    (uint64_t)param->mem_chunks[idx].paddr,
18917 		    param->mem_chunks[idx].len);
18918 	}
18919 	cmd->num_host_mem_chunks = param->num_mem_chunks;
18920 	len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
18921 
18922 	/* num_mem_chunks is zero */
18923 	tlv = ptr;
18924 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
18925 	    FIELD_PREP(WMI_TLV_LEN, len);
18926 	ptr += TLV_HDR_SIZE + len;
18927 
18928 	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
18929 		hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
18930 		hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18931 		    WMI_TAG_PDEV_SET_HW_MODE_CMD) |
18932 		    FIELD_PREP(WMI_TLV_LEN, sizeof(*hw_mode) - TLV_HDR_SIZE);
18933 
18934 		hw_mode->hw_mode_index = param->hw_mode_id;
18935 		hw_mode->num_band_to_mac = param->num_band_to_mac;
18936 
18937 		ptr += sizeof(*hw_mode);
18938 
18939 		len = param->num_band_to_mac * sizeof(*band_to_mac);
18940 		tlv = ptr;
18941 		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
18942 		    FIELD_PREP(WMI_TLV_LEN, len);
18943 
18944 		ptr += TLV_HDR_SIZE;
18945 		len = sizeof(*band_to_mac);
18946 
18947 		for (idx = 0; idx < param->num_band_to_mac; idx++) {
18948 			band_to_mac = (void *)ptr;
18949 
18950 			band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18951 			    WMI_TAG_PDEV_BAND_TO_MAC) |
18952 			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
18953 			band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
18954 			band_to_mac->start_freq =
18955 			    param->band_to_mac[idx].start_freq;
18956 			band_to_mac->end_freq =
18957 			    param->band_to_mac[idx].end_freq;
18958 			ptr += sizeof(*band_to_mac);
18959 		}
18960 	}
18961 
18962 	ret = qwx_wmi_cmd_send(wmi, m, WMI_INIT_CMDID);
18963 	if (ret) {
18964 		if (ret != ESHUTDOWN)
18965 			printf("%s: failed to send WMI_INIT_CMDID\n", __func__);
18966 		m_freem(m);
18967 		return ret;
18968 	}
18969 
18970 	DNPRINTF(QWX_D_WMI, "%s: cmd wmi init\n", __func__);
18971 
18972 	return 0;
18973 }
18974 
18975 int
18976 qwx_wmi_cmd_init(struct qwx_softc *sc)
18977 {
18978 	struct qwx_wmi_base *wmi_sc = &sc->wmi;
18979 	struct wmi_init_cmd_param init_param;
18980 	struct target_resource_config  config;
18981 
18982 	memset(&init_param, 0, sizeof(init_param));
18983 	memset(&config, 0, sizeof(config));
18984 
18985 	sc->hw_params.hw_ops->wmi_init_config(sc, &config);
18986 
18987 	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT))
18988 		config.is_reg_cc_ext_event_supported = 1;
18989 
18990 	memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
18991 
18992 	init_param.res_cfg = &wmi_sc->wlan_resource_config;
18993 	init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
18994 	init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
18995 	init_param.mem_chunks = wmi_sc->mem_chunks;
18996 
18997 	if (sc->hw_params.single_pdev_only)
18998 		init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
18999 
19000 	init_param.num_band_to_mac = sc->num_radios;
19001 	qwx_fill_band_to_mac_param(sc, init_param.band_to_mac);
19002 
19003 	return qwx_init_cmd_send(&wmi_sc->wmi[0], &init_param);
19004 }
19005 
19006 int
19007 qwx_wmi_wait_for_unified_ready(struct qwx_softc *sc)
19008 {
19009 	int ret;
19010 
19011 	while (!sc->wmi.unified_ready) {
19012 		ret = tsleep_nsec(&sc->wmi.unified_ready, 0, "qwxunfrdy",
19013 		    SEC_TO_NSEC(5));
19014 		if (ret)
19015 			return -1;
19016 	}
19017 
19018 	return 0;
19019 }
19020 
19021 int
19022 qwx_wmi_set_hw_mode(struct qwx_softc *sc,
19023     enum wmi_host_hw_mode_config_type mode)
19024 {
19025 	struct wmi_pdev_set_hw_mode_cmd_param *cmd;
19026 	struct mbuf *m;
19027 	struct qwx_wmi_base *wmi = &sc->wmi;
19028 	int len;
19029 	int ret;
19030 
19031 	len = sizeof(*cmd);
19032 
19033 	m = qwx_wmi_alloc_mbuf(len);
19034 	if (!m)
19035 		return ENOMEM;
19036 
19037 	cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)(mtod(m, uint8_t *) +
19038 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19039 
19040 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
19041 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19042 
19043 	cmd->pdev_id = WMI_PDEV_ID_SOC;
19044 	cmd->hw_mode_index = mode;
19045 
19046 	ret = qwx_wmi_cmd_send(&wmi->wmi[0], m, WMI_PDEV_SET_HW_MODE_CMDID);
19047 	if (ret) {
19048 		if (ret != ESHUTDOWN) {
19049 			printf("%s: failed to send "
19050 			    "WMI_PDEV_SET_HW_MODE_CMDID\n", __func__);
19051 		}
19052 		m_freem(m);
19053 		return ret;
19054 	}
19055 
19056 	DNPRINTF(QWX_D_WMI, "%s: cmd pdev set hw mode %d\n", __func__,
19057 	    cmd->hw_mode_index);
19058 
19059 	return 0;
19060 }
19061 
19062 int
19063 qwx_wmi_set_sta_ps_param(struct qwx_softc *sc, uint32_t vdev_id,
19064      uint8_t pdev_id, uint32_t param, uint32_t param_value)
19065 {
19066 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19067 	struct wmi_sta_powersave_param_cmd *cmd;
19068 	struct mbuf *m;
19069 	int ret;
19070 
19071 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19072 	if (!m)
19073 		return ENOMEM;
19074 
19075 	cmd = (struct wmi_sta_powersave_param_cmd *)(mtod(m, uint8_t *) +
19076 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19077 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
19078 	    WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
19079 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19080 
19081 	cmd->vdev_id = vdev_id;
19082 	cmd->param = param;
19083 	cmd->value = param_value;
19084 
19085 	ret = qwx_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_PARAM_CMDID);
19086 	if (ret) {
19087 		if (ret != ESHUTDOWN) {
19088 			printf("%s: failed to send "
19089 			    "WMI_STA_POWERSAVE_PARAM_CMDID",
19090 			    sc->sc_dev.dv_xname);
19091 		}
19092 		m_freem(m);
19093 		return ret;
19094 	}
19095 
19096 	DNPRINTF(QWX_D_WMI, "%s: cmd set powersave param vdev_id %d param %d "
19097 	    "value %d\n", __func__, vdev_id, param, param_value);
19098 
19099 	return 0;
19100 }
19101 
19102 int
19103 qwx_wmi_mgmt_send(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
19104     uint32_t buf_id, struct mbuf *frame, struct qwx_tx_data *tx_data)
19105 {
19106 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19107 	struct wmi_mgmt_send_cmd *cmd;
19108 	struct wmi_tlv *frame_tlv;
19109 	struct mbuf *m;
19110 	uint32_t buf_len;
19111 	int ret, len;
19112 	uint64_t paddr;
19113 
19114 	paddr = tx_data->map->dm_segs[0].ds_addr;
19115 
19116 	buf_len = frame->m_pkthdr.len < WMI_MGMT_SEND_DOWNLD_LEN ?
19117 	    frame->m_pkthdr.len : WMI_MGMT_SEND_DOWNLD_LEN;
19118 
19119 	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
19120 
19121 	m = qwx_wmi_alloc_mbuf(len);
19122 	if (!m)
19123 		return ENOMEM;
19124 
19125 	cmd = (struct wmi_mgmt_send_cmd *)(mtod(m, uint8_t *) +
19126 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19127 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
19128 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19129 	cmd->vdev_id = arvif->vdev_id;
19130 	cmd->desc_id = buf_id;
19131 	cmd->chanfreq = 0;
19132 	cmd->paddr_lo = paddr & 0xffffffff;
19133 	cmd->paddr_hi = paddr >> 32;
19134 	cmd->frame_len = frame->m_pkthdr.len;
19135 	cmd->buf_len = buf_len;
19136 	cmd->tx_params_valid = 0;
19137 
19138 	frame_tlv = (struct wmi_tlv *)(mtod(m, uint8_t *) +
19139 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
19140 	    sizeof(*cmd));
19141 	frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
19142 	    FIELD_PREP(WMI_TLV_LEN, buf_len);
19143 
19144 	memcpy(frame_tlv->value, mtod(frame, void *), buf_len);
19145 #if 0 /* Not needed on OpenBSD? */
19146 	ath11k_ce_byte_swap(frame_tlv->value, buf_len);
19147 #endif
19148 	ret = qwx_wmi_cmd_send(wmi, m, WMI_MGMT_TX_SEND_CMDID);
19149 	if (ret) {
19150 		if (ret != ESHUTDOWN) {
19151 			printf("%s: failed to submit "
19152 			    "WMI_MGMT_TX_SEND_CMDID cmd\n",
19153 			    sc->sc_dev.dv_xname);
19154 		}
19155 		m_freem(m);
19156 		return ret;
19157 	}
19158 
19159 	DNPRINTF(QWX_D_WMI, "%s: cmd mgmt tx send", __func__);
19160 
19161 	tx_data->m = frame;
19162 	return 0;
19163 }
19164 
/*
 * Create a virtual device (vdev) in firmware via WMI_VDEV_CREATE_CMDID.
 * The command is followed by an array TLV with one wmi_vdev_txrx_streams
 * entry per supported band (2 GHz and 5 GHz) describing the configured
 * tx/rx chain counts.
 * Returns 0 on success or an errno value on failure.
 */
int
qwx_wmi_vdev_create(struct qwx_softc *sc, uint8_t *macaddr,
    struct vdev_create_params *param)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
	struct wmi_vdev_create_cmd *cmd;
	struct mbuf *m;
	struct wmi_vdev_txrx_streams *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->if_id;
	cmd->vdev_type = param->type;
	cmd->vdev_subtype = param->subtype;
	cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
	cmd->pdev_id = param->pdev_id;
	cmd->mbssid_flags = param->mbssid_flags;
	cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id;

	IEEE80211_ADDR_COPY(cmd->vdev_macaddr.addr, macaddr);

	/* Array TLV holding the per-band txrx stream entries. */
	ptr = (void *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
	    sizeof(*cmd));
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, len);

	/* First entry: 2 GHz band (chains[0]). */
	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header =
	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
	txrx_streams->supported_tx_streams = param->chains[0].tx;
	txrx_streams->supported_rx_streams = param->chains[0].rx;

	/* Second entry: 5 GHz band (chains[1]). */
	txrx_streams++;
	txrx_streams->tlv_header =
	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
	txrx_streams->supported_tx_streams = param->chains[1].tx;
	txrx_streams->supported_rx_streams = param->chains[1].rx;

	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit WMI_VDEV_CREATE_CMDID\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd vdev create id %d type %d subtype %d "
	    "macaddr %s pdevid %d\n", __func__, param->if_id, param->type,
	    param->subtype, ether_sprintf(macaddr), param->pdev_id);

	return ret;
}
19246 
19247 int
19248 qwx_wmi_vdev_set_param_cmd(struct qwx_softc *sc, uint32_t vdev_id,
19249     uint8_t pdev_id, uint32_t param_id, uint32_t param_value)
19250 {
19251 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19252 	struct wmi_vdev_set_param_cmd *cmd;
19253 	struct mbuf *m;
19254 	int ret;
19255 
19256 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19257 	if (!m)
19258 		return ENOMEM;
19259 
19260 	cmd = (struct wmi_vdev_set_param_cmd *)(mtod(m, uint8_t *) +
19261 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19262 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
19263 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19264 
19265 	cmd->vdev_id = vdev_id;
19266 	cmd->param_id = param_id;
19267 	cmd->param_value = param_value;
19268 
19269 	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_SET_PARAM_CMDID);
19270 	if (ret) {
19271 		if (ret != ESHUTDOWN) {
19272 			printf("%s: failed to send WMI_VDEV_SET_PARAM_CMDID\n",
19273 			    sc->sc_dev.dv_xname);
19274 		}
19275 		m_freem(m);
19276 		return ret;
19277 	}
19278 
19279 	DNPRINTF(QWX_D_WMI, "%s: cmd vdev set param vdev 0x%x param %d "
19280 	    "value %d\n", __func__, vdev_id, param_id, param_value);
19281 
19282 	return 0;
19283 }
19284 
/*
 * Bring a vdev up via WMI_VDEV_UP_CMDID, associating it with the given
 * BSSID and association ID.  The optional tx_bssid plus
 * nontx_profile_idx/cnt describe the transmitting BSSID for
 * multiple-BSSID (non-transmitted profile) setups.
 * Returns 0 on success or an errno value on failure.
 */
int
qwx_wmi_vdev_up(struct qwx_softc *sc, uint32_t vdev_id, uint32_t pdev_id,
    uint32_t aid, const uint8_t *bssid, uint8_t *tx_bssid,
    uint32_t nontx_profile_idx, uint32_t nontx_profile_cnt)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_vdev_up_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->vdev_assoc_id = aid;

	IEEE80211_ADDR_COPY(cmd->vdev_bssid.addr, bssid);

	cmd->nontx_profile_idx = nontx_profile_idx;
	cmd->nontx_profile_cnt = nontx_profile_cnt;
	if (tx_bssid)
		IEEE80211_ADDR_COPY(cmd->tx_vdev_bssid.addr, tx_bssid);
#if 0
	/* Linux reference code for deriving the non-transmitted profile
	 * settings from mac80211 state; not applicable on OpenBSD. */
	if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
		bss_conf = &arvif->vif->bss_conf;

		if (bss_conf->nontransmitted) {
			ether_addr_copy(cmd->tx_vdev_bssid.addr,
					bss_conf->transmitter_bssid);
			cmd->nontx_profile_idx = bss_conf->bssid_index;
			cmd->nontx_profile_cnt = bss_conf->bssid_indicator;
		}
	}
#endif
	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_UP_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit WMI_VDEV_UP cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd vdev up id 0x%x assoc id %d bssid %s\n",
	    __func__, vdev_id, aid, ether_sprintf((u_char *)bssid));

	return 0;
}
19340 
19341 int
19342 qwx_wmi_vdev_down(struct qwx_softc *sc, uint32_t vdev_id, uint8_t pdev_id)
19343 {
19344 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19345 	struct wmi_vdev_down_cmd *cmd;
19346 	struct mbuf *m;
19347 	int ret;
19348 
19349 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19350 	if (!m)
19351 		return ENOMEM;
19352 
19353 	cmd = (struct wmi_vdev_down_cmd *)(mtod(m, uint8_t *) +
19354 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19355 
19356 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
19357 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19358 	cmd->vdev_id = vdev_id;
19359 
19360 	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_DOWN_CMDID);
19361 	if (ret) {
19362 		if (ret != ESHUTDOWN) {
19363 			printf("%s: failed to submit WMI_VDEV_DOWN cmd\n",
19364 			    sc->sc_dev.dv_xname);
19365 		}
19366 		m_freem(m);
19367 		return ret;
19368 	}
19369 
19370 	DNPRINTF(QWX_D_WMI, "%s: cmd vdev down id 0x%x\n", __func__, vdev_id);
19371 
19372 	return 0;
19373 }
19374 
19375 void
19376 qwx_wmi_put_wmi_channel(struct wmi_channel *chan,
19377     struct wmi_vdev_start_req_arg *arg)
19378 {
19379 	uint32_t center_freq1 = arg->channel.band_center_freq1;
19380 
19381 	memset(chan, 0, sizeof(*chan));
19382 
19383 	chan->mhz = arg->channel.freq;
19384 	chan->band_center_freq1 = arg->channel.band_center_freq1;
19385 
19386 	if (arg->channel.mode == MODE_11AX_HE160) {
19387 		if (arg->channel.freq > arg->channel.band_center_freq1)
19388 			chan->band_center_freq1 = center_freq1 + 40;
19389 		else
19390 			chan->band_center_freq1 = center_freq1 - 40;
19391 
19392 		chan->band_center_freq2 = arg->channel.band_center_freq1;
19393 	} else if ((arg->channel.mode == MODE_11AC_VHT80_80) ||
19394 	    (arg->channel.mode == MODE_11AX_HE80_80)) {
19395 		chan->band_center_freq2 = arg->channel.band_center_freq2;
19396 	} else
19397 		chan->band_center_freq2 = 0;
19398 
19399 	chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
19400 	if (arg->channel.passive)
19401 		chan->info |= WMI_CHAN_INFO_PASSIVE;
19402 	if (arg->channel.allow_ibss)
19403 		chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
19404 	if (arg->channel.allow_ht)
19405 		chan->info |= WMI_CHAN_INFO_ALLOW_HT;
19406 	if (arg->channel.allow_vht)
19407 		chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
19408 	if (arg->channel.allow_he)
19409 		chan->info |= WMI_CHAN_INFO_ALLOW_HE;
19410 	if (arg->channel.ht40plus)
19411 		chan->info |= WMI_CHAN_INFO_HT40_PLUS;
19412 	if (arg->channel.chan_radar)
19413 		chan->info |= WMI_CHAN_INFO_DFS;
19414 	if (arg->channel.freq2_radar)
19415 		chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
19416 
19417 	chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
19418 	    arg->channel.max_power) |
19419 	    FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
19420 	    arg->channel.max_reg_power);
19421 
19422 	chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
19423 	    arg->channel.max_antenna_gain) |
19424 	    FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
19425 	    arg->channel.max_power);
19426 }
19427 
19428 int
19429 qwx_wmi_vdev_stop(struct qwx_softc *sc, uint8_t vdev_id, uint8_t pdev_id)
19430 {
19431 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19432 	struct wmi_vdev_stop_cmd *cmd;
19433 	struct mbuf *m;
19434 	int ret;
19435 
19436 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19437 	if (!m)
19438 		return ENOMEM;
19439 
19440 	cmd = (struct wmi_vdev_stop_cmd *)(mtod(m, uint8_t *) +
19441 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19442 
19443 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
19444 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19445 	cmd->vdev_id = vdev_id;
19446 
19447 	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_STOP_CMDID);
19448 	if (ret) {
19449 		if (ret != ESHUTDOWN) {
19450 			printf("%s: failed to submit WMI_VDEV_STOP cmd\n",
19451 			    sc->sc_dev.dv_xname);
19452 		}
19453 		m_freem(m);
19454 		return ret;
19455 	}
19456 
19457 	DNPRINTF(QWX_D_WMI, "%s: cmd vdev stop id 0x%x\n", __func__, vdev_id);
19458 
19459 	return ret;
19460 }
19461 
/*
 * Start (or restart) a vdev on a channel via
 * WMI_VDEV_START_REQUEST_CMDID / WMI_VDEV_RESTART_REQUEST_CMDID.
 * The command TLV is followed by a wmi_channel TLV and an (empty)
 * array TLV reserved for P2P NoA descriptors; the layout is built
 * sequentially in one mbuf, so pointer arithmetic order matters.
 * SSID, hidden-SSID and PMF flags are only set on a fresh start,
 * not on restart.
 * Returns 0 on success or an errno value on failure.
 */
int
qwx_wmi_vdev_start(struct qwx_softc *sc, struct wmi_vdev_start_req_arg *arg,
    int pdev_id, int restart)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_vdev_start_request_cmd *cmd;
	struct mbuf *m;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	if (arg->ssid_len > sizeof(cmd->ssid.ssid))
		return EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_VDEV_START_REQUEST_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	cmd->beacon_interval = arg->bcn_intval;
	cmd->bcn_tx_rate = arg->bcn_tx_rate;
	cmd->dtim_period = arg->dtim_period;
	cmd->num_noa_descriptors = arg->num_noa_descriptors;
	cmd->preferred_rx_streams = arg->pref_rx_streams;
	cmd->preferred_tx_streams = arg->pref_tx_streams;
	cmd->cac_duration_ms = arg->cac_duration_ms;
	cmd->regdomain = arg->regdomain;
	cmd->he_ops = arg->he_ops;
	cmd->mbssid_flags = arg->mbssid_flags;
	cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id;

	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = arg->ssid_len;
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
		if (arg->pmf_enabled)
			cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
	}

	cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
		cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;

	/* Channel TLV immediately follows the command. */
	ptr = mtod(m, void *) + sizeof(struct ath11k_htc_hdr) +
	    sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
	chan = ptr;

	qwx_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*chan) - TLV_HDR_SIZE);
	ptr += sizeof(*chan);

	/* Empty array TLV; no P2P NoA descriptors are sent. */
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	ret = qwx_wmi_cmd_send(wmi, m, restart ?
	    WMI_VDEV_RESTART_REQUEST_CMDID : WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit vdev_%s cmd\n",
			    sc->sc_dev.dv_xname, restart ? "restart" : "start");
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd vdev %s id 0x%x freq %u mode 0x%x\n",
	   __func__, restart ? "restart" : "start", arg->vdev_id,
	   arg->channel.freq, arg->channel.mode);

	return ret;
}
19553 
19554 int
19555 qwx_core_start(struct qwx_softc *sc)
19556 {
19557 	int ret;
19558 
19559 	ret = qwx_wmi_attach(sc);
19560 	if (ret) {
19561 		printf("%s: failed to attach wmi: %d\n",
19562 		    sc->sc_dev.dv_xname, ret);
19563 		return ret;
19564 	}
19565 
19566 	ret = qwx_htc_init(sc);
19567 	if (ret) {
19568 		printf("%s: failed to init htc: %d\n",
19569 		    sc->sc_dev.dv_xname, ret);
19570 		goto err_wmi_detach;
19571 	}
19572 
19573 	ret = sc->ops.start(sc);
19574 	if (ret) {
19575 		printf("%s: failed to start host interface: %d\n",
19576 		    sc->sc_dev.dv_xname, ret);
19577 		goto err_wmi_detach;
19578 	}
19579 
19580 	ret = qwx_htc_wait_target(sc);
19581 	if (ret) {
19582 		printf("%s: failed to connect to HTC: %d\n",
19583 		    sc->sc_dev.dv_xname, ret);
19584 		goto err_hif_stop;
19585 	}
19586 
19587 	ret = qwx_dp_htt_connect(&sc->dp);
19588 	if (ret) {
19589 		printf("%s: failed to connect to HTT: %d\n",
19590 		    sc->sc_dev.dv_xname, ret);
19591 		goto err_hif_stop;
19592 	}
19593 
19594 	ret = qwx_wmi_connect(sc);
19595 	if (ret) {
19596 		printf("%s: failed to connect wmi: %d\n",
19597 		    sc->sc_dev.dv_xname, ret);
19598 		goto err_hif_stop;
19599 	}
19600 
19601 	sc->wmi.service_ready = 0;
19602 
19603 	ret = qwx_htc_start(&sc->htc);
19604 	if (ret) {
19605 		printf("%s: failed to start HTC: %d\n",
19606 		    sc->sc_dev.dv_xname, ret);
19607 		goto err_hif_stop;
19608 	}
19609 
19610 	ret = qwx_wmi_wait_for_service_ready(sc);
19611 	if (ret) {
19612 		printf("%s: failed to receive wmi service ready event: %d\n",
19613 		    sc->sc_dev.dv_xname, ret);
19614 		goto err_hif_stop;
19615 	}
19616 #if 0
19617 	ret = ath11k_mac_allocate(ab);
19618 	if (ret) {
19619 		ath11k_err(ab, "failed to create new hw device with mac80211 :%d\n",
19620 			   ret);
19621 		goto err_hif_stop;
19622 	}
19623 	ath11k_dp_pdev_pre_alloc(sc);
19624 #endif
19625 	ret = qwx_dp_pdev_reo_setup(sc);
19626 	if (ret) {
19627 		printf("%s: failed to initialize reo destination rings: %d\n",
19628 		    __func__, ret);
19629 		goto err_mac_destroy;
19630 	}
19631 
19632 	ret = qwx_wmi_cmd_init(sc);
19633 	if (ret) {
19634 		printf("%s: failed to send wmi init cmd: %d\n", __func__, ret);
19635 		goto err_reo_cleanup;
19636 	}
19637 
19638 	ret = qwx_wmi_wait_for_unified_ready(sc);
19639 	if (ret) {
19640 		printf("%s: failed to receive wmi unified ready event: %d\n",
19641 		    __func__, ret);
19642 		goto err_reo_cleanup;
19643 	}
19644 
19645 	/* put hardware to DBS mode */
19646 	if (sc->hw_params.single_pdev_only &&
19647 	    sc->hw_params.num_rxmda_per_pdev > 1) {
19648 		ret = qwx_wmi_set_hw_mode(sc, WMI_HOST_HW_MODE_DBS);
19649 		if (ret) {
19650 			printf("%s: failed to send dbs mode: %d\n",
19651 			    __func__, ret);
19652 			goto err_hif_stop;
19653 		}
19654 	}
19655 
19656 	ret = qwx_dp_tx_htt_h2t_ver_req_msg(sc);
19657 	if (ret) {
19658 		if (ret != ENOTSUP) {
19659 			printf("%s: failed to send htt version "
19660 			    "request message: %d\n", __func__, ret);
19661 		}
19662 		goto err_reo_cleanup;
19663 	}
19664 
19665 	return 0;
19666 err_reo_cleanup:
19667 	qwx_dp_pdev_reo_cleanup(sc);
19668 err_mac_destroy:
19669 #if 0
19670 	ath11k_mac_destroy(ab);
19671 #endif
19672 err_hif_stop:
19673 	sc->ops.stop(sc);
19674 err_wmi_detach:
19675 	qwx_wmi_detach(sc);
19676 	return ret;
19677 }
19678 
/*
 * Stop the running driver core.  The QMI firmware-stop handshake is
 * skipped when the device has crashed (ATH11K_FLAG_CRASH_FLUSH set),
 * since the firmware cannot be expected to answer in that state.
 * Counterpart of the core start sequence: stops the host interface,
 * detaches WMI and cleans up the REO destination rings.
 */
void
qwx_core_stop(struct qwx_softc *sc)
{
	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		qwx_qmi_firmware_stop(sc);

	sc->ops.stop(sc);
	qwx_wmi_detach(sc);
	qwx_dp_pdev_reo_cleanup(sc);
}
19689 
/*
 * Undo qwx_core_pdev_create(): release per-pdev datapath state.
 */
void
qwx_core_pdev_destroy(struct qwx_softc *sc)
{
	qwx_dp_pdev_free(sc);
}
19695 
/*
 * Per-pdev setup: allocate datapath state for the physical device and
 * register the radio with the network stack.  The #if 0 sections are
 * thermal/spectral support carried over from Linux ath11k that has
 * not been ported yet.  Returns 0 on success or an error code, with
 * partial state unwound.
 */
int
qwx_core_pdev_create(struct qwx_softc *sc)
{
	int ret;

	ret = qwx_dp_pdev_alloc(sc);
	if (ret) {
		printf("%s: failed to attach DP pdev: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_mac_register(sc);
	if (ret) {
		printf("%s: failed register the radio with mac80211: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_dp_pdev_free;
	}
#if 0

	ret = ath11k_thermal_register(ab);
	if (ret) {
		ath11k_err(ab, "could not register thermal device: %d\n",
			   ret);
		goto err_mac_unregister;
	}

	ret = ath11k_spectral_init(ab);
	if (ret) {
		ath11k_err(ab, "failed to init spectral %d\n", ret);
		goto err_thermal_unregister;
	}
#endif
	return 0;
#if 0
err_thermal_unregister:
	ath11k_thermal_unregister(ab);
err_mac_unregister:
	ath11k_mac_unregister(ab);
#endif
err_dp_pdev_free:
	qwx_dp_pdev_free(sc);
#if 0
err_pdev_debug:
	ath11k_debugfs_pdev_destroy(ab);
#endif
	return ret;
}
19744 
/*
 * Full driver core teardown: disable interrupts, stop the core and
 * per-pdev state, power the device down, and release datapath and QMI
 * resources.  Runs at splnet() to keep the network stack out while
 * state is being torn down.
 */
void
qwx_core_deinit(struct qwx_softc *sc)
{
	struct ath11k_hal *hal = &sc->hal;
	int s = splnet();

#ifdef notyet
	mutex_lock(&ab->core_lock);
#endif
	sc->ops.irq_disable(sc);

	qwx_core_stop(sc);
	qwx_core_pdev_destroy(sc);
#ifdef notyet
	mutex_unlock(&ab->core_lock);
#endif
	sc->ops.power_down(sc);
#if 0
	ath11k_mac_destroy(ab);
	ath11k_debugfs_soc_destroy(ab);
#endif
	qwx_dp_free(sc);
#if 0
	ath11k_reg_free(ab);
#endif
	qwx_qmi_deinit_service(sc);

	/* Reset shadow register bookkeeping for a future re-init. */
	hal->num_shadow_reg_configured = 0;

	splx(s);
}
19776 
19777 int
19778 qwx_core_qmi_firmware_ready(struct qwx_softc *sc)
19779 {
19780 	int ret;
19781 
19782 	ret = qwx_core_start_firmware(sc, sc->fw_mode);
19783 	if (ret) {
19784 		printf("%s: failed to start firmware: %d\n",
19785 		    sc->sc_dev.dv_xname, ret);
19786 		return ret;
19787 	}
19788 
19789 	ret = qwx_ce_init_pipes(sc);
19790 	if (ret) {
19791 		printf("%s: failed to initialize CE: %d\n",
19792 		    sc->sc_dev.dv_xname, ret);
19793 		goto err_firmware_stop;
19794 	}
19795 
19796 	ret = qwx_dp_alloc(sc);
19797 	if (ret) {
19798 		printf("%s: failed to init DP: %d\n",
19799 		    sc->sc_dev.dv_xname, ret);
19800 		goto err_firmware_stop;
19801 	}
19802 
19803 	switch (sc->crypto_mode) {
19804 	case ATH11K_CRYPT_MODE_SW:
19805 		set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
19806 		set_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags);
19807 		break;
19808 	case ATH11K_CRYPT_MODE_HW:
19809 		clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
19810 		clear_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags);
19811 		break;
19812 	default:
19813 		printf("%s: invalid crypto_mode: %d\n",
19814 		    sc->sc_dev.dv_xname, sc->crypto_mode);
19815 		return EINVAL;
19816 	}
19817 
19818 	if (sc->frame_mode == ATH11K_HW_TXRX_RAW)
19819 		set_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags);
19820 #if 0
19821 	mutex_lock(&ab->core_lock);
19822 #endif
19823 	ret = qwx_core_start(sc);
19824 	if (ret) {
19825 		printf("%s: failed to start core: %d\n",
19826 		    sc->sc_dev.dv_xname, ret);
19827 		goto err_dp_free;
19828 	}
19829 
19830 	if (!sc->attached) {
19831 		printf("%s: %s fw 0x%x address %s\n", sc->sc_dev.dv_xname,
19832 		    sc->hw_params.name, sc->qmi_target.fw_version,
19833 		    ether_sprintf(sc->mac_addr));
19834 	}
19835 
19836 	ret = qwx_core_pdev_create(sc);
19837 	if (ret) {
19838 		printf("%s: failed to create pdev core: %d\n",
19839 		    sc->sc_dev.dv_xname, ret);
19840 		goto err_core_stop;
19841 	}
19842 
19843 	sc->ops.irq_enable(sc);
19844 #if 0
19845 	mutex_unlock(&ab->core_lock);
19846 #endif
19847 
19848 	return 0;
19849 err_core_stop:
19850 	qwx_core_stop(sc);
19851 #if 0
19852 	ath11k_mac_destroy(ab);
19853 #endif
19854 err_dp_free:
19855 	qwx_dp_free(sc);
19856 #if 0
19857 	mutex_unlock(&ab->core_lock);
19858 #endif
19859 err_firmware_stop:
19860 	qwx_qmi_firmware_stop(sc);
19861 
19862 	return ret;
19863 }
19864 
19865 void
19866 qwx_qmi_fw_init_done(struct qwx_softc *sc)
19867 {
19868 	int ret = 0;
19869 
19870 	clear_bit(ATH11K_FLAG_QMI_FAIL, sc->sc_flags);
19871 
19872 	if (sc->qmi_cal_done == 0 && sc->hw_params.cold_boot_calib) {
19873 		qwx_qmi_process_coldboot_calibration(sc);
19874 	} else {
19875 		clear_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);
19876 		clear_bit(ATH11K_FLAG_RECOVERY, sc->sc_flags);
19877 		ret = qwx_core_qmi_firmware_ready(sc);
19878 		if (ret) {
19879 			set_bit(ATH11K_FLAG_QMI_FAIL, sc->sc_flags);
19880 			return;
19881 		}
19882 	}
19883 }
19884 
/*
 * QMI server arrival handler: performs the initial QMI handshake with
 * the firmware.  Registers for firmware indications, sends host
 * capabilities and memory segment information, downloads board data
 * (BDF) and M3 info, then waits for the firmware-init-done indication
 * and finalizes setup.  Returns 0 on success, an error code or -1 on
 * timeout.
 */
int
qwx_qmi_event_server_arrive(struct qwx_softc *sc)
{
	int ret;

	sc->fw_init_done = 0;
	/* Firmware memory requests are expected during this handshake. */
	sc->expect_fwmem_req = 1;

	ret = qwx_qmi_fw_ind_register_send(sc);
	if (ret < 0) {
		printf("%s: failed to send qmi firmware indication: %d\n",
		    sc->sc_dev.dv_xname, ret);
		sc->expect_fwmem_req = 0;
		return ret;
	}

	ret = qwx_qmi_host_cap_send(sc);
	if (ret < 0) {
		printf("%s: failed to send qmi host cap: %d\n",
		    sc->sc_dev.dv_xname, ret);
		sc->expect_fwmem_req = 0;
		return ret;
	}

	/*
	 * NOTE(review): retried once on EBUSY — presumably the firmware
	 * may reject the first attempt; confirm against
	 * qwx_qmi_mem_seg_send().
	 */
	ret = qwx_qmi_mem_seg_send(sc);
	if (ret == EBUSY)
		ret = qwx_qmi_mem_seg_send(sc);
	sc->expect_fwmem_req = 0;
	if (ret) {
		printf("%s: failed to send qmi memory segments: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_qmi_event_load_bdf(sc);
	if (ret < 0) {
		printf("%s: qmi failed to download BDF:%d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_qmi_wlanfw_m3_info_send(sc);
	if (ret) {
		printf("%s: qmi m3 info send failed:%d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Wait for fw_init_done; each sleep times out after 10 seconds. */
	while (!sc->fw_init_done) {
		ret = tsleep_nsec(&sc->fw_init_done, 0, "qwxfwinit",
		    SEC_TO_NSEC(10));
		if (ret) {
			printf("%s: fw init timeout\n", sc->sc_dev.dv_xname);
			return -1;
		}
	}

	qwx_qmi_fw_init_done(sc);
	return 0;
}
19945 
19946 int
19947 qwx_core_init(struct qwx_softc *sc)
19948 {
19949 	int error;
19950 
19951 	error = qwx_qmi_init_service(sc);
19952 	if (error) {
19953 		printf("failed to initialize qmi :%d\n", error);
19954 		return error;
19955 	}
19956 
19957 	error = sc->ops.power_up(sc);
19958 	if (error)
19959 		qwx_qmi_deinit_service(sc);
19960 
19961 	return error;
19962 }
19963 
19964 int
19965 qwx_init_hw_params(struct qwx_softc *sc)
19966 {
19967 	const struct ath11k_hw_params *hw_params = NULL;
19968 	int i;
19969 
19970 	for (i = 0; i < nitems(ath11k_hw_params); i++) {
19971 		hw_params = &ath11k_hw_params[i];
19972 
19973 		if (hw_params->hw_rev == sc->sc_hw_rev)
19974 			break;
19975 	}
19976 
19977 	if (i == nitems(ath11k_hw_params)) {
19978 		printf("%s: Unsupported hardware version: 0x%x\n",
19979 		    sc->sc_dev.dv_xname, sc->sc_hw_rev);
19980 		return EINVAL;
19981 	}
19982 
19983 	sc->hw_params = *hw_params;
19984 
19985 	DPRINTF("%s: %s\n", sc->sc_dev.dv_xname, sc->hw_params.name);
19986 
19987 	return 0;
19988 }
19989 
/*
 * Template for the SRNG (ring) configuration table.  A per-device
 * copy is made by qwx_hal_srng_create_config(), which then fills in
 * the hardware-revision specific register offsets.
 */
static const struct hal_srng_config hw_srng_config_templ[QWX_NUM_SRNG_CFG] = {
	/* TODO: max_rings can be populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
	},

	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_data_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_gse_cmd)) >> 2,
		.lmac_ring =  false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_status_ring)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
		.max_rings = 5,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
		.max_rings = 2,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA DIR BUF */
		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
		.max_rings = 1,
		.entry_size = 8 >> 2, /* TODO: Define the struct */
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
};
20171 
/*
 * Build this device's SRNG register layout: copy the common template
 * table and fill in the hardware-revision specific register start
 * offsets (reg_start) and, for multi-ring groups, the per-ring
 * register strides (reg_size).  Index 0 holds the R0 (setup) group,
 * index 1 the R2 (head pointer) group.
 */
int
qwx_hal_srng_create_config(struct qwx_softc *sc)
{
	struct ath11k_hal *hal = &sc->hal;
	struct hal_srng_config *s;

	memcpy(hal->srng_config, hw_srng_config_templ,
	    sizeof(hal->srng_config));

	s = &hal->srng_config[HAL_REO_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(sc);
	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(sc) - HAL_REO1_RING_BASE_LSB(sc);
	s->reg_size[1] = HAL_REO2_RING_HP(sc) - HAL_REO1_RING_HP(sc);

	s = &hal->srng_config[HAL_REO_EXCEPTION];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(sc);

	s = &hal->srng_config[HAL_REO_REINJECT];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(sc);

	s = &hal->srng_config[HAL_REO_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(sc);

	s = &hal->srng_config[HAL_REO_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(sc);

	s = &hal->srng_config[HAL_TCL_DATA];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(sc) - HAL_TCL1_RING_BASE_LSB(sc);
	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;

	s = &hal->srng_config[HAL_TCL_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;

	s = &hal->srng_config[HAL_TCL_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;

	s = &hal->srng_config[HAL_CE_SRC];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc) + HAL_CE_DST_RING_BASE_LSB +
		ATH11K_CE_OFFSET(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc) + HAL_CE_DST_RING_HP +
		ATH11K_CE_OFFSET(sc);
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc);

	s = &hal->srng_config[HAL_CE_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_RING_BASE_LSB +
		ATH11K_CE_OFFSET(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_RING_HP +
		ATH11K_CE_OFFSET(sc);
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);

	s = &hal->srng_config[HAL_CE_DST_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) +
		HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_STATUS_RING_HP +
		ATH11K_CE_OFFSET(sc);
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);

	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;

	s = &hal->srng_config[HAL_SW2WBM_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;

	s = &hal->srng_config[HAL_WBM2SW_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
	s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(sc) -
		HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
	s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;

	return 0;
}
20264 
20265 int
20266 qwx_hal_srng_get_ring_id(struct qwx_softc *sc,
20267     enum hal_ring_type type, int ring_num, int mac_id)
20268 {
20269 	struct hal_srng_config *srng_config = &sc->hal.srng_config[type];
20270 	int ring_id;
20271 
20272 	if (ring_num >= srng_config->max_rings) {
20273 		printf("%s: invalid ring number :%d\n", __func__, ring_num);
20274 		return -1;
20275 	}
20276 
20277 	ring_id = srng_config->start_ring_id + ring_num;
20278 	if (srng_config->lmac_ring)
20279 		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
20280 
20281 	if (ring_id >= HAL_SRNG_RING_ID_MAX) {
20282 		printf("%s: invalid ring ID :%d\n", __func__, ring_id);
20283 		return -1;
20284 	}
20285 
20286 	return ring_id;
20287 }
20288 
/*
 * Point a ring's host-visible pointer at its shadow register.
 * Destination rings have their tail pointer shadowed, source rings
 * their head pointer.  The stored address is the shadow register
 * offset added to the device register mapping (sc->mem).
 */
void
qwx_hal_srng_update_hp_tp_addr(struct qwx_softc *sc, int shadow_cfg_idx,
    enum hal_ring_type ring_type, int ring_num)
{
	struct hal_srng *srng;
	struct ath11k_hal *hal = &sc->hal;
	int ring_id;
	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];

	ring_id = qwx_hal_srng_get_ring_id(sc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = &hal->srng_list[ring_id];

	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
		srng->u.dst_ring.tp_addr = (uint32_t *)(
		    HAL_SHADOW_REG(sc, shadow_cfg_idx) +
		    (unsigned long)sc->mem);
	else
		srng->u.src_ring.hp_addr = (uint32_t *)(
		    HAL_SHADOW_REG(sc, shadow_cfg_idx) +
		    (unsigned long)sc->mem);
}
20313 
/*
 * For a source ring, commit the cached head pointer via
 * qwx_hal_srng_access_end() -- but only if the ring is non-empty
 * (head differs from the hardware-visible tail).
 */
void
qwx_hal_srng_shadow_update_hp_tp(struct qwx_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	/* Update the shadow HP if the ring isn't empty. */
	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
		qwx_hal_srng_access_end(sc, srng);
}
20325 
/*
 * Claim the next free shadow register slot and map it onto the head
 * pointer register (source rings) or tail pointer register
 * (destination rings) of the given ring, then re-point the ring's
 * cached hp/tp address at the shadow register.  Returns EINVAL once
 * all HAL_SHADOW_NUM_REGS slots are in use.
 */
int
qwx_hal_srng_update_shadow_config(struct qwx_softc *sc,
    enum hal_ring_type ring_type, int ring_num)
{
	struct ath11k_hal *hal = &sc->hal;
	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
	int shadow_cfg_idx = hal->num_shadow_reg_configured;
	uint32_t target_reg;

	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
		return EINVAL;

	hal->num_shadow_reg_configured++;

	/* Locate this ring's HP register via the per-ring stride. */
	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
		ring_num;

	/* For destination ring, shadow the TP */
	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
		target_reg += HAL_OFFSET_FROM_HP_TO_TP;

	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;

	/* update hp/tp addr to hal structure*/
	qwx_hal_srng_update_hp_tp_addr(sc, shadow_cfg_idx, ring_type, ring_num);

	DPRINTF("%s: target_reg %x, shadow reg 0x%x shadow_idx 0x%x, "
	    "ring_type %d, ring num %d\n", __func__, target_reg,
	     HAL_SHADOW_REG(sc, shadow_cfg_idx), shadow_cfg_idx,
	     ring_type, ring_num);

	return 0;
}
20360 
20361 void
20362 qwx_hal_srng_shadow_config(struct qwx_softc *sc)
20363 {
20364 	struct ath11k_hal *hal = &sc->hal;
20365 	int ring_type, ring_num;
20366 	struct hal_srng_config *cfg;
20367 
20368 	/* update all the non-CE srngs. */
20369 	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
20370 		cfg = &hal->srng_config[ring_type];
20371 
20372 		if (ring_type == HAL_CE_SRC ||
20373 		    ring_type == HAL_CE_DST ||
20374 			ring_type == HAL_CE_DST_STATUS)
20375 			continue;
20376 
20377 		if (cfg->lmac_ring)
20378 			continue;
20379 
20380 		for (ring_num = 0; ring_num < cfg->max_rings; ring_num++) {
20381 			qwx_hal_srng_update_shadow_config(sc, ring_type,
20382 			    ring_num);
20383 		}
20384 	}
20385 }
20386 
20387 void
20388 qwx_hal_srng_get_shadow_config(struct qwx_softc *sc, uint32_t **cfg,
20389     uint32_t *len)
20390 {
20391 	struct ath11k_hal *hal = &sc->hal;
20392 
20393 	*len = hal->num_shadow_reg_configured;
20394 	*cfg = hal->shadow_reg_addr;
20395 }
20396 
20397 int
20398 qwx_hal_alloc_cont_rdp(struct qwx_softc *sc)
20399 {
20400 	struct ath11k_hal *hal = &sc->hal;
20401 	size_t size = sizeof(uint32_t) * HAL_SRNG_RING_ID_MAX;
20402 
20403 	if (hal->rdpmem == NULL) {
20404 		hal->rdpmem = qwx_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
20405 		if (hal->rdpmem == NULL) {
20406 			printf("%s: could not allocate RDP DMA memory\n",
20407 			    sc->sc_dev.dv_xname);
20408 			return ENOMEM;
20409 
20410 		}
20411 	}
20412 
20413 	hal->rdp.vaddr = QWX_DMA_KVA(hal->rdpmem);
20414 	hal->rdp.paddr = QWX_DMA_DVA(hal->rdpmem);
20415 	return 0;
20416 }
20417 
20418 void
20419 qwx_hal_free_cont_rdp(struct qwx_softc *sc)
20420 {
20421 	struct ath11k_hal *hal = &sc->hal;
20422 
20423 	if (hal->rdpmem == NULL)
20424 		return;
20425 
20426 	hal->rdp.vaddr = NULL;
20427 	hal->rdp.paddr = 0L;
20428 	qwx_dmamem_free(sc->sc_dmat, hal->rdpmem);
20429 	hal->rdpmem = NULL;
20430 }
20431 
20432 int
20433 qwx_hal_alloc_cont_wrp(struct qwx_softc *sc)
20434 {
20435 	struct ath11k_hal *hal = &sc->hal;
20436 	size_t size = sizeof(uint32_t) * HAL_SRNG_NUM_LMAC_RINGS;
20437 
20438 	if (hal->wrpmem == NULL) {
20439 		hal->wrpmem = qwx_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
20440 		if (hal->wrpmem == NULL) {
20441 			printf("%s: could not allocate WDP DMA memory\n",
20442 			    sc->sc_dev.dv_xname);
20443 			return ENOMEM;
20444 
20445 		}
20446 	}
20447 
20448 	hal->wrp.vaddr = QWX_DMA_KVA(hal->wrpmem);
20449 	hal->wrp.paddr = QWX_DMA_DVA(hal->wrpmem);
20450 	return 0;
20451 }
20452 
20453 void
20454 qwx_hal_free_cont_wrp(struct qwx_softc *sc)
20455 {
20456 	struct ath11k_hal *hal = &sc->hal;
20457 
20458 	if (hal->wrpmem == NULL)
20459 		return;
20460 
20461 	hal->wrp.vaddr = NULL;
20462 	hal->wrp.paddr = 0L;
20463 	qwx_dmamem_free(sc->sc_dmat, hal->wrpmem);
20464 	hal->wrpmem = NULL;
20465 }
20466 
20467 int
20468 qwx_hal_srng_init(struct qwx_softc *sc)
20469 {
20470 	struct ath11k_hal *hal = &sc->hal;
20471 	int ret;
20472 
20473 	memset(hal, 0, sizeof(*hal));
20474 
20475 	ret = qwx_hal_srng_create_config(sc);
20476 	if (ret)
20477 		goto err_hal;
20478 
20479 	ret = qwx_hal_alloc_cont_rdp(sc);
20480 	if (ret)
20481 		goto err_hal;
20482 
20483 	ret = qwx_hal_alloc_cont_wrp(sc);
20484 	if (ret)
20485 		goto err_free_cont_rdp;
20486 
20487 #ifdef notyet
20488 	qwx_hal_register_srng_key(sc);
20489 #endif
20490 
20491 	return 0;
20492 err_free_cont_rdp:
20493 	qwx_hal_free_cont_rdp(sc);
20494 
20495 err_hal:
20496 	return ret;
20497 }
20498 
/*
 * Program a destination ring's hardware registers: optional MSI
 * settings, ring base address and size, interrupt thresholds, and the
 * location of the head pointer within the RDP DMA area.  Finally the
 * ring is marked empty and enabled.
 */
void
qwx_hal_srng_dst_hw_init(struct qwx_softc *sc, struct hal_srng *srng)
{
	struct ath11k_hal *hal = &sc->hal;
	uint32_t val;
	uint64_t hp_addr;
	uint32_t reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		/* Tell the hardware where and what to write for MSI. */
		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(sc),
		    srng->msi_addr);

		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
		    ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
		    HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(sc), val);

		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(sc),
		    srng->msi_data);
	}

	/* Ring base address, low 32 bits. */
	sc->ops.write32(sc, reg_base, srng->ring_base_paddr);

	/* High address bits plus total ring size in 32-bit words. */
	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
	    ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
	    (srng->entry_size * srng->num_entries));
	sc->ops.write32(sc,
	    reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(sc), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
	    FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_ID_OFFSET(sc), val);

	/* interrupt setup */
	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
	    (srng->intr_timer_thres_us >> 3));

	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
	    (srng->intr_batch_cntr_thres_entries * srng->entry_size));

	sc->ops.write32(sc,
	    reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(sc), val);

	/* Physical address of this ring's HP slot in the RDP area. */
	hp_addr = hal->rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr -
	    (unsigned long)hal->rdp.vaddr);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(sc),
	    hp_addr & HAL_ADDR_LSB_REG_MASK);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(sc),
	    hp_addr >> HAL_ADDR_MSB_REG_SHIFT);

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	sc->ops.write32(sc, reg_base, 0);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_TP_OFFSET(sc), 0);
	*srng->u.dst_ring.hp_addr = 0;

	/* Apply swap flags and enable the ring. */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_REO1_RING_MISC_MSI_SWAP;
	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;

	sc->ops.write32(sc, reg_base + HAL_REO1_RING_MISC_OFFSET(sc), val);
}
20573 
/*
 * Program hardware registers for a source (host-producer) SRNG:
 * MSI parameters, ring base address/size, interrupt thresholds, the
 * tail-pointer shadow location, and finally the misc/enable bits.
 */
void
qwx_hal_srng_src_hw_init(struct qwx_softc *sc, struct hal_srng *srng)
{
	struct ath11k_hal *hal = &sc->hal;
	uint32_t val;
	uint64_t tp_addr;
	uint32_t reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	/* Optional MSI setup: address (LSB/MSB + enable) and data value. */
	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(sc),
		    srng->msi_addr);

		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
		    ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(sc),
		    val);

		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_DATA_OFFSET(sc),
		    srng->msi_data);
	}

	/* Ring base address: low 32 bits at reg_base, high bits + size next. */
	sc->ops.write32(sc, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
	    ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
	    (srng->entry_size * srng->num_entries));
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(sc), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_ID_OFFSET(sc), val);

	/* The WBM idle link ring re-programs base address and size. */
	if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		sc->ops.write32(sc, reg_base, (uint32_t)srng->ring_base_paddr);
		val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
		    ((uint64_t)srng->ring_base_paddr >>
		    HAL_ADDR_MSB_REG_SHIFT)) |
		    FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
		    (srng->entry_size * srng->num_entries));
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(sc), val);
	}

	/* interrupt setup */
	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
	 * unit of 8 usecs instead of 1 usec (as required by v1).
	 */
	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
	    srng->intr_timer_thres_us);

	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
	    (srng->intr_batch_cntr_thres_entries * srng->entry_size));

	sc->ops.write32(sc,
	    reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(sc), val);

	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
		    srng->u.src_ring.low_threshold);
	}
	sc->ops.write32(sc,
	    reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(sc), val);

	/*
	 * Tell the hardware where the host keeps the tail pointer:
	 * its physical offset within the rdp (read pointer) DMA block.
	 */
	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		tp_addr = hal->rdp.paddr +
		    ((unsigned long)srng->u.src_ring.tp_addr -
		    (unsigned long)hal->rdp.vaddr);
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(sc),
		    tp_addr & HAL_ADDR_LSB_REG_MASK);
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(sc),
		    tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	sc->ops.write32(sc, reg_base, 0);
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
	*srng->u.src_ring.tp_addr = 0;

	/* Finally, swap flags and the SRNG enable bit in the misc register. */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_TCL1_RING_MISC_MSI_SWAP;

	/* Loop count is not used for SRC rings */
	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;

	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;

	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_MISC_OFFSET(sc), val);
}
20678 
20679 void
20680 qwx_hal_srng_hw_init(struct qwx_softc *sc, struct hal_srng *srng)
20681 {
20682 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
20683 		qwx_hal_srng_src_hw_init(sc, srng);
20684 	else
20685 		qwx_hal_srng_dst_hw_init(sc, srng);
20686 }
20687 
/*
 * Program the destination-control register of a CE destination ring
 * with the maximum buffer length the ring may receive into.
 */
void
qwx_hal_ce_dst_setup(struct qwx_softc *sc, struct hal_srng *srng, int ring_num)
{
	struct hal_srng_config *srng_config = &sc->hal.srng_config[HAL_CE_DST];
	uint32_t addr;
	uint32_t val;

	/* Register address of DEST_CTRL for this CE instance. */
	addr = HAL_CE_DST_RING_CTRL +
	    srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
	    ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];

	/* Read-modify-write: replace only the MAX_LEN field. */
	val = sc->ops.read32(sc, addr);
	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
	    srng->u.dst_ring.max_buffer_length);
	sc->ops.write32(sc, addr, val);
}
20705 
/*
 * Fill in a CE source-ring descriptor: buffer physical address
 * (split into low 32 bits plus high bits), transfer length, byte-swap
 * flag, and a caller-supplied transfer id in the meta-info word.
 *
 * NOTE(review): unlike qwx_hal_ce_dst_set_desc() these stores are not
 * htole32()-swapped — presumably byte order is handled elsewhere for
 * source descriptors; confirm against the hardware programming model.
 */
void
qwx_hal_ce_src_set_desc(void *buf, uint64_t paddr, uint32_t len, uint32_t id,
    uint8_t byte_swap_data)
{
	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info = FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
	    (paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
	    byte_swap_data) |
	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
}
20721 
20722 void
20723 qwx_hal_ce_dst_set_desc(void *buf, uint64_t paddr)
20724 {
20725 	struct hal_ce_srng_dest_desc *desc =
20726 	    (struct hal_ce_srng_dest_desc *)buf;
20727 
20728 	desc->buffer_addr_low = htole32(paddr & HAL_ADDR_LSB_REG_MASK);
20729 	desc->buffer_addr_info = htole32(FIELD_PREP(
20730 	    HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
20731 	    (paddr >> HAL_ADDR_MSB_REG_SHIFT)));
20732 }
20733 
20734 uint32_t
20735 qwx_hal_ce_dst_status_get_length(void *buf)
20736 {
20737 	struct hal_ce_srng_dst_status_desc *desc =
20738 		(struct hal_ce_srng_dst_status_desc *)buf;
20739 	uint32_t len;
20740 
20741 	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
20742 	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
20743 
20744 	return len;
20745 }
20746 
20747 
/*
 * Set up an SRNG of the given type and instance: populate software ring
 * state in hal->srng_list, compute per-register-group bases, zero the
 * ring memory, assign head/tail pointer shadow locations, and (for
 * non-LMAC rings) program the hardware. Returns the ring ID on success
 * or a negative value from qwx_hal_srng_get_ring_id() on failure.
 */
int
qwx_hal_srng_setup(struct qwx_softc *sc, enum hal_ring_type type,
    int ring_num, int mac_id, struct hal_srng_params *params)
{
	struct ath11k_hal *hal = &sc->hal;
	struct hal_srng_config *srng_config = &sc->hal.srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	uint32_t lmac_idx;
	int i;
	uint32_t reg_base;

	ring_id = qwx_hal_srng_get_ring_id(sc, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	/* Copy caller-provided parameters into the software ring state. */
	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
	    params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->msi_addr = params->msi_addr;
	srng->msi_data = params->msi_data;
	srng->initialized = 1;
#if 0
	spin_lock_init(&srng->lock);
	lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
#endif

	/* Per-register-group base addresses for this ring instance. */
	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
		    (ring_num * srng_config->reg_size[i]);
	}

	/* entry_size/num_entries count 32-bit words; clear the whole ring. */
	memset(srng->ring_base_vaddr, 0,
	    (srng->entry_size * srng->num_entries) << 2);

#if 0 /* Not needed on OpenBSD? We do swapping in software... */
	/* TODO: Add comments on these swap configurations */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
			       HAL_SRNG_FLAGS_RING_PTR_SWAP;
#endif
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		/*
		 * Source ring: host produces, so the tail pointer lives in
		 * the shared rdp block updated by the hardware/firmware.
		 */
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
		    srng->entry_size;
		if (srng_config->lmac_ring) {
			/* LMAC rings: head pointer goes through shared wrp. */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
			    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!sc->hw_params.supports_shadow_regs)
				srng->u.src_ring.hp_addr =
				    (uint32_t *)((unsigned long)sc->mem +
				    reg_base);
			else
				DPRINTF("%s: type %d ring_num %d reg_base "
				    "0x%x shadow 0x%lx\n",
				    sc->sc_dev.dv_xname, type, ring_num, reg_base,
				   (unsigned long)srng->u.src_ring.hp_addr -
				   (unsigned long)sc->mem);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
			    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!sc->hw_params.supports_shadow_regs)
				srng->u.dst_ring.tp_addr =
				    (uint32_t *)((unsigned long)sc->mem +
				    reg_base + (HAL_REO1_RING_TP(sc) -
				    HAL_REO1_RING_HP(sc)));
			else
				DPRINTF("%s: type %d ring_num %d target_reg "
				    "0x%x shadow 0x%lx\n", sc->sc_dev.dv_xname,
				    type, ring_num,
				    reg_base + (HAL_REO1_RING_TP(sc) -
				    HAL_REO1_RING_HP(sc)),
				    (unsigned long)srng->u.dst_ring.tp_addr -
				    (unsigned long)sc->mem);
		}
	}

	/* LMAC rings are programmed by firmware, not by the host. */
	if (srng_config->lmac_ring)
		return ring_id;

	qwx_hal_srng_hw_init(sc, srng);

	if (type == HAL_CE_DST) {
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		qwx_hal_ce_dst_setup(sc, srng, ring_num);
	}

	return ring_id;
}
20875 
20876 size_t
20877 qwx_hal_ce_get_desc_size(enum hal_ce_desc type)
20878 {
20879 	switch (type) {
20880 	case HAL_CE_DESC_SRC:
20881 		return sizeof(struct hal_ce_srng_src_desc);
20882 	case HAL_CE_DESC_DST:
20883 		return sizeof(struct hal_ce_srng_dest_desc);
20884 	case HAL_CE_DESC_DST_STATUS:
20885 		return sizeof(struct hal_ce_srng_dst_status_desc);
20886 	}
20887 
20888 	return 0;
20889 }
20890 
/* HTC transmit-completion hook; not yet implemented on OpenBSD. */
void
qwx_htc_tx_completion_handler(struct qwx_softc *sc, struct mbuf *m)
{
	printf("%s: not implemented\n", __func__);
}
20896 
/*
 * Reap the next completed source-ring entry of a CE pipe and return
 * its per-transfer context (a struct qwx_tx_data), advancing the
 * software index. Returns NULL when no completed entry is available.
 */
struct qwx_tx_data *
qwx_ce_completed_send_next(struct qwx_ce_pipe *pipe)
{
	struct qwx_softc *sc = pipe->sc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	void *ctx;
	struct qwx_tx_data *tx_data = NULL;
	uint32_t *desc;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	/* Ask HAL for the next reaped descriptor; NULL means ring empty. */
	desc = qwx_hal_srng_src_reap_next(sc, srng);
	if (!desc)
		goto err_unlock;

	ctx = pipe->src_ring->per_transfer_context[sw_index];
	tx_data = (struct qwx_tx_data *)ctx;

	/* Advance sw_index with wrap-around (mask assumes power-of-two). */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
#ifdef notyet
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return tx_data;
}
20937 
/*
 * Process completed transmissions on a CE pipe: unload each DMA map and
 * either free the mbuf (no send callback, or credit-flow mode) or hand
 * it to the pipe's send callback. Returns 1 if any callback was invoked,
 * 0 otherwise.
 */
int
qwx_ce_tx_process_cb(struct qwx_ce_pipe *pipe)
{
	struct qwx_softc *sc = pipe->sc;
	struct qwx_tx_data *tx_data;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	int ret = 0;

	while ((tx_data = qwx_ce_completed_send_next(pipe)) != NULL) {
		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
		m = tx_data->m;
		tx_data->m = NULL;

		/* With credit flow control (or no callback), just free. */
		if ((!pipe->send_cb) || sc->hw_params.credit_flow) {
			m_freem(m);
			continue;
		}

		/* Defer callbacks until the ring has been fully reaped. */
		ml_enqueue(&ml, m);
		ret = 1;
	}

	while ((m = ml_dequeue(&ml))) {
		DNPRINTF(QWX_D_CE, "%s: tx ce pipe %d len %d\n", __func__,
		    pipe->pipe_num, m->m_len);
		pipe->send_cb(sc, m);
	}

	return ret;
}
20969 
20970 void
20971 qwx_ce_poll_send_completed(struct qwx_softc *sc, uint8_t pipe_id)
20972 {
20973 	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
20974 	const struct ce_attr *attr =  &sc->hw_params.host_ce_config[pipe_id];
20975 
20976 	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
20977 		qwx_ce_tx_process_cb(pipe);
20978 }
20979 
/*
 * Apply a batch of HTC credit reports: add the reported credits to the
 * corresponding endpoints and invoke their ep_tx_credits callbacks.
 * An out-of-range endpoint ID terminates processing early.
 */
void
qwx_htc_process_credit_report(struct qwx_htc *htc,
    const struct ath11k_htc_credit_report *report, int len,
    enum ath11k_htc_ep_id eid)
{
	struct qwx_softc *sc = htc->sc;
	struct qwx_htc_ep *ep;
	int i, n_reports;

	/* Trailing partial report is ignored below; warn about it. */
	if (len % sizeof(*report))
		printf("%s: Uneven credit report len %d", __func__, len);

	n_reports = len / sizeof(*report);
#ifdef notyet
	spin_lock_bh(&htc->tx_lock);
#endif
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH11K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		DNPRINTF(QWX_D_HTC, "%s: ep %d credits got %d total %d\n",
		    __func__, report->eid, report->credits, ep->tx_credits);

		if (ep->ep_ops.ep_tx_credits) {
			/* Drop the lock around the callback (Linux legacy). */
#ifdef notyet
			spin_unlock_bh(&htc->tx_lock);
#endif
			ep->ep_ops.ep_tx_credits(sc);
#ifdef notyet
			spin_lock_bh(&htc->tx_lock);
#endif
		}
	}
#ifdef notyet
	spin_unlock_bh(&htc->tx_lock);
#endif
}
21020 
/*
 * Walk the records in an HTC trailer. Each record is a header followed
 * by hdr.len bytes of payload; currently only credit reports are
 * handled (and only when credit flow control is enabled). Returns 0 on
 * success or EINVAL on a malformed trailer.
 */
int
qwx_htc_process_trailer(struct qwx_htc *htc, uint8_t *buffer, int length,
    enum ath11k_htc_ep_id src_eid)
{
	struct qwx_softc *sc = htc->sc;
	int status = 0;
	struct ath11k_htc_record *record;
	size_t len;

	while (length > 0) {
		record = (struct ath11k_htc_record *)buffer;

		/* Not even a full record header left. */
		if (length < sizeof(record->hdr)) {
			status = EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			printf("%s: Invalid record length: %d\n",
			    __func__, record->hdr.len);
			status = EINVAL;
			break;
		}

		if (sc->hw_params.credit_flow) {
			switch (record->hdr.id) {
			case ATH11K_HTC_RECORD_CREDITS:
				len = sizeof(struct ath11k_htc_credit_report);
				if (record->hdr.len < len) {
					printf("%s: Credit report too long\n",
					    __func__);
					status = EINVAL;
					break;
				}
				qwx_htc_process_credit_report(htc,
				    record->credit_report,
				    record->hdr.len, src_eid);
				break;
			default:
				printf("%s: unhandled record: id:%d length:%d\n",
				    __func__, record->hdr.id, record->hdr.len);
				break;
			}
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	return status;
}
21077 
/* HTC suspend-complete notification; not yet implemented on OpenBSD. */
void
qwx_htc_suspend_complete(struct qwx_softc *sc, int ack)
{
	printf("%s: not implemented\n", __func__);
}
21083 
/* HTC wakeup-from-suspend notification; log-only, matching Linux. */
void
qwx_htc_wakeup_from_suspend(struct qwx_softc *sc)
{
	/* TODO This is really all the Linux driver does here... silence it? */
	printf("%s: wakeup from suspend received\n", __func__);
}
21090 
/*
 * Handle an HTC frame received on a CE pipe: validate the HTC header,
 * process and strip any trailer, then either handle control messages
 * on endpoint 0 locally or hand the payload to the endpoint's rx
 * completion callback. The mbuf is consumed in all cases (either freed
 * here or ownership passes to the rx callback).
 */
void
qwx_htc_rx_completion_handler(struct qwx_softc *sc, struct mbuf *m)
{
	struct qwx_htc *htc = &sc->htc;
	struct ath11k_htc_hdr *hdr;
	struct qwx_htc_ep *ep;
	uint16_t payload_len;
	uint32_t message_id, trailer_len = 0;
	uint8_t eid;
	int trailer_present;

	/* Make the HTC header contiguous so it can be dereferenced. */
	m = m_pullup(m, sizeof(struct ath11k_htc_hdr));
	if (m == NULL) {
		printf("%s: m_pullup failed\n", __func__);
		m = NULL; /* already freed */
		goto out;
	}

	hdr = mtod(m, struct ath11k_htc_hdr *);

	eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);

	if (eid >= ATH11K_HTC_EP_COUNT) {
		printf("%s: HTC Rx: invalid eid %d\n", __func__, eid);
		printf("%s: HTC info: 0x%x\n", __func__, hdr->htc_info);
		printf("%s: CTRL info: 0x%x\n", __func__, hdr->ctrl_info);
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);

	if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
		printf("%s: HTC rx frame too long, len: %zu\n", __func__,
		    payload_len + sizeof(*hdr));
		goto out;
	}

	if (m->m_pkthdr.len < payload_len) {
		printf("%s: HTC Rx: insufficient length, got %d, "
		    "expected %d\n", __func__, m->m_pkthdr.len, payload_len);
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
	    ATH11K_HTC_FLAG_TRAILER_PRESENT;

	DNPRINTF(QWX_D_HTC, "%s: rx ep %d mbuf %p trailer_present %d\n",
	    __func__, eid, m, trailer_present);

	if (trailer_present) {
		int status = 0;
		uint8_t *trailer;
		int trim;
		size_t min_len;

		trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
		min_len = sizeof(struct ath11k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			printf("%s: Invalid trailer length: %d\n", __func__,
			    trailer_len);
			goto out;
		}

		/* The trailer occupies the last trailer_len payload bytes. */
		trailer = (uint8_t *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = qwx_htc_process_trailer(htc, trailer,
		    trailer_len, eid);
		if (status)
			goto out;

		/* Strip the trailer from the tail of the mbuf chain. */
		trim = trailer_len;
		m_adj(m, -trim);
	}

	if (trailer_len >= payload_len)
		/* zero length packet with trailer data, just drop these */
		goto out;

	/* Strip the HTC header; only the payload remains. */
	m_adj(m, sizeof(*hdr));

	if (eid == ATH11K_HTC_EP_0) {
		struct ath11k_htc_msg *msg;

		msg = mtod(m, struct ath11k_htc_msg *);
		message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id);

		DNPRINTF(QWX_D_HTC, "%s: rx ep %d mbuf %p message_id %d\n",
		    __func__, eid, m, message_id);

		switch (message_id) {
		case ATH11K_HTC_MSG_READY_ID:
		case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
			/* handle HTC control message */
			if (sc->ctl_resp) {
				/* this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				printf("%s: HTC rx ctrl still processing\n",
				    __func__);
				goto out;
			}

			htc->control_resp_len =
			    MIN(m->m_pkthdr.len, ATH11K_HTC_MAX_CTRL_MSG_LEN);

			m_copydata(m, 0, htc->control_resp_len,
			    htc->control_resp_buffer);

			/* Wake the thread sleeping on sc->ctl_resp. */
			sc->ctl_resp = 1;
			wakeup(&sc->ctl_resp);
			break;
		case ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			qwx_htc_suspend_complete(sc, 1);
			break;
		case ATH11K_HTC_MSG_NACK_SUSPEND:
			qwx_htc_suspend_complete(sc, 0);
			break;
		case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
			qwx_htc_wakeup_from_suspend(sc);
			break;
		default:
			printf("%s: ignoring unsolicited htc ep0 event %ld\n",
			    __func__,
			    FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id));
			break;
		}
		goto out;
	}

	DNPRINTF(QWX_D_HTC, "%s: rx ep %d mbuf %p\n", __func__, eid, m);

	ep->ep_ops.ep_rx_complete(sc, m);

	/* poll tx completion for interrupt disabled CE's */
	qwx_ce_poll_send_completed(sc, ep->ul_pipe_id);

	/* mbuf is now owned by the rx completion handler */
	m = NULL;
out:
	m_freem(m);
}
21239 
/*
 * Release a CE ring and its DMA resources. Safe to call on a partially
 * constructed ring (each teardown step is guarded), which lets
 * qwx_ce_alloc_ring() use this for its own error unwinding.
 */
void
qwx_ce_free_ring(struct qwx_softc *sc, struct qwx_ce_ring *ring)
{
	bus_size_t dsize;
	size_t size;

	if (ring == NULL)
		return;

	if (ring->base_addr) {
		dsize = ring->nentries * ring->desc_sz;
		bus_dmamem_unmap(sc->sc_dmat, ring->base_addr, dsize);
	}
	if (ring->nsegs)
		bus_dmamem_free(sc->sc_dmat, &ring->dsegs, ring->nsegs);
	if (ring->dmap)
		bus_dmamap_destroy(sc->sc_dmat, ring->dmap);

	/* Size must match the malloc() in qwx_ce_alloc_ring(). */
	size = sizeof(*ring) + (ring->nentries *
	    sizeof(ring->per_transfer_context[0]));
	free(ring, M_DEVBUF, size);
}
21262 
/*
 * Return non-zero when the given copy engine needs the shadow-register
 * workaround. CE4 is the only such engine.
 */
static inline int
qwx_ce_need_shadow_fix(int ce_id)
{
	return (ce_id == 4) ? 1 : 0;
}
21269 
21270 void
21271 qwx_ce_stop_shadow_timers(struct qwx_softc *sc)
21272 {
21273 	int i;
21274 
21275 	if (!sc->hw_params.supports_shadow_regs)
21276 		return;
21277 
21278 	for (i = 0; i < sc->hw_params.ce_count; i++)
21279 		if (qwx_ce_need_shadow_fix(i))
21280 			qwx_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
21281 }
21282 
21283 void
21284 qwx_ce_free_pipes(struct qwx_softc *sc)
21285 {
21286 	struct qwx_ce_pipe *pipe;
21287 	int i;
21288 
21289 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21290 		pipe = &sc->ce.ce_pipe[i];
21291 		if (qwx_ce_need_shadow_fix(i))
21292 			qwx_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
21293 		if (pipe->src_ring) {
21294 			qwx_ce_free_ring(sc, pipe->src_ring);
21295 			pipe->src_ring = NULL;
21296 		}
21297 
21298 		if (pipe->dest_ring) {
21299 			qwx_ce_free_ring(sc, pipe->dest_ring);
21300 			pipe->dest_ring = NULL;
21301 		}
21302 
21303 		if (pipe->status_ring) {
21304 			qwx_ce_free_ring(sc, pipe->status_ring);
21305 			pipe->status_ring = NULL;
21306 		}
21307 	}
21308 }
21309 
/*
 * Allocate the per-transfer context array (struct qwx_tx_data) for a CE
 * source ring and create one DMA map per entry. Returns 0 on success or
 * ENOMEM/bus_dmamap_create() error.
 *
 * NOTE(review): on dmamap failure the txdata array is freed but any
 * per_transfer_context[] slots already assigned keep pointing into it;
 * presumably callers discard the ring on error — verify.
 */
int
qwx_ce_alloc_src_ring_transfer_contexts(struct qwx_ce_pipe *pipe,
    const struct ce_attr *attr)
{
	struct qwx_softc *sc = pipe->sc;
	struct qwx_tx_data *txdata;
	size_t size;
	int ret, i;

	/* Allocate an array of qwx_tx_data structures. */
	txdata = mallocarray(pipe->src_ring->nentries, sizeof(*txdata),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txdata == NULL)
		return ENOMEM;

	size = sizeof(*txdata) * pipe->src_ring->nentries;

	/* Create per-transfer DMA maps. */
	for (i = 0; i < pipe->src_ring->nentries; i++) {
		struct qwx_tx_data *ctx = &txdata[i];
		ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
		    attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
		if (ret) {
			/* Unwind the maps created so far, then the array. */
			int j;
			for (j = 0; j < i; j++) {
				struct qwx_tx_data *ctx = &txdata[j];
				bus_dmamap_destroy(sc->sc_dmat, ctx->map);
			}
			free(txdata, M_DEVBUF, size);
			return ret;
		}
		pipe->src_ring->per_transfer_context[i] = ctx;
	}

	return 0;
}
21346 
/*
 * Allocate the per-transfer context array (struct qwx_rx_data) for a CE
 * destination ring and create one DMA map per entry. Mirrors
 * qwx_ce_alloc_src_ring_transfer_contexts(); note the maps are sized by
 * attr->src_sz_max, which doubles as the receive buffer size here.
 */
int
qwx_ce_alloc_dest_ring_transfer_contexts(struct qwx_ce_pipe *pipe,
    const struct ce_attr *attr)
{
	struct qwx_softc *sc = pipe->sc;
	struct qwx_rx_data *rxdata;
	size_t size;
	int ret, i;

	/* Allocate an array of qwx_rx_data structures. */
	rxdata = mallocarray(pipe->dest_ring->nentries, sizeof(*rxdata),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rxdata == NULL)
		return ENOMEM;

	size = sizeof(*rxdata) * pipe->dest_ring->nentries;

	/* Create per-transfer DMA maps. */
	for (i = 0; i < pipe->dest_ring->nentries; i++) {
		struct qwx_rx_data *ctx = &rxdata[i];
		ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
		    attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
		if (ret) {
			/* Unwind the maps created so far, then the array. */
			int j;
			for (j = 0; j < i; j++) {
				struct qwx_rx_data *ctx = &rxdata[j];
				bus_dmamap_destroy(sc->sc_dmat, ctx->map);
			}
			free(rxdata, M_DEVBUF, size);
			return ret;
		}
		pipe->dest_ring->per_transfer_context[i] = ctx;
	}

	return 0;
}
21383 
/*
 * Allocate a CE ring descriptor plus nentries context slots and set up
 * its DMA resources in the canonical bus_dma sequence: create map,
 * allocate memory, map it into KVA, load the map. On any failure the
 * partially built ring is torn down via qwx_ce_free_ring() (which
 * checks each stage) and NULL is returned.
 */
struct qwx_ce_ring *
qwx_ce_alloc_ring(struct qwx_softc *sc, int nentries, size_t desc_sz)
{
	struct qwx_ce_ring *ce_ring;
	size_t size = sizeof(*ce_ring) +
	    (nentries * sizeof(ce_ring->per_transfer_context[0]));
	bus_size_t dsize;

	ce_ring = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ce_ring == NULL)
		return NULL;

	/* nentries is a power of two; the mask enables cheap wrap-around. */
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;
	ce_ring->desc_sz = desc_sz;

	dsize = nentries * desc_sz;
	if (bus_dmamap_create(sc->sc_dmat, dsize, 1, dsize, 0, BUS_DMA_NOWAIT,
	    &ce_ring->dmap)) {
		free(ce_ring, M_DEVBUF, size);
		return NULL;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, dsize, CE_DESC_RING_ALIGN, 0,
	    &ce_ring->dsegs, 1, &ce_ring->nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		qwx_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamem_map(sc->sc_dmat, &ce_ring->dsegs, 1, dsize,
	    &ce_ring->base_addr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		qwx_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamap_load(sc->sc_dmat, ce_ring->dmap, ce_ring->base_addr,
	    dsize, NULL, BUS_DMA_NOWAIT)) {
		qwx_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	return ce_ring;
}
21428 
/*
 * Allocate the rings of one CE pipe according to its attributes:
 * a source ring (with tx contexts) if src_nentries is set, and a
 * destination ring (with rx contexts) plus a status ring if
 * dest_nentries is set. Returns 0 or ENOMEM; the caller is expected to
 * free partial allocations via qwx_ce_free_pipes() on failure.
 */
int
qwx_ce_alloc_pipe(struct qwx_softc *sc, int ce_id)
{
	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &sc->hw_params.host_ce_config[ce_id];
	struct qwx_ce_ring *ring;
	int nentries;
	size_t desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = attr->send_cb;
		/* Ring sizes must be powers of two for index masking. */
		nentries = qwx_roundup_pow_of_two(attr->src_nentries);
		desc_sz = qwx_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = qwx_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->src_ring = ring;
		if (qwx_ce_alloc_src_ring_transfer_contexts(pipe, attr))
			return ENOMEM;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = qwx_roundup_pow_of_two(attr->dest_nentries);
		desc_sz = qwx_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = qwx_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->dest_ring = ring;
		if (qwx_ce_alloc_dest_ring_transfer_contexts(pipe, attr))
			return ENOMEM;

		/* Status ring parallels the destination ring. */
		desc_sz = qwx_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = qwx_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->status_ring = ring;
	}

	return 0;
}
21472 
21473 void
21474 qwx_ce_rx_pipe_cleanup(struct qwx_ce_pipe *pipe)
21475 {
21476 	struct qwx_softc *sc = pipe->sc;
21477 	struct qwx_ce_ring *ring = pipe->dest_ring;
21478 	void *ctx;
21479 	struct qwx_rx_data *rx_data;
21480 	int i;
21481 
21482 	if (!(ring && pipe->buf_sz))
21483 		return;
21484 
21485 	for (i = 0; i < ring->nentries; i++) {
21486 		ctx = ring->per_transfer_context[i];
21487 		if (!ctx)
21488 			continue;
21489 
21490 		rx_data = (struct qwx_rx_data *)ctx;
21491 		if (rx_data->m) {
21492 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
21493 			m_freem(rx_data->m);
21494 			rx_data->m = NULL;
21495 		}
21496 	}
21497 }
21498 
21499 void
21500 qwx_ce_shadow_config(struct qwx_softc *sc)
21501 {
21502 	int i;
21503 
21504 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21505 		if (sc->hw_params.host_ce_config[i].src_nentries)
21506 			qwx_hal_srng_update_shadow_config(sc, HAL_CE_SRC, i);
21507 
21508 		if (sc->hw_params.host_ce_config[i].dest_nentries) {
21509 			qwx_hal_srng_update_shadow_config(sc, HAL_CE_DST, i);
21510 
21511 			qwx_hal_srng_update_shadow_config(sc,
21512 			    HAL_CE_DST_STATUS, i);
21513 		}
21514 	}
21515 }
21516 
/*
 * Return the SRNG shadow register configuration, creating it on first
 * use. No-op on hardware without shadow register support.
 */
void
qwx_ce_get_shadow_config(struct qwx_softc *sc, uint32_t **shadow_cfg,
    uint32_t *shadow_cfg_len)
{
	if (!sc->hw_params.supports_shadow_regs)
		return;

	qwx_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * non-CE srngs are configured firstly, then
	 * all CE srngs.
	 */
	qwx_hal_srng_shadow_config(sc);
	qwx_ce_shadow_config(sc);

	/* get the shadow configuration */
	qwx_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);
}
21540 
21541 void
21542 qwx_ce_cleanup_pipes(struct qwx_softc *sc)
21543 {
21544 	struct qwx_ce_pipe *pipe;
21545 	int pipe_num;
21546 
21547 	qwx_ce_stop_shadow_timers(sc);
21548 
21549 	for (pipe_num = 0; pipe_num < sc->hw_params.ce_count; pipe_num++) {
21550 		pipe = &sc->ce.ce_pipe[pipe_num];
21551 		qwx_ce_rx_pipe_cleanup(pipe);
21552 
21553 		/* Cleanup any src CE's which have interrupts disabled */
21554 		qwx_ce_poll_send_completed(sc, pipe_num);
21555 	}
21556 }
21557 
21558 int
21559 qwx_ce_alloc_pipes(struct qwx_softc *sc)
21560 {
21561 	struct qwx_ce_pipe *pipe;
21562 	int i;
21563 	int ret;
21564 	const struct ce_attr *attr;
21565 
21566 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21567 		attr = &sc->hw_params.host_ce_config[i];
21568 		pipe = &sc->ce.ce_pipe[i];
21569 		pipe->pipe_num = i;
21570 		pipe->sc = sc;
21571 		pipe->buf_sz = attr->src_sz_max;
21572 
21573 		ret = qwx_ce_alloc_pipe(sc, i);
21574 		if (ret) {
21575 			/* Free any partial successful allocation */
21576 			qwx_ce_free_pipes(sc);
21577 			return ret;
21578 		}
21579 	}
21580 
21581 	return 0;
21582 }
21583 
/*
 * Map a copy engine ID to its MSI data index. The mapping is currently
 * 1:1; sc is unused but kept for interface symmetry.
 */
void
qwx_get_ce_msi_idx(struct qwx_softc *sc, uint32_t ce_id,
    uint32_t *msi_data_idx)
{
	*msi_data_idx = ce_id;
}
21590 
/*
 * Fill in the MSI fields of the SRNG parameters for a CE ring: 64-bit
 * MSI address, the data value for this CE (its index within the "CE"
 * MSI vector range), and the MSI-interrupt flag. Leaves ring_params
 * untouched if no MSI vector is available.
 */
void
qwx_ce_srng_msi_ring_params_setup(struct qwx_softc *sc, uint32_t ce_id,
    struct hal_srng_params *ring_params)
{
	uint32_t msi_data_start = 0;
	uint32_t msi_data_count = 1, msi_data_idx;
	uint32_t msi_irq_start = 0;
	uint32_t addr_lo;
	uint32_t addr_hi;
	int ret;

	ret = sc->ops.get_user_msi_vector(sc, "CE",
	    &msi_data_count, &msi_data_start, &msi_irq_start);
	if (ret)
		return;

	qwx_get_msi_address(sc, &addr_lo, &addr_hi);
	qwx_get_ce_msi_idx(sc, ce_id, &msi_data_idx);

	/* Combine the 32-bit halves into the 64-bit MSI address. */
	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
21615 
21616 int
21617 qwx_ce_init_ring(struct qwx_softc *sc, struct qwx_ce_ring *ce_ring,
21618     int ce_id, enum hal_ring_type type)
21619 {
21620 	struct hal_srng_params params = { 0 };
21621 	int ret;
21622 
21623 	params.ring_base_paddr = ce_ring->dmap->dm_segs[0].ds_addr;
21624 	params.ring_base_vaddr = (uint32_t *)ce_ring->base_addr;
21625 	params.num_entries = ce_ring->nentries;
21626 
21627 	if (!(CE_ATTR_DIS_INTR & sc->hw_params.host_ce_config[ce_id].flags))
21628 		qwx_ce_srng_msi_ring_params_setup(sc, ce_id, &params);
21629 
21630 	switch (type) {
21631 	case HAL_CE_SRC:
21632 		if (!(CE_ATTR_DIS_INTR &
21633 		    sc->hw_params.host_ce_config[ce_id].flags))
21634 			params.intr_batch_cntr_thres_entries = 1;
21635 		break;
21636 	case HAL_CE_DST:
21637 		params.max_buffer_len =
21638 		    sc->hw_params.host_ce_config[ce_id].src_sz_max;
21639 		if (!(sc->hw_params.host_ce_config[ce_id].flags &
21640 		    CE_ATTR_DIS_INTR)) {
21641 			params.intr_timer_thres_us = 1024;
21642 			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
21643 			params.low_threshold = ce_ring->nentries - 3;
21644 		}
21645 		break;
21646 	case HAL_CE_DST_STATUS:
21647 		if (!(sc->hw_params.host_ce_config[ce_id].flags &
21648 		    CE_ATTR_DIS_INTR)) {
21649 			params.intr_batch_cntr_thres_entries = 1;
21650 			params.intr_timer_thres_us = 0x1000;
21651 		}
21652 		break;
21653 	default:
21654 		printf("%s: Invalid CE ring type %d\n",
21655 		    sc->sc_dev.dv_xname, type);
21656 		return EINVAL;
21657 	}
21658 
21659 	/* TODO: Init other params needed by HAL to init the ring */
21660 
21661 	ret = qwx_hal_srng_setup(sc, type, ce_id, 0, &params);
21662 	if (ret < 0) {
21663 		printf("%s: failed to setup srng: ring_id %d ce_id %d\n",
21664 		    sc->sc_dev.dv_xname, ret, ce_id);
21665 		return ret;
21666 	}
21667 
21668 	ce_ring->hal_ring_id = ret;
21669 
21670 	if (sc->hw_params.supports_shadow_regs &&
21671 	    qwx_ce_need_shadow_fix(ce_id))
21672 		qwx_dp_shadow_init_timer(sc, &sc->ce.hp_timer[ce_id],
21673 		    ATH11K_SHADOW_CTRL_TIMER_INTERVAL, ce_ring->hal_ring_id);
21674 
21675 	return 0;
21676 }
21677 
/*
 * Initialize the HAL rings of every allocated CE pipe and reset their
 * software ring indices. Returns 0 on success or the first ring-setup
 * error; partially initialized rings are left as-is on error.
 */
int
qwx_ce_init_pipes(struct qwx_softc *sc)
{
	struct qwx_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < sc->hw_params.ce_count; i++) {
		pipe = &sc->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = qwx_ce_init_ring(sc, pipe->src_ring, i,
			    HAL_CE_SRC);
			if (ret) {
				printf("%s: failed to init src ring: %d\n",
				    sc->sc_dev.dv_xname, ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = qwx_ce_init_ring(sc, pipe->dest_ring, i,
			    HAL_CE_DST);
			if (ret) {
				printf("%s: failed to init dest ring: %d\n",
				    sc->sc_dev.dv_xname, ret);
				/* Should we clear any partial init */
				return ret;
			}

			/* Keep two entries of headroom below ring capacity. */
			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
			    pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = qwx_ce_init_ring(sc, pipe->status_ring, i,
			    HAL_CE_DST_STATUS);
			if (ret) {
				printf("%s: failed to init status ring: %d\n",
				    sc->sc_dev.dv_xname, ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}
21736 
21737 int
21738 qwx_hal_srng_src_num_free(struct qwx_softc *sc, struct hal_srng *srng,
21739     int sync_hw_ptr)
21740 {
21741 	uint32_t tp, hp;
21742 #ifdef notyet
21743 	lockdep_assert_held(&srng->lock);
21744 #endif
21745 	hp = srng->u.src_ring.hp;
21746 
21747 	if (sync_hw_ptr) {
21748 		tp = *srng->u.src_ring.tp_addr;
21749 		srng->u.src_ring.cached_tp = tp;
21750 	} else {
21751 		tp = srng->u.src_ring.cached_tp;
21752 	}
21753 
21754 	if (tp > hp)
21755 		return ((tp - hp) / srng->entry_size) - 1;
21756 	else
21757 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
21758 }
21759 
/*
 * Post one DMA-mapped receive buffer onto a pipe's destination ring.
 * Returns 0 on success or ENOSPC when the ring has no free entry.
 * Caller holds the CE lock (see the notyet lockdep assertion).
 */
int
qwx_ce_rx_buf_enqueue_pipe(struct qwx_ce_pipe *pipe, bus_dmamap_t map)
{
	struct qwx_softc *sc = pipe->sc;
	struct qwx_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	uint32_t *desc;
	uint64_t paddr;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ab->ce.ce_lock);
#endif
	write_index = ring->write_index;

	srng = &sc->hal.srng_list[ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);
	/*
	 * NOTE(review): the sync length is srng->entry_size words, i.e.
	 * descriptor-sized, not the full rx buffer -- confirm this is
	 * intentional.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_POSTREAD);

	if (qwx_hal_srng_src_num_free(sc, srng, 0) < 1) {
		ret = ENOSPC;
		goto exit;
	}

	desc = qwx_hal_srng_src_get_next_entry(sc, srng);
	if (!desc) {
		ret = ENOSPC;
		goto exit;
	}

	/* Hand the buffer's physical address to the hardware. */
	paddr = map->dm_segs[0].ds_addr;
	qwx_hal_ce_dst_set_desc(desc, paddr);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	/* Always close HAL access, even on the ENOSPC paths. */
	qwx_hal_srng_access_end(sc, srng);
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
21813 
21814 int
21815 qwx_ce_rx_post_pipe(struct qwx_ce_pipe *pipe)
21816 {
21817 	struct qwx_softc *sc = pipe->sc;
21818 	int ret = 0;
21819 	unsigned int idx;
21820 	void *ctx;
21821 	struct qwx_rx_data *rx_data;
21822 	struct mbuf *m;
21823 
21824 	if (!pipe->dest_ring)
21825 		return 0;
21826 
21827 #ifdef notyet
21828 	spin_lock_bh(&ab->ce.ce_lock);
21829 #endif
21830 	while (pipe->rx_buf_needed) {
21831 		m = m_gethdr(M_DONTWAIT, MT_DATA);
21832 		if (m == NULL) {
21833 			ret = ENOBUFS;
21834 			goto done;
21835 		}
21836 
21837 		if (pipe->buf_sz <= MCLBYTES)
21838 			MCLGET(m, M_DONTWAIT);
21839 		else
21840 			MCLGETL(m, M_DONTWAIT, pipe->buf_sz);
21841 		if ((m->m_flags & M_EXT) == 0) {
21842 			ret = ENOBUFS;
21843 			goto done;
21844 		}
21845 
21846 		idx = pipe->dest_ring->write_index;
21847 		ctx = pipe->dest_ring->per_transfer_context[idx];
21848 		rx_data = (struct qwx_rx_data *)ctx;
21849 
21850 		m->m_len = m->m_pkthdr.len = pipe->buf_sz;
21851 		ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
21852 		    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
21853 		if (ret) {
21854 			printf("%s: can't map mbuf (error %d)\n",
21855 			    sc->sc_dev.dv_xname, ret);
21856 			m_freem(m);
21857 			goto done;
21858 		}
21859 
21860 		ret = qwx_ce_rx_buf_enqueue_pipe(pipe, rx_data->map);
21861 		if (ret) {
21862 			printf("%s: failed to enqueue rx buf: %d\n",
21863 			    sc->sc_dev.dv_xname, ret);
21864 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
21865 			m_freem(m);
21866 			break;
21867 		} else
21868 			rx_data->m = m;
21869 	}
21870 
21871 done:
21872 #ifdef notyet
21873 	spin_unlock_bh(&ab->ce.ce_lock);
21874 #endif
21875 	return ret;
21876 }
21877 
21878 void
21879 qwx_ce_rx_post_buf(struct qwx_softc *sc)
21880 {
21881 	struct qwx_ce_pipe *pipe;
21882 	int i;
21883 	int ret;
21884 
21885 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21886 		pipe = &sc->ce.ce_pipe[i];
21887 		ret = qwx_ce_rx_post_pipe(pipe);
21888 		if (ret) {
21889 			if (ret == ENOSPC)
21890 				continue;
21891 
21892 			printf("%s: failed to post rx buf to pipe: %d err: %d\n",
21893 			    sc->sc_dev.dv_xname, i, ret);
21894 #ifdef notyet
21895 			mod_timer(&ab->rx_replenish_retry,
21896 				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
21897 #endif
21898 
21899 			return;
21900 		}
21901 	}
21902 }
21903 
/*
 * Reap one completed receive from a pipe's destination-status ring.
 * On success, stores the per-transfer context of the consumed
 * destination-ring slot (if requested) and its byte count, advances
 * the destination ring's software index, and bumps rx_buf_needed so
 * the slot gets refilled. Returns EIO when no valid completion is
 * available.
 */
int
qwx_ce_completed_recv_next(struct qwx_ce_pipe *pipe,
    void **per_transfer_contextp, int *nbytes)
{
	struct qwx_softc *sc = pipe->sc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	uint32_t *desc;
	int ret = 0;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	/* Completions are read from the status ring, not the dest ring. */
	srng = &sc->hal.srng_list[pipe->status_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	desc = qwx_hal_srng_dst_get_next_entry(sc, srng);
	if (!desc) {
		ret = EIO;
		goto err;
	}

	/* A zero-length status descriptor is treated as invalid. */
	*nbytes = qwx_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = EIO;
		goto err;
	}

	if (per_transfer_contextp) {
		*per_transfer_contextp =
		    pipe->dest_ring->per_transfer_context[sw_index];
	}

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return ret;
}
21955 
21956 int
21957 qwx_ce_recv_process_cb(struct qwx_ce_pipe *pipe)
21958 {
21959 	struct qwx_softc *sc = pipe->sc;
21960 	struct mbuf *m;
21961 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
21962 	void *transfer_context;
21963 	unsigned int nbytes, max_nbytes;
21964 	int ret = 0, err;
21965 
21966 	while (qwx_ce_completed_recv_next(pipe, &transfer_context,
21967 	    &nbytes) == 0) {
21968 		struct qwx_rx_data *rx_data = transfer_context;
21969 
21970 		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
21971 		m = rx_data->m;
21972 		rx_data->m = NULL;
21973 
21974 		max_nbytes = m->m_pkthdr.len;
21975 		if (max_nbytes < nbytes) {
21976 			printf("%s: received more than expected (nbytes %d, "
21977 			    "max %d)", __func__, nbytes, max_nbytes);
21978 			m_freem(m);
21979 			continue;
21980 		}
21981 		m->m_len = m->m_pkthdr.len = nbytes;
21982 		ml_enqueue(&ml, m);
21983 		ret = 1;
21984 	}
21985 
21986 	while ((m = ml_dequeue(&ml))) {
21987 		DNPRINTF(QWX_D_CE, "%s: rx ce pipe %d len %d\n", __func__,
21988 		    pipe->pipe_num, m->m_len);
21989 		pipe->recv_cb(sc, m);
21990 	}
21991 
21992 	err = qwx_ce_rx_post_pipe(pipe);
21993 	if (err && err != ENOSPC) {
21994 		printf("%s: failed to post rx buf to pipe: %d err: %d\n",
21995 		    __func__, pipe->pipe_num, err);
21996 #ifdef notyet
21997 		mod_timer(&ab->rx_replenish_retry,
21998 			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
21999 #endif
22000 	}
22001 
22002 	return ret;
22003 }
22004 
22005 int
22006 qwx_ce_per_engine_service(struct qwx_softc *sc, uint16_t ce_id)
22007 {
22008 	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
22009 	const struct ce_attr *attr = &sc->hw_params.host_ce_config[ce_id];
22010 	int ret = 0;
22011 
22012 	if (attr->src_nentries) {
22013 		if (qwx_ce_tx_process_cb(pipe))
22014 			ret = 1;
22015 	}
22016 
22017 	if (pipe->recv_cb) {
22018 		if (qwx_ce_recv_process_cb(pipe))
22019 			ret = 1;
22020 	}
22021 
22022 	return ret;
22023 }
22024 
/*
 * Enqueue an already-DMA-mapped mbuf on a pipe's source ring for
 * transmission. Returns 0 on success, ENOBUFS when the ring is full,
 * or ESHUTDOWN after a firmware crash. The mbuf's DMA map must have
 * been loaded into the slot's qwx_tx_data beforehand.
 */
int
qwx_ce_send(struct qwx_softc *sc, struct mbuf *m, uint8_t pipe_id,
    uint16_t transfer_id)
{
	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	uint32_t *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	uint8_t byte_swap_data = 0;
	int num_used;
	uint64_t paddr;
	void *ctx;
	struct qwx_tx_data *tx_data;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
#ifdef notyet
		spin_lock_bh(&ab->ce.ce_lock);
#endif
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		/* Count in-flight entries, accounting for wrap-around. */
		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
			    write_index;
#ifdef notyet
		spin_unlock_bh(&ab->ce.ce_lock);
#endif
		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			qwx_ce_poll_send_completed(sc, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	if (qwx_hal_srng_src_num_free(sc, srng, 0) < 1) {
		qwx_hal_srng_access_end(sc, srng);
		ret = ENOBUFS;
		goto err_unlock;
	}

	desc = qwx_hal_srng_src_get_next_reaped(sc, srng);
	if (!desc) {
		qwx_hal_srng_access_end(sc, srng);
		ret = ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	/* The slot's tx_data holds the pre-loaded DMA map for this mbuf. */
	ctx = pipe->src_ring->per_transfer_context[write_index];
	tx_data = (struct qwx_tx_data *)ctx;

	paddr = tx_data->map->dm_segs[0].ds_addr;
	qwx_hal_ce_src_set_desc(desc, paddr, m->m_pkthdr.len,
	    transfer_id, byte_swap_data);

	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
	    write_index);

	qwx_hal_srng_access_end(sc, srng);

	/* Kick the shadow-register timer so the HP update reaches HW. */
	if (qwx_ce_need_shadow_fix(pipe_id))
		qwx_dp_shadow_start_timer(sc, srng, &sc->ce.hp_timer[pipe_id]);

err_unlock:
#ifdef notyet
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return ret;
}
22118 
/*
 * Count the number of chains enabled in an antenna chain mask,
 * i.e. the population count of the set bits.
 */
int
qwx_get_num_chains(uint32_t mask)
{
	int num_chains;

	/* Kernighan's trick: each iteration clears the lowest set bit. */
	for (num_chains = 0; mask != 0; mask &= mask - 1)
		num_chains++;

	return num_chains;
}
22132 
/*
 * Program the tx and rx antenna chain masks into firmware via WMI and
 * update the softc's cached chain counts. Returns 0 on success or the
 * WMI error.
 */
int
qwx_set_antenna(struct qwx_pdev *pdev, uint32_t tx_ant, uint32_t rx_ant)
{
	struct qwx_softc *sc = pdev->sc;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* Remember the requested configuration before talking to FW. */
	sc->cfg_tx_chainmask = tx_ant;
	sc->cfg_rx_chainmask = rx_ant;
#if 0
	if (ar->state != ATH11K_STATE_ON &&
	    ar->state != ATH11K_STATE_RESTARTED)
		return 0;
#endif
	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_TX_CHAIN_MASK,
	    tx_ant, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set tx-chainmask: %d, req 0x%x\n",
		    sc->sc_dev.dv_xname, ret, tx_ant);
		return ret;
	}

	sc->num_tx_chains = qwx_get_num_chains(tx_ant);

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_RX_CHAIN_MASK,
	    rx_ant, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set rx-chainmask: %d, req 0x%x\n",
		    sc->sc_dev.dv_xname, ret, rx_ant);
		return ret;
	}

	sc->num_rx_chains = qwx_get_num_chains(rx_ant);
#if 0
	/* Reload HT/VHT/HE capability */
	ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
	ath11k_mac_setup_he_cap(ar, &ar->pdev->cap);
#endif
	return 0;
}
22174 
/*
 * Build a WMI channel list from net80211's configured channels and
 * send it to firmware. Returns EINVAL if no channels are configured,
 * ENOMEM on allocation failure, or the WMI command's result. The two
 * loops over ic_channels must use the same skip condition so the
 * count matches the filled entries.
 */
int
qwx_reg_update_chan_list(struct qwx_softc *sc, uint8_t pdev_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct scan_chan_list_params *params;
	struct ieee80211_channel *channel, *lastc;
	struct channel_param *ch;
	int num_channels = 0;
	size_t params_size;
	int ret;
#if 0
	if (ar->state == ATH11K_STATE_RESTARTING)
		return 0;
#endif
	/* First pass: count channels in use (ic_flags == 0 means unused). */
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
		if (channel->ic_flags == 0)
			continue;
		num_channels++;
	}

	if (!num_channels)
		return EINVAL;

	params_size = sizeof(*params) +
	    num_channels * sizeof(*params->ch_param);

	/*
	 * TODO: This is a temporary list for qwx_wmi_send_scan_chan_list_cmd
	 * to loop over. Could that function loop over ic_channels directly?
	 */
	params = malloc(params_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!params)
		return ENOMEM;

	params->pdev_id = pdev_id;
	params->nallchans = num_channels;

	/* Second pass: fill one channel_param per counted channel. */
	ch = params->ch_param;
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
		if (channel->ic_flags == 0)
			continue;
#ifdef notyet
		/* TODO: Set to true/false based on some condition? */
		ch->allow_ht = true;
		ch->allow_vht = true;
		ch->allow_he = true;
#endif
		/* Treat passive 5 GHz channels as DFS channels. */
		ch->dfs_set = !!(IEEE80211_IS_CHAN_5GHZ(channel) &&
		    (channel->ic_flags & IEEE80211_CHAN_PASSIVE));
		ch->is_chan_passive = !!(channel->ic_flags &
		    IEEE80211_CHAN_PASSIVE);
		ch->is_chan_passive |= ch->dfs_set;
		ch->mhz = ieee80211_ieee2mhz(ieee80211_chan2ieee(ic, channel),
		    channel->ic_flags);
		ch->cfreq1 = ch->mhz;
		ch->minpower = 0;
		ch->maxpower = 40; /* XXX from Linux debug trace */
		ch->maxregpower = ch->maxpower;
		ch->antennamax = 0;

		/* TODO: Use appropriate phymodes */
		if (IEEE80211_IS_CHAN_A(channel))
			ch->phy_mode = MODE_11A;
		else if (IEEE80211_IS_CHAN_G(channel))
			ch->phy_mode = MODE_11G;
		else
			ch->phy_mode = MODE_11B;
#ifdef notyet
		if (channel->band == NL80211_BAND_6GHZ &&
		    cfg80211_channel_is_psc(channel))
			ch->psc_channel = true;
#endif
		DNPRINTF(QWX_D_WMI, "%s: mac channel freq %d maxpower %d "
		    "regpower %d antenna %d mode %d\n", __func__,
		    ch->mhz, ch->maxpower, ch->maxregpower,
		    ch->antennamax, ch->phy_mode);

		ch++;
		/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
		 * set_agile, reg_class_idx
		 */
	}

	ret = qwx_wmi_send_scan_chan_list_cmd(sc, pdev_id, params);
	free(params, M_DEVBUF, params_size);

	return ret;
}
22265 
/*
 * Default monitor-status ring filter: MPDU start and PPDU end TLVs
 * plus management/control/data packet filter flags. Applied by
 * qwx_mac_config_mon_status_default() when enabling the rings.
 */
static const struct htt_rx_ring_tlv_filter qwx_mac_mon_status_filter_default = {
	.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
	.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
	.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
	.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
	.pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
	    HTT_RX_FP_CTRL_FILTER_FLASG3
};
22276 
/*
 * Final MAC-layer registration: set the channel-counter clock rate,
 * build the free-vdev bitmap, and adopt the firmware MAC address if
 * none was configured yet.
 */
int
qwx_mac_register(struct qwx_softc *sc)
{
	/* Initialize channel counters frequency value in hertz */
	sc->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;

	/* One free bit per vdev across all radios.
	 * NOTE(review): with a 1U operand this shift is undefined if
	 * num_radios * TARGET_NUM_VDEVS(sc) >= 32 -- confirm the product
	 * stays below 32 on supported configurations. */
	sc->free_vdev_map = (1U << (sc->num_radios * TARGET_NUM_VDEVS(sc))) - 1;

	/* Keep a MAC address configured earlier (e.g. by the admin). */
	if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr, sc->mac_addr);

	return 0;
}
22290 
/*
 * Configure the monitor status rings with the default rx filter
 * (or an empty filter when disabling). The actual filter setup is
 * currently compiled out; see the #if 0 comment below.
 */
int
qwx_mac_config_mon_status_default(struct qwx_softc *sc, int enable)
{
	struct htt_rx_ring_tlv_filter tlv_filter = { 0 };
	int ret = 0;
#if 0
	int i;
	struct dp_rxdma_ring *ring;
#endif

	if (enable)
		tlv_filter = qwx_mac_mon_status_filter_default;
#if 0 /* mon status info is not useful and the code triggers mbuf corruption */
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ring = &sc->pdev_dp.rx_mon_status_refill_ring[i];
		ret = qwx_dp_tx_htt_rx_filter_setup(sc,
		    ring->refill_buf_ring.ring_id, sc->pdev_dp.mac_id + i,
		    HAL_RXDMA_MONITOR_STATUS, DP_RX_BUFFER_SIZE, &tlv_filter);
		if (ret)
			return ret;
	}

	if (enable && !sc->hw_params.rxdma1_enable) {
		timeout_add_msec(&sc->mon_reap_timer,
		    ATH11K_MON_TIMER_INTERVAL);
	}
#endif
	return ret;
}
22320 
/*
 * Recompute the pdev tx power limit as the minimum positive txpower
 * over all vifs, clamp it to the hardware range, and program it into
 * firmware for each supported band. No-op when no vif has a positive
 * txpower set.
 */
int
qwx_mac_txpower_recalc(struct qwx_softc *sc, struct qwx_pdev *pdev)
{
	struct qwx_vif *arvif;
	int ret, txpower = -1;
	uint32_t param;
	uint32_t min_tx_power = sc->target_caps.hw_min_tx_power;
	uint32_t max_tx_power = sc->target_caps.hw_max_tx_power;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* Take the smallest positive txpower requested by any vif. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->txpower <= 0)
			continue;

		if (txpower == -1)
			txpower = arvif->txpower;
		else
			txpower = MIN(txpower, arvif->txpower);
	}

	if (txpower == -1)
		return 0;

	/* txpwr is set as 2 units per dBm in FW*/
	txpower = MIN(MAX(min_tx_power, txpower), max_tx_power) * 2;
	DNPRINTF(QWX_D_MAC, "txpower to set in hw %d\n", txpower / 2);

	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
		param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
		ret = qwx_wmi_pdev_set_param(sc, param, txpower,
		    pdev->pdev_id);
		if (ret)
			goto fail;
	}

	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
		param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
		ret = qwx_wmi_pdev_set_param(sc, param, txpower,
		    pdev->pdev_id);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	DNPRINTF(QWX_D_MAC, "%s: failed to recalc txpower limit %d "
	    "using pdev param %d: %d\n", sc->sc_dev.dv_xname, txpower / 2,
	    param, ret);

	return ret;
}
22374 
/*
 * Bring a pdev into operation: push the initial set of WMI pdev
 * parameters, enable radar offload and ppdu stats, set antennas,
 * upload the channel list, configure the monitor status rings, and
 * finally mark the pdev active. Any WMI failure aborts the sequence
 * and leaves the pdev unmarked.
 */
int
qwx_mac_op_start(struct qwx_pdev *pdev)
{
	struct qwx_softc *sc = pdev->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	int ret;

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_PMF_QOS, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable PMF QOS for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable dynamic bw for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/* Register our OUI so spoofed scan probe requests are matched. */
	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT)) {
		ret = qwx_wmi_scan_prob_req_oui(sc, ic->ic_myaddr,
		    pdev->pdev_id);
		if (ret) {
			printf("%s: failed to set prob req oui for "
			    "pdev %d: %i\n", sc->sc_dev.dv_xname,
			    pdev->pdev_id, ret);
			goto err;
		}
	}

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_ARP_AC_OVERRIDE, 0,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set ac override for ARP for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwx_wmi_send_dfs_phyerr_offload_enable_cmd(sc, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to offload radar detection for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwx_dp_tx_htt_h2t_ppdu_stats_req(sc, HTT_PPDU_STATS_TAG_DEFAULT,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to req ppdu stats for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_MESH_MCAST_ENABLE, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable MESH MCAST ENABLE for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	qwx_set_antenna(pdev, pdev->cap.tx_chain_mask, pdev->cap.rx_chain_mask);

	/* TODO: Do we need to enable ANI? */

	ret = qwx_reg_update_chan_list(sc, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to update channel list for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/* Reset per-pdev bookkeeping before any vdevs are created. */
	sc->num_started_vdevs = 0;
	sc->num_created_vdevs = 0;
	sc->num_peers = 0;
	sc->allocated_vdev_map = 0;

	/* Configure monitor status ring with default rx_filter to get rx status
	 * such as rssi, rx_duration.
	 */
	ret = qwx_mac_config_mon_status_default(sc, 1);
	if (ret) {
		printf("%s: failed to configure monitor status ring "
		    "with default rx_filter: (%d)\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* Configure the hash seed for hash based reo dest ring selection */
	qwx_wmi_pdev_lro_cfg(sc, pdev->pdev_id);

	/* allow device to enter IMPS */
	if (sc->hw_params.idle_ps) {
		ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
		    1, pdev->pdev_id);
		if (ret) {
			printf("%s: failed to enable idle ps: %d\n",
			    sc->sc_dev.dv_xname, ret);
			goto err;
		}
	}
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	sc->pdevs_active |= (1 << pdev->pdev_id);
	return 0;
err:
#ifdef notyet
	ar->state = ATH11K_STATE_OFF;
	mutex_unlock(&ar->conf_mutex);
#endif
	return ret;
}
22492 
/*
 * Derive MBSSID parameters for a vdev. Multiple BSSID operation is
 * not supported, so this always reports a non-MBSSID AP with
 * transmitting vdev id 0.
 */
int
qwx_mac_setup_vdev_params_mbssid(struct qwx_vif *arvif,
    uint32_t *flags, uint32_t *tx_vdev_id)
{
	*tx_vdev_id = 0;
	*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
	return 0;
}
22501 
/*
 * Fill in the WMI vdev-create parameters for a vif: identity, pdev
 * binding, MBSSID flags (when firmware does not take them in the
 * vdev-start command instead), and per-band chain counts.
 */
int
qwx_mac_setup_vdev_create_params(struct qwx_vif *arvif, struct qwx_pdev *pdev,
    struct vdev_create_params *params)
{
	struct qwx_softc *sc = arvif->sc;
	int ret;

	params->if_id = arvif->vdev_id;
	params->type = arvif->vdev_type;
	params->subtype = arvif->vdev_subtype;
	params->pdev_id = pdev->pdev_id;
	params->mbssid_flags = 0;
	params->mbssid_tx_vdev_id = 0;

	/* If firmware takes MBSSID params at vdev-start time instead,
	 * they are set in qwx_mac_vdev_start_restart(), not here. */
	if (!isset(sc->wmi.svc_map,
	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
		ret = qwx_mac_setup_vdev_params_mbssid(arvif,
		    &params->mbssid_flags, &params->mbssid_tx_vdev_id);
		if (ret)
			return ret;
	}

	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
		params->chains[0].tx = sc->num_tx_chains;
		params->chains[0].rx = sc->num_rx_chains;
	}
	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
		params->chains[1].tx = sc->num_tx_chains;
		params->chains[1].rx = sc->num_rx_chains;
	}
#if 0
	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
	    ar->supports_6ghz) {
		params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
		params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
	}
#endif
	return 0;
}
22541 
22542 int
22543 qwx_mac_op_update_vif_offload(struct qwx_softc *sc, struct qwx_pdev *pdev,
22544     struct qwx_vif *arvif)
22545 {
22546 	uint32_t param_id, param_value;
22547 	int ret;
22548 
22549 	param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
22550 	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
22551 		param_value = ATH11K_HW_TXRX_RAW;
22552 	else
22553 		param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
22554 
22555 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
22556 	    param_id, param_value);
22557 	if (ret) {
22558 		printf("%s: failed to set vdev %d tx encap mode: %d\n",
22559 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22560 		return ret;
22561 	}
22562 
22563 	param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
22564 	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
22565 		param_value = ATH11K_HW_TXRX_RAW;
22566 	else
22567 		param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
22568 
22569 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
22570 	    param_id, param_value);
22571 	if (ret) {
22572 		printf("%s: failed to set vdev %d rx decap mode: %d\n",
22573 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22574 		return ret;
22575 	}
22576 
22577 	return 0;
22578 }
22579 
/* Tear down a vdev in firmware; currently an unimplemented stub. */
void
qwx_mac_vdev_delete(struct qwx_softc *sc, struct qwx_vif *arvif)
{
	printf("%s: not implemented\n", __func__);
}
22585 
/*
 * Sleep until the firmware signals completion of a vdev start/stop
 * (vdev_setup_done is set by the WMI event handler, which wakes us).
 * Returns 0 on completion, ESHUTDOWN after a firmware crash, or the
 * tsleep error (e.g. EWOULDBLOCK) on the one-second timeout.
 */
int
qwx_mac_vdev_setup_sync(struct qwx_softc *sc)
{
	int ret;

#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;

	while (!sc->vdev_setup_done) {
		ret = tsleep_nsec(&sc->vdev_setup_done, 0, "qwxvdev",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: vdev start timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	return 0;
}
22609 
/* Configure TX beamforming for a vif; stub until TXBF is supported. */
int
qwx_mac_set_txbf_conf(struct qwx_vif *arvif)
{
	/* TX beamforming is not yet supported. */
	return 0;
}
22616 
/*
 * Stop a running vdev: issue the WMI stop command, wait for the
 * firmware completion event, then update the started-vdev count and
 * clear any running CAC state.
 */
int
qwx_mac_vdev_stop(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
{
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
#if 0
	reinit_completion(&ar->vdev_setup_done);
#endif
	/* Cleared before the command; the WMI event handler sets it. */
	sc->vdev_setup_done = 0;
	ret = qwx_wmi_vdev_stop(sc, arvif->vdev_id, pdev_id);
	if (ret) {
		printf("%s: failed to stop WMI vdev %i: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
		return ret;
	}

	ret = qwx_mac_vdev_setup_sync(sc);
	if (ret) {
		printf("%s: failed to synchronize setup for vdev %i: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
		return ret;
	}

	if (sc->num_started_vdevs > 0)
		sc->num_started_vdevs--;

	DNPRINTF(QWX_D_MAC, "%s: vdev vdev_id %d stopped\n", __func__,
	    arvif->vdev_id);

	if (test_bit(ATH11K_CAC_RUNNING, sc->sc_flags)) {
		clear_bit(ATH11K_CAC_RUNNING, sc->sc_flags);
		DNPRINTF(QWX_D_MAC, "%s: CAC Stopped for vdev %d\n", __func__,
		    arvif->vdev_id);
	}

	return 0;
}
22656 
/*
 * Start (if restart == 0) or restart (if restart == 1) a firmware
 * vdev on the current BSS channel and wait for the firmware to
 * acknowledge. Channel, power, and stream parameters are derived from
 * the current net80211 state; only 11a/b/g phy modes are handled so
 * far. Returns 0 on success, or an errno-style error code.
 */
int
qwx_mac_vdev_start_restart(struct qwx_softc *sc, struct qwx_vif *arvif,
    int pdev_id, int restart)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *chan = ic->ic_bss->ni_chan;
	struct wmi_vdev_start_req_arg arg = {};
	int ret = 0;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
#if 0
	reinit_completion(&ar->vdev_setup_done);
#endif
	arg.vdev_id = arvif->vdev_id;
	arg.dtim_period = ic->ic_dtim_period;
	arg.bcn_intval = ic->ic_lintval;

	/* Both center frequencies are set to the primary channel
	 * frequency; no wide-channel support here yet. */
	arg.channel.freq = chan->ic_freq;
	arg.channel.band_center_freq1 = chan->ic_freq;
	arg.channel.band_center_freq2 = chan->ic_freq;

	switch (ic->ic_curmode) {
	case IEEE80211_MODE_11A:
		arg.channel.mode = MODE_11A;
		break;
	case IEEE80211_MODE_11B:
		arg.channel.mode = MODE_11B;
		break;
	case IEEE80211_MODE_11G:
		arg.channel.mode = MODE_11G;
		break;
	default:
		printf("%s: unsupported phy mode %d\n",
		    sc->sc_dev.dv_xname, ic->ic_curmode);
		return ENOTSUP;
	}

	arg.channel.min_power = 0;
	arg.channel.max_power = 20; /* XXX */
	arg.channel.max_reg_power = 20; /* XXX */
	arg.channel.max_antenna_gain = 0; /* XXX */

	arg.pref_tx_streams = 1;
	arg.pref_rx_streams = 1;

	arg.mbssid_flags = 0;
	arg.mbssid_tx_vdev_id = 0;
	if (isset(sc->wmi.svc_map,
	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
		ret = qwx_mac_setup_vdev_params_mbssid(arvif,
		    &arg.mbssid_flags, &arg.mbssid_tx_vdev_id);
		if (ret)
			return ret;
	}
#if 0
	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		arg.ssid = arvif->u.ap.ssid;
		arg.ssid_len = arvif->u.ap.ssid_len;
		arg.hidden_ssid = arvif->u.ap.hidden_ssid;

		/* For now allow DFS for AP mode */
		arg.channel.chan_radar =
			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);

		arg.channel.freq2_radar = ctx->radar_enabled;

		arg.channel.passive = arg.channel.chan_radar;

		spin_lock_bh(&ab->base_lock);
		arg.regdomain = ar->ab->dfs_region;
		spin_unlock_bh(&ab->base_lock);
	}
#endif
	/* XXX */
	/* Mark 5 GHz channels numbered 52 and above as passive. */
	arg.channel.passive |= !!(ieee80211_chan2ieee(ic, chan) >= 52);

	DNPRINTF(QWX_D_MAC, "%s: vdev %d start center_freq %d phymode %s\n",
	    __func__, arg.vdev_id, arg.channel.freq,
	    qwx_wmi_phymode_str(arg.channel.mode));

	/* Cleared before issuing the start command; the setup-sync call
	 * below presumably waits for the event path to set it again. */
	sc->vdev_setup_done = 0;
	ret = qwx_wmi_vdev_start(sc, &arg, pdev_id, restart);
	if (ret) {
		printf("%s: failed to %s WMI vdev %i\n", sc->sc_dev.dv_xname,
		    restart ? "restart" : "start", arg.vdev_id);
		return ret;
	}

	ret = qwx_mac_vdev_setup_sync(sc);
	if (ret) {
		printf("%s: failed to synchronize setup for vdev %i %s: %d\n",
		    sc->sc_dev.dv_xname, arg.vdev_id,
		    restart ? "restart" : "start", ret);
		return ret;
	}

	if (!restart)
		sc->num_started_vdevs++;

	DNPRINTF(QWX_D_MAC, "%s: vdev %d started\n", __func__, arvif->vdev_id);

	/* Enable CAC Flag in the driver by checking the channel DFS cac time,
	 * i.e dfs_cac_ms value which will be valid only for radar channels
	 * and state as NL80211_DFS_USABLE which indicates CAC needs to be
	 * done before channel usage. This flags is used to drop rx packets.
	 * during CAC.
	 */
	/* TODO Set the flag for other interface types as required */
#if 0
	if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
	    chandef->chan->dfs_cac_ms &&
	    chandef->chan->dfs_state == NL80211_DFS_USABLE) {
		set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
		ath11k_dbg(ab, ATH11K_DBG_MAC,
			   "CAC Started in chan_freq %d for vdev %d\n",
			   arg.channel.freq, arg.vdev_id);
	}
#endif
	/* txbf configuration failure is logged but not fatal. */
	ret = qwx_mac_set_txbf_conf(arvif);
	if (ret)
		printf("%s: failed to set txbf conf for vdev %d: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);

	return 0;
}
22783 
/*
 * Restart an existing vdev (restart variant of
 * qwx_mac_vdev_start_restart()).
 */
int
qwx_mac_vdev_restart(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
{
	return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, 1);
}
22789 
/*
 * Start a vdev for the first time (non-restart variant of
 * qwx_mac_vdev_start_restart()).
 */
int
qwx_mac_vdev_start(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
{
	return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, 0);
}
22795 
22796 void
22797 qwx_vif_free(struct qwx_softc *sc, struct qwx_vif *arvif)
22798 {
22799 	struct qwx_txmgmt_queue *txmgmt;
22800 	int i;
22801 
22802 	if (arvif == NULL)
22803 		return;
22804 
22805 	txmgmt = &arvif->txmgmt;
22806 	for (i = 0; i < nitems(txmgmt->data); i++) {
22807 		struct qwx_tx_data *tx_data = &txmgmt->data[i];
22808 
22809 		if (tx_data->m) {
22810 			m_freem(tx_data->m);
22811 			tx_data->m = NULL;
22812 		}
22813 		if (tx_data->map) {
22814 			bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
22815 			tx_data->map = NULL;
22816 		}
22817 	}
22818 
22819 	free(arvif, M_DEVBUF, sizeof(*arvif));
22820 }
22821 
22822 void
22823 qwx_vif_free_all(struct qwx_softc *sc)
22824 {
22825 	struct qwx_vif *arvif;
22826 
22827 	while (!TAILQ_EMPTY(&sc->vif_list)) {
22828 		arvif = TAILQ_FIRST(&sc->vif_list);
22829 		TAILQ_REMOVE(&sc->vif_list, arvif, entry);
22830 		qwx_vif_free(sc, arvif);
22831 	}
22832 }
22833 
22834 struct qwx_vif *
22835 qwx_vif_alloc(struct qwx_softc *sc)
22836 {
22837 	struct qwx_vif *arvif;
22838 	struct qwx_txmgmt_queue *txmgmt;
22839 	int i, ret = 0;
22840 	const bus_size_t size = IEEE80211_MAX_LEN;
22841 
22842 	arvif = malloc(sizeof(*arvif), M_DEVBUF, M_NOWAIT | M_ZERO);
22843 	if (arvif == NULL)
22844 		return NULL;
22845 
22846 	txmgmt = &arvif->txmgmt;
22847 	for (i = 0; i < nitems(txmgmt->data); i++) {
22848 		struct qwx_tx_data *tx_data = &txmgmt->data[i];
22849 
22850 		ret = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
22851 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &tx_data->map);
22852 		if (ret) {
22853 			qwx_vif_free(sc, arvif);
22854 			return NULL;
22855 		}
22856 	}
22857 
22858 	arvif->sc = sc;
22859 
22860 	return arvif;
22861 }
22862 
22863 int
22864 qwx_mac_op_add_interface(struct qwx_pdev *pdev)
22865 {
22866 	struct qwx_softc *sc = pdev->sc;
22867 	struct ieee80211com *ic = &sc->sc_ic;
22868 	struct qwx_vif *arvif = NULL;
22869 	struct vdev_create_params vdev_param = { 0 };
22870 #if 0
22871 	struct peer_create_params peer_param;
22872 #endif
22873 	uint32_t param_id, param_value;
22874 	uint16_t nss;
22875 #if 0
22876 	int i;
22877 	int fbret;
22878 #endif
22879 	int ret, bit;
22880 #ifdef notyet
22881 	mutex_lock(&ar->conf_mutex);
22882 #endif
22883 #if 0
22884 	if (vif->type == NL80211_IFTYPE_AP &&
22885 	    ar->num_peers > (ar->max_num_peers - 1)) {
22886 		ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
22887 		ret = -ENOBUFS;
22888 		goto err;
22889 	}
22890 #endif
22891 	if (sc->num_created_vdevs > (TARGET_NUM_VDEVS(sc) - 1)) {
22892 		printf("%s: failed to create vdev %u, reached vdev limit %d\n",
22893 		    sc->sc_dev.dv_xname, sc->num_created_vdevs,
22894 		    TARGET_NUM_VDEVS(sc));
22895 		ret = EBUSY;
22896 		goto err;
22897 	}
22898 
22899 	arvif = qwx_vif_alloc(sc);
22900 	if (arvif == NULL) {
22901 		ret = ENOMEM;
22902 		goto err;
22903 	}
22904 #if 0
22905 	INIT_DELAYED_WORK(&arvif->connection_loss_work,
22906 			  ath11k_mac_vif_sta_connection_loss_work);
22907 	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
22908 		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
22909 		arvif->bitrate_mask.control[i].gi = 0;
22910 		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
22911 		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
22912 		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
22913 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
22914 		memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
22915 		       sizeof(arvif->bitrate_mask.control[i].he_mcs));
22916 	}
22917 #endif
22918 
22919 	if (sc->free_vdev_map == 0) {
22920 		printf("%s: cannot add interface; all vdevs are busy\n",
22921 		    sc->sc_dev.dv_xname);
22922 		ret = EBUSY;
22923 		goto err;
22924 	}
22925 	bit = ffs(sc->free_vdev_map) - 1;
22926 
22927 	arvif->vdev_id = bit;
22928 	arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
22929 
22930 	switch (ic->ic_opmode) {
22931 	case IEEE80211_M_STA:
22932 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
22933 		break;
22934 #if 0
22935 	case NL80211_IFTYPE_MESH_POINT:
22936 		arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
22937 		fallthrough;
22938 	case NL80211_IFTYPE_AP:
22939 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
22940 		break;
22941 	case NL80211_IFTYPE_MONITOR:
22942 		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
22943 		ar->monitor_vdev_id = bit;
22944 		break;
22945 #endif
22946 	default:
22947 		printf("%s: invalid operating mode %d\n",
22948 		    sc->sc_dev.dv_xname, ic->ic_opmode);
22949 		ret = EINVAL;
22950 		goto err;
22951 	}
22952 
22953 	DNPRINTF(QWX_D_MAC,
22954 	    "%s: add interface id %d type %d subtype %d map 0x%x\n",
22955 	    __func__, arvif->vdev_id, arvif->vdev_type,
22956 	    arvif->vdev_subtype, sc->free_vdev_map);
22957 
22958 	ret = qwx_mac_setup_vdev_create_params(arvif, pdev, &vdev_param);
22959 	if (ret) {
22960 		printf("%s: failed to create vdev parameters %d: %d\n",
22961 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22962 		goto err;
22963 	}
22964 
22965 	ret = qwx_wmi_vdev_create(sc, ic->ic_myaddr, &vdev_param);
22966 	if (ret) {
22967 		printf("%s: failed to create WMI vdev %d %s: %d\n",
22968 		    sc->sc_dev.dv_xname, arvif->vdev_id,
22969 		    ether_sprintf(ic->ic_myaddr), ret);
22970 		goto err;
22971 	}
22972 
22973 	sc->num_created_vdevs++;
22974 	DNPRINTF(QWX_D_MAC, "%s: vdev %s created, vdev_id %d\n", __func__,
22975 	    ether_sprintf(ic->ic_myaddr), arvif->vdev_id);
22976 	sc->allocated_vdev_map |= 1U << arvif->vdev_id;
22977 	sc->free_vdev_map &= ~(1U << arvif->vdev_id);
22978 #ifdef notyet
22979 	spin_lock_bh(&ar->data_lock);
22980 #endif
22981 	TAILQ_INSERT_TAIL(&sc->vif_list, arvif, entry);
22982 #ifdef notyet
22983 	spin_unlock_bh(&ar->data_lock);
22984 #endif
22985 	ret = qwx_mac_op_update_vif_offload(sc, pdev, arvif);
22986 	if (ret)
22987 		goto err_vdev_del;
22988 
22989 	nss = qwx_get_num_chains(sc->cfg_tx_chainmask) ? : 1;
22990 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
22991 	    WMI_VDEV_PARAM_NSS, nss);
22992 	if (ret) {
22993 		printf("%s: failed to set vdev %d chainmask 0x%x, nss %d: %d\n",
22994 		    sc->sc_dev.dv_xname, arvif->vdev_id, sc->cfg_tx_chainmask,
22995 		    nss, ret);
22996 		goto err_vdev_del;
22997 	}
22998 
22999 	switch (arvif->vdev_type) {
23000 #if 0
23001 	case WMI_VDEV_TYPE_AP:
23002 		peer_param.vdev_id = arvif->vdev_id;
23003 		peer_param.peer_addr = vif->addr;
23004 		peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
23005 		ret = ath11k_peer_create(ar, arvif, NULL, &peer_param);
23006 		if (ret) {
23007 			ath11k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
23008 				    arvif->vdev_id, ret);
23009 			goto err_vdev_del;
23010 		}
23011 
23012 		ret = ath11k_mac_set_kickout(arvif);
23013 		if (ret) {
23014 			ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
23015 				    arvif->vdev_id, ret);
23016 			goto err_peer_del;
23017 		}
23018 
23019 		ath11k_mac_11d_scan_stop_all(ar->ab);
23020 		break;
23021 #endif
23022 	case WMI_VDEV_TYPE_STA:
23023 		param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
23024 		param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
23025 		ret = qwx_wmi_set_sta_ps_param(sc, arvif->vdev_id,
23026 		    pdev->pdev_id, param_id, param_value);
23027 		if (ret) {
23028 			printf("%s: failed to set vdev %d RX wake policy: %d\n",
23029 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23030 			goto err_peer_del;
23031 		}
23032 
23033 		param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
23034 		param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
23035 		ret = qwx_wmi_set_sta_ps_param(sc, arvif->vdev_id,
23036 		    pdev->pdev_id, param_id, param_value);
23037 		if (ret) {
23038 			printf("%s: failed to set vdev %d "
23039 			    "TX wake threshold: %d\n",
23040 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23041 			goto err_peer_del;
23042 		}
23043 
23044 		param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
23045 		param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
23046 		ret = qwx_wmi_set_sta_ps_param(sc, arvif->vdev_id,
23047 		    pdev->pdev_id, param_id, param_value);
23048 		if (ret) {
23049 			printf("%s: failed to set vdev %d pspoll count: %d\n",
23050 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23051 			goto err_peer_del;
23052 		}
23053 
23054 		ret = qwx_wmi_pdev_set_ps_mode(sc, arvif->vdev_id,
23055 		    pdev->pdev_id, WMI_STA_PS_MODE_DISABLED);
23056 		if (ret) {
23057 			printf("%s: failed to disable vdev %d ps mode: %d\n",
23058 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23059 			goto err_peer_del;
23060 		}
23061 
23062 		if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
23063 			sc->completed_11d_scan = 0;
23064 			sc->state_11d = ATH11K_11D_PREPARING;
23065 		}
23066 		break;
23067 #if 0
23068 	case WMI_VDEV_TYPE_MONITOR:
23069 		set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
23070 		break;
23071 #endif
23072 	default:
23073 		printf("%s: invalid vdev type %d\n",
23074 		    sc->sc_dev.dv_xname, arvif->vdev_type);
23075 		ret = EINVAL;
23076 		goto err;
23077 	}
23078 
23079 	arvif->txpower = 40;
23080 	ret = qwx_mac_txpower_recalc(sc, pdev);
23081 	if (ret)
23082 		goto err_peer_del;
23083 
23084 	param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
23085 	param_value = ic->ic_rtsthreshold;
23086 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
23087 	    param_id, param_value);
23088 	if (ret) {
23089 		printf("%s: failed to set rts threshold for vdev %d: %d\n",
23090 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23091 		goto err_peer_del;
23092 	}
23093 
23094 	qwx_dp_vdev_tx_attach(sc, pdev, arvif);
23095 #if 0
23096 	if (vif->type != NL80211_IFTYPE_MONITOR &&
23097 	    test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
23098 		ret = ath11k_mac_monitor_vdev_create(ar);
23099 		if (ret)
23100 			ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
23101 				    ret);
23102 	}
23103 
23104 	mutex_unlock(&ar->conf_mutex);
23105 #endif
23106 	return 0;
23107 
23108 err_peer_del:
23109 #if 0
23110 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
23111 		fbret = qwx_peer_delete(sc, arvif->vdev_id, vif->addr);
23112 		if (fbret) {
23113 			printf("%s: fallback fail to delete peer addr %pM "
23114 			    "vdev_id %d ret %d\n", sc->sc_dev.dv_xname,
23115 			    vif->addr, arvif->vdev_id, fbret);
23116 			goto err;
23117 		}
23118 	}
23119 #endif
23120 err_vdev_del:
23121 	qwx_mac_vdev_delete(sc, arvif);
23122 #ifdef notyet
23123 	spin_lock_bh(&ar->data_lock);
23124 #endif
23125 	TAILQ_REMOVE(&sc->vif_list, arvif, entry);
23126 #ifdef notyet
23127 	spin_unlock_bh(&ar->data_lock);
23128 #endif
23129 
23130 err:
23131 #ifdef notyet
23132 	mutex_unlock(&ar->conf_mutex);
23133 #endif
23134 	qwx_vif_free(sc, arvif);
23135 	return ret;
23136 }
23137 
23138 int
23139 qwx_mac_start(struct qwx_softc *sc)
23140 {
23141 	struct qwx_pdev *pdev;
23142 	int i, error;
23143 
23144 	for (i = 0; i < sc->num_radios; i++) {
23145 		pdev = &sc->pdevs[i];
23146 		error = qwx_mac_op_start(pdev);
23147 		if (error)
23148 			return error;
23149 
23150 		error = qwx_mac_op_add_interface(pdev);
23151 		if (error)
23152 			return error;
23153 	}
23154 
23155 	return 0;
23156 }
23157 
/*
 * Task which re-initializes the interface: stop it if it is running,
 * then bring it back up if it is administratively up. Runs with
 * network interrupts blocked and the ioctl lock held.
 */
void
qwx_init_task(void *arg)
{
	struct qwx_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s = splnet();
	rw_enter_write(&sc->ioctl_rwl);

	if (ifp->if_flags & IFF_RUNNING)
		qwx_stop(ifp);

	/* Restart only if the interface is up but no longer running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		qwx_init(ifp);

	rw_exit(&sc->ioctl_rwl);
	splx(s);
}
23175 
/*
 * Kick off a firmware-offloaded 11d (country detection) scan on the
 * given vif. Does nothing unless the firmware supports 11d offload,
 * no other vdev is already scanning, and we are in STA mode. A
 * lingering PREPARING state is resolved to RUNNING on success or back
 * to IDLE otherwise.
 */
void
qwx_mac_11d_scan_start(struct qwx_softc *sc, struct qwx_vif *arvif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct wmi_11d_scan_start_params param;
	int ret;
#ifdef notyet
	mutex_lock(&ar->ab->vdev_id_11d_lock);
#endif
	DNPRINTF(QWX_D_MAC, "%s: vdev id for 11d scan %d\n", __func__,
	    sc->vdev_id_11d_scan);
#if 0
	if (ar->regdom_set_by_user)
		goto fin;
#endif
	/* Another vdev is already running the 11d scan. */
	if (sc->vdev_id_11d_scan != QWX_11D_INVALID_VDEV_ID)
		goto fin;

	if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD))
		goto fin;

	if (ic->ic_opmode != IEEE80211_M_STA)
		goto fin;

	param.vdev_id = arvif->vdev_id;
	param.start_interval_msec = 0;
	param.scan_period_msec = QWX_SCAN_11D_INTERVAL;

	DNPRINTF(QWX_D_MAC, "%s: start 11d scan\n", __func__);

	ret = qwx_wmi_send_11d_scan_start_cmd(sc, &param,
	   0 /* TODO: derive pdev ID from arvif somehow? */);
	if (ret) {
		/* ESHUTDOWN means the device is going away; stay quiet. */
		if (ret != ESHUTDOWN) {
			printf("%s: failed to start 11d scan; vdev: %d "
			    "ret: %d\n", sc->sc_dev.dv_xname,
			    arvif->vdev_id, ret);
		}
	} else {
		sc->vdev_id_11d_scan = arvif->vdev_id;
		if (sc->state_11d == ATH11K_11D_PREPARING)
			sc->state_11d = ATH11K_11D_RUNNING;
	}
fin:
	/* If the scan did not actually start, fall back to idle. */
	if (sc->state_11d == ATH11K_11D_PREPARING) {
		sc->state_11d = ATH11K_11D_IDLE;
		sc->completed_11d_scan = 0;
	}
#ifdef notyet
	mutex_unlock(&ar->ab->vdev_id_11d_lock);
#endif
}
23228 
/*
 * Wind down the scan state machine and notify net80211 that scanning
 * has ended. Safe to call in any scan state: STARTING, RUNNING and
 * ABORTING are reset to IDLE; calling in IDLE is a no-op.
 */
void
qwx_mac_scan_finish(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	enum ath11k_scan_state ostate;

#ifdef notyet
	lockdep_assert_held(&ar->data_lock);
#endif
	ostate = sc->scan.state;
	switch (ostate) {
	case ATH11K_SCAN_IDLE:
		break;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
#if 0
		if (ar->scan.is_roc && ar->scan.roc_notify)
			ieee80211_remain_on_channel_expired(ar->hw);
		fallthrough;
#endif
		/* FALLTHROUGH */
	case ATH11K_SCAN_STARTING:
		sc->scan.state = ATH11K_SCAN_IDLE;
		sc->scan_channel = 0;
		sc->scan.roc_freq = 0;

		timeout_del(&sc->scan.timeout);
		/* remain-on-channel operations do not end a net80211 scan */
		if (!sc->scan.is_roc)
			ieee80211_end_scan(ifp);
#if 0
		complete_all(&ar->scan.completed);
#endif
		break;
	}
}
23264 
23265 int
23266 qwx_mac_get_rate_hw_value(struct ieee80211com *ic,
23267     struct ieee80211_node *ni, int bitrate)
23268 {
23269 	uint32_t preamble;
23270 	uint16_t hw_value;
23271 	int shortpre = 0;
23272 
23273 	if (IEEE80211_IS_CHAN_CCK(ni->ni_chan))
23274 		preamble = WMI_RATE_PREAMBLE_CCK;
23275 	else
23276 		preamble = WMI_RATE_PREAMBLE_OFDM;
23277 
23278 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
23279 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
23280 		shortpre = 1;
23281 
23282 	switch (bitrate) {
23283 	case 2:
23284 		hw_value = ATH11K_HW_RATE_CCK_LP_1M;
23285 		break;
23286 	case 4:
23287 		if (shortpre)
23288 			hw_value = ATH11K_HW_RATE_CCK_SP_2M;
23289 		else
23290 			hw_value = ATH11K_HW_RATE_CCK_LP_2M;
23291 		break;
23292 	case 11:
23293 		if (shortpre)
23294 			hw_value = ATH11K_HW_RATE_CCK_SP_5_5M;
23295 		else
23296 			hw_value = ATH11K_HW_RATE_CCK_LP_5_5M;
23297 		break;
23298 	case 22:
23299 		if (shortpre)
23300 			hw_value = ATH11K_HW_RATE_CCK_SP_11M;
23301 		else
23302 			hw_value = ATH11K_HW_RATE_CCK_LP_11M;
23303 		break;
23304 	case 12:
23305 		hw_value = ATH11K_HW_RATE_OFDM_6M;
23306 		break;
23307 	case 18:
23308 		hw_value = ATH11K_HW_RATE_OFDM_9M;
23309 		break;
23310 	case 24:
23311 		hw_value = ATH11K_HW_RATE_OFDM_12M;
23312 		break;
23313 	case 36:
23314 		hw_value = ATH11K_HW_RATE_OFDM_18M;
23315 		break;
23316 	case 48:
23317 		hw_value = ATH11K_HW_RATE_OFDM_24M;
23318 		break;
23319 	case 72:
23320 		hw_value = ATH11K_HW_RATE_OFDM_36M;
23321 		break;
23322 	case 96:
23323 		hw_value = ATH11K_HW_RATE_OFDM_48M;
23324 		break;
23325 	case 108:
23326 		hw_value = ATH11K_HW_RATE_OFDM_54M;
23327 		break;
23328 	default:
23329 		return -1;
23330 	}
23331 
23332 	return ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
23333 }
23334 
/*
 * Delete the firmware peer entry with the given MAC address on a
 * vdev, then wait (up to 3 seconds each) for the peer unmap event and
 * the delete-response event before decrementing the peer count.
 * Returns 0 on success, or an errno-style error on command failure or
 * timeout.
 */
int
qwx_peer_delete(struct qwx_softc *sc, uint32_t vdev_id, uint8_t pdev_id,
    uint8_t *addr)
{
	int ret;

	/* Cleared here; the WMI event path is expected to set these and
	 * wake us up via the tsleep channels below. */
	sc->peer_mapped = 0;
	sc->peer_delete_done = 0;

	ret = qwx_wmi_send_peer_delete_cmd(sc, addr, vdev_id, pdev_id);
	if (ret) {
		printf("%s: failed to delete peer vdev_id %d addr %s ret %d\n",
		    sc->sc_dev.dv_xname, vdev_id, ether_sprintf(addr), ret);
		return ret;
	}

	while (!sc->peer_mapped) {
		ret = tsleep_nsec(&sc->peer_mapped, 0, "qwxpeer",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: peer delete unmap timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	while (!sc->peer_delete_done) {
		ret = tsleep_nsec(&sc->peer_delete_done, 0, "qwxpeerd",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: peer delete command timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	sc->num_peers--;
	return 0;
}
23374 
/*
 * Create a firmware peer entry for the given node and wait (up to 3
 * seconds) for the peer map event. In STA mode the AST hash and index
 * reported by the firmware are recorded in the vif.
 * Returns 0 on success, ENOBUFS when the firmware peer table is full,
 * EINVAL if the peer already exists on this vdev, or another
 * errno-style error.
 */
int
qwx_peer_create(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
    struct ieee80211_node *ni, struct peer_create_params *param)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_node *nq = (struct qwx_node *)ni;
	struct ath11k_peer *peer;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	if (sc->num_peers > (TARGET_NUM_PEERS_PDEV(sc) - 1)) {
		DPRINTF("%s: failed to create peer due to insufficient "
		    "peer entry resource in firmware\n", __func__);
		return ENOBUFS;
	}
#ifdef notyet
	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);
#endif
	/*
	 * NOTE(review): peer points into nq and thus can never be NULL;
	 * the check below appears vestigial from the Linux code where
	 * the peer was looked up in a hash table — consider removing.
	 */
	peer = &nq->peer;
	if (peer) {
		/* Reject duplicate peer creation on the same vdev. */
		if (peer->peer_id != HAL_INVALID_PEERID &&
		    peer->vdev_id == param->vdev_id) {
#ifdef notyet
			spin_unlock_bh(&ar->ab->base_lock);
			mutex_unlock(&ar->ab->tbl_mtx_lock);
#endif
			return EINVAL;
		}
#if 0
		/* Assume sta is transitioning to another band.
		 * Remove here the peer from rhash.
		 */
		ath11k_peer_rhash_delete(ar->ab, peer);
#endif
	}
#ifdef notyet
	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);
#endif
	/* Cleared here; the peer map event path is expected to set it. */
	sc->peer_mapped = 0;

	ret = qwx_wmi_send_peer_create_cmd(sc, pdev_id, param);
	if (ret) {
		printf("%s: failed to send peer create vdev_id %d ret %d\n",
		    sc->sc_dev.dv_xname, param->vdev_id, ret);
		return ret;
	}

	while (!sc->peer_mapped) {
		ret = tsleep_nsec(&sc->peer_mapped, 0, "qwxpeer",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: peer create command timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

#ifdef notyet
	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);
#endif
#if 0
	peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    param->peer_addr, param->vdev_id);

		ret = -ENOENT;
		goto cleanup;
	}

	ret = ath11k_peer_rhash_add(ar->ab, peer);
	if (ret) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		goto cleanup;
	}
#endif
	peer->pdev_id = pdev_id;
#if 0
	peer->sta = sta;
#endif
	if (ic->ic_opmode == IEEE80211_M_STA) {
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}
#if 0
	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	if (sta) {
		struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
		arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
				       FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
						  peer->peer_id);

		/* set HTT extension valid bit to 0 by default */
		arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
	}
#endif
	sc->num_peers++;
#ifdef notyet
	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);
#endif
	return 0;
#if 0
cleanup:
	int fbret = qwx_peer_delete(sc, param->vdev_id, param->peer_addr);
	if (fbret) {
		printf("%s: failed peer %s delete vdev_id %d fallback ret %d\n",
		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
		    param->vdev_id, fbret);
	}

	return ret;
#endif
}
23498 
/*
 * Submit a REO command to the hardware command ring, optionally
 * registering a completion callback. When a callback is given, a
 * snapshot of rx_tid is kept on dp->reo_cmd_list so the status
 * handler can invoke it later.
 *
 * NOTE(review): error returns mix conventions — a negative HAL error
 * from qwx_hal_reo_cmd_send() is passed through as-is, while local
 * failures use positive errno values (ESHUTDOWN/EINVAL/ENOMEM).
 * Callers in this file appear to only test for non-zero.
 */
int
qwx_dp_tx_send_reo_cmd(struct qwx_softc *sc, struct dp_rx_tid *rx_tid,
    enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd,
    void (*cb)(struct qwx_dp *, void *, enum hal_reo_cmd_status))
{
	struct qwx_dp *dp = &sc->dp;
	struct dp_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	/* Refuse new commands while the device is being torn down. */
	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;

	cmd_ring = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = qwx_hal_reo_cmd_send(sc, cmd_ring, type, cmd);
	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors has cmd_num starting from 1 */
	if (cmd_num == 0)
		return EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = malloc(sizeof(*dp_cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dp_cmd)
		return ENOMEM;

	/* Snapshot the TID state; the caller's rx_tid may change later. */
	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;
#ifdef notyet
	spin_lock_bh(&dp->reo_cmd_lock);
#endif
	TAILQ_INSERT_TAIL(&dp->reo_cmd_list, dp_cmd, entry);
#ifdef notyet
	spin_unlock_bh(&dp->reo_cmd_lock);
#endif
	return 0;
}
23545 
23546 uint32_t
23547 qwx_hal_reo_qdesc_size(uint32_t ba_window_size, uint8_t tid)
23548 {
23549 	uint32_t num_ext_desc;
23550 
23551 	if (ba_window_size <= 1) {
23552 		if (tid != HAL_DESC_REO_NON_QOS_TID)
23553 			num_ext_desc = 1;
23554 		else
23555 			num_ext_desc = 0;
23556 	} else if (ba_window_size <= 105) {
23557 		num_ext_desc = 1;
23558 	} else if (ba_window_size <= 210) {
23559 		num_ext_desc = 2;
23560 	} else {
23561 		num_ext_desc = 3;
23562 	}
23563 
23564 	return sizeof(struct hal_rx_reo_queue) +
23565 		(num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
23566 }
23567 
23568 void
23569 qwx_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, uint8_t owner, uint8_t buffer_type, uint32_t magic)
23570 {
23571 	hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
23572 		     FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
23573 
23574 	/* Magic pattern in reserved bits for debugging */
23575 	hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
23576 }
23577 
/*
 * Initialize a REO hardware rx queue descriptor for a TID: BA window
 * size, optional PN (replay counter) checking, and starting sequence
 * number. QoS TIDs are followed by three extension descriptors sized
 * for the maximum BA window (see the TODO below); the non-QoS TID has
 * none.
 */
void
qwx_hal_reo_qdesc_setup(void *vaddr, int tid, uint32_t ba_window_size,
    uint32_t start_seq, enum hal_pn_type type)
{
	struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
	struct hal_rx_reo_queue_ext *ext_desc;

	memset(qdesc, 0, sizeof(*qdesc));

	qwx_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);

	qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);

	qdesc->info0 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
	    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
	    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, qwx_tid_to_ac(tid));

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* QoS TIDs get a minimum window of 2; only the non-QoS TID can
	 * keep a window of 1 and have the retry bit set below. */
	if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
		ba_window_size++;

	if (ba_window_size == 1)
		qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);

	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
				   ba_window_size - 1);
	switch (type) {
	case HAL_PN_TYPE_NONE:
	case HAL_PN_TYPE_WAPI_EVEN:
	case HAL_PN_TYPE_WAPI_UNEVEN:
		break;
	case HAL_PN_TYPE_WPA:
		/* Enable 48-bit packet number (replay) checking. */
		qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
		    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
		    HAL_RX_REO_QUEUE_PN_SIZE_48);
		break;
	}

	/* TODO: Set Ignore ampdu flags based on BA window size and/or
	 * AMPDU capabilities
	 */
	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);

	qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);

	/* NOTE(review): plain assignment (not |=) clobbers info1 here;
	 * this mirrors the upstream Linux ath11k code — confirm intent. */
	if (start_seq <= 0xfff)
		qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
		    start_seq);

	/* The non-QoS TID has no extension descriptors. */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		return;

	ext_desc = qdesc->ext_desc;

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
}
23655 
23656 void
23657 qwx_dp_reo_cmd_free(struct qwx_dp *dp, void *ctx,
23658     enum hal_reo_cmd_status status)
23659 {
23660 	struct qwx_softc *sc = dp->sc;
23661 	struct dp_rx_tid *rx_tid = ctx;
23662 
23663 	if (status != HAL_REO_CMD_SUCCESS)
23664 		printf("%s: failed to flush rx tid hw desc, tid %d status %d\n",
23665 		    sc->sc_dev.dv_xname, rx_tid->tid, status);
23666 
23667 	if (rx_tid->mem) {
23668 		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
23669 		rx_tid->mem = NULL;
23670 		rx_tid->vaddr = NULL;
23671 		rx_tid->paddr = 0ULL;
23672 		rx_tid->size = 0;
23673 	}
23674 }
23675 
/*
 * Flush an rx TID's hardware queue descriptor out of the REO cache.
 * The trailing extension descriptors are flushed first, one
 * descriptor-sized chunk at a time, then the base descriptor is
 * flushed with a status request whose completion handler
 * (qwx_dp_reo_cmd_free) releases the DMA memory. If queueing that
 * final command fails, the memory is released immediately instead.
 */
void
qwx_dp_reo_cache_flush(struct qwx_softc *sc, struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = qwx_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	/* Flush from the end of the descriptor towards the front. */
	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = (rx_tid->paddr + tot_desc_sz) & 0xffffffff;
		cmd.addr_hi = rx_tid->paddr >> 32;
		ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid,
		    HAL_REO_CMD_FLUSH_CACHE, &cmd, NULL);
		if (ret) {
			printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE, "
			    "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid,
			    ret);
		}
	}

	/* Final flush of the base descriptor, with completion callback. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
	cmd.addr_hi = rx_tid->paddr >> 32;
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_FLUSH_CACHE,
	    &cmd, qwx_dp_reo_cmd_free);
	if (ret) {
		printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE cmd, "
		    "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid, ret);
		/* The callback will never run; free the memory now. */
		if (rx_tid->mem) {
			qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
			rx_tid->mem = NULL;
			rx_tid->vaddr = NULL;
			rx_tid->paddr = 0ULL;
			rx_tid->size = 0;
		}
	}
}
23717 
void
qwx_dp_rx_tid_del_func(struct qwx_dp *dp, void *ctx,
    enum hal_reo_cmd_status status)
{
	struct qwx_softc *sc = dp->sc;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;
	uint64_t now;

	/*
	 * Completion handler for the HAL_REO_CMD_UPDATE_RX_QUEUE command
	 * sent by qwx_peer_rx_tid_delete().  The TID's queue descriptor
	 * cannot be freed right away: it is parked on a deferred flush
	 * list and only flushed/freed once it has aged past
	 * DP_REO_DESC_FREE_TIMEOUT_MS or the list exceeds
	 * DP_REO_DESC_FREE_THRESHOLD entries.
	 */
	if (status == HAL_REO_CMD_DRAIN) {
		/* Firmware is draining commands; free the descriptor now. */
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		printf("%s: failed to delete rx tid %d hw descriptor %d\n",
		    sc->sc_dev.dv_xname, rx_tid->tid, status);
		return;
	}

	elem = malloc(sizeof(*elem), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!elem)
		goto free_desc;

	now = getnsecuptime();
	elem->ts = now;
	/* The list element takes over ownership of rx_tid's DMA memory. */
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	rx_tid->mem = NULL;
	rx_tid->vaddr = NULL;
	rx_tid->paddr = 0ULL;
	rx_tid->size = 0;

#ifdef notyet
	spin_lock_bh(&dp->reo_cmd_lock);
#endif
	TAILQ_INSERT_TAIL(&dp->reo_cmd_cache_flush_list, elem, entry);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	TAILQ_FOREACH_SAFE(elem, &dp->reo_cmd_cache_flush_list, entry, tmp) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    now >= elem->ts + MSEC_TO_NSEC(DP_REO_DESC_FREE_TIMEOUT_MS)) {
			TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, elem, entry);
			dp->reo_cmd_cache_flush_count--;
#ifdef notyet
			spin_unlock_bh(&dp->reo_cmd_lock);
#endif
			qwx_dp_reo_cache_flush(sc, &elem->data);
			free(elem, M_DEVBUF, sizeof(*elem));
#ifdef notyet
			spin_lock_bh(&dp->reo_cmd_lock);
#endif
		}
	}
#ifdef notyet
	spin_unlock_bh(&dp->reo_cmd_lock);
#endif
	return;
free_desc:
	if (rx_tid->mem) {
		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
		rx_tid->mem = NULL;
		rx_tid->vaddr = NULL;
		rx_tid->paddr = 0ULL;
		rx_tid->size = 0;
	}
}
23784 
23785 void
23786 qwx_peer_rx_tid_delete(struct qwx_softc *sc, struct ath11k_peer *peer,
23787     uint8_t tid)
23788 {
23789 	struct ath11k_hal_reo_cmd cmd = {0};
23790 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
23791 	int ret;
23792 
23793 	if (!rx_tid->active)
23794 		return;
23795 
23796 	rx_tid->active = 0;
23797 
23798 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
23799 	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
23800 	cmd.addr_hi = rx_tid->paddr >> 32;
23801 	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
23802 	ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
23803 	    &cmd, qwx_dp_rx_tid_del_func);
23804 	if (ret) {
23805 		if (ret != ESHUTDOWN) {
23806 			printf("%s: failed to send "
23807 			    "HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
23808 			    sc->sc_dev.dv_xname, tid, ret);
23809 		}
23810 
23811 		if (rx_tid->mem) {
23812 			qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
23813 			rx_tid->mem = NULL;
23814 			rx_tid->vaddr = NULL;
23815 			rx_tid->paddr = 0ULL;
23816 			rx_tid->size = 0;
23817 		}
23818 	}
23819 }
23820 
void
qwx_dp_rx_frags_cleanup(struct qwx_softc *sc, struct dp_rx_tid *rx_tid,
    int rel_link_desc)
{
	/*
	 * Reset the fragment reassembly state of one TID.  The Linux
	 * driver also returns the destination ring descriptor and purges
	 * queued fragments; those parts are not ported yet (see #if 0).
	 * rel_link_desc is currently unused for the same reason.
	 */
#ifdef notyet
	lockdep_assert_held(&ab->base_lock);
#endif
#if 0
	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}
#endif
	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
#if 0
	__skb_queue_purge(&rx_tid->rx_frags);
#endif
}
23844 
23845 void
23846 qwx_peer_frags_flush(struct qwx_softc *sc, struct ath11k_peer *peer)
23847 {
23848 	struct dp_rx_tid *rx_tid;
23849 	int i;
23850 #ifdef notyet
23851 	lockdep_assert_held(&ar->ab->base_lock);
23852 #endif
23853 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
23854 		rx_tid = &peer->rx_tid[i];
23855 
23856 		qwx_dp_rx_frags_cleanup(sc, rx_tid, 1);
23857 #if 0
23858 		spin_unlock_bh(&ar->ab->base_lock);
23859 		del_timer_sync(&rx_tid->frag_timer);
23860 		spin_lock_bh(&ar->ab->base_lock);
23861 #endif
23862 	}
23863 }
23864 
23865 void
23866 qwx_peer_rx_tid_cleanup(struct qwx_softc *sc, struct ath11k_peer *peer)
23867 {
23868 	struct dp_rx_tid *rx_tid;
23869 	int i;
23870 #ifdef notyet
23871 	lockdep_assert_held(&ar->ab->base_lock);
23872 #endif
23873 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
23874 		rx_tid = &peer->rx_tid[i];
23875 
23876 		qwx_peer_rx_tid_delete(sc, peer, i);
23877 		qwx_dp_rx_frags_cleanup(sc, rx_tid, 1);
23878 #if 0
23879 		spin_unlock_bh(&ar->ab->base_lock);
23880 		del_timer_sync(&rx_tid->frag_timer);
23881 		spin_lock_bh(&ar->ab->base_lock);
23882 #endif
23883 	}
23884 }
23885 
23886 int
23887 qwx_peer_rx_tid_reo_update(struct qwx_softc *sc, struct ath11k_peer *peer,
23888     struct dp_rx_tid *rx_tid, uint32_t ba_win_sz, uint16_t ssn,
23889     int update_ssn)
23890 {
23891 	struct ath11k_hal_reo_cmd cmd = {0};
23892 	int ret;
23893 
23894 	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
23895 	cmd.addr_hi = rx_tid->paddr >> 32;
23896 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
23897 	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
23898 	cmd.ba_window_size = ba_win_sz;
23899 
23900 	if (update_ssn) {
23901 		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
23902 		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
23903 	}
23904 
23905 	ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
23906 	    &cmd, NULL);
23907 	if (ret) {
23908 		printf("%s: failed to update rx tid queue, tid %d (%d)\n",
23909 		    sc->sc_dev.dv_xname, rx_tid->tid, ret);
23910 		return ret;
23911 	}
23912 
23913 	rx_tid->ba_win_sz = ba_win_sz;
23914 
23915 	return 0;
23916 }
23917 
23918 void
23919 qwx_dp_rx_tid_mem_free(struct qwx_softc *sc, struct ieee80211_node *ni,
23920     int vdev_id, uint8_t tid)
23921 {
23922 	struct qwx_node *nq = (struct qwx_node *)ni;
23923 	struct ath11k_peer *peer = &nq->peer;
23924 	struct dp_rx_tid *rx_tid;
23925 #ifdef notyet
23926 	spin_lock_bh(&ab->base_lock);
23927 #endif
23928 	rx_tid = &peer->rx_tid[tid];
23929 
23930 	if (rx_tid->mem) {
23931 		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
23932 		rx_tid->mem = NULL;
23933 		rx_tid->vaddr = NULL;
23934 		rx_tid->paddr = 0ULL;
23935 		rx_tid->size = 0;
23936 	}
23937 
23938 	rx_tid->active = 0;
23939 #ifdef notyet
23940 	spin_unlock_bh(&ab->base_lock);
23941 #endif
23942 }
23943 
23944 int
23945 qwx_peer_rx_tid_setup(struct qwx_softc *sc, struct ieee80211_node *ni,
23946     int vdev_id, int pdev_id, uint8_t tid, uint32_t ba_win_sz, uint16_t ssn,
23947     enum hal_pn_type pn_type)
23948 {
23949 	struct qwx_node *nq = (struct qwx_node *)ni;
23950 	struct ath11k_peer *peer = &nq->peer;
23951 	struct dp_rx_tid *rx_tid;
23952 	uint32_t hw_desc_sz;
23953 	void *vaddr;
23954 	uint64_t paddr;
23955 	int ret;
23956 #ifdef notyet
23957 	spin_lock_bh(&ab->base_lock);
23958 #endif
23959 	rx_tid = &peer->rx_tid[tid];
23960 	/* Update the tid queue if it is already setup */
23961 	if (rx_tid->active) {
23962 		paddr = rx_tid->paddr;
23963 		ret = qwx_peer_rx_tid_reo_update(sc, peer, rx_tid,
23964 		    ba_win_sz, ssn, 1);
23965 #ifdef notyet
23966 		spin_unlock_bh(&ab->base_lock);
23967 #endif
23968 		if (ret) {
23969 			printf("%s: failed to update reo for peer %s "
23970 			    "rx tid %d\n: %d", sc->sc_dev.dv_xname,
23971 			    ether_sprintf(ni->ni_macaddr), tid, ret);
23972 			return ret;
23973 		}
23974 
23975 		ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id,
23976 		    pdev_id, ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
23977 		if (ret)
23978 			printf("%s: failed to send wmi rx reorder queue "
23979 			    "for peer %s tid %d: %d\n", sc->sc_dev.dv_xname,
23980 			    ether_sprintf(ni->ni_macaddr), tid, ret);
23981 		return ret;
23982 	}
23983 
23984 	rx_tid->tid = tid;
23985 
23986 	rx_tid->ba_win_sz = ba_win_sz;
23987 
23988 	/* TODO: Optimize the memory allocation for qos tid based on
23989 	 * the actual BA window size in REO tid update path.
23990 	 */
23991 	if (tid == HAL_DESC_REO_NON_QOS_TID)
23992 		hw_desc_sz = qwx_hal_reo_qdesc_size(ba_win_sz, tid);
23993 	else
23994 		hw_desc_sz = qwx_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
23995 
23996 	rx_tid->mem = qwx_dmamem_alloc(sc->sc_dmat, hw_desc_sz,
23997 	    HAL_LINK_DESC_ALIGN);
23998 	if (rx_tid->mem == NULL) {
23999 #ifdef notyet
24000 		spin_unlock_bh(&ab->base_lock);
24001 #endif
24002 		return ENOMEM;
24003 	}
24004 
24005 	vaddr = QWX_DMA_KVA(rx_tid->mem);
24006 
24007 	qwx_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
24008 
24009 	paddr = QWX_DMA_DVA(rx_tid->mem);
24010 
24011 	rx_tid->vaddr = vaddr;
24012 	rx_tid->paddr = paddr;
24013 	rx_tid->size = hw_desc_sz;
24014 	rx_tid->active = 1;
24015 #ifdef notyet
24016 	spin_unlock_bh(&ab->base_lock);
24017 #endif
24018 	ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id, pdev_id,
24019 	    ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
24020 	if (ret) {
24021 		printf("%s: failed to setup rx reorder queue for peer %s "
24022 		    "tid %d: %d\n", sc->sc_dev.dv_xname,
24023 		    ether_sprintf(ni->ni_macaddr), tid, ret);
24024 		qwx_dp_rx_tid_mem_free(sc, ni, vdev_id, tid);
24025 	}
24026 
24027 	return ret;
24028 }
24029 
24030 int
24031 qwx_peer_rx_frag_setup(struct qwx_softc *sc, struct ieee80211_node *ni,
24032     int vdev_id)
24033 {
24034 	struct qwx_node *nq = (struct qwx_node *)ni;
24035 	struct ath11k_peer *peer = &nq->peer;
24036 	struct dp_rx_tid *rx_tid;
24037 	int i;
24038 #ifdef notyet
24039 	spin_lock_bh(&ab->base_lock);
24040 #endif
24041 	for (i = 0; i <= nitems(peer->rx_tid); i++) {
24042 		rx_tid = &peer->rx_tid[i];
24043 #if 0
24044 		rx_tid->ab = ab;
24045 		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
24046 #endif
24047 	}
24048 #if 0
24049 	peer->dp_setup_done = true;
24050 #endif
24051 #ifdef notyet
24052 	spin_unlock_bh(&ab->base_lock);
24053 #endif
24054 	return 0;
24055 }
24056 
int
qwx_dp_peer_setup(struct qwx_softc *sc, int vdev_id, int pdev_id,
    struct ieee80211_node *ni)
{
	struct qwx_node *nq = (struct qwx_node *)ni;
	struct ath11k_peer *peer = &nq->peer;
	uint32_t reo_dest;
	int ret = 0, tid;

	/*
	 * Set up the datapath for a newly created peer: choose its default
	 * REO destination ring, create a reorder queue for every TID, and
	 * initialize fragment reassembly state.  On partial failure, tear
	 * down the TID queues that were already created.
	 */
	/* reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = sc->pdev_dp.mac_id + 1;
	ret = qwx_wmi_set_peer_param(sc, ni->ni_macaddr, vdev_id, pdev_id,
	    WMI_PEER_SET_DEFAULT_ROUTING, DP_RX_HASH_ENABLE | (reo_dest << 1));
	if (ret) {
		printf("%s: failed to set default routing %d peer %s "
		    "vdev_id %d\n", sc->sc_dev.dv_xname, ret,
		    ether_sprintf(ni->ni_macaddr), vdev_id);
		return ret;
	}

	/* Initial BA window size of 1; grows later on ADDBA negotiation. */
	for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
		ret = qwx_peer_rx_tid_setup(sc, ni, vdev_id, pdev_id,
		    tid, 1, 0, HAL_PN_TYPE_NONE);
		if (ret) {
			printf("%s: failed to setup rxd tid queue for tid %d: %d\n",
			    sc->sc_dev.dv_xname, tid, ret);
			goto peer_clean;
		}
	}

	ret = qwx_peer_rx_frag_setup(sc, ni, vdev_id);
	if (ret) {
		printf("%s: failed to setup rx defrag context\n",
		    sc->sc_dev.dv_xname);
		/*
		 * The loop above ran to completion, so tid is one past the
		 * last valid TID; step back so cleanup starts at the last
		 * TID actually set up.
		 */
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	/* Tear down TID queues 0..tid (those created before the failure). */
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
#if 0
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}
#endif
	for (; tid >= 0; tid--)
		qwx_peer_rx_tid_delete(sc, peer, tid);
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
	return ret;
}
24118 
int
qwx_dp_peer_rx_pn_replay_config(struct qwx_softc *sc, struct qwx_vif *arvif,
    struct ieee80211_node *ni, struct ieee80211_key *k, int delete_key)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct qwx_node *nq = (struct qwx_node *)ni;
	struct ath11k_peer *peer = &nq->peer;
	struct dp_rx_tid *rx_tid;
	uint8_t tid;
	int ret = 0;

	/*
	 * Enable (or, with delete_key set, disable) hardware PN/TSC replay
	 * detection on every active RX TID queue of this peer.  Returns 0
	 * on success or the first errno encountered.
	 */

	/*
	 * NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use net80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (k->k_flags & IEEE80211_KEY_GROUP)
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (k->k_cipher) {
	case IEEE80211_CIPHER_TKIP:
	case IEEE80211_CIPHER_CCMP:
#if 0
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
#endif
		/* When deleting a key, leave pn_check off to disable it. */
		if (!delete_key) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;	/* 48-bit packet number */
		}
		break;
	default:
		printf("%s: cipher %u is not supported\n",
		    sc->sc_dev.dv_xname, k->k_cipher);
		return EOPNOTSUPP;
	}

	/* Apply the same update command to each active TID queue. */
	for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = rx_tid->paddr & 0xffffffff;
		cmd.addr_hi = (rx_tid->paddr >> 32);
		ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid,
		    HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, NULL);
		if (ret) {
			printf("%s: failed to configure rx tid %d queue "
			    "for pn replay detection %d\n",
			    sc->sc_dev.dv_xname, tid, ret);
			break;
		}
	}

	return ret;
}
24182 
enum hal_tcl_encap_type
qwx_dp_tx_get_encap_type(struct qwx_softc *sc)
{
	/*
	 * Select the TX encapsulation type the TCL descriptor will carry:
	 * raw 802.11 frames when the driver runs in raw mode, native-wifi
	 * encapsulation otherwise.  Ethernet encap is not supported yet.
	 */
	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
		return HAL_TCL_ENCAP_TYPE_RAW;
#if 0
	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
		return HAL_TCL_ENCAP_TYPE_ETHERNET;
#endif
	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
24194 
24195 uint8_t
24196 qwx_dp_tx_get_tid(struct mbuf *m)
24197 {
24198 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
24199 	uint16_t qos = ieee80211_get_qos(wh);
24200 	uint8_t tid = qos & IEEE80211_QOS_TID;
24201 
24202 	return tid;
24203 }
24204 
void
qwx_hal_tx_cmd_desc_setup(struct qwx_softc *sc, void *cmd,
    struct hal_tx_info *ti)
{
	struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;

	/*
	 * Fill in a TCL data command descriptor from the software TX info.
	 * The buffer address is split across info0 (low bits) and info1
	 * (high bits above HAL_ADDR_MSB_REG_SHIFT); info1 also carries the
	 * return-buffer manager id and the software cookie used to match
	 * the TX completion back to our tx_data slot.
	 */
	tcl_cmd->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
	    ti->paddr);
	tcl_cmd->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
	    ((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
	tcl_cmd->buf_addr_info.info1 |= FIELD_PREP(
	    BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);

	/* Descriptor/encap/encrypt type and address-search configuration. */
	tcl_cmd->info0 =
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE, ti->encrypt_type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE, ti->search_type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN, ti->addr_search_flags) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM, ti->meta_data_flags);

	/* Frame length and payload offset, plus caller-provided flags. */
	tcl_cmd->info1 = ti->flags0 |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);

	tcl_cmd->info2 = ti->flags1 |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);

	tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
	    ti->dscp_tid_tbl_idx) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX, ti->bss_ast_idx) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM, ti->bss_ast_hash);
	tcl_cmd->info4 = 0;
#ifdef notyet
	if (ti->enable_mesh)
		ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd);
#endif
}
24245 
int
qwx_dp_tx(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
    struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_dp *dp = &sc->dp;
	struct hal_tx_info ti = {0};
	struct qwx_tx_data *tx_data;
	struct hal_srng *tcl_ring;
	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
	struct ieee80211_key *k = NULL;
	struct dp_tx_ring *tx_ring;
	void *hal_tcl_desc;
	uint8_t pool_id;
	uint8_t hal_ring_id;
	int ret, msdu_id, off;
	uint32_t ring_selector = 0;
	uint8_t ring_map = 0;

	/*
	 * Transmit one data frame: pick a TCL ring, reserve a tx_data slot,
	 * handle encryption (hardware raw-mode offload or software crypto),
	 * DMA-map the mbuf and enqueue a TCL data command descriptor.
	 * On success the driver owns the mbuf until TX completion; on
	 * error the mbuf is freed here and an errno is returned.
	 */
	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		m_freem(m);
		return ESHUTDOWN;
	}
#if 0
	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		     !ieee80211_is_data(hdr->frame_control)))
		return -ENOTSUPP;
#endif
	/* Only a single pool and ring are used so far. */
	pool_id = 0;
	ring_selector = 0;

	ti.ring_id = ring_selector % sc->hw_params.max_tx_ring;
	ti.rbm_id = sc->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;

	/* XXX ring_map is computed here but never read afterwards. */
	ring_map |= (1 << ti.ring_id);

	tx_ring = &dp->tx_ring[ti.ring_id];

	if (tx_ring->queued >= sc->hw_params.tx_ring_size) {
		m_freem(m);
		return ENOSPC;
	}

	/* Claim the next tx_data slot; a busy slot means the ring is full. */
	msdu_id = tx_ring->cur;
	tx_data = &tx_ring->data[msdu_id];
	if (tx_data->m != NULL) {
		m_freem(m);
		return ENOSPC;
	}

	/* Cookie used to find this slot again on TX completion. */
	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, pdev_id) |
	    FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, msdu_id) |
	    FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
	ti.encap_type = qwx_dp_tx_get_encap_type(sc);

	ti.meta_data_flags = arvif->tcl_metadata;

	if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
	    ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
		k = ieee80211_get_txkey(ic, wh, ni);
		if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags)) {
			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
		} else {
			/*
			 * Hardware crypto: reserve room for the MIC which
			 * the hardware will fill in.
			 */
			switch (k->k_cipher) {
			case IEEE80211_CIPHER_CCMP:
				ti.encrypt_type = HAL_ENCRYPT_TYPE_CCMP_128;
				if (m_makespace(m, m->m_pkthdr.len,
				    IEEE80211_CCMP_MICLEN, &off) == NULL) {
					m_freem(m);
					return ENOSPC;
				}
				break;
			case IEEE80211_CIPHER_TKIP:
				ti.encrypt_type = HAL_ENCRYPT_TYPE_TKIP_MIC;
				if (m_makespace(m, m->m_pkthdr.len,
				    IEEE80211_TKIP_MICLEN, &off) == NULL) {
					m_freem(m);
					return ENOSPC;
				}
				break;
			default:
				ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
				break;
			}
		}

		if (ti.encrypt_type == HAL_ENCRYPT_TYPE_OPEN) {
			/* Using software crypto. */
			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
				return ENOBUFS;
			/* 802.11 header may have moved. */
			wh = mtod(m, struct ieee80211_frame *);
		}
	}

	ti.addr_search_flags = arvif->hal_addr_search_flags;
	ti.search_type = arvif->search_type;
	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
	ti.pkt_offset = 0;
	ti.lmac_id = qwx_hw_get_mac_from_pdev_id(sc, pdev_id);
	ti.bss_ast_hash = arvif->ast_hash;
	ti.bss_ast_idx = arvif->ast_idx;
	ti.dscp_tid_tbl_idx = 0;
#if 0
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
	}

	if (ieee80211_vif_is_mesh(arvif->vif))
		ti.enable_mesh = true;
#endif
	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);

	ti.tid = qwx_dp_tx_get_tid(m);
#if 0
	switch (ti.encap_type) {
	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
		ath11k_dp_tx_encap_nwifi(skb);
		break;
	case HAL_TCL_ENCAP_TYPE_RAW:
		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
			ret = -EINVAL;
			goto fail_remove_idr;
		}
		break;
	case HAL_TCL_ENCAP_TYPE_ETHERNET:
		/* no need to encap */
		break;
	case HAL_TCL_ENCAP_TYPE_802_3:
	default:
		/* TODO: Take care of other encap modes as well */
		ret = -EINVAL;
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		goto fail_remove_idr;
	}
#endif
	/* DMA-map the frame; retry once after linearizing on EFBIG. */
	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (ret && ret != EFBIG) {
		printf("%s: failed to map Tx buffer: %d\n",
		    sc->sc_dev.dv_xname, ret);
		m_freem(m);
		return ret;
	}
	if (ret) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}
		ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (ret) {
			printf("%s: failed to map Tx buffer: %d\n",
			    sc->sc_dev.dv_xname, ret);
			m_freem(m);
			return ret;
		}
	}
	ti.paddr = tx_data->map->dm_segs[0].ds_addr;

	ti.data_len = m->m_pkthdr.len;

	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
	tcl_ring = &sc->hal.srng_list[hal_ring_id];
#ifdef notyet
	spin_lock_bh(&tcl_ring->lock);
#endif
	qwx_hal_srng_access_begin(sc, tcl_ring);

	hal_tcl_desc = (void *)qwx_hal_srng_src_get_next_entry(sc, tcl_ring);
	if (!hal_tcl_desc) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
		qwx_hal_srng_access_end(sc, tcl_ring);
#if 0
		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
#endif
#ifdef notyet
		spin_unlock_bh(&tcl_ring->lock);
#endif
		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
		m_freem(m);
		return ENOMEM;
	}

	/* Slot is committed; completion path will free m and release ni. */
	tx_data->m = m;
	tx_data->ni = ni;

	/* Write the TCL command after the TLV header of the ring entry. */
	qwx_hal_tx_cmd_desc_setup(sc,
	    hal_tcl_desc + sizeof(struct hal_tlv_hdr), &ti);

	qwx_hal_srng_access_end(sc, tcl_ring);

	qwx_dp_shadow_start_timer(sc, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
#ifdef notyet
	spin_unlock_bh(&tcl_ring->lock);
#endif
	tx_ring->queued++;
	tx_ring->cur = (tx_ring->cur + 1) % sc->hw_params.tx_ring_size;

	/* Throttle the stack when the ring is about to fill up. */
	if (tx_ring->queued >= sc->hw_params.tx_ring_size - 1)
		sc->qfullmsk |= (1 << ti.ring_id);

	return 0;
}
24458 
24459 int
24460 qwx_mac_station_remove(struct qwx_softc *sc, struct qwx_vif *arvif,
24461     uint8_t pdev_id, struct ieee80211_node *ni)
24462 {
24463 	struct qwx_node *nq = (struct qwx_node *)ni;
24464 	struct ath11k_peer *peer = &nq->peer;
24465 	int ret;
24466 
24467 	qwx_peer_rx_tid_cleanup(sc, peer);
24468 
24469 	ret = qwx_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
24470 	if (ret) {
24471 		printf("%s: unable to delete BSS peer: %d\n",
24472 		   sc->sc_dev.dv_xname, ret);
24473 		return ret;
24474 	}
24475 
24476 	return 0;
24477 }
24478 
24479 int
24480 qwx_mac_station_add(struct qwx_softc *sc, struct qwx_vif *arvif,
24481     uint8_t pdev_id, struct ieee80211_node *ni)
24482 {
24483 	struct peer_create_params peer_param;
24484 	int ret;
24485 #ifdef notyet
24486 	lockdep_assert_held(&ar->conf_mutex);
24487 #endif
24488 	peer_param.vdev_id = arvif->vdev_id;
24489 	peer_param.peer_addr = ni->ni_macaddr;
24490 	peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
24491 
24492 	ret = qwx_peer_create(sc, arvif, pdev_id, ni, &peer_param);
24493 	if (ret) {
24494 		printf("%s: Failed to add peer: %s for VDEV: %d\n",
24495 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
24496 		    arvif->vdev_id);
24497 		return ret;
24498 	}
24499 
24500 	DNPRINTF(QWX_D_MAC, "%s: Added peer: %s for VDEV: %d\n", __func__,
24501 	    ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
24502 
24503 	ret = qwx_dp_peer_setup(sc, arvif->vdev_id, pdev_id, ni);
24504 	if (ret) {
24505 		printf("%s: failed to setup dp for peer %s on vdev %d (%d)\n",
24506 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
24507 		    arvif->vdev_id, ret);
24508 		goto free_peer;
24509 	}
24510 
24511 	return 0;
24512 
24513 free_peer:
24514 	qwx_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
24515 	return ret;
24516 }
24517 
int
qwx_mac_mgmt_tx_wmi(struct qwx_softc *sc, struct qwx_vif *arvif,
    uint8_t pdev_id, struct ieee80211_node *ni, struct mbuf *m)
{
	struct qwx_txmgmt_queue *txmgmt = &arvif->txmgmt;
	struct qwx_tx_data *tx_data;
	int buf_id;
	int ret;

	/*
	 * Transmit a management frame via a WMI command rather than the
	 * data path: DMA-map the mbuf into the next txmgmt slot and hand
	 * it to firmware with qwx_wmi_mgmt_send().
	 *
	 * NOTE(review): on the first bus_dmamap_load_mbuf() failure below
	 * the mbuf is NOT freed before returning, while the later error
	 * paths do free it — presumably the caller frees on this path;
	 * verify against the caller's error handling.
	 */
	buf_id = txmgmt->cur;

	DNPRINTF(QWX_D_MAC, "%s: tx mgmt frame, buf id %d\n", __func__, buf_id);

	if (txmgmt->queued >= nitems(txmgmt->data))
		return ENOSPC;

	tx_data = &txmgmt->data[buf_id];
#if 0
	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		     ieee80211_has_protected(hdr->frame_control)) {
			skb_put(skb, IEEE80211_CCMP_MIC_LEN);
		}
	}
#endif
	/* DMA-map the frame; retry once after linearizing on EFBIG. */
	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (ret && ret != EFBIG) {
		printf("%s: failed to map mgmt Tx buffer: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}
	if (ret) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}
		ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (ret) {
			printf("%s: failed to map mgmt Tx buffer: %d\n",
			    sc->sc_dev.dv_xname, ret);
			m_freem(m);
			return ret;
		}
	}

	ret = qwx_wmi_mgmt_send(sc, arvif, pdev_id, buf_id, m, tx_data);
	if (ret) {
		printf("%s: failed to send mgmt frame: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_unmap_buf;
	}
	/* Slot is committed; completion releases the node reference. */
	tx_data->ni = ni;

	txmgmt->cur = (txmgmt->cur + 1) % nitems(txmgmt->data);
	txmgmt->queued++;

	/* Throttle the stack when the mgmt queue is about to fill up. */
	if (txmgmt->queued >= nitems(txmgmt->data) - 1)
		sc->qfullmsk |= (1U << QWX_MGMT_QUEUE_ID);

	return 0;

err_unmap_buf:
	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
	return ret;
}
24588 
24589 void
24590 qwx_wmi_start_scan_init(struct qwx_softc *sc, struct scan_req_params *arg)
24591 {
24592 	/* setup commonly used values */
24593 	arg->scan_req_id = 1;
24594 	if (sc->state_11d == ATH11K_11D_PREPARING)
24595 		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
24596 	else
24597 		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
24598 	arg->dwell_time_active = 50;
24599 	arg->dwell_time_active_2g = 0;
24600 	arg->dwell_time_passive = 150;
24601 	arg->dwell_time_active_6g = 40;
24602 	arg->dwell_time_passive_6g = 30;
24603 	arg->min_rest_time = 50;
24604 	arg->max_rest_time = 500;
24605 	arg->repeat_probe_time = 0;
24606 	arg->probe_spacing_time = 0;
24607 	arg->idle_time = 0;
24608 	arg->max_scan_time = 20000;
24609 	arg->probe_delay = 5;
24610 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
24611 	    WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL |
24612 	    WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED;
24613 	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
24614 
24615 	if (isset(sc->wmi.svc_map,
24616 	    WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE))
24617 		arg->scan_ctrl_flags_ext |=
24618 		    WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;
24619 
24620 	arg->num_bssid = 1;
24621 
24622 	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
24623 	 * ZEROs in probe request
24624 	 */
24625 	IEEE80211_ADDR_COPY(arg->bssid_list[0].addr, etheranyaddr);
24626 }
24627 
24628 int
24629 qwx_wmi_set_peer_param(struct qwx_softc *sc, uint8_t *peer_addr,
24630     uint32_t vdev_id, uint32_t pdev_id, uint32_t param_id, uint32_t param_val)
24631 {
24632 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
24633 	struct wmi_peer_set_param_cmd *cmd;
24634 	struct mbuf *m;
24635 	int ret;
24636 
24637 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
24638 	if (!m)
24639 		return ENOMEM;
24640 
24641 	cmd = (struct wmi_peer_set_param_cmd *)(mtod(m, uint8_t *) +
24642 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
24643 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
24644 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
24645 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
24646 	cmd->vdev_id = vdev_id;
24647 	cmd->param_id = param_id;
24648 	cmd->param_value = param_val;
24649 
24650 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_SET_PARAM_CMDID);
24651 	if (ret) {
24652 		if (ret != ESHUTDOWN) {
24653 			printf("%s: failed to send WMI_PEER_SET_PARAM cmd\n",
24654 			    sc->sc_dev.dv_xname);
24655 		}
24656 		m_freem(m);
24657 		return ret;
24658 	}
24659 
24660 	DNPRINTF(QWX_D_WMI, "%s: cmd peer set param vdev %d peer %s "
24661 	    "set param %d value %d\n", __func__, vdev_id,
24662 	    ether_sprintf(peer_addr), param_id, param_val);
24663 
24664 	return 0;
24665 }
24666 
24667 int
24668 qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *sc, int vdev_id,
24669     int pdev_id, uint8_t *addr, uint64_t paddr, uint8_t tid,
24670     uint8_t ba_window_size_valid, uint32_t ba_window_size)
24671 {
24672 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
24673 	struct wmi_peer_reorder_queue_setup_cmd *cmd;
24674 	struct mbuf *m;
24675 	int ret;
24676 
24677 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
24678 	if (!m)
24679 		return ENOMEM;
24680 
24681 	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)(mtod(m, uint8_t *) +
24682 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
24683 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
24684 	    WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
24685 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
24686 
24687 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, addr);
24688 	cmd->vdev_id = vdev_id;
24689 	cmd->tid = tid;
24690 	cmd->queue_ptr_lo = paddr & 0xffffffff;
24691 	cmd->queue_ptr_hi = paddr >> 32;
24692 	cmd->queue_no = tid;
24693 	cmd->ba_window_size_valid = ba_window_size_valid;
24694 	cmd->ba_window_size = ba_window_size;
24695 
24696 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
24697 	if (ret) {
24698 		if (ret != ESHUTDOWN) {
24699 			printf("%s: failed to send "
24700 			    "WMI_PEER_REORDER_QUEUE_SETUP\n",
24701 			    sc->sc_dev.dv_xname);
24702 		}
24703 		m_freem(m);
24704 	}
24705 
24706 	DNPRINTF(QWX_D_WMI, "%s: cmd peer reorder queue setup addr %s "
24707 	    "vdev_id %d tid %d\n", __func__, ether_sprintf(addr), vdev_id, tid);
24708 
24709 	return ret;
24710 }
24711 
/*
 * Report the current spectral-scan mode. Spectral scan is not
 * implemented in this driver, so this always returns DISABLED; the
 * disabled code shows the logic used by the Linux ath11k driver.
 */
enum ath11k_spectral_mode
qwx_spectral_get_mode(struct qwx_softc *sc)
{
#if 0
	if (sc->spectral.enabled)
		return ar->spectral.mode;
	else
#endif
		return ATH11K_SPECTRAL_DISABLED;
}
24722 
/*
 * Reset buffered spectral-scan data. Stub: spectral scan support is
 * not implemented, so this only reports that it was reached. Only
 * called from qwx_start_scan() when spectral mode is BACKGROUND,
 * which qwx_spectral_get_mode() currently never returns.
 */
void
qwx_spectral_reset_buffer(struct qwx_softc *sc)
{
	printf("%s: not implemented\n", __func__);
}
24728 
/*
 * Stop the currently running firmware scan and wait for the scan
 * state machine to return to IDLE. If firmware fails to deliver the
 * completion event the scan state is cleaned up locally anyway.
 * Returns 0 on success, or an errno (e.g. EWOULDBLOCK on timeout).
 */
int
qwx_scan_stop(struct qwx_softc *sc)
{
	struct scan_cancel_param arg = {
		.req_type = WLAN_SCAN_CANCEL_SINGLE,
		.scan_id = ATH11K_SCAN_ID,
	};
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* TODO: Fill other STOP Params */
	arg.pdev_id = 0; /* TODO: derive pdev ID somehow? */
	arg.vdev_id = sc->scan.vdev_id;

	ret = qwx_wmi_send_scan_stop_cmd(sc, &arg);
	if (ret) {
		printf("%s: failed to stop wmi scan: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/*
	 * Wait (up to 3 seconds per wakeup) until the scan-completion
	 * path moves the state machine back to IDLE and wakes us.
	 */
	while (sc->scan.state != ATH11K_SCAN_IDLE) {
		ret = tsleep_nsec(&sc->scan.state, 0, "qwxscstop",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: scan stop timeout\n", sc->sc_dev.dv_xname);
			break;
		}
	}
out:
	/* Scan state should be updated upon scan completion but in case
	 * firmware fails to deliver the event (for whatever reason) it is
	 * desired to clean up scan state anyway. Firmware may have just
	 * dropped the scan completion event delivery due to transport pipe
	 * being overflown with data and/or it can recover on its own before
	 * next scan request is submitted.
	 */
#ifdef notyet
	spin_lock_bh(&ar->data_lock);
#endif
	if (sc->scan.state != ATH11K_SCAN_IDLE)
		qwx_mac_scan_finish(sc);
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
	return ret;
}
24777 
24778 void
24779 qwx_scan_timeout(void *arg)
24780 {
24781 	struct qwx_softc *sc = arg;
24782 	int s = splnet();
24783 
24784 #ifdef notyet
24785 	mutex_lock(&ar->conf_mutex);
24786 #endif
24787 	printf("%s\n", __func__);
24788 	qwx_scan_abort(sc);
24789 #ifdef notyet
24790 	mutex_unlock(&ar->conf_mutex);
24791 #endif
24792 	splx(s);
24793 }
24794 
/*
 * Send the WMI scan-start command and wait for firmware to leave the
 * STARTING state. Called with the scan state machine already set to
 * ATH11K_SCAN_STARTING (see qwx_scan()). On timeout the scan is
 * stopped again. Returns 0 once the scan is running, else an errno.
 */
int
qwx_start_scan(struct qwx_softc *sc, struct scan_req_params *arg)
{
	int ret;
	/* Seconds to wait per tsleep; extended when 11D offload is active. */
	unsigned long timeout = 1;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	if (qwx_spectral_get_mode(sc) == ATH11K_SPECTRAL_BACKGROUND)
		qwx_spectral_reset_buffer(sc);

	ret = qwx_wmi_send_scan_start_cmd(sc, arg);
	if (ret)
		return ret;

	/* Firmware may run an 11D country scan first; allow extra time. */
	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
		timeout = 5;
#if 0
		if (ar->supports_6ghz)
			timeout += 5 * HZ;
#endif
	}

	/*
	 * Sleep until woken on &sc->scan.state — presumably by the WMI
	 * scan event handler once the state leaves STARTING; confirm
	 * against the event processing code elsewhere in this file.
	 */
	while (sc->scan.state == ATH11K_SCAN_STARTING) {
		ret = tsleep_nsec(&sc->scan.state, 0, "qwxscan",
		    SEC_TO_NSEC(timeout));
		if (ret) {
			printf("%s: scan start timeout\n", sc->sc_dev.dv_xname);
			qwx_scan_stop(sc);
			break;
		}
	}

#ifdef notyet
	spin_lock_bh(&ar->data_lock);
	spin_unlock_bh(&ar->data_lock);
#endif
	return ret;
}
24834 
24835 #define ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD		200 /* in msecs */
24836 
24837 int
24838 qwx_scan(struct qwx_softc *sc)
24839 {
24840 	struct ieee80211com *ic = &sc->sc_ic;
24841 	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list);
24842 	struct scan_req_params *arg = NULL;
24843 	struct ieee80211_channel *chan, *lastc;
24844 	int ret = 0, num_channels, i;
24845 	uint32_t scan_timeout;
24846 
24847 	if (arvif == NULL) {
24848 		printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
24849 		return EINVAL;
24850 	}
24851 
24852 	/*
24853 	 * TODO Will we need separate scan iterations on devices with
24854 	 * multiple radios?
24855 	 */
24856 	if (sc->num_radios > 1)
24857 		printf("%s: TODO: only scanning with first vdev\n", __func__);
24858 
24859 	/* Firmwares advertising the support of triggering 11D algorithm
24860 	 * on the scan results of a regular scan expects driver to send
24861 	 * WMI_11D_SCAN_START_CMDID before sending WMI_START_SCAN_CMDID.
24862 	 * With this feature, separate 11D scan can be avoided since
24863 	 * regdomain can be determined with the scan results of the
24864 	 * regular scan.
24865 	 */
24866 	if (sc->state_11d == ATH11K_11D_PREPARING &&
24867 	    isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN))
24868 		qwx_mac_11d_scan_start(sc, arvif);
24869 #ifdef notyet
24870 	mutex_lock(&ar->conf_mutex);
24871 
24872 	spin_lock_bh(&ar->data_lock);
24873 #endif
24874 	switch (sc->scan.state) {
24875 	case ATH11K_SCAN_IDLE:
24876 		sc->scan.started = 0;
24877 		sc->scan.completed = 0;
24878 		sc->scan.state = ATH11K_SCAN_STARTING;
24879 		sc->scan.is_roc = 0;
24880 		sc->scan.vdev_id = arvif->vdev_id;
24881 		ret = 0;
24882 		break;
24883 	case ATH11K_SCAN_STARTING:
24884 	case ATH11K_SCAN_RUNNING:
24885 	case ATH11K_SCAN_ABORTING:
24886 		ret = EBUSY;
24887 		break;
24888 	}
24889 #ifdef notyet
24890 	spin_unlock_bh(&ar->data_lock);
24891 #endif
24892 	if (ret)
24893 		goto exit;
24894 
24895 	arg = malloc(sizeof(*arg), M_DEVBUF, M_ZERO | M_NOWAIT);
24896 	if (!arg) {
24897 		ret = ENOMEM;
24898 		goto exit;
24899 	}
24900 
24901 	qwx_wmi_start_scan_init(sc, arg);
24902 	arg->vdev_id = arvif->vdev_id;
24903 	arg->scan_id = ATH11K_SCAN_ID;
24904 
24905 	if (ic->ic_des_esslen != 0) {
24906 		arg->num_ssids = 1;
24907 		arg->ssid[0].length  = ic->ic_des_esslen;
24908 		memcpy(&arg->ssid[0].ssid, ic->ic_des_essid,
24909 		    ic->ic_des_esslen);
24910 	} else
24911 		arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
24912 
24913 	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
24914 	num_channels = 0;
24915 	for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
24916 		if (chan->ic_flags == 0)
24917 			continue;
24918 		num_channels++;
24919 	}
24920 	if (num_channels) {
24921 		arg->num_chan = num_channels;
24922 		arg->chan_list = mallocarray(arg->num_chan,
24923 		    sizeof(*arg->chan_list), M_DEVBUF, M_NOWAIT | M_ZERO);
24924 
24925 		if (!arg->chan_list) {
24926 			ret = ENOMEM;
24927 			goto exit;
24928 		}
24929 
24930 		i = 0;
24931 		for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
24932 			if (chan->ic_flags == 0)
24933 				continue;
24934 			if (isset(sc->wmi.svc_map,
24935 			    WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL)) {
24936 				arg->chan_list[i++] = chan->ic_freq &
24937 				    WMI_SCAN_CONFIG_PER_CHANNEL_MASK;
24938 #if 0
24939 				/* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan
24940 				 * flags, then scan all PSC channels in 6 GHz band and
24941 				 * those non-PSC channels where RNR IE is found during
24942 				 * the legacy 2.4/5 GHz scan.
24943 				 * If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set,
24944 				 * then all channels in 6 GHz will be scanned.
24945 				 */
24946 				if (req->channels[i]->band == NL80211_BAND_6GHZ &&
24947 				    req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ &&
24948 				    !cfg80211_channel_is_psc(req->channels[i]))
24949 					arg->chan_list[i] |=
24950 						WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND;
24951 #endif
24952 			} else {
24953 				arg->chan_list[i++] = chan->ic_freq;
24954 			}
24955 		}
24956 	}
24957 #if 0
24958 	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
24959 		arg->scan_f_add_spoofed_mac_in_probe = 1;
24960 		ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
24961 		ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
24962 	}
24963 #endif
24964 	scan_timeout = 5000;
24965 
24966 	/* Add a margin to account for event/command processing */
24967 	scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD;
24968 
24969 	ret = qwx_start_scan(sc, arg);
24970 	if (ret) {
24971 		if (ret != ESHUTDOWN) {
24972 			printf("%s: failed to start hw scan: %d\n",
24973 			    sc->sc_dev.dv_xname, ret);
24974 		}
24975 #ifdef notyet
24976 		spin_lock_bh(&ar->data_lock);
24977 #endif
24978 		sc->scan.state = ATH11K_SCAN_IDLE;
24979 #ifdef notyet
24980 		spin_unlock_bh(&ar->data_lock);
24981 #endif
24982 	} else {
24983 		/*
24984 		 * The current mode might have been fixed during association.
24985 		 * Ensure all channels get scanned.
24986 		 */
24987 		if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
24988 			ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
24989 	}
24990 #if 0
24991 	timeout_add_msec(&sc->scan.timeout, scan_timeout);
24992 #endif
24993 exit:
24994 	if (arg) {
24995 		free(arg->chan_list, M_DEVBUF,
24996 		    arg->num_chan * sizeof(*arg->chan_list));
24997 #if 0
24998 		kfree(arg->extraie.ptr);
24999 #endif
25000 		free(arg, M_DEVBUF, sizeof(*arg));
25001 	}
25002 #ifdef notyet
25003 	mutex_unlock(&ar->conf_mutex);
25004 #endif
25005 	if (sc->state_11d == ATH11K_11D_PREPARING)
25006 		qwx_mac_11d_scan_start(sc, arvif);
25007 
25008 	return ret;
25009 }
25010 
/*
 * Abort a running scan. Only valid while the state machine is in
 * RUNNING; aborting from STARTING or ABORTING is refused, and an
 * already-IDLE state machine needs no action.
 */
void
qwx_scan_abort(struct qwx_softc *sc)
{
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
#endif
	switch (sc->scan.state) {
	case ATH11K_SCAN_IDLE:
		/* This can happen if timeout worker kicked in and called
		 * abortion while scan completion was being processed.
		 */
		break;
	case ATH11K_SCAN_STARTING:
	case ATH11K_SCAN_ABORTING:
		printf("%s: refusing scan abortion due to invalid "
		    "scan state: %d\n", sc->sc_dev.dv_xname, sc->scan.state);
		break;
	case ATH11K_SCAN_RUNNING:
		/* Mark the abort in progress, then stop the firmware scan. */
		sc->scan.state = ATH11K_SCAN_ABORTING;
#ifdef notyet
		spin_unlock_bh(&ar->data_lock);
#endif
		ret = qwx_scan_stop(sc);
		if (ret)
			printf("%s: failed to abort scan: %d\n",
			    sc->sc_dev.dv_xname, ret);
#ifdef notyet
		spin_lock_bh(&ar->data_lock);
#endif
		break;
	}
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
}
25049 
25050 /*
25051  * Find a pdev which corresponds to a given channel.
25052  * This doesn't exactly match the semantics of the Linux driver
25053  * but because OpenBSD does not (yet) implement multi-bss mode
25054  * we can assume that only one PHY will be active in either the
25055  * 2 GHz or the 5 GHz band.
25056  */
25057 struct qwx_pdev *
25058 qwx_get_pdev_for_chan(struct qwx_softc *sc, struct ieee80211_channel *chan)
25059 {
25060 	struct qwx_pdev *pdev;
25061 	int i;
25062 
25063 	for (i = 0; i < sc->num_radios; i++) {
25064 		if ((sc->pdevs_active & (1 << i)) == 0)
25065 			continue;
25066 
25067 		pdev = &sc->pdevs[i];
25068 		if (IEEE80211_IS_CHAN_2GHZ(chan) &&
25069 		    (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP))
25070 			return pdev;
25071 		if (IEEE80211_IS_CHAN_5GHZ(chan) &&
25072 		    (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP))
25073 			return pdev;
25074 	}
25075 
25076 	return NULL;
25077 }
25078 
25079 void
25080 qwx_recalculate_mgmt_rate(struct qwx_softc *sc, struct ieee80211_node *ni,
25081     uint32_t vdev_id, uint32_t pdev_id)
25082 {
25083 	struct ieee80211com *ic = &sc->sc_ic;
25084 	int hw_rate_code;
25085 	uint32_t vdev_param;
25086 	int bitrate;
25087 	int ret;
25088 #ifdef notyet
25089 	lockdep_assert_held(&ar->conf_mutex);
25090 #endif
25091 	bitrate = ieee80211_min_basic_rate(ic);
25092 	hw_rate_code = qwx_mac_get_rate_hw_value(ic, ni, bitrate);
25093 	if (hw_rate_code < 0) {
25094 		DPRINTF("%s: bitrate not supported %d\n",
25095 		    sc->sc_dev.dv_xname, bitrate);
25096 		return;
25097 	}
25098 
25099 	vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
25100 	ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id,
25101 	    vdev_param, hw_rate_code);
25102 	if (ret)
25103 		printf("%s: failed to set mgmt tx rate\n",
25104 		    sc->sc_dev.dv_xname);
25105 #if 0
25106 	/* For WCN6855, firmware will clear this param when vdev starts, hence
25107 	 * cache it here so that we can reconfigure it once vdev starts.
25108 	 */
25109 	ab->hw_rate_code = hw_rate_code;
25110 #endif
25111 	vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
25112 	ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id, vdev_param,
25113 	    hw_rate_code);
25114 	if (ret)
25115 		printf("%s: failed to set beacon tx rate\n",
25116 		    sc->sc_dev.dv_xname);
25117 }
25118 
/*
 * Prepare for authentication with the BSS: set the beacon interval
 * and management rates, create the firmware peer entry for the AP,
 * and start the vdev on the AP's channel.
 * Returns 0 on success or an errno.
 */
int
qwx_auth(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	uint32_t param_id;
	struct qwx_vif *arvif;
	struct qwx_pdev *pdev;
	int ret;

	arvif = TAILQ_FIRST(&sc->vif_list);
	if (arvif == NULL) {
		printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Pick the pdev (radio) which serves the AP's band. */
	pdev = qwx_get_pdev_for_chan(sc, ni->ni_chan);
	if (pdev == NULL) {
		printf("%s: no pdev found for channel %d\n",
		    sc->sc_dev.dv_xname, ieee80211_chan2ieee(ic, ni->ni_chan));
		return EINVAL;
	}

	param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
	    param_id, ni->ni_intval);
	if (ret) {
		printf("%s: failed to set beacon interval for VDEV: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id);
		return ret;
	}

	qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);
	/* Start rate control from the lowest rate. */
	ni->ni_txrate = 0;

	/* Create the firmware peer entry for the AP. */
	ret = qwx_mac_station_add(sc, arvif, pdev->pdev_id, ni);
	if (ret)
		return ret;

	/* Start vdev. */
	ret = qwx_mac_vdev_start(sc, arvif, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to start MAC for VDEV: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id);
		return ret;
	}

	/*
	 * WCN6855 firmware clears basic-rate parameters when vdev starts.
	 * Set it once more.
	 */
	qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);

	return ret;
}
25174 
/*
 * Tear down the association with the BSS: stop the vdev, revoke the
 * peer's data-traffic authorization, and remove the firmware peer.
 * Returns 0 on success or an errno.
 */
int
qwx_deauth(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
	int ret;

	ret = qwx_mac_vdev_stop(sc, arvif, pdev_id);
	if (ret) {
		printf("%s: unable to stop vdev vdev_id %d: %d\n",
		   sc->sc_dev.dv_xname, arvif->vdev_id, ret);
		return ret;
	}

	ret = qwx_wmi_set_peer_param(sc, ni->ni_macaddr, arvif->vdev_id,
	    pdev_id, WMI_PEER_AUTHORIZE, 0);
	if (ret) {
		printf("%s: unable to deauthorize BSS peer: %d\n",
		   sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Remove the firmware peer entry created in qwx_auth(). */
	ret = qwx_mac_station_remove(sc, arvif, pdev_id, ni);
	if (ret)
		return ret;

	DNPRINTF(QWX_D_MAC, "%s: disassociated from bssid %s aid %d\n",
	    __func__, ether_sprintf(ni->ni_bssid), arvif->aid);

	return 0;
}
25208 
25209 void
25210 qwx_peer_assoc_h_basic(struct qwx_softc *sc, struct qwx_vif *arvif,
25211     struct ieee80211_node *ni, struct peer_assoc_params *arg)
25212 {
25213 #ifdef notyet
25214 	lockdep_assert_held(&ar->conf_mutex);
25215 #endif
25216 
25217 	IEEE80211_ADDR_COPY(arg->peer_mac, ni->ni_macaddr);
25218 	arg->vdev_id = arvif->vdev_id;
25219 	arg->peer_associd = ni->ni_associd;
25220 	arg->auth_flag = 1;
25221 	arg->peer_listen_intval = ni->ni_intval;
25222 	arg->peer_nss = 1;
25223 	arg->peer_caps = ni->ni_capinfo;
25224 }
25225 
25226 void
25227 qwx_peer_assoc_h_crypto(struct qwx_softc *sc, struct qwx_vif *arvif,
25228     struct ieee80211_node *ni, struct peer_assoc_params *arg)
25229 {
25230 	struct ieee80211com *ic = &sc->sc_ic;
25231 
25232 	if (ic->ic_flags & IEEE80211_F_RSNON) {
25233 		arg->need_ptk_4_way = 1;
25234 		if (ni->ni_rsnprotos == IEEE80211_PROTO_WPA)
25235 			arg->need_gtk_2_way = 1;
25236 	}
25237 #if 0
25238 	if (sta->mfp) {
25239 		/* TODO: Need to check if FW supports PMF? */
25240 		arg->is_pmf_enabled = true;
25241 	}
25242 #endif
25243 }
25244 
/*
 * Return non-zero if the given net80211 rate (in 500 kbit/s units)
 * is one of the 802.11b CCK rates: 1, 2, 5.5 or 11 Mbit/s.
 */
int
qwx_mac_rate_is_cck(uint8_t rate)
{
	switch (rate) {
	case 2:		/* 1 Mbit/s */
	case 4:		/* 2 Mbit/s */
	case 11:	/* 5.5 Mbit/s */
	case 22:	/* 11 Mbit/s */
		return 1;
	default:
		return 0;
	}
}
25250 
25251 void
25252 qwx_peer_assoc_h_rates(struct ieee80211_node *ni, struct peer_assoc_params *arg)
25253 {
25254 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
25255 	struct ieee80211_rateset *rs = &ni->ni_rates;
25256 	int i;
25257 
25258 	for (i = 0, rateset->num_rates = 0;
25259 	    i < rs->rs_nrates && rateset->num_rates < nitems(rateset->rates);
25260 	    i++, rateset->num_rates++) {
25261 		uint8_t rate = rs->rs_rates[i] & IEEE80211_RATE_VAL;
25262 		if (qwx_mac_rate_is_cck(rate))
25263 			rate |= 0x80;
25264 		rateset->rates[rateset->num_rates] = rate;
25265 	}
25266 }
25267 
25268 void
25269 qwx_peer_assoc_h_phymode(struct qwx_softc *sc, struct ieee80211_node *ni,
25270     struct peer_assoc_params *arg)
25271 {
25272 	struct ieee80211com *ic = &sc->sc_ic;
25273 	enum wmi_phy_mode phymode;
25274 
25275 	switch (ic->ic_curmode) {
25276 	case IEEE80211_MODE_11A:
25277 		phymode = MODE_11A;
25278 		break;
25279 	case IEEE80211_MODE_11B:
25280 		phymode = MODE_11B;
25281 		break;
25282 	case IEEE80211_MODE_11G:
25283 		phymode = MODE_11G;
25284 		break;
25285 	default:
25286 		phymode = MODE_UNKNOWN;
25287 		break;
25288 	}
25289 
25290 	DNPRINTF(QWX_D_MAC, "%s: peer %s phymode %s\n", __func__,
25291 	    ether_sprintf(ni->ni_macaddr), qwx_wmi_phymode_str(phymode));
25292 
25293 	arg->peer_phymode = phymode;
25294 }
25295 
/*
 * Build a complete WMI peer assoc request for the given node by
 * running the individual fill-in helpers. 'reassoc' marks this as a
 * re-association rather than a new association. The disabled helpers
 * cover HT/VHT/HE/QoS/SMPS, which are not implemented yet.
 */
void
qwx_peer_assoc_prepare(struct qwx_softc *sc, struct qwx_vif *arvif,
    struct ieee80211_node *ni, struct peer_assoc_params *arg, int reassoc)
{
	memset(arg, 0, sizeof(*arg));

	arg->peer_new_assoc = !reassoc;
	qwx_peer_assoc_h_basic(sc, arvif, ni, arg);
	qwx_peer_assoc_h_crypto(sc, arvif, ni, arg);
	qwx_peer_assoc_h_rates(ni, arg);
	qwx_peer_assoc_h_phymode(sc, ni, arg);
#if 0
	qwx_peer_assoc_h_ht(sc, arvif, ni, arg);
	qwx_peer_assoc_h_vht(sc, arvif, ni, arg);
	qwx_peer_assoc_h_he(sc, arvif, ni, arg);
	qwx_peer_assoc_h_he_6ghz(sc, arvif, ni, arg);
	qwx_peer_assoc_h_qos(sc, arvif, ni, arg);
	qwx_peer_assoc_h_smps(ni, arg);
#endif
#if 0
	arsta->peer_nss = arg->peer_nss;
#endif
	/* TODO: amsdu_disable req? */
}
25320 
25321 int
25322 qwx_run(struct qwx_softc *sc)
25323 {
25324 	struct ieee80211com *ic = &sc->sc_ic;
25325 	struct ieee80211_node *ni = ic->ic_bss;
25326 	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
25327 	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
25328 	struct peer_assoc_params peer_arg;
25329 	int ret;
25330 #ifdef notyet
25331 	lockdep_assert_held(&ar->conf_mutex);
25332 #endif
25333 
25334 	DNPRINTF(QWX_D_MAC, "%s: vdev %i assoc bssid %pM aid %d\n",
25335 	    __func__, arvif->vdev_id, arvif->bssid, arvif->aid);
25336 
25337 	qwx_peer_assoc_prepare(sc, arvif, ni, &peer_arg, 0);
25338 
25339 	peer_arg.is_assoc = 1;
25340 
25341 	sc->peer_assoc_done = 0;
25342 	ret = qwx_wmi_send_peer_assoc_cmd(sc, pdev_id, &peer_arg);
25343 	if (ret) {
25344 		printf("%s: failed to run peer assoc for %s vdev %i: %d\n",
25345 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
25346 		    arvif->vdev_id, ret);
25347 		return ret;
25348 	}
25349 
25350 	while (!sc->peer_assoc_done) {
25351 		ret = tsleep_nsec(&sc->peer_assoc_done, 0, "qwxassoc",
25352 		    SEC_TO_NSEC(1));
25353 		if (ret) {
25354 			printf("%s: failed to get peer assoc conf event "
25355 			    "for %s vdev %i\n", sc->sc_dev.dv_xname,
25356 			    ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
25357 			return ret;
25358 		}
25359 	}
25360 #if 0
25361 	ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
25362 				     &sta->deflink.ht_cap,
25363 				     le16_to_cpu(sta->deflink.he_6ghz_capa.capa));
25364 	if (ret) {
25365 		ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
25366 			    arvif->vdev_id, ret);
25367 		return ret;
25368 	}
25369 
25370 	if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
25371 		ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
25372 			    arvif->vdev_id, bss_conf->bssid);
25373 		return;
25374 	}
25375 
25376 	WARN_ON(arvif->is_up);
25377 #endif
25378 
25379 	arvif->aid = ni->ni_associd;
25380 	IEEE80211_ADDR_COPY(arvif->bssid, ni->ni_bssid);
25381 
25382 	ret = qwx_wmi_vdev_up(sc, arvif->vdev_id, pdev_id, arvif->aid,
25383 	    arvif->bssid, NULL, 0, 0);
25384 	if (ret) {
25385 		printf("%s: failed to set vdev %d up: %d\n",
25386 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
25387 		return ret;
25388 	}
25389 
25390 	arvif->is_up = 1;
25391 #if 0
25392 	arvif->rekey_data.enable_offload = 0;
25393 #endif
25394 
25395 	DNPRINTF(QWX_D_MAC, "%s: vdev %d up (associated) bssid %s aid %d\n",
25396 	    __func__, arvif->vdev_id, ether_sprintf(ni->ni_bssid), arvif->aid);
25397 
25398 	ret = qwx_wmi_set_peer_param(sc, ni->ni_macaddr, arvif->vdev_id,
25399 	    pdev_id, WMI_PEER_AUTHORIZE, 1);
25400 	if (ret) {
25401 		printf("%s: unable to authorize BSS peer: %d\n",
25402 		   sc->sc_dev.dv_xname, ret);
25403 		return ret;
25404 	}
25405 
25406 	return 0;
25407 }
25408 
25409 int
25410 qwx_run_stop(struct qwx_softc *sc)
25411 {
25412 	struct ieee80211com *ic = &sc->sc_ic;
25413 	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
25414 	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
25415 	struct qwx_node *nq = (void *)ic->ic_bss;
25416 	int ret;
25417 
25418 	sc->ops.irq_disable(sc);
25419 
25420 	if (ic->ic_opmode == IEEE80211_M_STA) {
25421 		ic->ic_bss->ni_txrate = 0;
25422 		nq->flags = 0;
25423 	}
25424 
25425 	ret = qwx_wmi_vdev_down(sc, arvif->vdev_id, pdev_id);
25426 	if (ret)
25427 		return ret;
25428 
25429 	arvif->is_up = 0;
25430 
25431 	DNPRINTF(QWX_D_MAC, "%s: vdev %d down\n", __func__, arvif->vdev_id);
25432 
25433 	return 0;
25434 }
25435 
25436 #if NBPFILTER > 0
/*
 * Attach a radiotap BPF tap for 802.11 monitoring and initialize the
 * rx/tx radiotap headers.
 * NOTE(review): the IWX_*_RADIOTAP_PRESENT constants appear to be
 * carried over from the iwx(4) driver — confirm they describe the
 * fields qwx actually fills in.
 */
void
qwx_radiotap_attach(struct qwx_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
}
25451 #endif
25452 
/*
 * Device attach: set up tasks and timeouts, attach radiotap, and
 * bring the device up once to probe it, then power it back down
 * until the interface is brought up by the user.
 * Returns 0 on success or an errno from qwx_init().
 */
int
qwx_attach(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int error, i;

	task_set(&sc->init_task, qwx_init_task, sc);
	task_set(&sc->newstate_task, qwx_newstate_task, sc);
	task_set(&sc->setkey_task, qwx_setkey_task, sc);
	timeout_set_proc(&sc->scan.timeout, qwx_scan_timeout, sc);
#if NBPFILTER > 0
	qwx_radiotap_attach(sc);
#endif
	/* Give each pdev a back-pointer to the softc. */
	for (i = 0; i < nitems(sc->pdevs); i++)
		sc->pdevs[i].sc = sc;

	TAILQ_INIT(&sc->vif_list);

	error = qwx_init(ifp);
	if (error)
		return error;

	/* Turn device off until interface comes up. */
	qwx_core_deinit(sc);

	return 0;
}
25481 
25482 void
25483 qwx_detach(struct qwx_softc *sc)
25484 {
25485 	if (sc->fwmem) {
25486 		qwx_dmamem_free(sc->sc_dmat, sc->fwmem);
25487 		sc->fwmem = NULL;
25488 	}
25489 
25490 	if (sc->m3_mem) {
25491 		qwx_dmamem_free(sc->sc_dmat, sc->m3_mem);
25492 		sc->m3_mem = NULL;
25493 	}
25494 
25495 	qwx_free_firmware(sc);
25496 }
25497 
/*
 * Allocate a single contiguous DMA memory region of 'size' bytes with
 * the given alignment, map it into kernel virtual memory, and load it
 * into a DMA map. The region is restricted to the low 32 bits of
 * address space and zeroed. Returns NULL on failure; on success the
 * result is released with qwx_dmamem_free().
 */
struct qwx_dmamem *
qwx_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t align)
{
	struct qwx_dmamem *adm;
	int nsegs;

	adm = malloc(sizeof(*adm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adm == NULL)
		return NULL;
	adm->size = size;

	/* One segment only; the hardware gets a single bus address. */
	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &adm->map) != 0)
		goto admfree;

	/* Keep the region below 4GB (0xffffffff upper bound). */
	if (bus_dmamem_alloc_range(dmat, size, align, 0, &adm->seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 0, 0xffffffff) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, &adm->seg, nsegs, size,
	    &adm->kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load_raw(dmat, adm->map, &adm->seg, nsegs, size,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	bzero(adm->kva, size);

	return adm;

	/* Unwind in reverse order of the steps above. */
unmap:
	bus_dmamem_unmap(dmat, adm->kva, size);
free:
	bus_dmamem_free(dmat, &adm->seg, 1);
destroy:
	bus_dmamap_destroy(dmat, adm->map);
admfree:
	free(adm, M_DEVBUF, sizeof(*adm));

	return NULL;
}
25540 
/*
 * Release DMA memory obtained from qwx_dmamem_alloc(). Teardown
 * mirrors the allocation order: unmap the kva, free the segment,
 * destroy the map, then free the bookkeeping structure.
 */
void
qwx_dmamem_free(bus_dma_tag_t dmat, struct qwx_dmamem *adm)
{
	bus_dmamem_unmap(dmat, adm->kva, adm->size);
	bus_dmamem_free(dmat, &adm->seg, 1);
	bus_dmamap_destroy(dmat, adm->map);
	free(adm, M_DEVBUF, sizeof(*adm));
}
25549 
/*
 * Autoconf power-state hook: stop the interface on suspend (QUIESCE)
 * and re-initialize it on wakeup if it was up. Always returns 0; an
 * init failure on wakeup is only logged.
 */
int
qwx_activate(struct device *self, int act)
{
	struct qwx_softc *sc = (struct qwx_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int err = 0;

	switch (act) {
	case DVACT_QUIESCE:
		if (ifp->if_flags & IFF_RUNNING) {
			rw_enter_write(&sc->ioctl_rwl);
			qwx_stop(ifp);
			rw_exit(&sc->ioctl_rwl);
		}
		break;
	case DVACT_RESUME:
		/* Nothing to do; the work happens at DVACT_WAKEUP. */
		break;
	case DVACT_WAKEUP:
		/* Restart only if the interface was up but not running. */
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
			err = qwx_init(ifp);
			if (err)
				printf("%s: could not initialize hardware\n",
				    sc->sc_dev.dv_xname);
		}
		break;
	}

	return 0;
}
25579