xref: /openbsd-src/sys/dev/ic/qwz.c (revision 1b22ca7f39dbdc0597d33d09dbab7f7301abd668)
1 /*	$OpenBSD: qwz.c,v 1.6 2024/08/20 21:23:18 patrick Exp $	*/
2 
3 /*
4  * Copyright 2023 Stefan Sperling <stsp@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Copyright (c) 2018-2019 The Linux Foundation.
21  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc.
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted (subject to the limitations in the disclaimer
26  * below) provided that the following conditions are met:
27  *
28  *  * Redistributions of source code must retain the above copyright notice,
29  *    this list of conditions and the following disclaimer.
30  *
31  *  * Redistributions in binary form must reproduce the above copyright
32  *    notice, this list of conditions and the following disclaimer in the
33  *    documentation and/or other materials provided with the distribution.
34  *
35  *  * Neither the name of [Owner Organization] nor the names of its
36  *    contributors may be used to endorse or promote products derived from
37  *    this software without specific prior written permission.
38  *
39  * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
40  * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
41  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
42  * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
43  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
44  * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
45  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
46  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
47  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
48  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
49  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
50  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51  */
52 
53 /*
54  * Driver for Qualcomm Technologies 802.11be chipset.
55  */
56 
57 #include "bpfilter.h"
58 
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/device.h>
62 #include <sys/rwlock.h>
63 #include <sys/systm.h>
64 #include <sys/socket.h>
65 #include <sys/sockio.h>
66 
67 #include <sys/refcnt.h>
68 #include <sys/task.h>
69 
70 #include <machine/bus.h>
71 #include <machine/intr.h>
72 
73 #ifdef __HAVE_FDT
74 #include <dev/ofw/openfirm.h>
75 #endif
76 
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #endif
80 #include <net/if.h>
81 #include <net/if_media.h>
82 
83 #include <netinet/in.h>
84 #include <netinet/if_ether.h>
85 
86 #include <net80211/ieee80211_var.h>
87 #include <net80211/ieee80211_radiotap.h>
88 
89 /* XXX linux porting goo */
90 #ifdef __LP64__
91 #define BITS_PER_LONG		64
92 #else
93 #define BITS_PER_LONG		32
94 #endif
95 #define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
96 #define __bf_shf(x) (__builtin_ffsll(x) - 1)
97 #define ffz(x) ffs(~(x))
98 #define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m)))
99 #define FIELD_PREP(_m, _v) (((typeof(_m))(_v) << __bf_shf(_m)) & (_m))
100 #define BIT(x)               (1UL << (x))
101 #define test_bit(i, a)  ((a) & (1 << (i)))
102 #define clear_bit(i, a) ((a)) &= ~(1 << (i))
103 #define set_bit(i, a)   ((a)) |= (1 << (i))
104 #define container_of(ptr, type, member) ({			\
105 	const __typeof( ((type *)0)->member ) *__mptr = (ptr);	\
106 	(type *)( (char *)__mptr - offsetof(type,member) );})
107 
108 /* #define QWZ_DEBUG */
109 
110 #include <dev/ic/qwzreg.h>
111 #include <dev/ic/qwzvar.h>
112 
113 #ifdef QWZ_DEBUG
114 uint32_t	qwz_debug = 0
115 		    | QWZ_D_MISC
116 /*		    | QWZ_D_MHI */
117 /*		    | QWZ_D_QMI */
118 /*		    | QWZ_D_WMI */
119 /*		    | QWZ_D_HTC */
120 /*		    | QWZ_D_HTT */
121 /*		    | QWZ_D_MAC */
122 /*		    | QWZ_D_MGMT */
123 		;
124 #endif
125 
126 int qwz_ce_init_pipes(struct qwz_softc *);
127 int qwz_hal_srng_create_config_wcn7850(struct qwz_softc *);
128 int qwz_hal_srng_src_num_free(struct qwz_softc *, struct hal_srng *, int);
129 int qwz_ce_per_engine_service(struct qwz_softc *, uint16_t);
130 int qwz_hal_srng_setup(struct qwz_softc *, enum hal_ring_type, int, int,
131     struct hal_srng_params *);
132 int qwz_ce_send(struct qwz_softc *, struct mbuf *, uint8_t, uint16_t);
133 int qwz_htc_connect_service(struct qwz_htc *, struct qwz_htc_svc_conn_req *,
134     struct qwz_htc_svc_conn_resp *);
135 void qwz_hal_srng_shadow_update_hp_tp(struct qwz_softc *, struct hal_srng *);
136 void qwz_wmi_free_dbring_caps(struct qwz_softc *);
137 int qwz_wmi_set_peer_param(struct qwz_softc *, uint8_t *, uint32_t,
138     uint32_t, uint32_t, uint32_t);
139 int qwz_wmi_peer_rx_reorder_queue_setup(struct qwz_softc *, int, int,
140     uint8_t *, uint64_t, uint8_t, uint8_t, uint32_t);
141 const void **qwz_wmi_tlv_parse_alloc(struct qwz_softc *, const void *, size_t);
142 int qwz_core_init(struct qwz_softc *);
143 int qwz_qmi_event_server_arrive(struct qwz_softc *);
144 int qwz_mac_register(struct qwz_softc *);
145 int qwz_mac_start(struct qwz_softc *);
146 void qwz_mac_scan_finish(struct qwz_softc *);
147 int qwz_mac_mgmt_tx_wmi(struct qwz_softc *, struct qwz_vif *, uint8_t,
148     struct ieee80211_node *, struct mbuf *);
149 int qwz_dp_tx(struct qwz_softc *, struct qwz_vif *, uint8_t,
150     struct ieee80211_node *, struct mbuf *);
151 int qwz_dp_tx_send_reo_cmd(struct qwz_softc *, struct dp_rx_tid *,
152     enum hal_reo_cmd_type , struct ath12k_hal_reo_cmd *,
153     void (*func)(struct qwz_dp *, void *, enum hal_reo_cmd_status));
154 void qwz_dp_rx_deliver_msdu(struct qwz_softc *, struct qwz_rx_msdu *);
155 void qwz_dp_service_mon_ring(void *);
156 void qwz_peer_frags_flush(struct qwz_softc *, struct ath12k_peer *);
157 int qwz_wmi_vdev_install_key(struct qwz_softc *,
158     struct wmi_vdev_install_key_arg *, uint8_t);
159 int qwz_dp_peer_rx_pn_replay_config(struct qwz_softc *, struct qwz_vif *,
160     struct ieee80211_node *, struct ieee80211_key *, int);
161 void qwz_setkey_clear(struct qwz_softc *);
162 
163 int qwz_scan(struct qwz_softc *);
164 void qwz_scan_abort(struct qwz_softc *);
165 int qwz_auth(struct qwz_softc *);
166 int qwz_deauth(struct qwz_softc *);
167 int qwz_run(struct qwz_softc *);
168 int qwz_run_stop(struct qwz_softc *);
169 
170 struct ieee80211_node *
171 qwz_node_alloc(struct ieee80211com *ic)
172 {
173 	struct qwz_node *nq;
174 
175 	nq = malloc(sizeof(struct qwz_node), M_DEVBUF, M_NOWAIT | M_ZERO);
176 	if (nq != NULL)
177 		nq->peer.peer_id = HAL_INVALID_PEERID;
178 	return (struct ieee80211_node *)nq;
179 }
180 
/*
 * Bring the interface up: select a crypto mode, initialize the core and
 * QMI firmware interface, and (on first init) register channel, MAC
 * address and media state with net80211.  Called from qwz_ioctl() with
 * sc->ioctl_rwl held.  Returns 0 or an errno.
 */
int
qwz_init(struct ifnet *ifp)
{
	int error;
	struct qwz_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
	/*
	 * There are several known hardware/software crypto issues
	 * on wcn6855 devices, firmware 0x1106196e. It is unclear
	 * if these are driver or firmware bugs.
	 *
	 * 1) Broadcast/Multicast frames will only be received on
	 *    encrypted networks if hardware crypto is used and a
	 *    CCMP group key is used. Otherwise such frames never
	 *    even trigger an interrupt. This breaks ARP and IPv6.
	 *    This issue is known to affect the Linux ath12k vendor
	 *    driver when software crypto mode is selected.
	 *    Workaround: Use hardware crypto on WPA2 networks.
	 *    However, even with hardware crypto broadcast frames
	 *    are never received if TKIP is used as the WPA2 group
	 *    cipher and we have no workaround for this.
	 *
	 * 2) Adding WEP keys for hardware crypto crashes the firmware.
	 *    Presumably, lack of WEP support is deliberate because the
	 *    Linux ath12k vendor driver rejects attempts to install
	 *    WEP keys to hardware.
	 *    Workaround: Use software crypto if WEP is enabled.
	 *    This suffers from the broadcast issues mentioned above.
	 *
	 * 3) A WPA1 group key handshake message from the AP is never
	 *    received if hardware crypto is used.
	 *    Workaround: Use software crypto if WPA1 is enabled.
	 *    This suffers from the broadcast issues mentioned above,
	 *    even on WPA2 networks when WPA1 and WPA2 are both enabled.
	 *    On OpenBSD, WPA1 is disabled by default.
	 *
	 * The only known fully working configurations are unencrypted
	 * networks, and WPA2/CCMP-only networks provided WPA1 remains
	 * disabled.
	 */
	if ((ic->ic_flags & IEEE80211_F_WEPON) ||
	    (ic->ic_rsnprotos & IEEE80211_PROTO_WPA))
		sc->crypto_mode = ATH12K_CRYPT_MODE_SW;
	else
		sc->crypto_mode = ATH12K_CRYPT_MODE_HW;
	sc->frame_mode = ATH12K_HW_TXRX_NATIVE_WIFI;
	/* Reset state machine and scan bookkeeping to a known baseline. */
	ic->ic_state = IEEE80211_S_INIT;
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH12K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWZ_11D_INVALID_VDEV_ID;

	error = qwz_core_init(sc);
	if (error)
		return error;

	memset(&sc->qrtr_server, 0, sizeof(sc->qrtr_server));
	sc->qrtr_server.node = QRTR_NODE_BCAST;

	/* wait for QRTR init to be done */
	while (sc->qrtr_server.node == QRTR_NODE_BCAST) {
		/* Woken via wakeup(&sc->qrtr_server); 5 second timeout. */
		error = tsleep_nsec(&sc->qrtr_server, 0, "qwzqrtr",
		    SEC_TO_NSEC(5));
		if (error) {
			printf("%s: qrtr init timeout\n", sc->sc_dev.dv_xname);
			return error;
		}
	}

	error = qwz_qmi_event_server_arrive(sc);
	if (error)
		return error;

	if (sc->attached) {
		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(ic->ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);
	} else {
		/* First init: hook the device up to net80211. */
		sc->attached = 1;

		/* Configure channel information obtained from firmware. */
		ieee80211_channel_init(ifp);

		/* Configure initial MAC address. */
		error = if_setlladdr(ifp, ic->ic_myaddr);
		if (error)
			printf("%s: could not set MAC address %s: %d\n",
			    sc->sc_dev.dv_xname, ether_sprintf(ic->ic_myaddr),
			    error);

		ieee80211_media_init(ifp, qwz_media_change,
		    ieee80211_media_status);
	}

	if (ifp->if_flags & IFF_UP) {
		refcnt_init(&sc->task_refs);

		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_flags |= IFF_RUNNING;

		error = qwz_mac_start(sc);
		if (error)
			return error;

		ieee80211_begin_scan(ifp);
	}

	return 0;
}
291 
292 void
293 qwz_add_task(struct qwz_softc *sc, struct taskq *taskq, struct task *task)
294 {
295 	int s = splnet();
296 
297 	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
298 		splx(s);
299 		return;
300 	}
301 
302 	refcnt_take(&sc->task_refs);
303 	if (!task_add(taskq, task))
304 		refcnt_rele_wake(&sc->task_refs);
305 	splx(s);
306 }
307 
308 void
309 qwz_del_task(struct qwz_softc *sc, struct taskq *taskq, struct task *task)
310 {
311 	if (task_del(taskq, task))
312 		refcnt_rele(&sc->task_refs);
313 }
314 
/*
 * Stop the interface and power off the hardware.  Called with
 * sc->ioctl_rwl write-locked (asserted below).  Blocks new tasks via
 * the crash-flush flag, drains pending ones, releases queued setkey
 * state, and resets the driver and net80211 back to INIT.
 */
void
qwz_stop(struct ifnet *ifp)
{
	struct qwz_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int s = splnet();

	rw_assert_wrlock(&sc->ioctl_rwl);

	timeout_del(&sc->mon_reap_timer);

	/* Disallow new tasks. */
	set_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags);

	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->init_task);
	qwz_del_task(sc, sc->sc_nswq, &sc->newstate_task);
	qwz_del_task(sc, systq, &sc->setkey_task);
	refcnt_finalize(&sc->task_refs, "qwzstop");

	/* Drop node references held by still-queued setkey commands. */
	qwz_setkey_clear(sc);

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags);

	ifp->if_timer = sc->sc_tx_timer = 0;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Force net80211 to INIT and reset scan/vdev bookkeeping. */
	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH12K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWZ_11D_INVALID_VDEV_ID;
	sc->pdevs_active = 0;

	/* power off hardware */
	qwz_core_deinit(sc);

	splx(s);
}
355 
356 void
357 qwz_free_firmware(struct qwz_softc *sc)
358 {
359 	int i;
360 
361 	for (i = 0; i < nitems(sc->fw_img); i++) {
362 		free(sc->fw_img[i].data, M_DEVBUF, sc->fw_img[i].size);
363 		sc->fw_img[i].data = NULL;
364 		sc->fw_img[i].size = 0;
365 	}
366 }
367 
/*
 * Interface ioctl handler.  Serialized by sc->ioctl_rwl so that only
 * one process at a time may sleep inside qwz_init()/qwz_stop().
 * Returns 0 or an errno.
 */
int
qwz_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qwz_softc *sc = ifp->if_softc;
	int s, err = 0;

	/*
	 * Prevent processes from entering this function while another
	 * process is tsleep'ing in it.
	 */
	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
	if (err)
		return err;
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				/* Force reload of firmware image from disk. */
				qwz_free_firmware(sc);
				err = qwz_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				qwz_stop(ifp);
		}
		break;

	default:
		err = ieee80211_ioctl(ifp, cmd, data);
	}

	/* net80211 signals via ENETRESET that a restart is required. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			qwz_stop(ifp);
			err = qwz_init(ifp);
		}
	}

	splx(s);
	rw_exit(&sc->ioctl_rwl);

	return err;
}
418 
419 int
420 qwz_tx(struct qwz_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
421 {
422 	struct ieee80211_frame *wh;
423 	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
424 	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
425 	uint8_t frame_type;
426 
427 	wh = mtod(m, struct ieee80211_frame *);
428 	frame_type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
429 
430 #if NBPFILTER > 0
431 	if (sc->sc_drvbpf != NULL) {
432 		struct qwz_tx_radiotap_header *tap = &sc->sc_txtap;
433 
434 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
435 		    m, BPF_DIRECTION_OUT);
436 	}
437 #endif
438 
439 	if (frame_type == IEEE80211_FC0_TYPE_MGT)
440 		return qwz_mac_mgmt_tx_wmi(sc, arvif, pdev_id, ni, m);
441 
442 	return qwz_dp_tx(sc, arvif, pdev_id, ni, m);
443 }
444 
/*
 * ifnet start routine.  Drains pending management frames always, and
 * data frames from the send queue while in RUN state, handing each
 * frame to qwz_tx().  Sets the output-active flag when TX queues are
 * full (sc->qfullmsk) so the network stack stops feeding us.
 */
void
qwz_start(struct ifnet *ifp)
{
	struct qwz_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* Management frames carry their node in ph_cookie. */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}

		if (ic->ic_state != IEEE80211_S_RUN ||
		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
			break;

		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* Make sure the Ethernet header is contiguous. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		/* Encapsulate as an 802.11 frame; sets ni on success. */
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (qwz_tx(sc, m, ni) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the watchdog while frames are in flight. */
		if (ifp->if_flags & IFF_UP)
			ifp->if_timer = 1;
	}
}
507 
508 void
509 qwz_watchdog(struct ifnet *ifp)
510 {
511 	struct qwz_softc *sc = ifp->if_softc;
512 
513 	ifp->if_timer = 0;
514 
515 	if (sc->sc_tx_timer > 0) {
516 		if (--sc->sc_tx_timer == 0) {
517 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
518 			if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
519 				task_add(systq, &sc->init_task);
520 			ifp->if_oerrors++;
521 			return;
522 		}
523 		ifp->if_timer = 1;
524 	}
525 
526 	ieee80211_watchdog(ifp);
527 }
528 
529 int
530 qwz_media_change(struct ifnet *ifp)
531 {
532 	int err;
533 
534 	err = ieee80211_media_change(ifp);
535 	if (err != ENETRESET)
536 		return err;
537 
538 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
539 	    (IFF_UP | IFF_RUNNING)) {
540 		qwz_stop(ifp);
541 		err = qwz_init(ifp);
542 	}
543 
544 	return err;
545 }
546 
547 int
548 qwz_queue_setkey_cmd(struct ieee80211com *ic, struct ieee80211_node *ni,
549     struct ieee80211_key *k, int cmd)
550 {
551 	struct qwz_softc *sc = ic->ic_softc;
552 	struct qwz_setkey_task_arg *a;
553 
554 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg) ||
555 	    k->k_id > WMI_MAX_KEY_INDEX)
556 		return ENOSPC;
557 
558 	a = &sc->setkey_arg[sc->setkey_cur];
559 	a->ni = ieee80211_ref_node(ni);
560 	a->k = k;
561 	a->cmd = cmd;
562 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
563 	sc->setkey_nkeys++;
564 	qwz_add_task(sc, systq, &sc->setkey_task);
565 	return EBUSY;
566 }
567 
568 int
569 qwz_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
570     struct ieee80211_key *k)
571 {
572 	struct qwz_softc *sc = ic->ic_softc;
573 
574 	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags) ||
575 	    k->k_cipher == IEEE80211_CIPHER_WEP40 ||
576 	    k->k_cipher == IEEE80211_CIPHER_WEP104)
577 		return ieee80211_set_key(ic, ni, k);
578 
579 	return qwz_queue_setkey_cmd(ic, ni, k, QWZ_ADD_KEY);
580 }
581 
582 void
583 qwz_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
584     struct ieee80211_key *k)
585 {
586 	struct qwz_softc *sc = ic->ic_softc;
587 
588 	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags) ||
589 	    k->k_cipher == IEEE80211_CIPHER_WEP40 ||
590 	    k->k_cipher == IEEE80211_CIPHER_WEP104) {
591 		ieee80211_delete_key(ic, ni, k);
592 		return;
593 	}
594 
595 	if (ic->ic_state != IEEE80211_S_RUN) {
596 		/* Keys removed implicitly when firmware station is removed. */
597 		return;
598 	}
599 
600 	/*
601 	 * net80211 calls us with a NULL node when deleting group keys,
602 	 * but firmware expects a MAC address in the command.
603 	 */
604 	if (ni == NULL)
605 		ni = ic->ic_bss;
606 
607 	qwz_queue_setkey_cmd(ic, ni, k, QWZ_DEL_KEY);
608 }
609 
/*
 * Install (or, with delete_key set, remove) a crypto key in firmware
 * via the WMI vdev-install-key command, then sleep until the firmware
 * completion event sets sc->install_key_done.  Returns 0 if hardware
 * crypto is disabled, an errno on failure, -1 on timeout, or the
 * firmware's install_key_status.
 */
int
qwz_wmi_install_key_cmd(struct qwz_softc *sc, struct qwz_vif *arvif,
    uint8_t *macaddr, struct ieee80211_key *k, uint32_t flags,
    int delete_key)
{
	int ret;
	struct wmi_vdev_install_key_arg arg = {
		.vdev_id = arvif->vdev_id,
		.key_idx = k->k_id,
		.key_len = k->k_len,
		.key_data = k->k_key,
		.key_flags = flags,
		.macaddr = macaddr,
	};
	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
#ifdef notyet
	lockdep_assert_held(&arvif->ar->conf_mutex);

	reinit_completion(&ar->install_key_done);
#endif
	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
		return 0;

	if (delete_key) {
		/* Deletion is signalled by CIPHER_NONE and no key data. */
		arg.key_cipher = WMI_CIPHER_NONE;
		arg.key_data = NULL;
	} else {
		switch (k->k_cipher) {
		case IEEE80211_CIPHER_CCMP:
			arg.key_cipher = WMI_CIPHER_AES_CCM;
#if 0
			/* TODO: Re-check if flag is valid */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
#endif
			break;
		case IEEE80211_CIPHER_TKIP:
			arg.key_cipher = WMI_CIPHER_TKIP;
			arg.key_txmic_len = 8;
			arg.key_rxmic_len = 8;
			break;
#if 0
		case WLAN_CIPHER_SUITE_CCMP_256:
			arg.key_cipher = WMI_CIPHER_AES_CCM;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			arg.key_cipher = WMI_CIPHER_AES_GCM;
			break;
#endif
		default:
			printf("%s: cipher %u is not supported\n",
			    sc->sc_dev.dv_xname, k->k_cipher);
			return EOPNOTSUPP;
		}
#if 0
		if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
				      IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
#endif
	}

	/* Cleared here, set by the WMI completion event handler. */
	sc->install_key_done = 0;
	ret = qwz_wmi_vdev_install_key(sc, &arg, pdev_id);
	if (ret)
		return ret;

	/* Wait up to one second per iteration for the firmware event. */
	while (!sc->install_key_done) {
		ret = tsleep_nsec(&sc->install_key_done, 0, "qwzinstkey",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: install key timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	return sc->install_key_status;
}
688 
/*
 * Install a pairwise or group key for a station in firmware and
 * enable PN replay offload for it.  Once the node holds both a
 * pairwise and a group key, mark its 802.1X port valid and bring
 * the link up.  Returns 0 or an errno.
 */
int
qwz_add_sta_key(struct qwz_softc *sc, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwz_node *nq = (struct qwz_node *)ni;
	struct ath12k_peer *peer = &nq->peer;
	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	int ret = 0;
	uint32_t flags = 0;
	const int want_keymask = (QWZ_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    QWZ_NODE_FLAG_HAVE_GROUP_KEY);

	/*
	 * Flush the fragments cache during key (re)install to
	 * ensure all frags in the new frag list belong to the same key.
	 */
	qwz_peer_frags_flush(sc, peer);

	if (k->k_flags & IEEE80211_KEY_GROUP)
		flags |= WMI_KEY_GROUP;
	else
		flags |= WMI_KEY_PAIRWISE;

	ret = qwz_wmi_install_key_cmd(sc, arvif, ni->ni_macaddr, k, flags, 0);
	if (ret) {
		printf("%s: installing crypto key failed (%d)\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Let hardware perform packet-number replay detection. */
	ret = qwz_dp_peer_rx_pn_replay_config(sc, arvif, ni, k, 0);
	if (ret) {
		printf("%s: failed to offload PN replay detection %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Remember which key classes this node now holds. */
	if (k->k_flags & IEEE80211_KEY_GROUP)
		nq->flags |= QWZ_NODE_FLAG_HAVE_GROUP_KEY;
	else
		nq->flags |= QWZ_NODE_FLAG_HAVE_PAIRWISE_KEY;

	if ((nq->flags & want_keymask) == want_keymask) {
		DPRINTF("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}
741 
742 int
743 qwz_del_sta_key(struct qwz_softc *sc, struct ieee80211_node *ni,
744     struct ieee80211_key *k)
745 {
746 	struct qwz_node *nq = (struct qwz_node *)ni;
747 	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
748 	int ret = 0;
749 
750 	ret = qwz_wmi_install_key_cmd(sc, arvif, ni->ni_macaddr, k, 0, 1);
751 	if (ret) {
752 		printf("%s: deleting crypto key failed (%d)\n",
753 		    sc->sc_dev.dv_xname, ret);
754 		return ret;
755 	}
756 
757 	ret = qwz_dp_peer_rx_pn_replay_config(sc, arvif, ni, k, 1);
758 	if (ret) {
759 		printf("%s: failed to disable PN replay detection %d\n",
760 		    sc->sc_dev.dv_xname, ret);
761 		return ret;
762 	}
763 
764 	if (k->k_flags & IEEE80211_KEY_GROUP)
765 		nq->flags &= ~QWZ_NODE_FLAG_HAVE_GROUP_KEY;
766 	else
767 		nq->flags &= ~QWZ_NODE_FLAG_HAVE_PAIRWISE_KEY;
768 
769 	return 0;
770 }
771 
/*
 * Consume queued add/delete key commands from the setkey ring.
 * Runs in systq task context so it may sleep while talking to
 * firmware.  Stops processing after the first error or when a
 * crash flush begins; remaining entries are cleaned up later by
 * qwz_setkey_clear().
 */
void
qwz_setkey_task(void *arg)
{
	struct qwz_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwz_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		/* Bail out if the previous command failed. */
		if (err || test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		KASSERT(a->cmd == QWZ_ADD_KEY || a->cmd == QWZ_DEL_KEY);
		/* Keys only matter in RUN state; otherwise just consume. */
		if (ic->ic_state == IEEE80211_S_RUN) {
			if (a->cmd == QWZ_ADD_KEY)
				err = qwz_add_sta_key(sc, a->ni, a->k);
			else
				err = qwz_del_sta_key(sc, a->ni, a->k);
		}
		/* Drop the reference taken by qwz_queue_setkey_cmd(). */
		ieee80211_release_node(ic, a->ni);
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
802 
803 void
804 qwz_setkey_clear(struct qwz_softc *sc)
805 {
806 	struct ieee80211com *ic = &sc->sc_ic;
807 	struct qwz_setkey_task_arg *a;
808 
809 	while (sc->setkey_nkeys > 0) {
810 		a = &sc->setkey_arg[sc->setkey_tail];
811 		ieee80211_release_node(ic, a->ni);
812 		a->ni = NULL;
813 		sc->setkey_tail = (sc->setkey_tail + 1) %
814 		    nitems(sc->setkey_arg);
815 		sc->setkey_nkeys--;
816 	}
817 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
818 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
819 }
820 
/*
 * net80211 state change hook.  Records the requested state and defers
 * the actual transition to qwz_newstate_task() on sc->sc_nswq, which
 * may sleep.  Always returns 0; the saved sc_newstate is invoked from
 * the task once the transition completes.
 */
int
qwz_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = &ic->ic_if;
	struct qwz_softc *sc = ifp->if_softc;

	/*
	 * Prevent attempts to transition towards the same state, unless
	 * we are scanning in which case a SCAN -> SCAN transition
	 * triggers another scan iteration. And AUTH -> AUTH is needed
	 * to support band-steering.
	 */
	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
	    nstate != IEEE80211_S_AUTH)
		return 0;
	/* Leaving RUN: cancel pending key operations first. */
	if (ic->ic_state == IEEE80211_S_RUN) {
#if 0
		qwz_del_task(sc, systq, &sc->ba_task);
#endif
		qwz_del_task(sc, systq, &sc->setkey_task);
		qwz_setkey_clear(sc);
#if 0
		qwz_del_task(sc, systq, &sc->bgscan_done_task);
#endif
	}

	sc->ns_nstate = nstate;
	sc->ns_arg = arg;

	qwz_add_task(sc, sc->sc_nswq, &sc->newstate_task);

	return 0;
}
854 
/*
 * Deferred state transition worker queued by qwz_newstate().  Walks
 * the firmware down from the current state if the target state is
 * lower, then walks up to the target, finally invoking the saved
 * net80211 newstate handler.  On any error a full re-init is
 * scheduled via init_task.
 */
void
qwz_newstate_task(void *arg)
{
	struct qwz_softc *sc = (struct qwz_softc *)arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	enum ieee80211_state nstate = sc->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int err = 0, s = splnet();

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		/* qwz_stop() is waiting for us. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (ostate == IEEE80211_S_SCAN) {
		if (nstate == ostate) {
			/* SCAN -> SCAN: wait until firmware goes idle. */
			if (sc->scan.state != ATH12K_SCAN_IDLE) {
				refcnt_rele_wake(&sc->task_refs);
				splx(s);
				return;
			}
			/* Firmware is no longer scanning. Do another scan. */
			goto next_scan;
		}
	}

	/* Walk down: undo RUN, then AUTH/ASSOC, before going back up. */
	if (nstate <= ostate) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = qwz_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = qwz_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
			break;
		}

		/* Die now if qwz_stop() was called while we were sleeping. */
		if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
			refcnt_rele_wake(&sc->task_refs);
			splx(s);
			return;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
next_scan:
		err = qwz_scan(sc);
		if (err)
			break;
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: %s -> %s\n", ifp->if_xname,
			    ieee80211_state_name[ic->ic_state],
			    ieee80211_state_name[IEEE80211_S_SCAN]);
#if 0
		if ((sc->sc_flags & QWZ_FLAG_BGSCAN) == 0) {
#endif
			ieee80211_set_link_state(ic, LINK_STATE_DOWN);
			ieee80211_node_cleanup(ic, ic->ic_bss);
#if 0
		}
#endif
		/* SCAN is entered directly; net80211 is not called back. */
		ic->ic_state = IEEE80211_S_SCAN;
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;

	case IEEE80211_S_AUTH:
		err = qwz_auth(sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = qwz_run(sc);
		break;
	}
out:
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		if (err)
			task_add(systq, &sc->init_task);
		else
			sc->sc_newstate(ic, nstate, sc->ns_arg);
	}
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
959 
/* Autoconf glue: driver name and device class for qwz(4) interfaces. */
struct cfdriver qwz_cd = {
	NULL, "qwz", DV_IFNET
};
963 
964 void
965 qwz_init_wmi_config_qca6390(struct qwz_softc *sc,
966     struct target_resource_config *config)
967 {
968 	config->num_vdevs = 4;
969 	config->num_peers = 16;
970 	config->num_tids = 32;
971 
972 	config->num_offload_peers = 3;
973 	config->num_offload_reorder_buffs = 3;
974 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
975 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
976 	config->tx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
977 	config->rx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
978 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
979 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
980 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
981 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
982 	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
983 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
984 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
985 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
986 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
987 	config->num_mcast_groups = 0;
988 	config->num_mcast_table_elems = 0;
989 	config->mcast2ucast_mode = 0;
990 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
991 	config->num_wds_entries = 0;
992 	config->dma_burst_size = 0;
993 	config->rx_skip_defrag_timeout_dup_detection_check = 0;
994 	config->vow_config = TARGET_VOW_CONFIG;
995 	config->gtk_offload_max_vdev = 2;
996 	config->num_msdu_desc = 0x400;
997 	config->beacon_tx_offload_max_vdev = 2;
998 	config->rx_batchmode = TARGET_RX_BATCHMODE;
999 
1000 	config->peer_map_unmap_v2_support = 0;
1001 	config->use_pdev_id = 1;
1002 	config->max_frag_entries = 0xa;
1003 	config->num_tdls_vdevs = 0x1;
1004 	config->num_tdls_conn_table_entries = 8;
1005 	config->beacon_tx_offload_max_vdev = 0x2;
1006 	config->num_multicast_filter_entries = 0x20;
1007 	config->num_wow_filters = 0x16;
1008 	config->num_keep_alive_pattern = 0;
1009 	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
1010 }
1011 
/*
 * Program the REO (RX reorder engine) block: enable the aging list and
 * aging flush, route fragment and BAR frames to the REO2SW0 ring, set
 * per-ring reorder timeouts, and install the destination ring hash map
 * used to spread RX traffic across REO destination rings.
 */
void
qwz_hal_reo_hw_setup(struct qwz_softc *sc, uint32_t ring_hash_map)
{
	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	uint32_t val;

	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);

	/* Send fragments and BAR frames to the REO2SW0 ring. */
	val = sc->ops.read32(sc, reo_base + HAL_REO1_MISC_CTRL_ADDR(sc));
	val &= ~HAL_REO1_MISC_CTL_FRAG_DST_RING;
	val &= ~HAL_REO1_MISC_CTL_BAR_DST_RING;
	val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAG_DST_RING,
	    HAL_SRNG_RING_ID_REO2SW0);
	val |= FIELD_PREP(HAL_REO1_MISC_CTL_BAR_DST_RING,
	    HAL_SRNG_RING_ID_REO2SW0);
	sc->ops.write32(sc, reo_base + HAL_REO1_MISC_CTRL_ADDR(sc), val);

	/* Reorder timeouts; ring 3 gets the BE/BK/VI-specific value. */
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
	    ATH12K_HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);

	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
	    ring_hash_map);
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
	    ring_hash_map);
}
1046 
/* On ipq8074-family hardware each hw MAC has its own pdev: identity map. */
int
qwz_hw_mac_id_to_pdev_id_ipq8074(struct ath12k_hw_params *hw, int mac_id)
{
	return mac_id;
}
1052 
/* ipq8074-family: every MAC uses SRNG index 0. */
int
qwz_hw_mac_id_to_srng_id_ipq8074(struct ath12k_hw_params *hw, int mac_id)
{
	return 0;
}
1058 
/* qca6390-family: single pdev regardless of MAC id. */
int
qwz_hw_mac_id_to_pdev_id_qca6390(struct ath12k_hw_params *hw, int mac_id)
{
	return 0;
}
1064 
/* qca6390-family: SRNG index follows the MAC id directly. */
int
qwz_hw_mac_id_to_srng_id_qca6390(struct ath12k_hw_params *hw, int mac_id)
{
	return mac_id;
}
1070 
1071 int
1072 qwz_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
1073 {
1074 	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
1075 	    le32toh(desc->u.ipq8074.msdu_end.info2));
1076 }
1077 
1078 uint8_t
1079 qwz_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
1080 {
1081 	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
1082 	    le32toh(desc->u.ipq8074.msdu_end.info2));
1083 }
1084 
/* Pointer to the hdr_status area of the ipq8074 descriptor layout. */
uint8_t *
qwz_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
{
	return desc->u.ipq8074.hdr_status;
}
1090 
1091 int
1092 qwz_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
1093 {
1094 	return le32toh(desc->u.ipq8074.mpdu_start.info1) &
1095 	       RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
1096 }
1097 
1098 uint32_t
1099 qwz_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
1100 {
1101 	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
1102 	    le32toh(desc->u.ipq8074.mpdu_start.info2));
1103 }
1104 
1105 uint8_t
1106 qwz_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
1107 {
1108 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
1109 	    le32toh(desc->u.ipq8074.msdu_start.info2));
1110 }
1111 
1112 uint8_t
1113 qwz_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
1114 {
1115 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
1116 	    le32toh(desc->u.ipq8074.msdu_start.info2));
1117 }
1118 
1119 int
1120 qwz_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
1121 {
1122 	return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
1123 	    le32toh(desc->u.ipq8074.msdu_start.info2));
1124 }
1125 
1126 int
1127 qwz_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
1128 {
1129 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
1130 	      le32toh(desc->u.ipq8074.mpdu_start.info1));
1131 }
1132 
1133 int
1134 qwz_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
1135 {
1136 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
1137 	      le32toh(desc->u.ipq8074.mpdu_start.info1));
1138 }
1139 
1140 uint16_t
1141 qwz_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
1142 {
1143 	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
1144 	    le32toh(desc->u.ipq8074.mpdu_start.info1));
1145 }
1146 
1147 uint16_t
1148 qwz_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
1149 {
1150 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
1151 	    le32toh(desc->u.ipq8074.msdu_start.info1));
1152 }
1153 
1154 uint8_t
1155 qwz_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
1156 {
1157 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
1158 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1159 }
1160 
1161 uint8_t
1162 qwz_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
1163 {
1164 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
1165 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1166 }
1167 
1168 uint8_t
1169 qwz_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
1170 {
1171 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
1172 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1173 }
1174 
/* PHY metadata word; carries the receive frequency per the caller's use. */
uint32_t
qwz_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
{
	return le32toh(desc->u.ipq8074.msdu_start.phy_meta_data);
}
1180 
1181 uint8_t
1182 qwz_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
1183 {
1184 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
1185 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1186 }
1187 
1188 uint8_t
1189 qwz_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
1190 {
1191 	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
1192 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1193 }
1194 
1195 uint8_t
1196 qwz_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
1197 {
1198 	return FIELD_GET(RX_MPDU_START_INFO2_TID,
1199 	    le32toh(desc->u.ipq8074.mpdu_start.info2));
1200 }
1201 
/* Software peer id assigned by firmware for this MPDU's transmitter. */
uint16_t
qwz_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
{
	return le16toh(desc->u.ipq8074.mpdu_start.sw_peer_id);
}
1207 
1208 void
1209 qwz_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
1210 				       struct hal_rx_desc *ldesc)
1211 {
1212 	memcpy((uint8_t *)&fdesc->u.ipq8074.msdu_end, (uint8_t *)&ldesc->u.ipq8074.msdu_end,
1213 	       sizeof(struct rx_msdu_end_ipq8074));
1214 	memcpy((uint8_t *)&fdesc->u.ipq8074.attention, (uint8_t *)&ldesc->u.ipq8074.attention,
1215 	       sizeof(struct rx_attention));
1216 	memcpy((uint8_t *)&fdesc->u.ipq8074.mpdu_end, (uint8_t *)&ldesc->u.ipq8074.mpdu_end,
1217 	       sizeof(struct rx_mpdu_end));
1218 }
1219 
1220 uint32_t
1221 qwz_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
1222 {
1223 	return FIELD_GET(HAL_TLV_HDR_TAG,
1224 	    le32toh(desc->u.ipq8074.mpdu_start_tag));
1225 }
1226 
/* 16-bit PHY PPDU id, widened to the uint32_t hw-ops return type. */
uint32_t
qwz_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
{
	return le16toh(desc->u.ipq8074.mpdu_start.phy_ppdu_id);
}
1232 
1233 void
1234 qwz_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
1235 {
1236 	uint32_t info = le32toh(desc->u.ipq8074.msdu_start.info1);
1237 
1238 	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
1239 	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
1240 
1241 	desc->u.ipq8074.msdu_start.info1 = htole32(info);
1242 }
1243 
/* Chip-independent wrapper: dispatch first-MSDU query via hw ops. */
int
qwz_dp_rx_h_msdu_end_first_msdu(struct qwz_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}
1249 
1250 int
1251 qwz_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
1252 {
1253 	return le32toh(desc->u.ipq8074.mpdu_start.info1) &
1254 	       RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
1255 }
1256 
/* Pointer to the addr2 bytes stored in the ipq8074 mpdu_start TLV. */
uint8_t *
qwz_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
{
	return desc->u.ipq8074.mpdu_start.addr2;
}
1262 
/* Pointer to the attention TLV of the ipq8074 descriptor layout. */
struct rx_attention *
qwz_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
{
	return &desc->u.ipq8074.attention;
}
1268 
1269 uint8_t *
1270 qwz_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
1271 {
1272 	return &desc->u.ipq8074.msdu_payload[0];
1273 }
1274 
1275 int
1276 qwz_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
1277 {
1278 	return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU,
1279 	      le16toh(desc->u.qcn9074.msdu_end.info4));
1280 }
1281 
1282 int
1283 qwz_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
1284 {
1285 	return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU,
1286 	      le16toh(desc->u.qcn9074.msdu_end.info4));
1287 }
1288 
1289 uint8_t
1290 qwz_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
1291 {
1292 	return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING,
1293 	    le16toh(desc->u.qcn9074.msdu_end.info4));
1294 }
1295 
/* Pointer to the hdr_status area of the qcn9074 descriptor layout. */
uint8_t *
qwz_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
{
	return desc->u.qcn9074.hdr_status;
}
1301 
1302 int
1303 qwz_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
1304 {
1305 	return le32toh(desc->u.qcn9074.mpdu_start.info11) &
1306 	       RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID;
1307 }
1308 
1309 uint32_t
1310 qwz_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
1311 {
1312 	return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE,
1313 	    le32toh(desc->u.qcn9074.mpdu_start.info9));
1314 }
1315 
1316 uint8_t
1317 qwz_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
1318 {
1319 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
1320 	    le32toh(desc->u.qcn9074.msdu_start.info2));
1321 }
1322 
1323 uint8_t
1324 qwz_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
1325 {
1326 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
1327 	    le32toh(desc->u.qcn9074.msdu_start.info2));
1328 }
1329 
1330 int
1331 qwz_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
1332 {
1333 	return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
1334 	    le32toh(desc->u.qcn9074.msdu_start.info2));
1335 }
1336 
1337 int
1338 qwz_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
1339 {
1340 	return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
1341 	      le32toh(desc->u.qcn9074.mpdu_start.info11));
1342 }
1343 
1344 int
1345 qwz_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
1346 {
1347 	return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID,
1348 	      le32toh(desc->u.qcn9074.mpdu_start.info11));
1349 }
1350 
1351 uint16_t
1352 qwz_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
1353 {
1354 	return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM,
1355 	    le32toh(desc->u.qcn9074.mpdu_start.info11));
1356 }
1357 
1358 uint16_t
1359 qwz_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
1360 {
1361 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
1362 	    le32toh(desc->u.qcn9074.msdu_start.info1));
1363 }
1364 
1365 uint8_t
1366 qwz_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
1367 {
1368 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
1369 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1370 }
1371 
1372 uint8_t
1373 qwz_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
1374 {
1375 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
1376 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1377 }
1378 
1379 uint8_t
1380 qwz_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
1381 {
1382 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
1383 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1384 }
1385 
/* PHY metadata word; carries the receive frequency per the caller's use. */
uint32_t
qwz_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
{
	return le32toh(desc->u.qcn9074.msdu_start.phy_meta_data);
}
1391 
1392 uint8_t
1393 qwz_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
1394 {
1395 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
1396 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1397 }
1398 
1399 uint8_t
1400 qwz_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
1401 {
1402 	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
1403 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1404 }
1405 
1406 uint8_t
1407 qwz_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
1408 {
1409 	return FIELD_GET(RX_MPDU_START_INFO9_TID,
1410 	    le32toh(desc->u.qcn9074.mpdu_start.info9));
1411 }
1412 
/* Software peer id assigned by firmware for this MPDU's transmitter. */
uint16_t
qwz_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
{
	return le16toh(desc->u.qcn9074.mpdu_start.sw_peer_id);
}
1418 
1419 void
1420 qwz_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
1421 				       struct hal_rx_desc *ldesc)
1422 {
1423 	memcpy((uint8_t *)&fdesc->u.qcn9074.msdu_end, (uint8_t *)&ldesc->u.qcn9074.msdu_end,
1424 	       sizeof(struct rx_msdu_end_qcn9074));
1425 	memcpy((uint8_t *)&fdesc->u.qcn9074.attention, (uint8_t *)&ldesc->u.qcn9074.attention,
1426 	       sizeof(struct rx_attention));
1427 	memcpy((uint8_t *)&fdesc->u.qcn9074.mpdu_end, (uint8_t *)&ldesc->u.qcn9074.mpdu_end,
1428 	       sizeof(struct rx_mpdu_end));
1429 }
1430 
1431 uint32_t
1432 qwz_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
1433 {
1434 	return FIELD_GET(HAL_TLV_HDR_TAG,
1435 	    le32toh(desc->u.qcn9074.mpdu_start_tag));
1436 }
1437 
/* 16-bit PHY PPDU id, widened to the uint32_t hw-ops return type. */
uint32_t
qwz_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
{
	return le16toh(desc->u.qcn9074.mpdu_start.phy_ppdu_id);
}
1443 
1444 void
1445 qwz_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
1446 {
1447 	uint32_t info = le32toh(desc->u.qcn9074.msdu_start.info1);
1448 
1449 	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
1450 	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
1451 
1452 	desc->u.qcn9074.msdu_start.info1 = htole32(info);
1453 }
1454 
/* Pointer to the attention TLV of the qcn9074 descriptor layout. */
struct rx_attention *
qwz_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc)
{
	return &desc->u.qcn9074.attention;
}
1460 
1461 uint8_t *
1462 qwz_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
1463 {
1464 	return &desc->u.qcn9074.msdu_payload[0];
1465 }
1466 
1467 int
1468 qwz_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
1469 {
1470 	return le32toh(desc->u.qcn9074.mpdu_start.info11) &
1471 	       RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
1472 }
1473 
/* Pointer to the addr2 bytes stored in the qcn9074 mpdu_start TLV. */
uint8_t *
qwz_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
{
	return desc->u.qcn9074.mpdu_start.addr2;
}
1479 
1480 int
1481 qwz_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
1482 {
1483 	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
1484 	      le32toh(desc->u.wcn6855.msdu_end.info2));
1485 }
1486 
1487 int
1488 qwz_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
1489 {
1490 	return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU_WCN6855,
1491 	      le32toh(desc->u.wcn6855.msdu_end.info2));
1492 }
1493 
1494 uint8_t
1495 qwz_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
1496 {
1497 	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
1498 	    le32toh(desc->u.wcn6855.msdu_end.info2));
1499 }
1500 
/* Pointer to the hdr_status area of the wcn6855 descriptor layout. */
uint8_t *
qwz_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
{
	return desc->u.wcn6855.hdr_status;
}
1506 
1507 int
1508 qwz_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
1509 {
1510 	return le32toh(desc->u.wcn6855.mpdu_start.info1) &
1511 	       RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
1512 }
1513 
1514 uint32_t
1515 qwz_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
1516 {
1517 	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
1518 	    le32toh(desc->u.wcn6855.mpdu_start.info2));
1519 }
1520 
1521 uint8_t
1522 qwz_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc *desc)
1523 {
1524 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
1525 	    le32toh(desc->u.wcn6855.msdu_start.info2));
1526 }
1527 
1528 uint8_t
1529 qwz_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
1530 {
1531 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
1532 	    le32toh(desc->u.wcn6855.msdu_start.info2));
1533 }
1534 
1535 int
1536 qwz_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
1537 {
1538 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
1539 	      le32toh(desc->u.wcn6855.mpdu_start.info1));
1540 }
1541 
1542 int
1543 qwz_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
1544 {
1545 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
1546 	      le32toh(desc->u.wcn6855.mpdu_start.info1));
1547 }
1548 
1549 uint16_t
1550 qwz_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
1551 {
1552 	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
1553 	    le32toh(desc->u.wcn6855.mpdu_start.info1));
1554 }
1555 
1556 uint16_t
1557 qwz_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
1558 {
1559 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
1560 	    le32toh(desc->u.wcn6855.msdu_start.info1));
1561 }
1562 
1563 uint8_t
1564 qwz_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
1565 {
1566 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
1567 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1568 }
1569 
1570 uint8_t
1571 qwz_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
1572 {
1573 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
1574 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1575 }
1576 
1577 uint8_t
1578 qwz_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
1579 {
1580 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
1581 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1582 }
1583 
/* PHY metadata word; carries the receive frequency per the caller's use. */
uint32_t
qwz_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
{
	return le32toh(desc->u.wcn6855.msdu_start.phy_meta_data);
}
1589 
1590 uint8_t
1591 qwz_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
1592 {
1593 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
1594 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1595 }
1596 
1597 uint8_t
1598 qwz_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
1599 {
1600 	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
1601 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1602 }
1603 
1604 uint8_t
1605 qwz_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
1606 {
1607 	return FIELD_GET(RX_MPDU_START_INFO2_TID_WCN6855,
1608 	    le32toh(desc->u.wcn6855.mpdu_start.info2));
1609 }
1610 
/* Software peer id assigned by firmware for this MPDU's transmitter. */
uint16_t
qwz_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
{
	return le16toh(desc->u.wcn6855.mpdu_start.sw_peer_id);
}
1616 
1617 void
1618 qwz_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
1619     struct hal_rx_desc *ldesc)
1620 {
1621 	memcpy((uint8_t *)&fdesc->u.wcn6855.msdu_end, (uint8_t *)&ldesc->u.wcn6855.msdu_end,
1622 	       sizeof(struct rx_msdu_end_wcn6855));
1623 	memcpy((uint8_t *)&fdesc->u.wcn6855.attention, (uint8_t *)&ldesc->u.wcn6855.attention,
1624 	       sizeof(struct rx_attention));
1625 	memcpy((uint8_t *)&fdesc->u.wcn6855.mpdu_end, (uint8_t *)&ldesc->u.wcn6855.mpdu_end,
1626 	       sizeof(struct rx_mpdu_end));
1627 }
1628 
1629 uint32_t
1630 qwz_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
1631 {
1632 	return FIELD_GET(HAL_TLV_HDR_TAG,
1633 	    le32toh(desc->u.wcn6855.mpdu_start_tag));
1634 }
1635 
/* 16-bit PHY PPDU id, widened to the uint32_t hw-ops return type. */
uint32_t
qwz_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
{
	return le16toh(desc->u.wcn6855.mpdu_start.phy_ppdu_id);
}
1641 
1642 void
1643 qwz_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
1644 {
1645 	uint32_t info = le32toh(desc->u.wcn6855.msdu_start.info1);
1646 
1647 	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
1648 	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
1649 
1650 	desc->u.wcn6855.msdu_start.info1 = htole32(info);
1651 }
1652 
/* Pointer to the attention TLV of the wcn6855 descriptor layout. */
struct rx_attention *
qwz_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc *desc)
{
	return &desc->u.wcn6855.attention;
}
1658 
1659 uint8_t *
1660 qwz_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
1661 {
1662 	return &desc->u.wcn6855.msdu_payload[0];
1663 }
1664 
1665 int
1666 qwz_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
1667 {
1668 	return le32toh(desc->u.wcn6855.mpdu_start.info1) &
1669 	       RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
1670 }
1671 
/* Pointer to the addr2 bytes stored in the wcn6855 mpdu_start TLV. */
uint8_t *
qwz_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
{
	return desc->u.wcn6855.mpdu_start.addr2;
}
1677 
1678 /* Map from pdev index to hw mac index */
1679 uint8_t
1680 qwz_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
1681 {
1682 	switch (pdev_idx) {
1683 	case 0:
1684 		return 0;
1685 	case 1:
1686 		return 2;
1687 	case 2:
1688 		return 1;
1689 	default:
1690 		return ATH12K_INVALID_HW_MAC_ID;
1691 	}
1692 }
1693 
/* ipq6018-style mapping: pdev index equals hw mac index. */
uint8_t
qwz_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
{
	return pdev_idx;
}
1699 
1700 static inline int
1701 qwz_hw_get_mac_from_pdev_id(struct qwz_softc *sc, int pdev_idx)
1702 {
1703 	if (sc->hw_params.hw_ops->get_hw_mac_from_pdev_id)
1704 		return sc->hw_params.hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
1705 
1706 	return 0;
1707 }
1708 
/*
 * hw ops for WCN7850: reuses the ipq6018 identity pdev->mac map and the
 * qca6390-style single-pdev mac id helpers defined above.
 */
const struct ath12k_hw_ops wcn7850_ops = {
	.get_hw_mac_from_pdev_id = qwz_hw_ipq6018_mac_from_pdev_id,
	.mac_id_to_pdev_id = qwz_hw_mac_id_to_pdev_id_qca6390,
	.mac_id_to_srng_id = qwz_hw_mac_id_to_srng_id_qca6390,
};
1714 
/*
 * Per-ring-instance mask bits used to build the ath12k_hw_ring_mask
 * tables below.  NOTE(review): the TX masks are spelled with BIT()
 * while the rest use literal hex; the values are equivalent.
 */
#define ATH12K_TX_RING_MASK_0 BIT(0)
#define ATH12K_TX_RING_MASK_1 BIT(1)
#define ATH12K_TX_RING_MASK_2 BIT(2)
#define ATH12K_TX_RING_MASK_3 BIT(3)
#define ATH12K_TX_RING_MASK_4 BIT(4)

#define ATH12K_RX_RING_MASK_0 0x1
#define ATH12K_RX_RING_MASK_1 0x2
#define ATH12K_RX_RING_MASK_2 0x4
#define ATH12K_RX_RING_MASK_3 0x8

#define ATH12K_RX_ERR_RING_MASK_0 0x1

#define ATH12K_RX_WBM_REL_RING_MASK_0 0x1

#define ATH12K_REO_STATUS_RING_MASK_0 0x1

#define ATH12K_RXDMA2HOST_RING_MASK_0 0x1
#define ATH12K_RXDMA2HOST_RING_MASK_1 0x2
#define ATH12K_RXDMA2HOST_RING_MASK_2 0x4

#define ATH12K_HOST2RXDMA_RING_MASK_0 0x1
#define ATH12K_HOST2RXDMA_RING_MASK_1 0x2
#define ATH12K_HOST2RXDMA_RING_MASK_2 0x4

#define ATH12K_RX_MON_STATUS_RING_MASK_0 0x1
#define ATH12K_RX_MON_STATUS_RING_MASK_1 0x2
#define ATH12K_RX_MON_STATUS_RING_MASK_2 0x4
1743 
/*
 * DP ring masks for ipq8074: each array slot corresponds to an
 * interrupt group; a set bit selects the ring instance(s) serviced by
 * that group.  Leading zero slots keep a ring type off the earlier
 * groups.  NOTE(review): group assignment inferred from the table
 * shape -- confirm against the driver's interrupt setup.
 */
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_ipq8074 = {
	.tx  = {
		ATH12K_TX_RING_MASK_0,
		ATH12K_TX_RING_MASK_1,
		ATH12K_TX_RING_MASK_2,
	},
	.rx_mon_status = {
		0, 0, 0, 0,
		ATH12K_RX_MON_STATUS_RING_MASK_0,
		ATH12K_RX_MON_STATUS_RING_MASK_1,
		ATH12K_RX_MON_STATUS_RING_MASK_2,
	},
	.rx = {
		0, 0, 0, 0, 0, 0, 0,
		ATH12K_RX_RING_MASK_0,
		ATH12K_RX_RING_MASK_1,
		ATH12K_RX_RING_MASK_2,
		ATH12K_RX_RING_MASK_3,
	},
	.rx_err = {
		ATH12K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		ATH12K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		ATH12K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		ATH12K_RXDMA2HOST_RING_MASK_0,
		ATH12K_RXDMA2HOST_RING_MASK_1,
		ATH12K_RXDMA2HOST_RING_MASK_2,
	},
	.host2rxdma = {
		ATH12K_HOST2RXDMA_RING_MASK_0,
		ATH12K_HOST2RXDMA_RING_MASK_1,
		ATH12K_HOST2RXDMA_RING_MASK_2,
	},
	.tx_mon_dest = {
	},
};
1785 
/*
 * DP ring masks for qca6390; differs from ipq8074 mainly in using a
 * single TX ring and no host2rxdma groups.
 */
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qca6390 = {
	.tx  = {
		ATH12K_TX_RING_MASK_0,
	},
	.rx_mon_status = {
		0, 0, 0, 0,
		ATH12K_RX_MON_STATUS_RING_MASK_0,
		ATH12K_RX_MON_STATUS_RING_MASK_1,
		ATH12K_RX_MON_STATUS_RING_MASK_2,
	},
	.rx = {
		0, 0, 0, 0, 0, 0, 0,
		ATH12K_RX_RING_MASK_0,
		ATH12K_RX_RING_MASK_1,
		ATH12K_RX_RING_MASK_2,
		ATH12K_RX_RING_MASK_3,
	},
	.rx_err = {
		ATH12K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		ATH12K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		ATH12K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		ATH12K_RXDMA2HOST_RING_MASK_0,
		ATH12K_RXDMA2HOST_RING_MASK_1,
		ATH12K_RXDMA2HOST_RING_MASK_2,
	},
	.host2rxdma = {
	},
	.tx_mon_dest = {
	},
};
1822 
/*
 * DP ring masks for qcn9074; error/status rings are handled by later
 * interrupt groups (leading zero slots) than on ipq8074.
 */
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9074 = {
	.tx  = {
		ATH12K_TX_RING_MASK_0,
		ATH12K_TX_RING_MASK_1,
		ATH12K_TX_RING_MASK_2,
	},
	.rx_mon_status = {
		0, 0, 0,
		ATH12K_RX_MON_STATUS_RING_MASK_0,
		ATH12K_RX_MON_STATUS_RING_MASK_1,
		ATH12K_RX_MON_STATUS_RING_MASK_2,
	},
	.rx = {
		0, 0, 0, 0,
		ATH12K_RX_RING_MASK_0,
		ATH12K_RX_RING_MASK_1,
		ATH12K_RX_RING_MASK_2,
		ATH12K_RX_RING_MASK_3,
	},
	.rx_err = {
		0, 0, 0,
		ATH12K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		0, 0, 0,
		ATH12K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		0, 0, 0,
		ATH12K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		0, 0, 0,
		ATH12K_RXDMA2HOST_RING_MASK_0,
	},
	.host2rxdma = {
		0, 0, 0,
		ATH12K_HOST2RXDMA_RING_MASK_0,
	},
	.tx_mon_dest = {
	},
};
1865 
/*
 * DP ring masks for wcn6750; note TX uses ring instances 0, 2 and 4
 * (instances 1 and 3 are left unused by this chip's table).
 */
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn6750 = {
	.tx  = {
		ATH12K_TX_RING_MASK_0,
		0,
		ATH12K_TX_RING_MASK_2,
		0,
		ATH12K_TX_RING_MASK_4,
	},
	.rx_mon_status = {
		0, 0, 0, 0, 0, 0,
		ATH12K_RX_MON_STATUS_RING_MASK_0,
	},
	.rx = {
		0, 0, 0, 0, 0, 0, 0,
		ATH12K_RX_RING_MASK_0,
		ATH12K_RX_RING_MASK_1,
		ATH12K_RX_RING_MASK_2,
		ATH12K_RX_RING_MASK_3,
	},
	.rx_err = {
		0, ATH12K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		0, ATH12K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		0, ATH12K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		ATH12K_RXDMA2HOST_RING_MASK_0,
		ATH12K_RXDMA2HOST_RING_MASK_1,
		ATH12K_RXDMA2HOST_RING_MASK_2,
	},
	.host2rxdma = {
	},
	.tx_mon_dest = {
	},
};
1904 
/*
 * DP ring masks for wcn7850; TX rings 0/2/4 packed into the first
 * three groups, no monitor-status, rxdma2host or host2rxdma groups.
 */
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
	.tx  = {
		ATH12K_TX_RING_MASK_0,
		ATH12K_TX_RING_MASK_2,
		ATH12K_TX_RING_MASK_4,
	},
	.rx_mon_status = {
	},
	.rx = {
		0, 0, 0,
		ATH12K_RX_RING_MASK_0,
		ATH12K_RX_RING_MASK_1,
		ATH12K_RX_RING_MASK_2,
		ATH12K_RX_RING_MASK_3,
	},
	.rx_err = {
		ATH12K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		ATH12K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		ATH12K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
	},
	.host2rxdma = {
	},
	.tx_mon_dest = {
	},
};
1936 
/*
 * Target firmware's Copy Engine configuration.
 * Entries are indexed by CE pipe number; all fields are stored
 * little-endian since this table is handed to the target as-is.
 */
const struct ce_pipe_config ath12k_target_ce_config_wlan_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT; interrupts disabled on this pipe */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(0),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(65535),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(65535),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE9 host->target HTT */
	{
		.pipenum = htole32(9),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = htole32(10),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE11 Not used */
};
2051 
/* Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 * Entries are little-endian; the zeroed terminator entry marks the end.
 */
const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_ipq8074[] = {
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	/* Per-MAC WMI control services use dedicated host->target pipes
	 * (CE7 for MAC1, CE9 for MAC2) but share CE2 for the downlink. */
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(7),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(9),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{ /* not used */
		.service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{ /* not used */
		.service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(4),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_PKT_LOG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(5),
	},

	/* (Additions here) */

	{ /* terminator entry */ }
};
2167 
/*
 * Map from HTC service/endpoint to Copy Engine pipe for IPQ6018-class
 * targets.  Presumably passed to the target at startup for use by
 * firmware, like the identically-structured tables below -- confirm at
 * the call site.  Fields are little-endian (htole32) since the table
 * is consumed by firmware.  The array is terminated by an all-zero
 * entry.
 */
const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_ipq6018[] = {
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	/* Second MAC's WMI control uses a dedicated UL pipe (7). */
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(7),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{ /* not used */
		.service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{ /* not used */
		.service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(4),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH12K_HTC_SVC_ID_PKT_LOG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(5),
	},

	/* (Additions here) */

	{ /* terminator entry */ }
};
2269 
/* Target firmware's Copy Engine configuration.
 * One entry per CE pipe, in pipe-number order; fields are
 * little-endian (htole32) since the table is consumed by firmware.
 */
const struct ce_pipe_config ath12k_target_ce_config_wlan_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT; many small descriptors, interrupts off */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},
	/* CE 9, 10, 11 are used by MHI driver */
};
2363 
2364 /* Map from service/endpoint to Copy Engine.
2365  * This table is derived from the CE_PCI TABLE, above.
2366  * It is passed to the Target at startup for use by firmware.
2367  */
2368 const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_qca6390[] = {
2369 	{
2370 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
2371 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2372 		htole32(3),
2373 	},
2374 	{
2375 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
2376 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2377 		htole32(2),
2378 	},
2379 	{
2380 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
2381 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2382 		htole32(3),
2383 	},
2384 	{
2385 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
2386 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2387 		htole32(2),
2388 	},
2389 	{
2390 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
2391 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2392 		htole32(3),
2393 	},
2394 	{
2395 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
2396 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2397 		htole32(2),
2398 	},
2399 	{
2400 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
2401 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2402 		htole32(3),
2403 	},
2404 	{
2405 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
2406 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2407 		htole32(2),
2408 	},
2409 	{
2410 		htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
2411 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2412 		htole32(3),
2413 	},
2414 	{
2415 		htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
2416 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2417 		htole32(2),
2418 	},
2419 	{
2420 		htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
2421 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2422 		htole32(0),
2423 	},
2424 	{
2425 		htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
2426 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2427 		htole32(2),
2428 	},
2429 	{
2430 		htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
2431 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2432 		htole32(4),
2433 	},
2434 	{
2435 		htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
2436 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2437 		htole32(1),
2438 	},
2439 
2440 	/* (Additions here) */
2441 
2442 	{ /* must be last */
2443 		htole32(0),
2444 		htole32(0),
2445 		htole32(0),
2446 	},
2447 };
2448 
/* Target firmware's Copy Engine configuration.
 * One entry per CE pipe, in pipe-number order; fields are
 * little-endian (htole32) since the table is consumed by firmware.
 * Entries are identical to the QCA6390 table above.
 */
const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT; many small descriptors, interrupts off */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},
	/* CE 9, 10, 11 are used by MHI driver */
};
2542 
2543 /* Map from service/endpoint to Copy Engine.
2544  * This table is derived from the CE_PCI TABLE, above.
2545  * It is passed to the Target at startup for use by firmware.
2546  */
2547 const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_qcn9074[] = {
2548 	{
2549 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
2550 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2551 		htole32(3),
2552 	},
2553 	{
2554 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
2555 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2556 		htole32(2),
2557 	},
2558 	{
2559 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
2560 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2561 		htole32(3),
2562 	},
2563 	{
2564 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
2565 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2566 		htole32(2),
2567 	},
2568 	{
2569 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
2570 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2571 		htole32(3),
2572 	},
2573 	{
2574 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
2575 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2576 		htole32(2),
2577 	},
2578 	{
2579 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
2580 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2581 		htole32(3),
2582 	},
2583 	{
2584 		htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
2585 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2586 		htole32(2),
2587 	},
2588 	{
2589 		htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
2590 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2591 		htole32(3),
2592 	},
2593 	{
2594 		htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
2595 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2596 		htole32(2),
2597 	},
2598 	{
2599 		htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
2600 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2601 		htole32(0),
2602 	},
2603 	{
2604 		htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
2605 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2606 		htole32(1),
2607 	},
2608 	{
2609 		htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
2610 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2611 		htole32(0),
2612 	},
2613 	{
2614 		htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
2615 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2616 		htole32(1),
2617 	},
2618 	{
2619 		htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
2620 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2621 		htole32(4),
2622 	},
2623 	{
2624 		htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
2625 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2626 		htole32(1),
2627 	},
2628 	{
2629 		htole32(ATH12K_HTC_SVC_ID_PKT_LOG),
2630 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2631 		htole32(5),
2632 	},
2633 
2634 	/* (Additions here) */
2635 
2636 	{ /* must be last */
2637 		htole32(0),
2638 		htole32(0),
2639 		htole32(0),
2640 	},
2641 };
2642 
/* Number of host-side Copy Engine pipes on IPQ8074-family hardware. */
#define QWZ_CE_COUNT_IPQ8074	21

/*
 * Host-side Copy Engine attributes for IPQ8074.
 * Only CE0..CE11 have initializers; the remaining entries up to
 * QWZ_CE_COUNT_IPQ8074 are implicitly zero-initialized.
 * A non-zero recv_cb marks a target->host pipe with RX handling.
 */
const struct ce_attr qwz_host_ce_config_ipq8074[QWZ_CE_COUNT_IPQ8074] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT; many small descriptors, interrupts off */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
2746 
/* Number of host-side Copy Engine pipes on QCA6390-family hardware. */
#define QWZ_CE_COUNT_QCA6390	9

/*
 * Host-side Copy Engine attributes for QCA6390.
 * A non-zero recv_cb marks a target->host pipe with RX handling.
 */
const struct ce_attr qwz_host_ce_config_qca6390[QWZ_CE_COUNT_QCA6390] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT; many small descriptors, interrupts off */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	/* NOTE(review): no CE_ATTR_DIS_INTR here, unlike the IPQ8074 and
	 * WCN7850 CE8 entries -- confirm against the reference driver. */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

};
2826 
/* Number of host-side Copy Engine pipes on QCN9074-family hardware. */
#define QWZ_CE_COUNT_QCN9074	6

/*
 * Host-side Copy Engine attributes for QCN9074 (CE0..CE5 only).
 * A non-zero recv_cb marks a target->host pipe with RX handling.
 */
const struct ce_attr qwz_host_ce_config_qcn9074[QWZ_CE_COUNT_QCN9074] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI; smaller RX ring than the other chips */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = qwz_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT; many small descriptors, interrupts off */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_dp_htt_htc_t2h_msg_handler,
	},
};
2881 
/*
 * Host-side Copy Engine attributes for WCN7850.  Same pipe count as
 * QCA6390 (hence the reuse of QWZ_CE_COUNT_QCA6390), but several
 * pipes are disabled: CE5 (pktlog) has no buffers at all, and CE7
 * has zero src entries with interrupts off.
 */
const struct ce_attr qwz_host_ce_config_wcn7850[QWZ_CE_COUNT_QCA6390] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwz_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = qwz_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT; many small descriptors, interrupts off */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog (unused on this chip: no buffers) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	/* NOTE(review): src_nentries is 0 here (pipe effectively unused),
	 * unlike the mac1 WMI pipes on the other chips. */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
2957 
/*
 * WCN7850: for each of the DP_TCL_NUM_RING_MAX TCL rings, the WBM
 * completion ring number and return-buffer-manager ID to use.  Note
 * the even ring numbers 0/2/4 with matching SW0/SW2/SW4 RBM IDs.
 */
static const struct ath12k_hal_tcl_to_wbm_rbm_map
ath12k_hal_wcn7850_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
	{
		.wbm_ring_num = 0,
		.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
	},
	{
		.wbm_ring_num = 2,
		.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
	},
	{
		.wbm_ring_num = 4,
		.rbm_id = HAL_RX_BUF_RBM_SW4_BM,
	},
};
2973 
/*
 * WCN7850 HAL parameters: default RX buffer return-buffer-manager,
 * and software cookie conversion enabled on WBM2SW rings 0, 2, 3, 4.
 */
static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
	.wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
};
2981 
/* WCN7850 HAL hooks: SRNG configuration constructor and the
 * TCL-to-WBM/RBM mapping table defined above. */
const struct hal_ops hal_wcn7850_ops = {
	.create_srng_config = qwz_hal_srng_create_config_wcn7850,
	.tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
};
2986 
/*
 * Per-chip parameter table.  Only WCN7850 hw2.0 is supported here;
 * the entry ties together the CE, service-map, HAL and register
 * tables defined above.
 */
static const struct ath12k_hw_params ath12k_hw_params[] = {
	{
		.name = "wcn7850 hw2.0",
		.hw_rev = ATH12K_HW_WCN7850_HW20,
		.fw = {
			.dir = "wcn7850-hw2.0",
			.board_size = 256 * 1024,
			.cal_offset = 256 * 1024,
		},
		.max_radios = 1,
		.internal_sleep_clock = true,
		.hw_ops = &wcn7850_ops,
		.ring_mask = &ath12k_hw_ring_mask_wcn7850,
		.regs = &wcn7850_regs,
		.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850,
		.host_ce_config = qwz_host_ce_config_wcn7850,
		/* WCN7850 reuses the 9-pipe QCA6390 host CE layout. */
		.ce_count = QWZ_CE_COUNT_QCA6390,
		.target_ce_config = ath12k_target_ce_config_wlan_qca6390,
		/* CE0..CE8 entries in the target CE table. */
		.target_ce_count = 9,
		.svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qca6390,
		/* Entries preceding the all-zero terminator in that map. */
		.svc_to_ce_map_len = 14,
		.rxdma1_enable = false,
		.num_rxmda_per_pdev = 2,
		.num_rxdma_dst_ring = 1,
		.credit_flow = true,
		.max_tx_ring = DP_TCL_NUM_RING_MAX,
		.htt_peer_map_v2 = false,
		.supports_shadow_regs = true,
		.fix_l1ss = false,
		.hal_params = &ath12k_hw_hal_params_wcn7850,
		.hal_ops = &hal_wcn7850_ops,
		.qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) |
					   BIT(CNSS_PCIE_PERST_NO_PULL_V01),
		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
	},
};
3023 
/*
 * WCN7850 hardware register offsets.  Pure data -- do not edit
 * without cross-checking against the reference driver's register
 * definitions for this chip.
 */
const struct ath12k_hw_regs wcn7850_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_id = 0x00000908,
	.hal_tcl1_ring_misc = 0x00000910,
	.hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
	.hal_tcl1_ring_tp_addr_msb = 0x00000920,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
	.hal_tcl1_ring_msi1_base_lsb = 0x00000948,
	.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
	.hal_tcl1_ring_msi1_data = 0x00000950,
	.hal_tcl_ring_base_lsb = 0x00000b58,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000d38,

	.hal_wbm_idle_ring_base_lsb = 0x00000d3c,
	.hal_wbm_idle_ring_misc_addr = 0x00000d4c,
	.hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
	.hal_wbm_r0_idle_list_size_addr = 0x00000244,
	.hal_wbm_scattered_ring_base_lsb = 0x00000250,
	.hal_wbm_scattered_ring_base_msb = 0x00000254,
	.hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
	.hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
	.hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
	.hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
	/* NOTE(review): written with 9 hex digits; value is 0x27c.
	 * Harmless, but inconsistent with the 8-digit style here. */
	.hal_wbm_scattered_desc_ptr_hp_addr = 0x00000027c,

	.hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
	.hal_wbm_sw1_release_ring_base_lsb = 0x00000284,
	.hal_wbm0_release_ring_base_lsb = 0x00000e08,
	.hal_wbm1_release_ring_base_lsb = 0x00000e80,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
	.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,

	/* PPE release ring address */
	.hal_ppe_rel_ring_base = 0x0000043c,

	/* REO DEST ring address */
	.hal_reo2_ring_base = 0x0000055c,
	.hal_reo1_misc_ctrl_addr = 0x00000b7c,
	.hal_reo1_sw_cookie_cfg0 = 0x00000050,
	.hal_reo1_sw_cookie_cfg1 = 0x00000054,
	.hal_reo1_qdesc_lut_base0 = 0x00000058,
	.hal_reo1_qdesc_lut_base1 = 0x0000005c,
	.hal_reo1_ring_base_lsb = 0x000004e4,
	.hal_reo1_ring_base_msb = 0x000004e8,
	.hal_reo1_ring_id = 0x000004ec,
	.hal_reo1_ring_misc = 0x000004f4,
	.hal_reo1_ring_hp_addr_lsb = 0x000004f8,
	.hal_reo1_ring_hp_addr_msb = 0x000004fc,
	.hal_reo1_ring_producer_int_setup = 0x00000508,
	.hal_reo1_ring_msi1_base_lsb = 0x0000052C,
	.hal_reo1_ring_msi1_base_msb = 0x00000530,
	.hal_reo1_ring_msi1_data = 0x00000534,
	.hal_reo1_aging_thres_ix0 = 0x00000b08,
	.hal_reo1_aging_thres_ix1 = 0x00000b0c,
	.hal_reo1_aging_thres_ix2 = 0x00000b10,
	.hal_reo1_aging_thres_ix3 = 0x00000b14,

	/* REO Exception ring address */
	.hal_reo2_sw0_ring_base = 0x000008a4,

	/* REO Reinject ring address */
	.hal_sw2reo_ring_base = 0x00000304,
	.hal_sw2reo1_ring_base = 0x0000037c,

	/* REO cmd ring address */
	.hal_reo_cmd_ring_base = 0x0000028c,

	/* REO status ring address */
	.hal_reo_status_ring_base = 0x00000a84,
};
3099 
/* Platform capability bit flags.  Names suggest they are reported to
 * firmware during the QMI handshake; usage is outside this chunk --
 * confirm at the call sites. */
#define QWZ_SLEEP_CLOCK_SELECT_INTERNAL_BIT	0x02
#define QWZ_HOST_CSTATE_BIT			0x04
#define QWZ_PLATFORM_CAP_PCIE_GLOBAL_RESET	0x08
3103 
/*
 * QMI TLV encode/decode description for the common response envelope
 * (struct qmi_response_type_v01): two signed 16-bit enums, "result"
 * and "error", followed by the QMI_EOTI end-of-table sentinel.
 */
static const struct qmi_elem_info qmi_response_type_v01_ei[] = {
	{
		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct qmi_response_type_v01, result),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct qmi_response_type_v01, error),
		.ei_array	= NULL,
	},
	{ /* end-of-table marker */
		.data_type	= QMI_EOTI,
		.elem_len	= 0,
		.elem_size	= 0,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= 0,
		.ei_array	= NULL,
	},
};
3133 
3134 static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
3135 	{
3136 		.data_type	= QMI_OPT_FLAG,
3137 		.elem_len	= 1,
3138 		.elem_size	= sizeof(uint8_t),
3139 		.array_type	= NO_ARRAY,
3140 		.tlv_type	= 0x10,
3141 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3142 					   fw_ready_enable_valid),
3143 	},
3144 	{
3145 		.data_type	= QMI_UNSIGNED_1_BYTE,
3146 		.elem_len	= 1,
3147 		.elem_size	= sizeof(uint8_t),
3148 		.array_type	= NO_ARRAY,
3149 		.tlv_type	= 0x10,
3150 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3151 					   fw_ready_enable),
3152 	},
3153 	{
3154 		.data_type	= QMI_OPT_FLAG,
3155 		.elem_len	= 1,
3156 		.elem_size	= sizeof(uint8_t),
3157 		.array_type	= NO_ARRAY,
3158 		.tlv_type	= 0x11,
3159 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3160 					   initiate_cal_download_enable_valid),
3161 	},
3162 	{
3163 		.data_type	= QMI_UNSIGNED_1_BYTE,
3164 		.elem_len	= 1,
3165 		.elem_size	= sizeof(uint8_t),
3166 		.array_type	= NO_ARRAY,
3167 		.tlv_type	= 0x11,
3168 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3169 					   initiate_cal_download_enable),
3170 	},
3171 	{
3172 		.data_type	= QMI_OPT_FLAG,
3173 		.elem_len	= 1,
3174 		.elem_size	= sizeof(uint8_t),
3175 		.array_type	= NO_ARRAY,
3176 		.tlv_type	= 0x12,
3177 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3178 					   initiate_cal_update_enable_valid),
3179 	},
3180 	{
3181 		.data_type	= QMI_UNSIGNED_1_BYTE,
3182 		.elem_len	= 1,
3183 		.elem_size	= sizeof(uint8_t),
3184 		.array_type	= NO_ARRAY,
3185 		.tlv_type	= 0x12,
3186 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3187 					   initiate_cal_update_enable),
3188 	},
3189 	{
3190 		.data_type	= QMI_OPT_FLAG,
3191 		.elem_len	= 1,
3192 		.elem_size	= sizeof(uint8_t),
3193 		.array_type	= NO_ARRAY,
3194 		.tlv_type	= 0x13,
3195 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3196 					   msa_ready_enable_valid),
3197 	},
3198 	{
3199 		.data_type	= QMI_UNSIGNED_1_BYTE,
3200 		.elem_len	= 1,
3201 		.elem_size	= sizeof(uint8_t),
3202 		.array_type	= NO_ARRAY,
3203 		.tlv_type	= 0x13,
3204 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3205 					   msa_ready_enable),
3206 	},
3207 	{
3208 		.data_type	= QMI_OPT_FLAG,
3209 		.elem_len	= 1,
3210 		.elem_size	= sizeof(uint8_t),
3211 		.array_type	= NO_ARRAY,
3212 		.tlv_type	= 0x14,
3213 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3214 					   pin_connect_result_enable_valid),
3215 	},
3216 	{
3217 		.data_type	= QMI_UNSIGNED_1_BYTE,
3218 		.elem_len	= 1,
3219 		.elem_size	= sizeof(uint8_t),
3220 		.array_type	= NO_ARRAY,
3221 		.tlv_type	= 0x14,
3222 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3223 					   pin_connect_result_enable),
3224 	},
3225 	{
3226 		.data_type	= QMI_OPT_FLAG,
3227 		.elem_len	= 1,
3228 		.elem_size	= sizeof(uint8_t),
3229 		.array_type	= NO_ARRAY,
3230 		.tlv_type	= 0x15,
3231 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3232 					   client_id_valid),
3233 	},
3234 	{
3235 		.data_type	= QMI_UNSIGNED_4_BYTE,
3236 		.elem_len	= 1,
3237 		.elem_size	= sizeof(uint32_t),
3238 		.array_type	= NO_ARRAY,
3239 		.tlv_type	= 0x15,
3240 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3241 					   client_id),
3242 	},
3243 	{
3244 		.data_type	= QMI_OPT_FLAG,
3245 		.elem_len	= 1,
3246 		.elem_size	= sizeof(uint8_t),
3247 		.array_type	= NO_ARRAY,
3248 		.tlv_type	= 0x16,
3249 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3250 					   request_mem_enable_valid),
3251 	},
3252 	{
3253 		.data_type	= QMI_UNSIGNED_1_BYTE,
3254 		.elem_len	= 1,
3255 		.elem_size	= sizeof(uint8_t),
3256 		.array_type	= NO_ARRAY,
3257 		.tlv_type	= 0x16,
3258 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3259 					   request_mem_enable),
3260 	},
3261 	{
3262 		.data_type	= QMI_OPT_FLAG,
3263 		.elem_len	= 1,
3264 		.elem_size	= sizeof(uint8_t),
3265 		.array_type	= NO_ARRAY,
3266 		.tlv_type	= 0x17,
3267 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3268 					   fw_mem_ready_enable_valid),
3269 	},
3270 	{
3271 		.data_type	= QMI_UNSIGNED_1_BYTE,
3272 		.elem_len	= 1,
3273 		.elem_size	= sizeof(uint8_t),
3274 		.array_type	= NO_ARRAY,
3275 		.tlv_type	= 0x17,
3276 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3277 					   fw_mem_ready_enable),
3278 	},
3279 	{
3280 		.data_type	= QMI_OPT_FLAG,
3281 		.elem_len	= 1,
3282 		.elem_size	= sizeof(uint8_t),
3283 		.array_type	= NO_ARRAY,
3284 		.tlv_type	= 0x18,
3285 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3286 					   fw_init_done_enable_valid),
3287 	},
3288 	{
3289 		.data_type	= QMI_UNSIGNED_1_BYTE,
3290 		.elem_len	= 1,
3291 		.elem_size	= sizeof(uint8_t),
3292 		.array_type	= NO_ARRAY,
3293 		.tlv_type	= 0x18,
3294 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3295 					   fw_init_done_enable),
3296 	},
3297 
3298 	{
3299 		.data_type	= QMI_OPT_FLAG,
3300 		.elem_len	= 1,
3301 		.elem_size	= sizeof(uint8_t),
3302 		.array_type	= NO_ARRAY,
3303 		.tlv_type	= 0x19,
3304 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3305 					   rejuvenate_enable_valid),
3306 	},
3307 	{
3308 		.data_type	= QMI_UNSIGNED_1_BYTE,
3309 		.elem_len	= 1,
3310 		.elem_size	= sizeof(uint8_t),
3311 		.array_type	= NO_ARRAY,
3312 		.tlv_type	= 0x19,
3313 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3314 					   rejuvenate_enable),
3315 	},
3316 	{
3317 		.data_type	= QMI_OPT_FLAG,
3318 		.elem_len	= 1,
3319 		.elem_size	= sizeof(uint8_t),
3320 		.array_type	= NO_ARRAY,
3321 		.tlv_type	= 0x1A,
3322 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3323 					   xo_cal_enable_valid),
3324 	},
3325 	{
3326 		.data_type	= QMI_UNSIGNED_1_BYTE,
3327 		.elem_len	= 1,
3328 		.elem_size	= sizeof(uint8_t),
3329 		.array_type	= NO_ARRAY,
3330 		.tlv_type	= 0x1A,
3331 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3332 					   xo_cal_enable),
3333 	},
3334 	{
3335 		.data_type	= QMI_OPT_FLAG,
3336 		.elem_len	= 1,
3337 		.elem_size	= sizeof(uint8_t),
3338 		.array_type	= NO_ARRAY,
3339 		.tlv_type	= 0x1B,
3340 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3341 					   cal_done_enable_valid),
3342 	},
3343 	{
3344 		.data_type	= QMI_UNSIGNED_1_BYTE,
3345 		.elem_len	= 1,
3346 		.elem_size	= sizeof(uint8_t),
3347 		.array_type	= NO_ARRAY,
3348 		.tlv_type	= 0x1B,
3349 		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
3350 					   cal_done_enable),
3351 	},
3352 	{
3353 		.data_type	= QMI_EOTI,
3354 		.array_type	= NO_ARRAY,
3355 		.tlv_type	= QMI_COMMON_TLV_TYPE,
3356 	},
3357 };
3358 
/*
 * TLV layout of the "indication register" response message
 * (struct qmi_wlanfw_ind_register_resp_msg_v01): mandatory result
 * code (TLV 0x02) plus an optional 64-bit fw_status (TLV 0x10).
 * Table is terminated by the QMI_EOTI sentinel entry.
 */
static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,	/* nested struct layout */
	},
	{
		/* presence flag for the optional fw_status TLV below */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   fw_status_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   fw_status),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3394 
/*
 * TLV layout of one wlfw_host_mlo_chip_info_s_v01 element: chip id,
 * number of local links, and two fixed-size per-link byte arrays.
 * tlv_type is 0 for all members because this struct is only ever
 * embedded inside another message (referenced via .ei_array).
 */
static const struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
	{
		.data_type      = QMI_UNSIGNED_1_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type       = 0,
		.offset         = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
					   chip_id),
	},
	{
		.data_type      = QMI_UNSIGNED_1_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type       = 0,
		.offset         = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
					   num_local_links),
	},
	{
		/* fixed-length array: one entry per possible MLO link */
		.data_type      = QMI_UNSIGNED_1_BYTE,
		.elem_len       = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
		.elem_size      = sizeof(uint8_t),
		.array_type     = STATIC_ARRAY,
		.tlv_type       = 0,
		.offset         = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
					   hw_link_id),
	},
	{
		.data_type      = QMI_UNSIGNED_1_BYTE,
		.elem_len       = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
		.elem_size      = sizeof(uint8_t),
		.array_type     = STATIC_ARRAY,
		.tlv_type       = 0,
		.offset         = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
					   valid_mlo_link_id),
	},
	{
		.data_type      = QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type       = QMI_COMMON_TLV_TYPE,
	},
};
3438 
3439 static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
3440 	{
3441 		.data_type	= QMI_OPT_FLAG,
3442 		.elem_len	= 1,
3443 		.elem_size	= sizeof(uint8_t),
3444 		.array_type	= NO_ARRAY,
3445 		.tlv_type	= 0x10,
3446 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3447 					   num_clients_valid),
3448 	},
3449 	{
3450 		.data_type	= QMI_UNSIGNED_4_BYTE,
3451 		.elem_len	= 1,
3452 		.elem_size	= sizeof(uint32_t),
3453 		.array_type	= NO_ARRAY,
3454 		.tlv_type	= 0x10,
3455 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3456 					   num_clients),
3457 	},
3458 	{
3459 		.data_type	= QMI_OPT_FLAG,
3460 		.elem_len	= 1,
3461 		.elem_size	= sizeof(uint8_t),
3462 		.array_type	= NO_ARRAY,
3463 		.tlv_type	= 0x11,
3464 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3465 					   wake_msi_valid),
3466 	},
3467 	{
3468 		.data_type	= QMI_UNSIGNED_4_BYTE,
3469 		.elem_len	= 1,
3470 		.elem_size	= sizeof(uint32_t),
3471 		.array_type	= NO_ARRAY,
3472 		.tlv_type	= 0x11,
3473 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3474 					   wake_msi),
3475 	},
3476 	{
3477 		.data_type	= QMI_OPT_FLAG,
3478 		.elem_len	= 1,
3479 		.elem_size	= sizeof(uint8_t),
3480 		.array_type	= NO_ARRAY,
3481 		.tlv_type	= 0x12,
3482 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3483 					   gpios_valid),
3484 	},
3485 	{
3486 		.data_type	= QMI_DATA_LEN,
3487 		.elem_len	= 1,
3488 		.elem_size	= sizeof(uint8_t),
3489 		.array_type	= NO_ARRAY,
3490 		.tlv_type	= 0x12,
3491 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3492 					   gpios_len),
3493 	},
3494 	{
3495 		.data_type	= QMI_UNSIGNED_4_BYTE,
3496 		.elem_len	= QMI_WLFW_MAX_NUM_GPIO_V01,
3497 		.elem_size	= sizeof(uint32_t),
3498 		.array_type	= VAR_LEN_ARRAY,
3499 		.tlv_type	= 0x12,
3500 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3501 					   gpios),
3502 	},
3503 	{
3504 		.data_type	= QMI_OPT_FLAG,
3505 		.elem_len	= 1,
3506 		.elem_size	= sizeof(uint8_t),
3507 		.array_type	= NO_ARRAY,
3508 		.tlv_type	= 0x13,
3509 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3510 					   nm_modem_valid),
3511 	},
3512 	{
3513 		.data_type	= QMI_UNSIGNED_1_BYTE,
3514 		.elem_len	= 1,
3515 		.elem_size	= sizeof(uint8_t),
3516 		.array_type	= NO_ARRAY,
3517 		.tlv_type	= 0x13,
3518 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3519 					   nm_modem),
3520 	},
3521 	{
3522 		.data_type	= QMI_OPT_FLAG,
3523 		.elem_len	= 1,
3524 		.elem_size	= sizeof(uint8_t),
3525 		.array_type	= NO_ARRAY,
3526 		.tlv_type	= 0x14,
3527 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3528 					   bdf_support_valid),
3529 	},
3530 	{
3531 		.data_type	= QMI_UNSIGNED_1_BYTE,
3532 		.elem_len	= 1,
3533 		.elem_size	= sizeof(uint8_t),
3534 		.array_type	= NO_ARRAY,
3535 		.tlv_type	= 0x14,
3536 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3537 					   bdf_support),
3538 	},
3539 	{
3540 		.data_type	= QMI_OPT_FLAG,
3541 		.elem_len	= 1,
3542 		.elem_size	= sizeof(uint8_t),
3543 		.array_type	= NO_ARRAY,
3544 		.tlv_type	= 0x15,
3545 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3546 					   bdf_cache_support_valid),
3547 	},
3548 	{
3549 		.data_type	= QMI_UNSIGNED_1_BYTE,
3550 		.elem_len	= 1,
3551 		.elem_size	= sizeof(uint8_t),
3552 		.array_type	= NO_ARRAY,
3553 		.tlv_type	= 0x15,
3554 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3555 					   bdf_cache_support),
3556 	},
3557 	{
3558 		.data_type	= QMI_OPT_FLAG,
3559 		.elem_len	= 1,
3560 		.elem_size	= sizeof(uint8_t),
3561 		.array_type	= NO_ARRAY,
3562 		.tlv_type	= 0x16,
3563 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3564 					   m3_support_valid),
3565 	},
3566 	{
3567 		.data_type	= QMI_UNSIGNED_1_BYTE,
3568 		.elem_len	= 1,
3569 		.elem_size	= sizeof(uint8_t),
3570 		.array_type	= NO_ARRAY,
3571 		.tlv_type	= 0x16,
3572 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3573 					   m3_support),
3574 	},
3575 	{
3576 		.data_type	= QMI_OPT_FLAG,
3577 		.elem_len	= 1,
3578 		.elem_size	= sizeof(uint8_t),
3579 		.array_type	= NO_ARRAY,
3580 		.tlv_type	= 0x17,
3581 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3582 					   m3_cache_support_valid),
3583 	},
3584 	{
3585 		.data_type	= QMI_UNSIGNED_1_BYTE,
3586 		.elem_len	= 1,
3587 		.elem_size	= sizeof(uint8_t),
3588 		.array_type	= NO_ARRAY,
3589 		.tlv_type	= 0x17,
3590 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3591 					   m3_cache_support),
3592 	},
3593 	{
3594 		.data_type	= QMI_OPT_FLAG,
3595 		.elem_len	= 1,
3596 		.elem_size	= sizeof(uint8_t),
3597 		.array_type	= NO_ARRAY,
3598 		.tlv_type	= 0x18,
3599 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3600 					   cal_filesys_support_valid),
3601 	},
3602 	{
3603 		.data_type	= QMI_UNSIGNED_1_BYTE,
3604 		.elem_len	= 1,
3605 		.elem_size	= sizeof(uint8_t),
3606 		.array_type	= NO_ARRAY,
3607 		.tlv_type	= 0x18,
3608 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3609 					   cal_filesys_support),
3610 	},
3611 	{
3612 		.data_type	= QMI_OPT_FLAG,
3613 		.elem_len	= 1,
3614 		.elem_size	= sizeof(uint8_t),
3615 		.array_type	= NO_ARRAY,
3616 		.tlv_type	= 0x19,
3617 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3618 					   cal_cache_support_valid),
3619 	},
3620 	{
3621 		.data_type	= QMI_UNSIGNED_1_BYTE,
3622 		.elem_len	= 1,
3623 		.elem_size	= sizeof(uint8_t),
3624 		.array_type	= NO_ARRAY,
3625 		.tlv_type	= 0x19,
3626 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3627 					   cal_cache_support),
3628 	},
3629 	{
3630 		.data_type	= QMI_OPT_FLAG,
3631 		.elem_len	= 1,
3632 		.elem_size	= sizeof(uint8_t),
3633 		.array_type	= NO_ARRAY,
3634 		.tlv_type	= 0x1A,
3635 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3636 					   cal_done_valid),
3637 	},
3638 	{
3639 		.data_type	= QMI_UNSIGNED_1_BYTE,
3640 		.elem_len	= 1,
3641 		.elem_size	= sizeof(uint8_t),
3642 		.array_type	= NO_ARRAY,
3643 		.tlv_type	= 0x1A,
3644 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3645 					   cal_done),
3646 	},
3647 	{
3648 		.data_type	= QMI_OPT_FLAG,
3649 		.elem_len	= 1,
3650 		.elem_size	= sizeof(uint8_t),
3651 		.array_type	= NO_ARRAY,
3652 		.tlv_type	= 0x1B,
3653 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3654 					   mem_bucket_valid),
3655 	},
3656 	{
3657 		.data_type	= QMI_UNSIGNED_4_BYTE,
3658 		.elem_len	= 1,
3659 		.elem_size	= sizeof(uint32_t),
3660 		.array_type	= NO_ARRAY,
3661 		.tlv_type	= 0x1B,
3662 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3663 					   mem_bucket),
3664 	},
3665 	{
3666 		.data_type	= QMI_OPT_FLAG,
3667 		.elem_len	= 1,
3668 		.elem_size	= sizeof(uint8_t),
3669 		.array_type	= NO_ARRAY,
3670 		.tlv_type	= 0x1C,
3671 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3672 					   mem_cfg_mode_valid),
3673 	},
3674 	{
3675 		.data_type	= QMI_UNSIGNED_1_BYTE,
3676 		.elem_len	= 1,
3677 		.elem_size	= sizeof(uint8_t),
3678 		.array_type	= NO_ARRAY,
3679 		.tlv_type	= 0x1C,
3680 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3681 					   mem_cfg_mode),
3682 	},
3683 	{
3684 		.data_type	= QMI_OPT_FLAG,
3685 		.elem_len	= 1,
3686 		.elem_size	= sizeof(uint8_t),
3687 		.array_type	= NO_ARRAY,
3688 		.tlv_type	= 0x1D,
3689 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3690 					   cal_duration_valid),
3691 	},
3692 	{
3693 		.data_type	= QMI_UNSIGNED_2_BYTE,
3694 		.elem_len	= 1,
3695 		.elem_size	= sizeof(uint16_t),
3696 		.array_type	= NO_ARRAY,
3697 		.tlv_type	= 0x1D,
3698 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3699 					   cal_duraiton),
3700 	},
3701 	{
3702 		.data_type	= QMI_OPT_FLAG,
3703 		.elem_len	= 1,
3704 		.elem_size	= sizeof(uint8_t),
3705 		.array_type	= NO_ARRAY,
3706 		.tlv_type	= 0x1E,
3707 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3708 					   platform_name_valid),
3709 	},
3710 	{
3711 		.data_type	= QMI_STRING,
3712 		.elem_len	= QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 + 1,
3713 		.elem_size	= sizeof(char),
3714 		.array_type	= NO_ARRAY,
3715 		.tlv_type	= 0x1E,
3716 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3717 					   platform_name),
3718 	},
3719 	{
3720 		.data_type	= QMI_OPT_FLAG,
3721 		.elem_len	= 1,
3722 		.elem_size	= sizeof(uint8_t),
3723 		.array_type	= NO_ARRAY,
3724 		.tlv_type	= 0x1F,
3725 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3726 					   ddr_range_valid),
3727 	},
3728 	{
3729 		.data_type	= QMI_STRUCT,
3730 		.elem_len	= QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01,
3731 		.elem_size	= sizeof(struct qmi_wlanfw_host_ddr_range),
3732 		.array_type	= STATIC_ARRAY,
3733 		.tlv_type	= 0x1F,
3734 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3735 					   ddr_range),
3736 	},
3737 	{
3738 		.data_type	= QMI_OPT_FLAG,
3739 		.elem_len	= 1,
3740 		.elem_size	= sizeof(uint8_t),
3741 		.array_type	= NO_ARRAY,
3742 		.tlv_type	= 0x20,
3743 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3744 					   host_build_type_valid),
3745 	},
3746 	{
3747 		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
3748 		.elem_len	= 1,
3749 		.elem_size	= sizeof(enum qmi_wlanfw_host_build_type),
3750 		.array_type	= NO_ARRAY,
3751 		.tlv_type	= 0x20,
3752 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3753 					   host_build_type),
3754 	},
3755 	{
3756 		.data_type	= QMI_OPT_FLAG,
3757 		.elem_len	= 1,
3758 		.elem_size	= sizeof(uint8_t),
3759 		.array_type	= NO_ARRAY,
3760 		.tlv_type	= 0x21,
3761 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3762 					   mlo_capable_valid),
3763 	},
3764 	{
3765 		.data_type	= QMI_UNSIGNED_1_BYTE,
3766 		.elem_len	= 1,
3767 		.elem_size	= sizeof(uint8_t),
3768 		.array_type	= NO_ARRAY,
3769 		.tlv_type	= 0x21,
3770 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3771 					   mlo_capable),
3772 	},
3773 	{
3774 		.data_type	= QMI_OPT_FLAG,
3775 		.elem_len	= 1,
3776 		.elem_size	= sizeof(uint8_t),
3777 		.array_type	= NO_ARRAY,
3778 		.tlv_type	= 0x22,
3779 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3780 					   mlo_chip_id_valid),
3781 	},
3782 	{
3783 		.data_type	= QMI_UNSIGNED_2_BYTE,
3784 		.elem_len	= 1,
3785 		.elem_size	= sizeof(uint16_t),
3786 		.array_type	= NO_ARRAY,
3787 		.tlv_type	= 0x22,
3788 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3789 					   mlo_chip_id),
3790 	},
3791 	{
3792 		.data_type	= QMI_OPT_FLAG,
3793 		.elem_len	= 1,
3794 		.elem_size	= sizeof(uint8_t),
3795 		.array_type	= NO_ARRAY,
3796 		.tlv_type	= 0x23,
3797 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3798 					   mlo_group_id_valid),
3799 	},
3800 	{
3801 		.data_type	= QMI_UNSIGNED_1_BYTE,
3802 		.elem_len	= 1,
3803 		.elem_size	= sizeof(uint8_t),
3804 		.array_type	= NO_ARRAY,
3805 		.tlv_type	= 0x23,
3806 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3807 					   mlo_group_id),
3808 	},
3809 	{
3810 		.data_type	= QMI_OPT_FLAG,
3811 		.elem_len	= 1,
3812 		.elem_size	= sizeof(uint8_t),
3813 		.array_type	= NO_ARRAY,
3814 		.tlv_type	= 0x24,
3815 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3816 					   max_mlo_peer_valid),
3817 	},
3818 	{
3819 		.data_type	= QMI_UNSIGNED_2_BYTE,
3820 		.elem_len	= 1,
3821 		.elem_size	= sizeof(uint8_t),
3822 		.array_type	= NO_ARRAY,
3823 		.tlv_type	= 0x24,
3824 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3825 					   max_mlo_peer),
3826 	},
3827 	{
3828 		.data_type	= QMI_OPT_FLAG,
3829 		.elem_len	= 1,
3830 		.elem_size	= sizeof(uint8_t),
3831 		.array_type	= NO_ARRAY,
3832 		.tlv_type	= 0x25,
3833 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3834 					   mlo_num_chips_valid),
3835 	},
3836 	{
3837 		.data_type	= QMI_UNSIGNED_1_BYTE,
3838 		.elem_len	= 1,
3839 		.elem_size	= sizeof(uint8_t),
3840 		.array_type	= NO_ARRAY,
3841 		.tlv_type	= 0x25,
3842 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3843 					   mlo_num_chips),
3844 	},
3845 	{
3846 		.data_type	= QMI_OPT_FLAG,
3847 		.elem_len	= 1,
3848 		.elem_size	= sizeof(uint8_t),
3849 		.array_type	= NO_ARRAY,
3850 		.tlv_type	= 0x26,
3851 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3852 					   mlo_chip_info_valid),
3853 	},
3854 	{
3855 		.data_type	= QMI_STRUCT,
3856 		.elem_len	= QMI_WLFW_MAX_NUM_MLO_CHIPS_V01,
3857 		.elem_size	= sizeof(struct wlfw_host_mlo_chip_info_s_v01),
3858 		.array_type	= STATIC_ARRAY,
3859 		.tlv_type	= 0x26,
3860 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3861 					   mlo_chip_info),
3862 		.ei_array	= wlfw_host_mlo_chip_info_s_v01_ei,
3863 	},
3864 	{
3865 		.data_type	= QMI_OPT_FLAG,
3866 		.elem_len	= 1,
3867 		.elem_size	= sizeof(uint8_t),
3868 		.array_type	= NO_ARRAY,
3869 		.tlv_type	= 0x27,
3870 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3871 					   feature_list_valid),
3872 	},
3873 	{
3874 		.data_type	= QMI_UNSIGNED_8_BYTE,
3875 		.elem_len	= 1,
3876 		.elem_size	= sizeof(uint64_t),
3877 		.array_type	= NO_ARRAY,
3878 		.tlv_type	= 0x27,
3879 		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
3880 					   feature_list),
3881 	},
3882 	{
3883 		.data_type	= QMI_EOTI,
3884 		.array_type	= NO_ARRAY,
3885 		.tlv_type	= QMI_COMMON_TLV_TYPE,
3886 	},
3887 };
3888 
/*
 * TLV layout of the "host capabilities" response: only the mandatory
 * QMI result code (TLV 0x02), then the EOTI terminator.
 */
static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3905 
/* The PHY-capabilities request carries no TLVs; table is just the terminator. */
static const struct qmi_elem_info qmi_wlanfw_phy_cap_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3913 
/*
 * TLV layout of the PHY-capabilities response: mandatory result code,
 * then optional num_phy (0x10), board_id (0x11) and
 * single_chip_mlo_support (0x13), each preceded by its presence flag.
 * NOTE(review): TLV 0x12 is not decoded here; this mirrors the upstream
 * driver table -- confirm against the firmware QMI definition.
 */
static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
					   num_phy_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
					   num_phy),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
					   board_id_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
					   board_id),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
					   single_chip_mlo_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		 = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
					    single_chip_mlo_support),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3984 
/*
 * TLV layout of one memory-configuration element
 * (struct qmi_wlanfw_mem_cfg_s_v01): 64-bit offset, 32-bit size and a
 * secure flag.  tlv_type 0 because it is only embedded via .ei_array.
 */
static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4016 
/*
 * TLV layout of one memory-segment element
 * (struct qmi_wlanfw_mem_seg_s_v01): size, type enum, and a
 * variable-length array of memory configs (mem_cfg_len gives the
 * count).  Embedded only; tlv_type 0 throughout.
 */
static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01,
				  size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
	},
	{
		/* element count for the mem_cfg variable-length array below */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
		.ei_array	= qmi_wlanfw_mem_cfg_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4058 
/*
 * TLV layout of the firmware's "request memory" indication: a
 * variable-length array of memory segments (TLV 0x01), count in
 * mem_seg_len, each segment decoded via qmi_wlanfw_mem_seg_s_v01_ei.
 */
static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
					   mem_seg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_seg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
					   mem_seg),
		.ei_array	= qmi_wlanfw_mem_seg_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4085 
/*
 * TLV layout of one memory-segment response element
 * (struct qmi_wlanfw_mem_seg_resp_s_v01): DMA address, size, type enum
 * and restore flag.  Embedded only; tlv_type 0 throughout.
 */
static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4125 
/*
 * TLV layout of the host's "respond memory" request (reply to the
 * firmware's request-memory indication): variable-length array of
 * segment responses in TLV 0x01, count in mem_seg_len.
 */
static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
					   mem_seg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
					   mem_seg),
		.ei_array	= qmi_wlanfw_mem_seg_resp_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4152 
/*
 * TLV layout of the "respond memory" response: mandatory result code
 * only (TLV 0x02).
 */
static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4170 
/* The firmware-capabilities request carries no TLVs; terminator only. */
static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4178 
/*
 * TLV layout of one RF chip-info element: 32-bit chip id and family.
 * Embedded only (tlv_type 0).
 */
static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
					   chip_id),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
					   chip_family),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4204 
/* TLV layout of one RF board-info element: a single 32-bit board id. */
static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
					   board_id),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4221 
/* TLV layout of one SoC-info element: a single 32-bit SoC id. */
static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4237 
/*
 * TLV layout of one device-memory info element: 64-bit start address
 * and 64-bit size.  Embedded only (tlv_type 0).
 */
static const struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_dev_mem_info_s_v01,
					   start),
	},
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_dev_mem_info_s_v01,
					   size),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4263 
/*
 * TLV layout of one firmware-version info element: 32-bit version and
 * a bounded build-timestamp string (max length + NUL terminator).
 */
static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
					   fw_version),
	},
	{
		.data_type	= QMI_STRING,
		.elem_len	= ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
					   fw_build_timestamp),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4289 
/*
 * TLV layout of the firmware capability response message
 * (struct qmi_wlanfw_cap_resp_msg_v01). TLV 0x02 carries the mandatory
 * QMI result code; every other element is optional and encoded as a
 * QMI_OPT_FLAG "<field>_valid" marker followed by the payload under the
 * same TLV id.
 */
static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
	{
		/* TLV 0x02: mandatory QMI result/error code. */
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* TLV 0x10: optional RF chip info. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   chip_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   chip_info),
		.ei_array	= qmi_wlanfw_rf_chip_info_s_v01_ei,
	},
	{
		/* TLV 0x11: optional board info. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   board_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   board_info),
		.ei_array	= qmi_wlanfw_rf_board_info_s_v01_ei,
	},
	{
		/* TLV 0x12: optional SoC info. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   soc_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_soc_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   soc_info),
		.ei_array	= qmi_wlanfw_soc_info_s_v01_ei,
	},
	{
		/* TLV 0x13: optional firmware version info. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_version_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_version_info),
		.ei_array	= qmi_wlanfw_fw_version_info_s_v01_ei,
	},
	{
		/* TLV 0x14: optional firmware build id string. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_build_id_valid),
	},
	{
		.data_type	= QMI_STRING,
		.elem_len	= ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_build_id),
	},
	{
		/* TLV 0x15: optional number of MACs. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   num_macs_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   num_macs),
	},
	{
		/* TLV 0x16: optional voltage in millivolts. */
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x16,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   voltage_mv_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x16,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   voltage_mv),
	},
	{
		/* TLV 0x17: optional time reference frequency in Hz. */
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x17,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   time_freq_hz_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x17,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   time_freq_hz),
	},
	{
		/* TLV 0x18: optional OTP version. */
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x18,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   otp_version_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x18,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   otp_version),
	},
	{
		/* TLV 0x19: optional EEPROM read timeout. */
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x19,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   eeprom_read_timeout_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x19,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   eeprom_read_timeout),
	},
	{
		/* TLV 0x1A: optional firmware capability bitmask. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_caps_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_caps),
	},
	{
		/* TLV 0x1B: optional regulatory-domain card chain capability. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   rd_card_chain_cap_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   rd_card_chain_cap),
	},
	{
		/* TLV 0x1C: optional device memory region table. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1C,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   dev_mem_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_dev_mem_info_s_v01),
		.array_type	= STATIC_ARRAY,
		.tlv_type	= 0x1C,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01, dev_mem),
		.ei_array	= qmi_wlanfw_dev_mem_info_s_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4543 
/*
 * TLV layout of the board-data-file download request
 * (struct qmi_wlanfw_bdf_download_req_msg_v01). TLV 0x01 is the mandatory
 * valid flag; the remaining TLVs are optional-flag/payload pairs describing
 * one segment of the board data file being transferred to firmware.
 */
static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
	{
		/* TLV 0x01: mandatory request-valid flag. */
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   valid),
	},
	{
		/* TLV 0x10: optional calibration file id. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   file_id_valid),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   file_id),
	},
	{
		/* TLV 0x11: optional total file size. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   total_size_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   total_size),
	},
	{
		/* TLV 0x12: optional segment id. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   seg_id_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   seg_id),
	},
	{
		/* TLV 0x13: optional segment payload (length + bytes). */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   data_valid),
	},
	{
		/* In-band length field for the variable-length data array. */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   data_len),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= QMI_WLANFW_MAX_DATA_SIZE_V01,
		.elem_size	= sizeof(uint8_t),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   data),
	},
	{
		/* TLV 0x14: optional end-of-transfer marker. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   end_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   end),
	},
	{
		/* TLV 0x15: optional board-data-file type. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   bdf_type_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   bdf_type),
	},

	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4678 
/*
 * TLV layout of the board-data-file download response: only the mandatory
 * QMI result code (TLV 0x02).
 */
static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4696 
/*
 * TLV layout of the M3 firmware info request: mandatory 64-bit DMA
 * address (TLV 0x01) and 32-bit size (TLV 0x02) of the M3 image.
 */
static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4720 
/*
 * TLV layout of the M3 firmware info response: only the mandatory QMI
 * result code (TLV 0x02).
 */
static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4737 
/*
 * TLV layout of the WLAN INI request: a single optional enablefwlog
 * flag/value pair under TLV 0x10.
 */
static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
					   enablefwlog_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
					   enablefwlog),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4763 
/*
 * TLV layout of the WLAN INI response: only the mandatory QMI result
 * code (TLV 0x02).
 */
static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4781 
/*
 * TLV layout of one copy-engine target pipe configuration entry
 * (struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01). Nested inside the WLAN
 * config request; all members share tlv_type 0 as struct members.
 */
static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_num),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   nentries),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   nbytes_max),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   flags),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4834 
/*
 * TLV layout of one copy-engine service-to-pipe mapping entry
 * (struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01). Nested inside the WLAN
 * config request.
 */
static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   service_id),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   pipe_num),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4869 
4870 static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
4871 	{
4872 		.data_type	= QMI_UNSIGNED_2_BYTE,
4873 		.elem_len	= 1,
4874 		.elem_size	= sizeof(uint16_t),
4875 		.array_type	= NO_ARRAY,
4876 		.tlv_type	= 0,
4877 		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
4878 	},
4879 	{
4880 		.data_type	= QMI_UNSIGNED_2_BYTE,
4881 		.elem_len	= 1,
4882 		.elem_size	= sizeof(uint16_t),
4883 		.array_type	= NO_ARRAY,
4884 		.tlv_type	= 0,
4885 		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
4886 					   offset),
4887 	},
4888 	{
4889 		.data_type	= QMI_EOTI,
4890 		.array_type	= QMI_COMMON_TLV_TYPE,
4891 	},
4892 };
4893 
/*
 * TLV layout of one shadow register v3 configuration entry: a single
 * 32-bit register address. Nested inside the WLAN config request.
 */
static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01,
					   addr),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4910 
/*
 * TLV layout of the WLAN mode request: a mandatory 32-bit mode (TLV 0x01)
 * plus an optional hw_debug flag/value pair (TLV 0x10).
 */
static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   mode),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   hw_debug_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   hw_debug),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4945 
/*
 * TLV layout of the WLAN mode response: only the mandatory QMI result
 * code (TLV 0x02).
 */
static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4963 
/*
 * TLV layout of the WLAN config request
 * (struct qmi_wlanfw_wlan_cfg_req_msg_v01). All elements are optional
 * flag/payload pairs: host version string (0x10), CE target pipe config
 * array (0x11), service-to-pipe map array (0x12), shadow register array
 * (0x13) and shadow register v3 array (0x17). Variable-length arrays
 * carry an in-band QMI_DATA_LEN count before their elements.
 */
static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
	{
		/* TLV 0x10: optional host version string. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version_valid),
	},
	{
		.data_type	= QMI_STRING,
		.elem_len	= QMI_WLANFW_MAX_STR_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version),
	},
	{
		/* TLV 0x11: optional CE target pipe configuration array. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_CE_V01,
		.elem_size	= sizeof(
				struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg),
		.ei_array	= qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
	},
	{
		/* TLV 0x12: optional service-to-pipe mapping array. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SVC_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg),
		.ei_array	= qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
	},
	{
		/* TLV 0x13: optional shadow register array. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg),
		.ei_array	= qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
	},
	{
		/* TLV 0x17: optional shadow register v3 array. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v3_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v3_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v3),
		.ei_array	= qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5102 
/*
 * TLV layout of the WLAN config response: only the mandatory QMI result
 * code (TLV 0x02).
 */
static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5119 
5120 int
5121 qwz_ce_intr(void *arg)
5122 {
5123 	struct qwz_ce_pipe *pipe = arg;
5124 	struct qwz_softc *sc = pipe->sc;
5125 
5126 	if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, sc->sc_flags) ||
5127 	    ((sc->msi_ce_irqmask & (1 << pipe->pipe_num)) == 0)) {
5128 		DPRINTF("%s: unexpected interrupt on pipe %d\n",
5129 		    __func__, pipe->pipe_num);
5130 		return 1;
5131 	}
5132 
5133 	return qwz_ce_per_engine_service(sc, pipe->pipe_num);
5134 }
5135 
5136 int
5137 qwz_ext_intr(void *arg)
5138 {
5139 	struct qwz_ext_irq_grp *irq_grp = arg;
5140 	struct qwz_softc *sc = irq_grp->sc;
5141 
5142 	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags)) {
5143 		DPRINTF("%s: unexpected interrupt for ext group %d\n",
5144 		    __func__, irq_grp->grp_id);
5145 		return 1;
5146 	}
5147 
5148 	return qwz_dp_service_srng(sc, irq_grp->grp_id);
5149 }
5150 
/*
 * Human-readable names for QMI element data types, used in diagnostic
 * printfs. Indexed by data_type value, so the order here must match the
 * numeric values of the QMI data type constants (QMI_EOTI == 0, etc.).
 */
static const char *qmi_data_type_name[QMI_NUM_DATA_TYPES] = {
	"EOTI",
	"OPT_FLAG",
	"DATA_LEN",
	"UNSIGNED_1_BYTE",
	"UNSIGNED_2_BYTE",
	"UNSIGNED_4_BYTE",
	"UNSIGNED_8_BYTE",
	"SIGNED_2_BYTE_ENUM",
	"SIGNED_4_BYTE_ENUM",
	"STRUCT",
	"STRING"
};
5164 
5165 const struct qmi_elem_info *
5166 qwz_qmi_decode_get_elem(const struct qmi_elem_info *ei, uint8_t elem_type)
5167 {
5168 	while (ei->data_type != QMI_EOTI && ei->tlv_type != elem_type)
5169 		ei++;
5170 
5171 	DNPRINTF(QWZ_D_QMI, "%s: found elem 0x%x data type 0x%x\n", __func__,
5172 	    ei->tlv_type, ei->data_type);
5173 	return ei;
5174 }
5175 
/*
 * Compute the minimum number of encoded bytes a QMI element of type
 * ei->data_type can occupy on the wire. Used to sanity-check received
 * TLV lengths before decoding. 'nested' is the current struct nesting
 * depth; it selects the in-band string length encoding and bounds
 * recursion diagnostics.
 */
size_t
qwz_qmi_decode_min_elem_size(const struct qmi_elem_info *ei, int nested)
{
	size_t min_size = 0;

	switch (ei->data_type) {
	case QMI_EOTI:
	case QMI_OPT_FLAG:
		/* Not part of the element payload; zero minimum size. */
		break;
	case QMI_DATA_LEN:
		/* Length prefix is 1 byte if elem_len == 1, else 2 bytes. */
		if (ei->elem_len == 1)
			min_size += sizeof(uint8_t);
		else
			min_size += sizeof(uint16_t);
		break;
	case QMI_UNSIGNED_1_BYTE:
	case QMI_UNSIGNED_2_BYTE:
	case QMI_UNSIGNED_4_BYTE:
	case QMI_UNSIGNED_8_BYTE:
	case QMI_SIGNED_2_BYTE_ENUM:
	case QMI_SIGNED_4_BYTE_ENUM:
		/* Fixed-size scalars: full payload is always present. */
		min_size += ei->elem_len * ei->elem_size;
		break;
	case QMI_STRUCT:
		/*
		 * NOTE(review): exceeding the nesting limit only prints a
		 * warning; the recursion below still proceeds.
		 */
		if (nested > 2) {
			printf("%s: QMI struct element 0x%x with "
			    "data type %s (0x%x) is nested too "
			    "deeply\n", __func__,
			    ei->tlv_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type);
		}
		/* Sum the minimum sizes of all members recursively. */
		ei = ei->ei_array;
		while (ei->data_type != QMI_EOTI) {
			min_size += qwz_qmi_decode_min_elem_size(ei,
			    nested + 1);
			ei++;
		}
		break;
	case QMI_STRING:
		min_size += 1;
		/* Strings nested in structs use an in-band length field. */
		if (nested) {
			if (ei->elem_len <= 0xff)
				min_size += sizeof(uint8_t);
			else
				min_size += sizeof(uint16_t);
		}
		break;
	default:
		printf("%s: unhandled data type 0x%x\n", __func__,
		    ei->data_type);
		break;
	}

	return min_size;
}
5233 
/*
 * Parse and validate one TLV header from a received QMI message.
 *
 * Reads the 1-byte element type and the element length (2 bytes, or
 * 1 byte for a QMI_DATA_LEN element with elem_len == 1) from 'input',
 * looks the type up in the element-info table 'ei0', and checks the
 * advertised length against both the element description and the
 * remaining input. On success returns 0 with *next_ei set to the
 * matching element info (the table's EOTI entry if the type is unknown)
 * and *actual_size set to the advertised payload length. Returns -1 on
 * malformed or truncated input. 'output_len' bounds the decode target
 * so the element cannot overrun the output struct.
 */
int
qwz_qmi_decode_tlv_hdr(struct qwz_softc *sc,
    const struct qmi_elem_info **next_ei, uint16_t *actual_size,
    size_t output_len, const struct qmi_elem_info *ei0,
    uint8_t *input, size_t input_len)
{
	uint8_t *p = input;
	size_t remain = input_len;
	uint8_t elem_type;
	uint16_t elem_size = 0;
	const struct qmi_elem_info *ei;

	*next_ei = NULL;
	*actual_size = 0;

	/* Need at least type (1 byte) + length (up to 2 bytes). */
	if (remain < 3) {
		printf("%s: QMI message TLV header too short\n",
		   sc->sc_dev.dv_xname);
		return -1;
	}
	elem_type = *p;
	p++;
	remain--;

	/*
	 * By relying on TLV type information we can skip over EIs which
	 * describe optional elements that have not been encoded.
	 * Such elements will be left at their default value (zero) in
	 * the decoded output struct.
	 * XXX We currently allow elements to appear in any order and
	 * we do not detect duplicates.
	 */
	ei = qwz_qmi_decode_get_elem(ei0, elem_type);

	DNPRINTF(QWZ_D_QMI,
	    "%s: decoding element 0x%x with data type %s (0x%x)\n",
	    __func__, elem_type, qmi_data_type_name[ei->data_type],
	    ei->data_type);

	if (remain < 2) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	/* DATA_LEN with elem_len == 1 uses a 1-byte length field. */
	if (ei->data_type == QMI_DATA_LEN && ei->elem_len == 1) {
		elem_size = p[0];
		p++;
		remain--;
	} else {
		/* Little-endian 16-bit length. */
		elem_size = (p[0] | (p[1] << 8));
		p += 2;
		remain -= 2;
	}

	*next_ei = ei;
	*actual_size = elem_size;

	/* Unknown element type: report success; caller can skip it. */
	if (ei->data_type == QMI_EOTI) {
		DNPRINTF(QWZ_D_QMI,
		    "%s: unrecognized QMI element type 0x%x size %u\n",
		    sc->sc_dev.dv_xname, elem_type, elem_size);
		return 0;
	}

	/*
	 * Is this an optional element which has been encoded?
	 * If so, use info about this optional element for verification.
	 */
	if (ei->data_type == QMI_OPT_FLAG)
		ei++;

	DNPRINTF(QWZ_D_QMI, "%s: ei->size %u, actual size %u\n", __func__,
	    ei->elem_size, *actual_size);

	switch (ei->data_type) {
	case QMI_UNSIGNED_1_BYTE:
	case QMI_UNSIGNED_2_BYTE:
	case QMI_UNSIGNED_4_BYTE:
	case QMI_UNSIGNED_8_BYTE:
	case QMI_SIGNED_2_BYTE_ENUM:
	case QMI_SIGNED_4_BYTE_ENUM:
		/* Fixed-size scalars must match the table exactly. */
		if (elem_size != ei->elem_size) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	case QMI_DATA_LEN:
		break;
	case QMI_STRING:
	case QMI_STRUCT:
		/* Variable-size elements must meet their minimum size. */
		if (elem_size < qwz_qmi_decode_min_elem_size(ei, 0)) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	default:
		printf("%s: unexpected QMI message element "
		    "data type 0x%x\n", sc->sc_dev.dv_xname,
		    ei->data_type);
		return -1;
	}

	/* The advertised payload must fit in the remaining input... */
	if (remain < elem_size) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	/* ...and the decoded value must fit in the output struct. */
	if (ei->offset + ei->elem_size > output_len) {
		printf("%s: QMI message element type 0x%x too large: %u\n",
		    sc->sc_dev.dv_xname, elem_type, ei->elem_size);
		return -1;
	}

	return 0;
}
5357 
5358 int
5359 qwz_qmi_decode_byte(void *output, const struct qmi_elem_info *ei, void *input)
5360 {
5361 	if (ei->elem_size != sizeof(uint8_t)) {
5362 		printf("%s: bad element size\n", __func__);
5363 		return -1;
5364 	}
5365 
5366 	DNPRINTF(QWZ_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
5367 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
5368 	memcpy(output, input, ei->elem_size);
5369 	return 0;
5370 }
5371 
5372 int
5373 qwz_qmi_decode_word(void *output, const struct qmi_elem_info *ei, void *input)
5374 {
5375 	if (ei->elem_size != sizeof(uint16_t)) {
5376 		printf("%s: bad element size\n", __func__);
5377 		return -1;
5378 	}
5379 
5380 	DNPRINTF(QWZ_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
5381 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
5382 	memcpy(output, input, ei->elem_size);
5383 	return 0;
5384 }
5385 
5386 int
5387 qwz_qmi_decode_dword(void *output, const struct qmi_elem_info *ei, void *input)
5388 {
5389 	if (ei->elem_size != sizeof(uint32_t)) {
5390 		printf("%s: bad element size\n", __func__);
5391 		return -1;
5392 	}
5393 
5394 	DNPRINTF(QWZ_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
5395 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
5396 	memcpy(output, input, ei->elem_size);
5397 	return 0;
5398 }
5399 
5400 int
5401 qwz_qmi_decode_qword(void *output, const struct qmi_elem_info *ei, void *input)
5402 {
5403 	if (ei->elem_size != sizeof(uint64_t)) {
5404 		printf("%s: bad element size\n", __func__);
5405 		return -1;
5406 	}
5407 
5408 	DNPRINTF(QWZ_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
5409 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
5410 	memcpy(output, input, ei->elem_size);
5411 	return 0;
5412 }
5413 
5414 int
5415 qwz_qmi_decode_datalen(struct qwz_softc *sc, size_t *used, uint32_t *datalen,
5416     void *output, size_t output_len, const struct qmi_elem_info *ei,
5417     uint8_t *input, uint16_t input_len)
5418 {
5419 	uint8_t *p = input;
5420 	size_t remain = input_len;
5421 
5422 	*datalen = 0;
5423 
5424 	DNPRINTF(QWZ_D_QMI, "%s: input: ", __func__);
5425 	for (int i = 0; i < input_len; i++) {
5426 		DNPRINTF(QWZ_D_QMI, " %02x", input[i]);
5427 	}
5428 	DNPRINTF(QWZ_D_QMI, "\n");
5429 
5430 	if (remain < ei->elem_size) {
5431 		printf("%s: QMI message too short: remain=%zu elem_size=%u\n", __func__, remain, ei->elem_size);
5432 		return -1;
5433 	}
5434 
5435 	switch (ei->elem_size) {
5436 	case sizeof(uint8_t):
5437 		*datalen = p[0];
5438 		break;
5439 	case sizeof(uint16_t):
5440 		*datalen = p[0] | (p[1] << 8);
5441 		break;
5442 	default:
5443 		printf("%s: bad datalen element size %u\n",
5444 		    sc->sc_dev.dv_xname, ei->elem_size);
5445 		return -1;
5446 
5447 	}
5448 	*used = ei->elem_size;
5449 
5450 	if (ei->offset + sizeof(*datalen) > output_len) {
5451 		printf("%s: QMI message element type 0x%x too large\n",
5452 		    sc->sc_dev.dv_xname, ei->tlv_type);
5453 		return -1;
5454 	}
5455 	memcpy(output + ei->offset, datalen, sizeof(*datalen));
5456 	return 0;
5457 }
5458 
5459 int
5460 qwz_qmi_decode_string(struct qwz_softc *sc, size_t *used_total,
5461     void *output, size_t output_len, const struct qmi_elem_info *ei,
5462     uint8_t *input, uint16_t input_len, uint16_t elem_size, int nested)
5463 {
5464 	uint8_t *p = input;
5465 	uint16_t len;
5466 	size_t remain = input_len;
5467 
5468 	*used_total = 0;
5469 
5470 	DNPRINTF(QWZ_D_QMI, "%s: input: ", __func__);
5471 	for (int i = 0; i < input_len; i++) {
5472 		DNPRINTF(QWZ_D_QMI, " %02x", input[i]);
5473 	}
5474 	DNPRINTF(QWZ_D_QMI, "\n");
5475 
5476 	if (nested) {
5477 		/* Strings nested in structs use an in-band length field. */
5478 		if (ei->elem_len <= 0xff) {
5479 			if (remain == 0) {
5480 				printf("%s: QMI string length header exceeds "
5481 				    "input buffer size\n", __func__);
5482 				return -1;
5483 			}
5484 			len = p[0];
5485 			p++;
5486 			(*used_total)++;
5487 			remain--;
5488 		} else {
5489 			if (remain < 2) {
5490 				printf("%s: QMI string length header exceeds "
5491 				    "input buffer size\n", __func__);
5492 				return -1;
5493 			}
5494 			len = p[0] | (p[1] << 8);
5495 			p += 2;
5496 			*used_total += 2;
5497 			remain -= 2;
5498 		}
5499 	} else
5500 		len = elem_size;
5501 
5502 	if (len > ei->elem_len) {
5503 		printf("%s: QMI string element of length %u exceeds "
5504 		    "maximum length %u\n", __func__, len, ei->elem_len);
5505 		return -1;
5506 	}
5507 	if (len > remain) {
5508 		printf("%s: QMI string element of length %u exceeds "
5509 		    "input buffer size %zu\n", __func__, len, remain);
5510 		return -1;
5511 	}
5512 	if (len > output_len) {
5513 		printf("%s: QMI string element of length %u exceeds "
5514 		    "output buffer size %zu\n", __func__, len, output_len);
5515 		return -1;
5516 	}
5517 
5518 	memcpy(output, p, len);
5519 
5520 	p = output;
5521 	p[len] = '\0';
5522 	DNPRINTF(QWZ_D_QMI, "%s: string (len %u): %s\n", __func__, len, p);
5523 
5524 	*used_total += len;
5525 	return 0;
5526 }
5527 
/*
 * Decode a QMI struct element.  'struct_ei' describes the struct as a
 * whole and its ei_array member lists the struct's fields in wire order.
 * Each decoded field is written to 'output' at the field's ei->offset.
 * '*used_total' returns the number of input bytes consumed.  'nested'
 * counts recursion depth; structs nested more than two levels deep are
 * rejected.  Returns 0 on success, -1 on malformed input.
 */
int
qwz_qmi_decode_struct(struct qwz_softc *sc, size_t *used_total,
    void *output, size_t output_len,
    const struct qmi_elem_info *struct_ei,
    uint8_t *input, uint16_t input_len,
    int nested)
{
	const struct qmi_elem_info *ei = struct_ei->ei_array;
	uint32_t min_size;
	uint8_t *p = input;
	size_t remain = input_len;
	size_t used = 0;

	*used_total = 0;

	DNPRINTF(QWZ_D_QMI, "%s: input: ", __func__);
	for (int i = 0; i < input_len; i++) {
		DNPRINTF(QWZ_D_QMI, " %02x", input[i]);
	}
	DNPRINTF(QWZ_D_QMI, "\n");

	/*
	 * Keep consuming fields until we have decoded at least the
	 * smallest possible encoding of this struct, or run out of
	 * field descriptors.
	 */
	min_size = qwz_qmi_decode_min_elem_size(struct_ei, 0);
	DNPRINTF(QWZ_D_QMI, "%s: minimum struct size: %u\n", __func__, min_size);
	while (*used_total < min_size && ei->data_type != QMI_EOTI) {
		if (remain == 0) {
			printf("%s: QMI message too short\n", __func__);
			return -1;
		}

		if (ei->data_type == QMI_DATA_LEN) {
			uint32_t datalen;

			/* Decode the in-band length field for the next ei. */
			used = 0;
			if (qwz_qmi_decode_datalen(sc, &used, &datalen,
			    output, output_len, ei, p, remain))
				return -1;
			DNPRINTF(QWZ_D_QMI, "%s: datalen %u used %zu bytes\n",
			    __func__, datalen, used);
			p += used;
			remain -= used;
			*used_total += used;
			if (remain < datalen) {
				printf("%s: QMI message too short\n", __func__);
				return -1;
			}
			/* Advance to the field this length applies to. */
			ei++;
			DNPRINTF(QWZ_D_QMI, "%s: datalen is for data_type=0x%x "
			    "tlv_type=0x%x elem_size=%u(0x%x) remain=%zu\n",
			    __func__, ei->data_type, ei->tlv_type,
			    ei->elem_size, ei->elem_size, remain);
			if (datalen == 0) {
				/* Empty variable-length field; skip it. */
				ei++;
				DNPRINTF(QWZ_D_QMI,
				    "%s: skipped to data_type=0x%x "
				    "tlv_type=0x%x elem_size=%u(0x%x) "
				    "remain=%zu\n", __func__,
				    ei->data_type, ei->tlv_type,
				    ei->elem_size, ei->elem_size, remain);
				continue;
			}
		} else {
			if (remain < ei->elem_size) {
				printf("%s: QMI message too short\n",
				    __func__);
				return -1;
			}
		}

		/* Ensure the decoded field fits in the output struct. */
		if (ei->offset + ei->elem_size > output_len) {
			printf("%s: QMI message struct member element "
			    "type 0x%x too large: %u\n", sc->sc_dev.dv_xname,
			    ei->tlv_type, ei->elem_size);
			return -1;
		}

		DNPRINTF(QWZ_D_QMI,
		    "%s: decoding struct member element 0x%x with "
		    "data type %s (0x%x) size=%u(0x%x) remain=%zu\n", __func__,
		    ei->tlv_type, qmi_data_type_name[ei->data_type],
		    ei->data_type, ei->elem_size, ei->elem_size, remain);
		switch (ei->data_type) {
		case QMI_UNSIGNED_1_BYTE:
			if (qwz_qmi_decode_byte(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_2_BYTE:
		case QMI_SIGNED_2_BYTE_ENUM:
			if (qwz_qmi_decode_word(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_4_BYTE:
		case QMI_SIGNED_4_BYTE_ENUM:
			if (qwz_qmi_decode_dword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_8_BYTE:
			if (qwz_qmi_decode_qword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_STRUCT:
			/* Recurse for structs within structs, bounded depth. */
			if (nested > 2) {
				printf("%s: QMI struct element data type 0x%x "
				    "is nested too deeply\n",
				    sc->sc_dev.dv_xname, ei->data_type);
				return -1;
			}
			used = 0;
			if (qwz_qmi_decode_struct(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, nested + 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		case QMI_STRING:
			/* Nested strings carry an in-band length prefix. */
			used = 0;
			if (qwz_qmi_decode_string(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, 0, 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		default:
			printf("%s: unhandled QMI struct element "
			    "data type 0x%x\n", sc->sc_dev.dv_xname,
			    ei->data_type);
			return -1;
		}

		ei++;
		DNPRINTF(QWZ_D_QMI, "%s: next ei 0x%x ei->data_type=0x%x\n",
		    __func__, ei->tlv_type, ei->data_type);
	}

	DNPRINTF(QWZ_D_QMI, "%s: used_total=%zu ei->data_type=0x%x\n",
	    __func__, *used_total, ei->data_type);

	return 0;
}
5682 
5683 int
5684 qwz_qmi_decode_msg(struct qwz_softc *sc, void *output, size_t output_len,
5685     const struct qmi_elem_info *ei0, uint8_t *input, uint16_t input_len)
5686 {
5687 	uint8_t *p = input;
5688 	size_t remain = input_len, used;
5689 	const struct qmi_elem_info *ei = ei0;
5690 
5691 	memset(output, 0, output_len);
5692 
5693 	DNPRINTF(QWZ_D_QMI, "%s: input: ", __func__);
5694 	for (int i = 0; i < input_len; i++) {
5695 		DNPRINTF(QWZ_D_QMI, " %02x", input[i]);
5696 	}
5697 	DNPRINTF(QWZ_D_QMI, "\n");
5698 
5699 	while (remain > 0 && ei->data_type != QMI_EOTI) {
5700 		uint32_t nelem = 1, i;
5701 		uint16_t datalen;
5702 
5703 		if (qwz_qmi_decode_tlv_hdr(sc, &ei, &datalen, output_len,
5704 		    ei0, p, remain))
5705 			return -1;
5706 
5707 		/* Skip unrecognized elements. */
5708 		if (ei->data_type == QMI_EOTI) {
5709 			p += 3 + datalen;
5710 			remain -= 3 + datalen;
5711 			ei = ei0;
5712 			continue;
5713 		}
5714 
5715 		/* Set 'valid' flag for optional fields in output struct. */
5716 		if (ei->data_type == QMI_OPT_FLAG) {
5717 			uint8_t *pvalid;
5718 
5719 			if (ei->offset + ei->elem_size > output_len) {
5720 				printf("%s: QMI message element type 0x%x "
5721 				    "too large: %u\n", sc->sc_dev.dv_xname,
5722 				    ei->tlv_type, ei->elem_size);
5723 			}
5724 
5725 			pvalid = (uint8_t *)output + ei->offset;
5726 			*pvalid = 1;
5727 
5728 			ei++;
5729 		}
5730 
5731 		p += 3;
5732 		remain -= 3;
5733 
5734 		if (ei->data_type == QMI_DATA_LEN) {
5735 			const struct qmi_elem_info *datalen_ei = ei;
5736 			uint8_t elem_type = ei->tlv_type;
5737 
5738 			/*
5739 			 * Size info in TLV header indicates the
5740 			 * total length of element data that follows.
5741 			 */
5742 			if (remain < datalen) {
5743 				printf("%s:%d QMI message too short\n",
5744 				    __func__, __LINE__);
5745 				return -1;
5746 			}
5747 
5748 			ei++;
5749 			DNPRINTF(QWZ_D_QMI,
5750 			    "%s: next ei data_type=0x%x tlv_type=0x%x "
5751 			    "dst elem_size=%u(0x%x) src total size=%u "
5752 			    "remain=%zu\n", __func__, ei->data_type,
5753 			    ei->tlv_type, ei->elem_size, ei->elem_size,
5754 			    datalen, remain);
5755 
5756 			/* Related EIs must have the same type. */
5757 			if (ei->tlv_type != elem_type) {
5758 				printf("%s: unexepected element type 0x%x; "
5759 				    "expected 0x%x\n", __func__,
5760 				    ei->tlv_type, elem_type);
5761 				return -1;
5762 			}
5763 
5764 			if (datalen == 0) {
5765 				if (ei->data_type != QMI_EOTI)
5766 					ei++;
5767 				continue;
5768 			}
5769 
5770 			/*
5771 			 * For variable length arrays a one- or two-byte
5772 			 * value follows the header, indicating the number
5773 			 * of elements in the array.
5774 			 */
5775 			if (ei->array_type == VAR_LEN_ARRAY) {
5776 				DNPRINTF(QWZ_D_QMI,
5777 				    "%s: variable length array\n", __func__);
5778 				used = 0;
5779 				if (qwz_qmi_decode_datalen(sc, &used, &nelem,
5780 				    output, output_len, datalen_ei, p, remain))
5781 					return -1;
5782 				p += used;
5783 				remain -= used;
5784 				/*
5785 				 * Previous datalen value included the total
5786 				 * amount of bytes following the DATALEN TLV
5787 				 * header.
5788 				 */
5789 				datalen -= used;
5790 
5791 				if (nelem == 0) {
5792 					if (ei->data_type != QMI_EOTI)
5793 						ei++;
5794 					continue;
5795 				}
5796 
5797 				DNPRINTF(QWZ_D_QMI,
5798 				    "%s: datalen %u used %zu bytes\n",
5799 				    __func__, nelem, used);
5800 
5801 				DNPRINTF(QWZ_D_QMI,
5802 				    "%s: decoding %u array elements with "
5803 				    "src size %u dest size %u\n", __func__,
5804 				    nelem, datalen / nelem, ei->elem_size);
5805 			}
5806 		}
5807 
5808 		if (remain < datalen) {
5809 			printf("%s:%d QMI message too short: remain=%zu, "
5810 			    "datalen=%u\n", __func__, __LINE__, remain,
5811 			    datalen);
5812 			return -1;
5813 		}
5814 		if (output_len < nelem * ei->elem_size) {
5815 			printf("%s: QMI output buffer too short: remain=%zu "
5816 			    "nelem=%u ei->elem_size=%u\n", __func__, remain,
5817 			    nelem, ei->elem_size);
5818 			return -1;
5819 		}
5820 
5821 		for (i = 0; i < nelem && remain > 0; i++) {
5822 			size_t outoff;
5823 
5824 			outoff = ei->offset + (ei->elem_size * i);
5825 			switch (ei->data_type) {
5826 			case QMI_STRUCT:
5827 				used = 0;
5828 				if (qwz_qmi_decode_struct(sc, &used,
5829 				    output + outoff, output_len - outoff,
5830 				    ei, p, remain, 0))
5831 					return -1;
5832 				remain -= used;
5833 				p += used;
5834 				if (used != datalen) {
5835 					DNPRINTF(QWZ_D_QMI,
5836 					    "%s struct used only %zu bytes "
5837 					    "of %u input bytes\n", __func__,
5838 					    used, datalen);
5839 				} else {
5840 					DNPRINTF(QWZ_D_QMI,
5841 					    "%s: struct used %zu bytes "
5842 					    "of input\n", __func__, used);
5843 				}
5844 				break;
5845 			case QMI_STRING:
5846 				used = 0;
5847 				if (qwz_qmi_decode_string(sc, &used,
5848 				    output + outoff, output_len - outoff,
5849 				    ei, p, remain, datalen, 0))
5850 					return -1;
5851 				remain -= used;
5852 				p += used;
5853 				if (used != datalen) {
5854 					DNPRINTF(QWZ_D_QMI,
5855 					    "%s: string used only %zu bytes "
5856 					    "of %u input bytes\n", __func__,
5857 					    used, datalen);
5858 				} else {
5859 					DNPRINTF(QWZ_D_QMI,
5860 					    "%s: string used %zu bytes "
5861 					    "of input\n", __func__, used);
5862 				}
5863 				break;
5864 			case QMI_UNSIGNED_1_BYTE:
5865 				if (remain < ei->elem_size) {
5866 					printf("%s: QMI message too "
5867 					    "short\n", __func__);
5868 					return -1;
5869 				}
5870 				if (qwz_qmi_decode_byte(output + outoff,
5871 				    ei, p))
5872 					return -1;
5873 				remain -= ei->elem_size;
5874 				p += ei->elem_size;
5875 				break;
5876 			case QMI_UNSIGNED_2_BYTE:
5877 			case QMI_SIGNED_2_BYTE_ENUM:
5878 				if (remain < ei->elem_size) {
5879 					printf("%s: QMI message too "
5880 					    "short\n", __func__);
5881 					return -1;
5882 				}
5883 				if (qwz_qmi_decode_word(output + outoff,
5884 				    ei, p))
5885 					return -1;
5886 				remain -= ei->elem_size;
5887 				p += ei->elem_size;
5888 				break;
5889 			case QMI_UNSIGNED_4_BYTE:
5890 			case QMI_SIGNED_4_BYTE_ENUM:
5891 				if (remain < ei->elem_size) {
5892 					printf("%s: QMI message too "
5893 					    "short\n", __func__);
5894 					return -1;
5895 				}
5896 				if (qwz_qmi_decode_dword(output + outoff,
5897 				    ei, p))
5898 					return -1;
5899 				remain -= ei->elem_size;
5900 				p += ei->elem_size;
5901 				break;
5902 			case QMI_UNSIGNED_8_BYTE:
5903 				if (remain < ei->elem_size) {
5904 					printf("%s: QMI message too "
5905 					    "short 4\n", __func__);
5906 					return -1;
5907 				}
5908 				if (qwz_qmi_decode_qword(output + outoff,
5909 				    ei, p))
5910 					return -1;
5911 				remain -= ei->elem_size;
5912 				p += ei->elem_size;
5913 				break;
5914 			default:
5915 				printf("%s: unhandled QMI message element "
5916 				    "data type 0x%x\n",
5917 				    sc->sc_dev.dv_xname, ei->data_type);
5918 				return -1;
5919 			}
5920 		}
5921 
5922 		ei++;
5923 		DNPRINTF(QWZ_D_QMI,
5924 		    "%s: next ei 0x%x ei->data_type=0x%x remain=%zu\n",
5925 		    __func__, ei->tlv_type, ei->data_type, remain);
5926 
5927 		DNPRINTF(QWZ_D_QMI, "%s: remaining input: ", __func__);
5928 		for (int i = 0; i < remain; i++)
5929 			DNPRINTF(QWZ_D_QMI, " %02x", p[i]);
5930 		DNPRINTF(QWZ_D_QMI, "\n");
5931 	}
5932 
5933 	return 0;
5934 }
5935 
5936 void
5937 qwz_qmi_recv_wlanfw_phy_cap_req_v1(struct qwz_softc *sc, struct mbuf *m,
5938     uint16_t txn_id, uint16_t msg_len)
5939 {
5940 	struct qmi_wlanfw_phy_cap_resp_msg_v01 resp;
5941 	const struct qmi_elem_info *ei;
5942 	uint8_t *msg = mtod(m, uint8_t *);
5943 
5944 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
5945 
5946 	ei = qmi_wlanfw_phy_cap_resp_msg_v01_ei;
5947 	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5948 		return;
5949 
5950 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
5951 	    __func__, le16toh(resp.resp.result));
5952 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
5953 	    __func__, le16toh(resp.resp.error));
5954 	DNPRINTF(QWZ_D_QMI, "%s: resp.num_phy_valid=0x%x\n",
5955 	   __func__, resp.num_phy_valid);
5956 	DNPRINTF(QWZ_D_QMI, "%s: resp.num_phy=0x%x\n",
5957 	   __func__, resp.num_phy);
5958 	DNPRINTF(QWZ_D_QMI, "%s: resp.board_id_valid=0x%x\n",
5959 	   __func__, resp.board_id_valid);
5960 	DNPRINTF(QWZ_D_QMI, "%s: resp.board_id=0x%x\n",
5961 	   __func__, le32toh(resp.board_id));
5962 	DNPRINTF(QWZ_D_QMI, "%s: resp.single_chip_mlo_support_valid=0x%x\n",
5963 	   __func__, resp.single_chip_mlo_support_valid);
5964 	DNPRINTF(QWZ_D_QMI, "%s: resp.single_chip_mlo_support=0x%x\n",
5965 	   __func__, resp.single_chip_mlo_support);
5966 
5967 	sc->qmi_resp.result = le16toh(resp.resp.result);
5968 	sc->qmi_resp.error = le16toh(resp.resp.error);
5969 	wakeup(&sc->qmi_resp);
5970 }
5971 
5972 void
5973 qwz_qmi_recv_wlanfw_ind_register_req_v1(struct qwz_softc *sc, struct mbuf *m,
5974     uint16_t txn_id, uint16_t msg_len)
5975 {
5976 	struct qmi_wlanfw_ind_register_resp_msg_v01 resp;
5977 	const struct qmi_elem_info *ei;
5978 	uint8_t *msg = mtod(m, uint8_t *);
5979 
5980 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
5981 
5982 	ei = qmi_wlanfw_ind_register_resp_msg_v01_ei;
5983 	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5984 		return;
5985 
5986 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
5987 	    __func__, le16toh(resp.resp.result));
5988 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
5989 	    __func__, le16toh(resp.resp.error));
5990 	DNPRINTF(QWZ_D_QMI, "%s: resp.fw_status=0x%llx\n",
5991 	   __func__, le64toh(resp.fw_status));
5992 
5993 	sc->qmi_resp.result = le16toh(resp.resp.result);
5994 	sc->qmi_resp.error = le16toh(resp.resp.error);
5995 	wakeup(&sc->qmi_resp);
5996 }
5997 
5998 void
5999 qwz_qmi_recv_wlanfw_host_cap_resp_v1(struct qwz_softc *sc, struct mbuf *m,
6000     uint16_t txn_id, uint16_t msg_len)
6001 {
6002 	struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
6003 	const struct qmi_elem_info *ei;
6004 	uint8_t *msg = mtod(m, uint8_t *);
6005 
6006 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
6007 
6008 	ei = qmi_wlanfw_host_cap_resp_msg_v01_ei;
6009 	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6010 		return;
6011 
6012 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
6013 	    __func__, le16toh(resp.resp.result));
6014 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
6015 	    __func__, le16toh(resp.resp.error));
6016 
6017 	sc->qmi_resp.result = le16toh(resp.resp.result);
6018 	sc->qmi_resp.error = le16toh(resp.resp.error);
6019 	wakeup(&sc->qmi_resp);
6020 }
6021 
6022 void
6023 qwz_qmi_recv_wlanfw_respond_mem_resp_v1(struct qwz_softc *sc, struct mbuf *m,
6024     uint16_t txn_id, uint16_t msg_len)
6025 {
6026 	struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
6027 	const struct qmi_elem_info *ei;
6028 	uint8_t *msg = mtod(m, uint8_t *);
6029 
6030 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
6031 
6032 	ei = qmi_wlanfw_respond_mem_resp_msg_v01_ei;
6033 	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6034 		return;
6035 
6036 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
6037 	    __func__, le16toh(resp.resp.result));
6038 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
6039 	    __func__, le16toh(resp.resp.error));
6040 
6041 	sc->qmi_resp.result = le16toh(resp.resp.result);
6042 	sc->qmi_resp.error = le16toh(resp.resp.error);
6043 	wakeup(&sc->qmi_resp);
6044 }
6045 
/*
 * Handle the firmware's capability response.  Copy the reported chip,
 * board, SoC, firmware version and device memory details into the
 * softc for later use, then record the QMI result code and wake up
 * the waiting command context.
 */
void
qwz_qmi_recv_wlanfw_cap_resp_v1(struct qwz_softc *sc, struct mbuf *m,
    uint16_t txn_id, uint16_t msg_len)
{
	struct qmi_wlanfw_cap_resp_msg_v01 resp;
	const struct qmi_elem_info *ei;
	uint8_t *msg = mtod(m, uint8_t *);
	int i;

	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);

	memset(&resp, 0, sizeof(resp));

	ei = qmi_wlanfw_cap_resp_msg_v01_ei;
	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
		return;

	/* Only trust fields whose QMI 'valid' flag was set by the decoder. */
	if (resp.chip_info_valid) {
		sc->qmi_target.chip_id = resp.chip_info.chip_id;
		sc->qmi_target.chip_family = resp.chip_info.chip_family;
	}

	if (resp.board_info_valid)
		sc->qmi_target.board_id = resp.board_info.board_id;
	else
		sc->qmi_target.board_id = 0xFF;	/* fallback board id */

	if (resp.soc_info_valid)
		sc->qmi_target.soc_id = resp.soc_info.soc_id;

	if (resp.fw_version_info_valid) {
		sc->qmi_target.fw_version = resp.fw_version_info.fw_version;
		strlcpy(sc->qmi_target.fw_build_timestamp,
			resp.fw_version_info.fw_build_timestamp,
			sizeof(sc->qmi_target.fw_build_timestamp));
	}

	if (resp.fw_build_id_valid)
		strlcpy(sc->qmi_target.fw_build_id, resp.fw_build_id,
			sizeof(sc->qmi_target.fw_build_id));

	if (resp.dev_mem_info_valid) {
		for (i = 0; i < ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) {
			sc->qmi_dev_mem[i].start =
				resp.dev_mem[i].start;
			sc->qmi_dev_mem[i].size =
				resp.dev_mem[i].size;
			DNPRINTF(QWZ_D_QMI,
				   "%s: devmem [%d] start 0x%llx size %llu\n",
				   sc->sc_dev.dv_xname, i,
				   sc->qmi_dev_mem[i].start,
				   sc->qmi_dev_mem[i].size);
		}
	}

	if (resp.eeprom_read_timeout_valid) {
		sc->qmi_target.eeprom_caldata = resp.eeprom_read_timeout;
		DNPRINTF(QWZ_D_QMI,
		    "%s: qmi cal data supported from eeprom\n", __func__);
	}

	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
	    __func__, le16toh(resp.resp.result));
	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
	    __func__, le16toh(resp.resp.error));

	sc->qmi_resp.result = le16toh(resp.resp.result);
	sc->qmi_resp.error = le16toh(resp.resp.error);
	wakeup(&sc->qmi_resp);
}
6116 
6117 void
6118 qwz_qmi_recv_wlanfw_bdf_download_resp_v1(struct qwz_softc *sc, struct mbuf *m,
6119     uint16_t txn_id, uint16_t msg_len)
6120 {
6121 	struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
6122 	const struct qmi_elem_info *ei;
6123 	uint8_t *msg = mtod(m, uint8_t *);
6124 
6125 	memset(&resp, 0, sizeof(resp));
6126 
6127 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
6128 
6129 	ei = qmi_wlanfw_bdf_download_resp_msg_v01_ei;
6130 	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6131 		return;
6132 
6133 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
6134 	    __func__, le16toh(resp.resp.result));
6135 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
6136 	    __func__, le16toh(resp.resp.error));
6137 
6138 	sc->qmi_resp.result = le16toh(resp.resp.result);
6139 	sc->qmi_resp.error = le16toh(resp.resp.error);
6140 	wakeup(&sc->qmi_resp);
6141 }
6142 
6143 void
6144 qwz_qmi_recv_wlanfw_m3_info_resp_v1(struct qwz_softc *sc, struct mbuf *m,
6145     uint16_t txn_id, uint16_t msg_len)
6146 {
6147 	struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
6148 	const struct qmi_elem_info *ei;
6149 	uint8_t *msg = mtod(m, uint8_t *);
6150 
6151 	memset(&resp, 0, sizeof(resp));
6152 
6153 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
6154 
6155 	ei = qmi_wlanfw_m3_info_resp_msg_v01_ei;
6156 	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6157 		return;
6158 
6159 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
6160 	    __func__, le16toh(resp.resp.result));
6161 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
6162 	    __func__, le16toh(resp.resp.error));
6163 
6164 	sc->qmi_resp.result = le16toh(resp.resp.result);
6165 	sc->qmi_resp.error = le16toh(resp.resp.error);
6166 	wakeup(&sc->qmi_resp);
6167 }
6168 
6169 void
6170 qwz_qmi_recv_wlanfw_wlan_ini_resp_v1(struct qwz_softc *sc, struct mbuf *m,
6171     uint16_t txn_id, uint16_t msg_len)
6172 {
6173 	struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp;
6174 	const struct qmi_elem_info *ei;
6175 	uint8_t *msg = mtod(m, uint8_t *);
6176 
6177 	memset(&resp, 0, sizeof(resp));
6178 
6179 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
6180 
6181 	ei = qmi_wlanfw_wlan_ini_resp_msg_v01_ei;
6182 	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6183 		return;
6184 
6185 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
6186 	    __func__, le16toh(resp.resp.result));
6187 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
6188 	    __func__, le16toh(resp.resp.error));
6189 
6190 	sc->qmi_resp.result = le16toh(resp.resp.result);
6191 	sc->qmi_resp.error = le16toh(resp.resp.error);
6192 	wakeup(&sc->qmi_resp);
6193 }
6194 
6195 void
6196 qwz_qmi_recv_wlanfw_wlan_cfg_resp_v1(struct qwz_softc *sc, struct mbuf *m,
6197     uint16_t txn_id, uint16_t msg_len)
6198 {
6199 	struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
6200 	const struct qmi_elem_info *ei;
6201 	uint8_t *msg = mtod(m, uint8_t *);
6202 
6203 	memset(&resp, 0, sizeof(resp));
6204 
6205 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
6206 
6207 	ei = qmi_wlanfw_wlan_cfg_resp_msg_v01_ei;
6208 	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6209 		return;
6210 
6211 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
6212 	    __func__, le16toh(resp.resp.result));
6213 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
6214 	    __func__, le16toh(resp.resp.error));
6215 
6216 	sc->qmi_resp.result = le16toh(resp.resp.result);
6217 	sc->qmi_resp.error = le16toh(resp.resp.error);
6218 	wakeup(&sc->qmi_resp);
6219 }
6220 
6221 void
6222 qwz_qmi_recv_wlanfw_wlan_mode_resp_v1(struct qwz_softc *sc, struct mbuf *m,
6223     uint16_t txn_id, uint16_t msg_len)
6224 {
6225 	struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
6226 	const struct qmi_elem_info *ei;
6227 	uint8_t *msg = mtod(m, uint8_t *);
6228 
6229 	memset(&resp, 0, sizeof(resp));
6230 
6231 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
6232 
6233 	ei = qmi_wlanfw_wlan_mode_resp_msg_v01_ei;
6234 	if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6235 		return;
6236 
6237 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
6238 	    __func__, le16toh(resp.resp.result));
6239 	DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
6240 	    __func__, le16toh(resp.resp.error));
6241 
6242 	sc->qmi_resp.result = le16toh(resp.resp.result);
6243 	sc->qmi_resp.error = le16toh(resp.resp.error);
6244 	wakeup(&sc->qmi_resp);
6245 }
6246 
/*
 * Dispatch a received QMI response message to its handler based on
 * the message ID from the QMI header.  Unrecognized responses are
 * logged and dropped.
 */
void
qwz_qmi_recv_response(struct qwz_softc *sc, struct mbuf *m,
    uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
{
	switch (msg_id) {
	case QMI_WLANFW_PHY_CAP_REQ_V01:
		qwz_qmi_recv_wlanfw_phy_cap_req_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_IND_REGISTER_REQ_V01:
		qwz_qmi_recv_wlanfw_ind_register_req_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLFW_HOST_CAP_RESP_V01:
		qwz_qmi_recv_wlanfw_host_cap_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLFW_RESPOND_MEM_RESP_V01:
		qwz_qmi_recv_wlanfw_respond_mem_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_CAP_RESP_V01:
		qwz_qmi_recv_wlanfw_cap_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_BDF_DOWNLOAD_RESP_V01:
		qwz_qmi_recv_wlanfw_bdf_download_resp_v1(sc, m, txn_id,
		    msg_len);
		break;
	case QMI_WLANFW_M3_INFO_RESP_V01:
		qwz_qmi_recv_wlanfw_m3_info_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_WLAN_INI_RESP_V01:
		qwz_qmi_recv_wlanfw_wlan_ini_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_WLAN_CFG_RESP_V01:
		qwz_qmi_recv_wlanfw_wlan_cfg_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_WLAN_MODE_RESP_V01:
		qwz_qmi_recv_wlanfw_wlan_mode_resp_v1(sc, m, txn_id, msg_len);
		break;
	default:
		printf("%s: unhandled QMI response 0x%x\n",
		    sc->sc_dev.dv_xname, msg_id);
		break;
	}
}
6289 
6290 void
6291 qwz_qmi_recv_wlanfw_request_mem_indication(struct qwz_softc *sc, struct mbuf *m,
6292     uint16_t txn_id, uint16_t msg_len)
6293 {
6294 	struct qmi_wlanfw_request_mem_ind_msg_v01 *ind = NULL;
6295 	const struct qmi_elem_info *ei;
6296 	uint8_t *msg = mtod(m, uint8_t *);
6297 
6298 	DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
6299 
6300 	if (!sc->expect_fwmem_req || sc->sc_req_mem_ind != NULL)
6301 		return;
6302 
6303 	/* This structure is too large for the stack. */
6304 	ind = malloc(sizeof(*ind), M_DEVBUF, M_NOWAIT | M_ZERO);
6305 	if (ind == NULL)
6306 		return;
6307 
6308 	ei = qmi_wlanfw_request_mem_ind_msg_v01_ei;
6309 	if (qwz_qmi_decode_msg(sc, ind, sizeof(*ind), ei, msg, msg_len)) {
6310 		free(ind, M_DEVBUF, sizeof(*ind));
6311 		return;
6312 	}
6313 
6314 	/* Handled by qwz_qmi_mem_seg_send() in process context */
6315 	sc->sc_req_mem_ind = ind;
6316 	wakeup(&sc->sc_req_mem_ind);
6317 }
6318 
/*
 * Dispatch a received QMI indication message based on its message ID.
 * Memory-ready and firmware-ready indications simply set a flag and
 * wake any context sleeping on it; unrecognized indications are
 * logged and dropped.
 */
void
qwz_qmi_recv_indication(struct qwz_softc *sc, struct mbuf *m,
    uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
{
	switch (msg_id) {
	case QMI_WLFW_REQUEST_MEM_IND_V01:
		qwz_qmi_recv_wlanfw_request_mem_indication(sc, m,
		    txn_id, msg_len);
		break;
	case QMI_WLFW_FW_MEM_READY_IND_V01:
		sc->fwmem_ready = 1;
		wakeup(&sc->fwmem_ready);
		break;
	case QMI_WLFW_FW_READY_IND_V01:
		sc->fw_ready = 1;
		wakeup(&sc->fw_ready);
		break;
	default:
		printf("%s: unhandled QMI indication 0x%x\n",
		    sc->sc_dev.dv_xname, msg_id);
		break;
	}
}
6342 
6343 void
6344 qwz_qrtr_recv_data(struct qwz_softc *sc, struct mbuf *m, size_t size)
6345 {
6346 	struct qmi_header hdr;
6347 	uint16_t txn_id, msg_id, msg_len;
6348 
6349 	if (size < sizeof(hdr)) {
6350 		printf("%s: QMI message too short: %zu bytes\n",
6351 		    sc->sc_dev.dv_xname, size);
6352 		return;
6353 	}
6354 
6355 	memcpy(&hdr, mtod(m, void *), sizeof(hdr));
6356 
6357 	DNPRINTF(QWZ_D_QMI,
6358 	    "%s: QMI message type=0x%x txn=0x%x id=0x%x len=%u\n",
6359 	    __func__, hdr.type, le16toh(hdr.txn_id),
6360 	    le16toh(hdr.msg_id), le16toh(hdr.msg_len));
6361 
6362 	txn_id = le16toh(hdr.txn_id);
6363 	msg_id = le16toh(hdr.msg_id);
6364 	msg_len = le16toh(hdr.msg_len);
6365 	if (sizeof(hdr) + msg_len != size) {
6366 		printf("%s: bad length in QMI message header: %u\n",
6367 		    sc->sc_dev.dv_xname, msg_len);
6368 		return;
6369 	}
6370 
6371 	switch (hdr.type) {
6372 	case QMI_RESPONSE:
6373 		m_adj(m, sizeof(hdr));
6374 		qwz_qmi_recv_response(sc, m, txn_id, msg_id, msg_len);
6375 		break;
6376 	case QMI_INDICATION:
6377 		m_adj(m, sizeof(hdr));
6378 		qwz_qmi_recv_indication(sc, m, txn_id, msg_id, msg_len);
6379 		break;
6380 	default:
6381 		printf("%s: unhandled QMI message type %u\n",
6382 		    sc->sc_dev.dv_xname, hdr.type);
6383 		break;
6384 	}
6385 }
6386 
6387 int
6388 qwz_qrtr_say_hello(struct qwz_softc *sc)
6389 {
6390 	struct qrtr_hdr_v1 hdr;
6391 	struct qrtr_ctrl_pkt pkt;
6392 	struct mbuf *m;
6393 	size_t totlen, padlen;
6394 	int err;
6395 
6396 	totlen = sizeof(hdr) + sizeof(pkt);
6397 	padlen = roundup(totlen, 4);
6398 
6399 	m = m_gethdr(M_DONTWAIT, MT_DATA);
6400 	if (m == NULL) {
6401 		err = ENOBUFS;
6402 		goto done;
6403 	}
6404 
6405 	if (padlen <= MCLBYTES)
6406 		MCLGET(m, M_DONTWAIT);
6407 	else
6408 		MCLGETL(m, M_DONTWAIT, padlen);
6409 	if ((m->m_flags & M_EXT) == 0) {
6410 		err = ENOBUFS;
6411 		goto done;
6412 	}
6413 
6414 	m->m_len = m->m_pkthdr.len = padlen;
6415 
6416 	memset(&hdr, 0, sizeof(hdr));
6417 	hdr.version = htole32(QRTR_PROTO_VER_1);
6418 	hdr.type = htole32(QRTR_TYPE_HELLO);
6419 	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
6420 	hdr.src_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
6421 	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
6422 	hdr.dst_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
6423 	hdr.size = htole32(sizeof(pkt));
6424 
6425 	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
6426 	if (err)
6427 		goto done;
6428 
6429 	memset(&pkt, 0, sizeof(pkt));
6430 	pkt.cmd = htole32(QRTR_TYPE_HELLO);
6431 
6432 	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
6433 	if (err)
6434 		goto done;
6435 
6436 	/* Zero-pad the mbuf */
6437 	if (padlen != totlen) {
6438 		uint32_t pad = 0;
6439 		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
6440 		if (err)
6441 			goto done;
6442 	}
6443 
6444 	err = sc->ops.submit_xfer(sc, m);
6445 done:
6446 	if (err)
6447 		m_freem(m);
6448 	return err;
6449 }
6450 
6451 int
6452 qwz_qrtr_resume_tx(struct qwz_softc *sc)
6453 {
6454 	struct qrtr_hdr_v1 hdr;
6455 	struct qrtr_ctrl_pkt pkt;
6456 	struct mbuf *m;
6457 	size_t totlen, padlen;
6458 	int err;
6459 
6460 	totlen = sizeof(hdr) + sizeof(pkt);
6461 	padlen = roundup(totlen, 4);
6462 
6463 	m = m_gethdr(M_DONTWAIT, MT_DATA);
6464 	if (m == NULL) {
6465 		err = ENOBUFS;
6466 		goto done;
6467 	}
6468 
6469 	if (padlen <= MCLBYTES)
6470 		MCLGET(m, M_DONTWAIT);
6471 	else
6472 		MCLGETL(m, M_DONTWAIT, padlen);
6473 	if ((m->m_flags & M_EXT) == 0) {
6474 		err = ENOBUFS;
6475 		goto done;
6476 	}
6477 
6478 	m->m_len = m->m_pkthdr.len = padlen;
6479 
6480 	memset(&hdr, 0, sizeof(hdr));
6481 	hdr.version = htole32(QRTR_PROTO_VER_1);
6482 	hdr.type = htole32(QRTR_TYPE_RESUME_TX);
6483 	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
6484 	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
6485 	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
6486 	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
6487 	hdr.size = htole32(sizeof(pkt));
6488 
6489 	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
6490 	if (err)
6491 		goto done;
6492 
6493 	memset(&pkt, 0, sizeof(pkt));
6494 	pkt.cmd = htole32(QRTR_TYPE_RESUME_TX);
6495 	pkt.client.node = htole32(0x01);
6496 	pkt.client.port = htole32(0x4000);
6497 
6498 	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
6499 	if (err)
6500 		goto done;
6501 
6502 	/* Zero-pad the mbuf */
6503 	if (padlen != totlen) {
6504 		uint32_t pad = 0;
6505 		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
6506 		if (err)
6507 			goto done;
6508 	}
6509 
6510 	err = sc->ops.submit_xfer(sc, m);
6511 done:
6512 	if (err)
6513 		m_freem(m);
6514 	return err;
6515 }
6516 
/*
 * Parse and dispatch one QRTR message received from the device.
 * Both header views below alias the start of the mbuf data; only the
 * one matching the leading version byte is valid.
 */
void
qwz_qrtr_recv_msg(struct qwz_softc *sc, struct mbuf *m)
{
	struct qrtr_hdr_v1 *v1 = mtod(m, struct qrtr_hdr_v1 *);
	struct qrtr_hdr_v2 *v2 = mtod(m, struct qrtr_hdr_v2 *);
	struct qrtr_ctrl_pkt *pkt;
	uint32_t type, size, hdrsize;
	uint8_t ver, confirm_rx;

	/* The first byte of either header version is the protocol version. */
	ver = *mtod(m, uint8_t *);
	switch (ver) {
	case QRTR_PROTO_VER_1:
		DNPRINTF(QWZ_D_QMI,
		    "%s: type %u size %u confirm_rx %u\n", __func__,
		    letoh32(v1->type), letoh32(v1->size),
		    letoh32(v1->confirm_rx));
		type = letoh32(v1->type);
		size = letoh32(v1->size);
		confirm_rx = !!letoh32(v1->confirm_rx);
		hdrsize = sizeof(*v1);
		break;
	case QRTR_PROTO_VER_2:
		/* v2 headers keep type and flags as single bytes. */
		DNPRINTF(QWZ_D_QMI,
		    "%s: type %u size %u confirm_rx %u\n", __func__,
		    v2->type, letoh32(v2->size),
		    !!(v2->flags & QRTR_FLAGS_CONFIRM_RX));
		type = v2->type;
		size = letoh32(v2->size);
		confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
		hdrsize = sizeof(*v2);
		break;
	default:
		printf("%s: unsupported qrtr version %u\n",
		    sc->sc_dev.dv_xname, ver);
		return;
	}

	/*
	 * NOTE(review): 'size' is the payload length but is compared
	 * against the total mbuf length including the header; presumably
	 * hdrsize + size <= m_pkthdr.len would be the stricter check —
	 * confirm against the QRTR protocol definition.
	 */
	if (size > m->m_pkthdr.len) {
		printf("%s: bad size in qrtr message header: %u\n",
		    sc->sc_dev.dv_xname, size);
		return;
	}

	switch (type) {
	case QRTR_TYPE_DATA:
		/* Strip the QRTR header; payload is a QMI message. */
		m_adj(m, hdrsize);
		qwz_qrtr_recv_data(sc, m, size);
		break;
	case QRTR_TYPE_HELLO:
		qwz_qrtr_say_hello(sc);
		break;
	case QRTR_TYPE_NEW_SERVER:
		/* Record the announced server; qwz code waits on this. */
		m_adj(m, hdrsize);
		pkt = mtod(m, struct qrtr_ctrl_pkt *);
		sc->qrtr_server.service = le32toh(pkt->server.service);
		sc->qrtr_server.instance = le32toh(pkt->server.instance);
		sc->qrtr_server.node = le32toh(pkt->server.node);
		sc->qrtr_server.port = le32toh(pkt->server.port);
		DNPRINTF(QWZ_D_QMI,
		    "%s: new server: service=0x%x instance=0x%x node=0x%x "
		    "port=0x%x\n", __func__, sc->qrtr_server.service,
		    sc->qrtr_server.instance,
		    sc->qrtr_server.node, sc->qrtr_server.port);
		wakeup(&sc->qrtr_server);
		break;
	default:
		/*
		 * Unknown types return early and therefore never send a
		 * RESUME_TX acknowledgement, even if confirm_rx was set.
		 */
		DPRINTF("%s: unhandled qrtr type %u\n",
		    sc->sc_dev.dv_xname, type);
		return;
	}

	/* Acknowledge messages the peer wants confirmed. */
	if (confirm_rx)
		qwz_qrtr_resume_tx(sc);
}
6591 
// Not needed because we don't implement QMI as a network service.
6593 #define qwz_qmi_init_service(sc)	(0)
6594 #define qwz_qmi_deinit_service(sc)	(0)
6595 
6596 int
6597 qwz_qmi_encode_datalen(uint8_t *p, uint32_t *datalen,
6598     const struct qmi_elem_info *ei, void *input)
6599 {
6600 	memcpy(datalen, input + ei->offset, sizeof(uint32_t));
6601 
6602 	if (ei->elem_size == sizeof(uint8_t)) {
6603 		p[0] = (*datalen & 0xff);
6604 	} else if (ei->elem_size == sizeof(uint16_t)) {
6605 		p[0] = (*datalen & 0xff);
6606 		p[1] = (*datalen >> 8) & 0xff;
6607 	} else {
6608 		printf("%s: bad element size\n", __func__);
6609 		return -1;
6610 	}
6611 
6612 	return 0;
6613 }
6614 
6615 int
6616 qwz_qmi_encode_byte(uint8_t *p, const struct qmi_elem_info *ei, void *input,
6617     int i)
6618 {
6619 	if (ei->elem_size != sizeof(uint8_t)) {
6620 		printf("%s: bad element size\n", __func__);
6621 		return -1;
6622 	}
6623 
6624 	if (p == NULL)
6625 		return 0;
6626 
6627 	memcpy(p, input + ei->offset + (i * ei->elem_size), ei->elem_size);
6628 	return 0;
6629 }
6630 
6631 int
6632 qwz_qmi_encode_word(uint8_t *p, const struct qmi_elem_info *ei, void *input,
6633     int i)
6634 {
6635 	uint16_t val;
6636 
6637 	if (ei->elem_size != sizeof(val)) {
6638 		printf("%s: bad element size\n", __func__);
6639 		return -1;
6640 	}
6641 
6642 	if (p == NULL)
6643 		return 0;
6644 
6645 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
6646 	val = htole16(val);
6647 	memcpy(p, &val, sizeof(val));
6648 	return 0;
6649 }
6650 
6651 int
6652 qwz_qmi_encode_dword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
6653     int i)
6654 {
6655 	uint32_t val;
6656 
6657 	if (ei->elem_size != sizeof(val)) {
6658 		printf("%s: bad element size\n", __func__);
6659 		return -1;
6660 	}
6661 
6662 	if (p == NULL)
6663 		return 0;
6664 
6665 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
6666 	val = htole32(val);
6667 	memcpy(p, &val, sizeof(val));
6668 	return 0;
6669 }
6670 
6671 int
6672 qwz_qmi_encode_qword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
6673     int i)
6674 {
6675 	uint64_t val;
6676 
6677 	if (ei->elem_size != sizeof(val)) {
6678 		printf("%s: bad element size\n", __func__);
6679 		return -1;
6680 	}
6681 
6682 	if (p == NULL)
6683 		return 0;
6684 
6685 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
6686 	val = htole64(val);
6687 	memcpy(p, &val, sizeof(val));
6688 	return 0;
6689 }
6690 
/*
 * Encode a QMI struct element described by struct_ei->ei_array.
 * When p is NULL only *encoded_len is computed (sizing pass); the
 * per-type encoders below treat a NULL buffer as a no-op write.
 * Returns 0 on success, -1 on malformed element descriptions or if
 * the input buffer is too small.
 */
int
qwz_qmi_encode_struct(uint8_t *p, size_t *encoded_len,
    const struct qmi_elem_info *struct_ei, void *input, size_t input_len)
{
	const struct qmi_elem_info *ei = struct_ei->ei_array;
	size_t remain = input_len;

	*encoded_len = 0;

	while (ei->data_type != QMI_EOTI) {
		/*
		 * An OPT_FLAG byte in the input decides whether the
		 * element that follows it gets encoded at all.
		 */
		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional flag element\n",
				    __func__);
				return -1;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		if (ei->elem_size > remain) {
			printf("%s: QMI message buffer too short\n", __func__);
			return -1;
		}

		/* Only fixed-size scalar members may appear in a struct. */
		switch (ei->data_type) {
		case QMI_UNSIGNED_1_BYTE:
			if (qwz_qmi_encode_byte(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_2_BYTE:
			if (qwz_qmi_encode_word(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_4_BYTE:
		case QMI_SIGNED_4_BYTE_ENUM:
			if (qwz_qmi_encode_dword(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_8_BYTE:
			if (qwz_qmi_encode_qword(p, ei, input, 0))
				return -1;
			break;
		default:
			printf("%s: unhandled QMI struct element type %d\n",
			    __func__, ei->data_type);
			return -1;
		}

		/* Advance output cursor only when actually writing. */
		remain -= ei->elem_size;
		if (p != NULL)
			p += ei->elem_size;
		*encoded_len += ei->elem_size;
		ei++;
	}

	return 0;
}
6760 
6761 int
6762 qwz_qmi_encode_string(uint8_t *p, size_t *encoded_len,
6763     const struct qmi_elem_info *string_ei, void *input, size_t input_len)
6764 {
6765 	*encoded_len = strnlen(input, input_len);
6766 	if (*encoded_len > string_ei->elem_len) {
6767 		printf("%s: QMI message buffer too short\n", __func__);
6768 		return -1;
6769 	}
6770 
6771 	if (p)
6772 		memcpy(p, input, *encoded_len);
6773 
6774 	return 0;
6775 }
6776 
/*
 * Encode a QMI message into a newly allocated buffer.
 *
 * The element description array 'ei' is walked twice: a first pass
 * computes the exact encoded size, then the buffer is allocated and a
 * second pass writes header plus TLV-encoded elements into it.
 *
 * On success returns 0, stores the buffer and its length in
 * *encoded_msg/*encoded_len, and post-increments *txn_id. The caller
 * owns the buffer and must free(9) it with the returned length.
 * NOTE(review): failure returns are mixed — ENOMEM (positive) for
 * allocation failure, -1 otherwise; callers appear to test for
 * non-zero only.
 */
int
qwz_qmi_encode_msg(uint8_t **encoded_msg, size_t *encoded_len, int type,
    uint16_t *txn_id, uint16_t msg_id, size_t msg_len,
    const struct qmi_elem_info *ei, void *input, size_t input_len)
{
	const struct qmi_elem_info *ei0 = ei;
	struct qmi_header hdr;
	size_t remain;
	uint8_t *p, *op;

	*encoded_msg = NULL;
	*encoded_len = 0;

	/* First pass: Determine length of encoded message. */
	while (ei->data_type != QMI_EOTI) {
		int nelem = 1, i;

		if (ei->offset + ei->elem_size > input_len) {
			printf("%s: bad input buffer offset at element 0x%x "
			    "data type 0x%x\n",
			    __func__, ei->tlv_type, ei->data_type);
			goto err;
		}

		/*
		 * OPT_FLAG determines whether the next element
		 * should be considered for encoding.
		 */
		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional element\n", __func__);
				goto err;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		/* 1 byte of TLV type plus 2 bytes of TLV length. */
		*encoded_len += 3; /* type, length */
		if (ei->data_type == QMI_DATA_LEN) {
			uint32_t datalen = 0;
			uint8_t dummy[2];

			/* Sizing pass: write the count into a scratch buf. */
			if (qwz_qmi_encode_datalen(dummy, &datalen, ei, input))
				goto err;
			*encoded_len += ei->elem_size;
			ei++;
			if (ei->array_type != VAR_LEN_ARRAY) {
				printf("%s: data len not for a var array\n",
				    __func__);
				goto err;
			}
			nelem = datalen;
			if (ei->data_type == QMI_STRUCT) {
				/* Size each struct element individually. */
				for (i = 0; i < nelem; i++) {
					size_t encoded_struct_len = 0;
					size_t inoff = ei->offset + (i * ei->elem_size);

					if (qwz_qmi_encode_struct(NULL,
					    &encoded_struct_len, ei,
					    input + inoff, input_len - inoff))
						goto err;

					*encoded_len += encoded_struct_len;
				}
			} else
				*encoded_len += nelem * ei->elem_size;
			ei++;
		} else if (ei->data_type == QMI_STRING) {
			size_t encoded_string_len = 0;
			size_t inoff = ei->offset;

			if (qwz_qmi_encode_string(NULL,
			    &encoded_string_len, ei,
			    input + inoff, input_len - inoff))
				goto err;
			*encoded_len += encoded_string_len;
			ei++;
		} else {
			*encoded_len += ei->elem_size;
			ei++;
		}
	}

	*encoded_len += sizeof(hdr);
	*encoded_msg = malloc(*encoded_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*encoded_msg == NULL)
		return ENOMEM;

	/* QMI header fields are little-endian on the wire. */
	hdr.type = type;
	hdr.txn_id = htole16(*txn_id);
	hdr.msg_id = htole16(msg_id);
	hdr.msg_len = htole16(*encoded_len - sizeof(hdr));
	memcpy(*encoded_msg, &hdr, sizeof(hdr));

	/* Second pass: Encode the message. */
	ei = ei0;
	p = *encoded_msg + sizeof(hdr);
	remain = *encoded_len - sizeof(hdr);
	while (ei->data_type != QMI_EOTI) {
		uint32_t datalen = 0;
		int nelem = 1, i;

		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional flag element\n",
				    __func__);
				goto err;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		if (ei->elem_size + 3 > remain) {
			printf("%s: QMI message buffer too short\n", __func__);
			goto err;
		}

		/* 3 bytes of type-length-value header, remember for later */
		op = p;
		p += 3;

		if (ei->data_type == QMI_DATA_LEN) {
			if (qwz_qmi_encode_datalen(p, &datalen, ei, input))
				goto err;
			p += ei->elem_size;
			ei++;
			if (ei->array_type == VAR_LEN_ARRAY)
				nelem = datalen;
		}

		for (i = 0; i < nelem; i++) {
			size_t encoded_struct_len = 0;
			size_t encoded_string_len = 0;
			size_t inoff = ei->offset + (i * ei->elem_size);

			switch (ei->data_type) {
			case QMI_UNSIGNED_1_BYTE:
				if (qwz_qmi_encode_byte(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_2_BYTE:
			case QMI_SIGNED_2_BYTE_ENUM:
				if (qwz_qmi_encode_word(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_4_BYTE:
			case QMI_SIGNED_4_BYTE_ENUM:
				if (qwz_qmi_encode_dword(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_8_BYTE:
				if (qwz_qmi_encode_qword(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_STRUCT:
				if (qwz_qmi_encode_struct(p,
				    &encoded_struct_len, ei,
				    input + inoff, input_len - inoff))
					goto err;
				remain -= encoded_struct_len;
				p += encoded_struct_len;
				break;
			case QMI_STRING:
				if (qwz_qmi_encode_string(p,
				    &encoded_string_len, ei,
				    input + inoff, input_len - inoff))
					goto err;
				remain -= encoded_string_len;
				p += encoded_string_len;
				break;
			default:
				printf("%s: unhandled QMI message element type %d\n",
				    __func__, ei->data_type);
				goto err;
			}
		}

		/* Backfill the TLV header: type byte, 16-bit LE length. */
		op[0] = ei->tlv_type;
		op[1] = (p - (op + 3)) & 0xff;
		op[2] = ((p - (op + 3)) >> 8) & 0xff;

		ei++;
	}

	/* Debug hex dump of the encoded message; disabled by default. */
	if (0) {
		int i;
		DNPRINTF(QWZ_D_QMI,
		   "%s: message type 0x%x txnid 0x%x msgid 0x%x "
		    "msglen %zu encoded:", __func__,
		    type, *txn_id, msg_id, *encoded_len - sizeof(hdr));
		for (i = 0; i < *encoded_len; i++) {
			DNPRINTF(QWZ_D_QMI, "%s %.2x", i % 16 == 0 ? "\n" : "",
			    (*encoded_msg)[i]);
		}
		if (i % 16)
			DNPRINTF(QWZ_D_QMI, "\n");
	}

	(*txn_id)++; /* wrap-around is fine */
	return 0;
err:
	free(*encoded_msg, M_DEVBUF, *encoded_len);
	*encoded_msg = NULL;
	*encoded_len = 0;
	return -1;
}
7013 
7014 int
7015 qwz_qmi_send_request(struct qwz_softc *sc, uint16_t msg_id, size_t msg_len,
7016     const struct qmi_elem_info *ei, void *req, size_t req_len)
7017 {
7018 	struct qrtr_hdr_v1 hdr;
7019 	struct mbuf *m;
7020 	uint8_t *encoded_msg;
7021 	size_t encoded_len;
7022 	size_t totlen, padlen;
7023 	int err;
7024 
7025 	if (qwz_qmi_encode_msg(&encoded_msg, &encoded_len, QMI_REQUEST,
7026 	    &sc->qmi_txn_id, msg_id, msg_len, ei, req, req_len))
7027 		return -1;
7028 
7029 	totlen = sizeof(hdr) + encoded_len;
7030 	padlen = roundup(totlen, 4);
7031 
7032 	m = m_gethdr(M_DONTWAIT, MT_DATA);
7033 	if (m == NULL) {
7034 		err = ENOBUFS;
7035 		goto done;
7036 	}
7037 
7038 	if (padlen <= MCLBYTES)
7039 		MCLGET(m, M_DONTWAIT);
7040 	else
7041 		MCLGETL(m, M_DONTWAIT, padlen);
7042 	if ((m->m_flags & M_EXT) == 0) {
7043 		err = ENOBUFS;
7044 		goto done;
7045 	}
7046 
7047 	m->m_len = m->m_pkthdr.len = padlen;
7048 
7049 	memset(&hdr, 0, sizeof(hdr));
7050 	hdr.version = htole32(QRTR_PROTO_VER_1);
7051 	hdr.type = htole32(QRTR_TYPE_DATA);
7052 	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
7053 	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
7054 	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
7055 	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
7056 	hdr.size = htole32(encoded_len);
7057 
7058 	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
7059 	if (err)
7060 		goto done;
7061 
7062 	err = m_copyback(m, sizeof(hdr), encoded_len, encoded_msg, M_NOWAIT);
7063 	if (err)
7064 		goto done;
7065 
7066 	/* Zero-pad the mbuf */
7067 	if (padlen != totlen) {
7068 		uint32_t pad = 0;
7069 		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
7070 		if (err)
7071 			goto done;
7072 	}
7073 
7074 	err = sc->ops.submit_xfer(sc, m);
7075 done:
7076 	if (err)
7077 		m_freem(m);
7078 	free(encoded_msg, M_DEVBUF, encoded_len);
7079 	return err;
7080 }
7081 
7082 int
7083 qwz_qmi_phy_cap_send(struct qwz_softc *sc)
7084 {
7085 	struct qmi_wlanfw_phy_cap_req_msg_v01 req;
7086 	int ret;
7087 
7088 	memset(&req, 0, sizeof(req));
7089 
7090 	DNPRINTF(QWZ_D_QMI, "%s: qmi phy cap request\n", __func__);
7091 
7092 	ret = qwz_qmi_send_request(sc, QMI_WLANFW_PHY_CAP_REQ_V01,
7093 			       QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN,
7094 			       qmi_wlanfw_phy_cap_req_msg_v01_ei,
7095 			       &req, sizeof(req));
7096 	if (ret) {
7097 		printf("%s: failed to send phy cap request: %d\n",
7098 		    sc->sc_dev.dv_xname, ret);
7099 		return -1;
7100 	}
7101 
7102 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
7103 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
7104 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzphycap",
7105 		    SEC_TO_NSEC(1));
7106 		if (ret) {
7107 			/* Not having a phy cap is OK */
7108 			return 0;
7109 		}
7110 	}
7111 
7112 	return 0;
7113 }
7114 
7115 int
7116 qwz_qmi_fw_ind_register_send(struct qwz_softc *sc)
7117 {
7118 	struct qmi_wlanfw_ind_register_req_msg_v01 req;
7119 	int ret;
7120 
7121 	memset(&req, 0, sizeof(req));
7122 
7123 	req.client_id_valid = 1;
7124 	req.client_id = QMI_WLANFW_CLIENT_ID;
7125 	req.fw_ready_enable_valid = 1;
7126 	req.fw_ready_enable = 1;
7127 	req.request_mem_enable_valid = 1;
7128 	req.request_mem_enable = 1;
7129 	req.fw_mem_ready_enable_valid = 1;
7130 	req.fw_mem_ready_enable = 1;
7131 	req.cal_done_enable_valid = 1;
7132 	req.cal_done_enable = 1;
7133 	req.fw_init_done_enable_valid = 1;
7134 	req.fw_init_done_enable = 1;
7135 
7136 	req.pin_connect_result_enable_valid = 0;
7137 	req.pin_connect_result_enable = 0;
7138 
7139 	DNPRINTF(QWZ_D_QMI, "%s: qmi indication register request\n", __func__);
7140 
7141 	ret = qwz_qmi_send_request(sc, QMI_WLANFW_IND_REGISTER_REQ_V01,
7142 			       QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
7143 			       qmi_wlanfw_ind_register_req_msg_v01_ei,
7144 			       &req, sizeof(req));
7145 	if (ret) {
7146 		printf("%s: failed to send indication register request: %d\n",
7147 		    sc->sc_dev.dv_xname, ret);
7148 		return -1;
7149 	}
7150 
7151 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
7152 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
7153 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwind",
7154 		    SEC_TO_NSEC(1));
7155 		if (ret) {
7156 			printf("%s: fw indication register request timeout\n",
7157 			    sc->sc_dev.dv_xname);
7158 			return ret;
7159 		}
7160 	}
7161 
7162 	return 0;
7163 }
7164 
7165 int
7166 qwz_qmi_host_cap_send(struct qwz_softc *sc)
7167 {
7168 	struct qmi_wlanfw_host_cap_req_msg_v01 req;
7169 	int ret;
7170 
7171 	memset(&req, 0, sizeof(req));
7172 	req.num_clients_valid = 1;
7173 	req.num_clients = 1;
7174 	req.mem_cfg_mode = ATH12K_QMI_TARGET_MEM_MODE_DEFAULT;
7175 	req.mem_cfg_mode_valid = 1;
7176 	req.bdf_support_valid = 1;
7177 	req.bdf_support = 1;
7178 
7179 	req.m3_support_valid = 1;
7180 	req.m3_support = 1;
7181 	req.m3_cache_support_valid = 1;
7182 	req.m3_cache_support = 1;
7183 
7184 	req.cal_done_valid = 1;
7185 	req.cal_done = sc->qmi_cal_done;
7186 
7187 	if (sc->hw_params.qmi_cnss_feature_bitmap) {
7188 		req.feature_list_valid = 1;
7189 		req.feature_list = sc->hw_params.qmi_cnss_feature_bitmap;
7190 	}
7191 
7192 	if (sc->hw_params.internal_sleep_clock) {
7193 		req.nm_modem_valid = 1;
7194 
7195 		/* Notify firmware that this is non-qualcomm platform. */
7196 		req.nm_modem |= QWZ_HOST_CSTATE_BIT;
7197 
7198 		/* Notify firmware about the sleep clock selection,
7199 		 * nm_modem_bit[1] is used for this purpose. Host driver on
7200 		 * non-qualcomm platforms should select internal sleep
7201 		 * clock.
7202 		 */
7203 		req.nm_modem |= QWZ_SLEEP_CLOCK_SELECT_INTERNAL_BIT;
7204 		req.nm_modem |= QWZ_PLATFORM_CAP_PCIE_GLOBAL_RESET;
7205 	}
7206 
7207 	DNPRINTF(QWZ_D_QMI, "%s: qmi host cap request\n", __func__);
7208 
7209 	ret = qwz_qmi_send_request(sc, QMI_WLANFW_HOST_CAP_REQ_V01,
7210 			       QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
7211 			       qmi_wlanfw_host_cap_req_msg_v01_ei,
7212 			       &req, sizeof(req));
7213 	if (ret) {
7214 		printf("%s: failed to send host cap request: %d\n",
7215 		    sc->sc_dev.dv_xname, ret);
7216 		return -1;
7217 	}
7218 
7219 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
7220 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
7221 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwhcap",
7222 		    SEC_TO_NSEC(1));
7223 		if (ret) {
7224 			printf("%s: fw host cap request timeout\n",
7225 			    sc->sc_dev.dv_xname);
7226 			return ret;
7227 		}
7228 	}
7229 
7230 	return 0;
7231 }
7232 
/*
 * Answer the firmware's memory request: wait for the REQUEST_MEM
 * indication, allocate one contiguous DMA block covering all requested
 * segments, and reply with a RESPOND_MEM message that chunks the block
 * into the requested segments.
 *
 * Returns 0 on success, EBUSY when the firmware rejected an empty
 * allocation and the handshake should be retried, -1 on errors.
 */
int
qwz_qmi_mem_seg_send(struct qwz_softc *sc)
{
	struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
	struct qmi_wlanfw_request_mem_ind_msg_v01 *ind;
	uint32_t mem_seg_len;
	const uint32_t mem_seg_len_max = 64; /* bump if needed by future fw */
	uint16_t expected_result;
	size_t total_size;
	int i, ret;

	sc->fwmem_ready = 0;

	/* Wait for the interrupt path to post the REQUEST_MEM indication. */
	while (sc->sc_req_mem_ind == NULL) {
		ret = tsleep_nsec(&sc->sc_req_mem_ind, 0, "qwzfwmem",
		    SEC_TO_NSEC(10));
		if (ret) {
			printf("%s: fw memory request timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	sc->expect_fwmem_req = 0;

	/* Sanity-check the requested segment list. */
	ind = sc->sc_req_mem_ind;
	mem_seg_len = le32toh(ind->mem_seg_len);
	if (mem_seg_len > mem_seg_len_max) {
		printf("%s: firmware requested too many memory segments: %u\n",
		    sc->sc_dev.dv_xname, mem_seg_len);
		free(sc->sc_req_mem_ind, M_DEVBUF, sizeof(*sc->sc_req_mem_ind));
		sc->sc_req_mem_ind = NULL;
		return -1;
	}

	total_size = 0;
	for (i = 0; i < mem_seg_len; i++) {
		if (ind->mem_seg[i].size == 0) {
			printf("%s: firmware requested zero-sized "
			    "memory segment %u\n", sc->sc_dev.dv_xname, i);
			free(sc->sc_req_mem_ind, M_DEVBUF,
			    sizeof(*sc->sc_req_mem_ind));
			sc->sc_req_mem_ind = NULL;
			return -1;
		}
		total_size += le32toh(ind->mem_seg[i].size);
	}

	/* The response structure is heap-allocated; it is large. */
	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (req == NULL) {
		printf("%s: failed to allocate respond memory request\n",
		    sc->sc_dev.dv_xname);
		free(sc->sc_req_mem_ind, M_DEVBUF, sizeof(*sc->sc_req_mem_ind));
		sc->sc_req_mem_ind = NULL;
		return -1;
	}

	if (total_size == 0) {
		/* Should not happen. Send back an empty allocation. */
		printf("%s: firmware has requested no memory\n",
		    sc->sc_dev.dv_xname);
		mem_seg_len = 0;
	} else if (sc->fwmem == NULL || QWZ_DMA_LEN(sc->fwmem) < total_size) {
		/* (Re-)allocate the DMA block, 64KB-aligned. */
		if (sc->fwmem != NULL)
			qwz_dmamem_free(sc->sc_dmat, sc->fwmem);
		sc->fwmem = qwz_dmamem_alloc(sc->sc_dmat, total_size, 65536);
		if (sc->fwmem == NULL) {
			printf("%s: failed to allocate %zu bytes of DMA "
			    "memory for firmware\n", sc->sc_dev.dv_xname,
			    total_size);
			/* Send back an empty allocation. */
			mem_seg_len = 0;
		} else
			DPRINTF("%s: allocated %zu bytes of DMA memory for "
			    "firmware\n", sc->sc_dev.dv_xname, total_size);
	}

	/* Chunk DMA memory block into segments as requested by firmware. */
	req->mem_seg_len = htole32(mem_seg_len);
	if (sc->fwmem) {
		uint64_t paddr = QWZ_DMA_DVA(sc->fwmem);

		for (i = 0; i < mem_seg_len; i++) {
			DPRINTF("%s: mem seg[%d] addr=%llx size=%u type=%u\n",
			    __func__, i, paddr, le32toh(ind->mem_seg[i].size),
			    le32toh(ind->mem_seg[i].type));
			req->mem_seg[i].addr = htole64(paddr);
			paddr += le32toh(ind->mem_seg[i].size);

			/* Values in 'ind' are in little-endian format. */
			req->mem_seg[i].size = ind->mem_seg[i].size;
			req->mem_seg[i].type = ind->mem_seg[i].type;
		}
	}

	/* The indication has been consumed; allow a new one later. */
	free(ind, M_DEVBUF, sizeof(*ind));
	sc->sc_req_mem_ind = NULL;

	ret = qwz_qmi_send_request(sc, QMI_WLANFW_RESPOND_MEM_REQ_V01,
			       QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
			       qmi_wlanfw_respond_mem_req_msg_v01_ei,
			       req, sizeof(*req));
	free(req, M_DEVBUF, sizeof(*req));
	if (ret) {
		printf("%s: failed to send respond memory request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return -1;
	}

	/*
	 * An empty allocation is expected to be rejected by firmware;
	 * in that case wait for the expected failure response.
	 */
	if (mem_seg_len == 0) {
		expected_result = QMI_RESULT_FAILURE_V01;
		sc->qmi_resp.result = QMI_RESULT_SUCCESS_V01;
	} else {
		expected_result = QMI_RESULT_SUCCESS_V01;
		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	}
	while (sc->qmi_resp.result != expected_result) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwrespmem",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: fw respond memory request timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	if (mem_seg_len == 0) {
		sc->expect_fwmem_req = 1;
		return EBUSY; /* retry */
	}

	/* Wait for the FW_MEM_READY indication. */
	while (!sc->fwmem_ready) {
		ret = tsleep_nsec(&sc->fwmem_ready, 0, "qwzfwrdy",
		    SEC_TO_NSEC(10));
		if (ret) {
			printf("%s: fw memory ready timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	return 0;
}
7376 
/*
 * Look up the board data file variant name via SMBIOS.
 * Not implemented; always reports success so attachment can proceed.
 */
int
qwz_core_check_smbios(struct qwz_softc *sc)
{
	return 0; /* TODO */
}
7382 
/*
 * On FDT platforms, read the board calibration variant name from the
 * device tree into the QMI target's bdf_ext string.
 */
int
qwz_core_check_dt(struct qwz_softc *sc)
{
#ifdef __HAVE_FDT
	/* No device tree node, nothing to look up. */
	if (sc->sc_node == 0)
		return 0;

	/*
	 * Copy at most sizeof(bdf_ext) - 1 bytes; presumably bdf_ext is
	 * zero-initialized so the final byte keeps it NUL-terminated —
	 * TODO confirm at the allocation site.
	 */
	OF_getprop(sc->sc_node, "qcom,ath12k-calibration-variant",
	    sc->qmi_target.bdf_ext, sizeof(sc->qmi_target.bdf_ext) - 1);
#endif

	return 0;
}
7396 
/*
 * Query target capabilities (chip/board/soc IDs, firmware version and
 * build ID) via QMI, then run the SMBIOS and device-tree board variant
 * lookups. The response handler elsewhere fills in sc->qmi_target.
 * Returns 0 on success or a non-zero error.
 */
int
qwz_qmi_request_target_cap(struct qwz_softc *sc)
{
	struct qmi_wlanfw_cap_req_msg_v01 req;
	int ret = 0;
	int r;
	char *fw_build_id;
	int fw_build_id_mask_len;

	/* The capability request message carries no fields. */
	memset(&req, 0, sizeof(req));

	ret = qwz_qmi_send_request(sc, QMI_WLANFW_CAP_REQ_V01,
	    QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
	    qmi_wlanfw_cap_req_msg_v01_ei, &req, sizeof(req));
	if (ret) {
		printf("%s: failed to send qmi cap request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/* Wait for a successful QMI response. */
	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwcap",
		    SEC_TO_NSEC(1));
		if (ret) {
			/* Note: returns directly rather than via 'out'. */
			printf("%s: qmi cap request failed\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	/* Strip the well-known build-id prefix, if present. */
	fw_build_id = sc->qmi_target.fw_build_id;
	fw_build_id_mask_len = strlen(QWZ_FW_BUILD_ID_MASK);
	if (!strncmp(fw_build_id, QWZ_FW_BUILD_ID_MASK, fw_build_id_mask_len))
		fw_build_id = fw_build_id + fw_build_id_mask_len;

	DPRINTF("%s: chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
	    sc->sc_dev.dv_xname,
	    sc->qmi_target.chip_id, sc->qmi_target.chip_family,
	    sc->qmi_target.board_id, sc->qmi_target.soc_id);

	DPRINTF("%s: fw_version 0x%x fw_build_timestamp %s fw_build_id %s\n",
	    sc->sc_dev.dv_xname, sc->qmi_target.fw_version,
	    sc->qmi_target.fw_build_timestamp, fw_build_id);

	/* Board variant lookups are best-effort; failures only log. */
	r = qwz_core_check_smbios(sc);
	if (r)
		DPRINTF("%s: SMBIOS bdf variant name not set\n", __func__);

	r = qwz_core_check_dt(sc);
	if (r)
		DPRINTF("%s: DT bdf variant name not set\n", __func__);

out:
	return ret;
}
7453 
/*
 * Compose the board name string used to look up a matching entry in the
 * board-2.bin image.  'with_variant' appends a ",variant=<bdf_ext>"
 * suffix when a calibration variant is known; 'bus_type_mode' produces
 * the minimal "bus=<bus>" fallback name.  Always returns 0.
 */
int
_qwz_core_create_board_name(struct qwz_softc *sc, char *name,
    size_t name_len, int with_variant, int bus_type_mode)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };

	if (with_variant && sc->qmi_target.bdf_ext[0] != '\0')
		snprintf(variant, sizeof(variant), ",variant=%s",
		    sc->qmi_target.bdf_ext);

	switch (sc->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		if (bus_type_mode)
			snprintf(name, name_len, "bus=%s", sc->sc_bus_str);
		else
			snprintf(name, name_len,
			    "bus=%s,vendor=%04x,device=%04x,"
			    "subsystem-vendor=%04x,subsystem-device=%04x,"
			    "qmi-chip-id=%d,qmi-board-id=%d%s",
			    sc->sc_bus_str, sc->id.vendor, sc->id.device,
			    sc->id.subsystem_vendor, sc->id.subsystem_device,
			    sc->qmi_target.chip_id, sc->qmi_target.board_id,
			    variant);
		break;
	default:
		/* Default search key: bus, chip id and board id only. */
		snprintf(name, name_len,
		    "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
		    sc->sc_bus_str, sc->qmi_target.chip_id,
		    sc->qmi_target.board_id, variant);
		break;
	}

	DPRINTF("%s: using board name '%s'\n", __func__, name);

	return 0;
}
7491 
/* Create the primary board name, including the variant suffix. */
int
qwz_core_create_board_name(struct qwz_softc *sc, char *name, size_t name_len)
{
	return _qwz_core_create_board_name(sc, name, name_len, 1, 0);
}
7497 
/* Create a fallback board name without the variant suffix. */
int
qwz_core_create_fallback_board_name(struct qwz_softc *sc, char *name,
    size_t name_len)
{
	return _qwz_core_create_board_name(sc, name, name_len, 0, 0);
}
7504 
/* Create the minimal "bus=<bus>" board name. */
int
qwz_core_create_bus_type_board_name(struct qwz_softc *sc, char *name,
    size_t name_len)
{
	return _qwz_core_create_board_name(sc, name, name_len, 0, 1);
}
7511 
/*
 * TLV header used in firmware board/regdb image files: a 32-bit
 * little-endian IE id and length, followed by 'len' payload bytes
 * padded out to a 4-byte boundary.
 */
struct ath12k_fw_ie {
	uint32_t id;
	uint32_t len;
	uint8_t data[];
};
7517 
/* Sub-IE ids found inside an ATH12K_BD_IE_BOARD element. */
enum ath12k_bd_ie_board_type {
	ATH12K_BD_IE_BOARD_NAME = 0,
	ATH12K_BD_IE_BOARD_DATA = 1,
};
7522 
/* Sub-IE ids found inside an ATH12K_BD_IE_REGDB element. */
enum ath12k_bd_ie_regdb_type {
	ATH12K_BD_IE_REGDB_NAME = 0,
	ATH12K_BD_IE_REGDB_DATA = 1,
};
7527 
/* Top-level IE types in a board-2.bin style image. */
enum ath12k_bd_ie_type {
	/* contains sub IEs of enum ath12k_bd_ie_board_type */
	ATH12K_BD_IE_BOARD = 0,
	/* contains sub IEs of enum ath12k_bd_ie_regdb_type */
	ATH12K_BD_IE_REGDB = 1,
};
7534 
7535 static inline const char *
7536 qwz_bd_ie_type_str(enum ath12k_bd_ie_type type)
7537 {
7538 	switch (type) {
7539 	case ATH12K_BD_IE_BOARD:
7540 		return "board data";
7541 	case ATH12K_BD_IE_REGDB:
7542 		return "regdb data";
7543 	}
7544 
7545 	return "unknown";
7546 }
7547 
7548 int
7549 qwz_core_parse_bd_ie_board(struct qwz_softc *sc,
7550     const u_char **boardfw, size_t *boardfw_len,
7551     const void *buf, size_t buf_len,
7552     const char *boardname, int ie_id, int name_id, int data_id)
7553 {
7554 	const struct ath12k_fw_ie *hdr;
7555 	int name_match_found = 0;
7556 	int ret, board_ie_id;
7557 	size_t board_ie_len;
7558 	const void *board_ie_data;
7559 
7560 	*boardfw = NULL;
7561 	*boardfw_len = 0;
7562 
7563 	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
7564 	while (buf_len > sizeof(struct ath12k_fw_ie)) {
7565 		hdr = buf;
7566 		board_ie_id = le32toh(hdr->id);
7567 		board_ie_len = le32toh(hdr->len);
7568 		board_ie_data = hdr->data;
7569 
7570 		buf_len -= sizeof(*hdr);
7571 		buf += sizeof(*hdr);
7572 
7573 		if (buf_len < roundup(board_ie_len, 4)) {
7574 			printf("%s: invalid %s length: %zu < %zu\n",
7575 			    sc->sc_dev.dv_xname, qwz_bd_ie_type_str(ie_id),
7576 			    buf_len, roundup(board_ie_len, 4));
7577 			return EINVAL;
7578 		}
7579 
7580 		if (board_ie_id == name_id) {
7581 			if (board_ie_len != strlen(boardname))
7582 				goto next;
7583 
7584 			ret = memcmp(board_ie_data, boardname, board_ie_len);
7585 			if (ret)
7586 				goto next;
7587 
7588 			name_match_found = 1;
7589 			   DPRINTF("%s: found match %s for name '%s'", __func__,
7590 			       qwz_bd_ie_type_str(ie_id), boardname);
7591 		} else if (board_ie_id == data_id) {
7592 			if (!name_match_found)
7593 				/* no match found */
7594 				goto next;
7595 
7596 			DPRINTF("%s: found %s for '%s'", __func__,
7597 			    qwz_bd_ie_type_str(ie_id), boardname);
7598 
7599 			*boardfw = board_ie_data;
7600 			*boardfw_len = board_ie_len;
7601 			return 0;
7602 		} else {
7603 			printf("%s: unknown %s id found: %d\n", __func__,
7604 			    qwz_bd_ie_type_str(ie_id), board_ie_id);
7605 		}
7606 next:
7607 		/* jump over the padding */
7608 		board_ie_len = roundup(board_ie_len, 4);
7609 
7610 		buf_len -= board_ie_len;
7611 		buf += board_ie_len;
7612 	}
7613 
7614 	/* no match found */
7615 	return ENOENT;
7616 }
7617 
/*
 * Locate the board/regdb blob matching 'boardname' inside a
 * board-2.bin style image (fwdata/fwdata_len).  The image begins with
 * the ATH12K_BOARD_MAGIC string followed by a sequence of TLV IEs;
 * IEs of type 'ie_id_match' contain sub-IEs which are parsed by
 * qwz_core_parse_bd_ie_board().
 *
 * On success, *boardfw/*boardfw_len point into 'fwdata'; no copy is
 * made.  Returns 0 on success, ENOENT if no matching entry was found,
 * or EINVAL if the image is malformed.
 */
int
qwz_core_fetch_board_data_api_n(struct qwz_softc *sc,
    const u_char **boardfw, size_t *boardfw_len,
    u_char *fwdata, size_t fwdata_len,
    const char *boardname, int ie_id_match, int name_id, int data_id)
{
	size_t len, magic_len;
	const uint8_t *data;
	char *filename;
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	/* Only used in diagnostic messages. */
	filename = ATH12K_BOARD_API2_FILE;

	*boardfw = NULL;
	*boardfw_len = 0;

	data = fwdata;
	len = fwdata_len;

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		printf("%s: failed to find magic value in %s, "
		    "file too short: %zu\n",
		    sc->sc_dev.dv_xname, filename, len);
		return EINVAL;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		DPRINTF("%s: found invalid board magic\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* magic is padded to 4 bytes */
	magic_len = roundup(magic_len, 4);
	if (len < magic_len) {
		printf("%s: %s too small to contain board data, len: %zu\n",
		    sc->sc_dev.dv_xname, filename, len);
		return EINVAL;
	}

	data += magic_len;
	len -= magic_len;

	/* Walk the top-level TLV IEs. */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32toh(hdr->id);
		ie_len = le32toh(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < roundup(ie_len, 4)) {
			printf("%s: invalid length for board ie_id %d "
			    "ie_len %zu len %zu\n",
			    sc->sc_dev.dv_xname, ie_id, ie_len, len);
			return EINVAL;
		}

		if (ie_id == ie_id_match) {
			ret = qwz_core_parse_bd_ie_board(sc,
			    boardfw, boardfw_len, data, ie_len,
			    boardname, ie_id_match, name_id, data_id);
			if (ret == ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				return ret;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = roundup(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	if (!*boardfw || !*boardfw_len) {
		printf("%s: failed to fetch %s for %s from %s\n",
		    __func__, qwz_bd_ie_type_str(ie_id_match),
		    boardname, filename);
		return ENOENT;
	}

	return 0;
}
7710 
7711 int
7712 qwz_core_fetch_bdf(struct qwz_softc *sc, u_char **data, size_t *len,
7713     const u_char **boardfw, size_t *boardfw_len, const char *filename)
7714 {
7715 	char path[PATH_MAX];
7716 	char boardname[200];
7717 	int ret;
7718 
7719 	ret = snprintf(path, sizeof(path), "%s-%s-%s",
7720 	    ATH12K_FW_DIR, sc->hw_params.fw.dir, filename);
7721 	if (ret < 0 || ret >= sizeof(path))
7722 		return ENOSPC;
7723 
7724 	ret = qwz_core_create_board_name(sc, boardname, sizeof(boardname));
7725 	if (ret) {
7726 		DPRINTF("%s: failed to create board name: %d",
7727 		    sc->sc_dev.dv_xname, ret);
7728 		return ret;
7729 	}
7730 
7731 	ret = loadfirmware(path, data, len);
7732 	if (ret) {
7733 		printf("%s: could not read %s (error %d)\n",
7734 		    sc->sc_dev.dv_xname, path, ret);
7735 		return ret;
7736 	}
7737 
7738 	ret = qwz_core_fetch_board_data_api_n(sc, boardfw, boardfw_len,
7739 	    *data, *len, boardname, ATH12K_BD_IE_BOARD,
7740 	    ATH12K_BD_IE_BOARD_NAME, ATH12K_BD_IE_BOARD_DATA);
7741 	if (ret) {
7742 		DPRINTF("%s: failed to fetch board data for %s from %s\n",
7743 		    sc->sc_dev.dv_xname, boardname, path);
7744 		return ret;
7745 	}
7746 
7747 	return 0;
7748 }
7749 
/*
 * Download a board data file, regulatory database, or calibration file
 * to firmware target memory via QMI, in chunks of at most
 * QMI_WLANFW_MAX_DATA_SIZE_V01 bytes.  Each chunk must be acknowledged
 * by the firmware before the next is sent.  Returns 0 on success or a
 * non-zero error; a zero-length image yields EINVAL.
 */
int
qwz_qmi_load_file_target_mem(struct qwz_softc *sc, const u_char *data,
    size_t len, int type)
{
	struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
	const uint8_t *p = data;
#ifdef notyet
	void *bdf_addr = NULL;
#endif
	int ret = EINVAL; /* empty fw image */
	uint32_t remaining = len;

	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!req) {
		printf("%s: failed to allocate bfd download request\n",
		    sc->sc_dev.dv_xname);
		return ENOMEM;
	}

	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
		req->file_id = sc->qmi_target.board_id;
		req->total_size_valid = 1;
		req->total_size = remaining;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->bdf_type = type;
		req->bdf_type_valid = 1;
		req->end_valid = 1;
		req->end = 0;

		/* Flag the final segment so firmware knows we're done. */
		if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		/* For EEPROM caldata no payload is copied; a single
		 * request is sent (matches Linux ath12k behavior). */
		if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
			req->data_valid = 0;
			req->end = 1;
			req->data_len = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
		} else {
			memcpy(req->data, p, req->data_len);
		}
		DPRINTF("%s: bdf download req fixed addr type %d\n",
		    __func__, type);

		ret = qwz_qmi_send_request(sc,
		    QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
		    QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
		    qmi_wlanfw_bdf_download_req_msg_v01_ei,
		    req, sizeof(*req));
		if (ret) {
			printf("%s: failed to send bdf download request\n",
			    sc->sc_dev.dv_xname);
			goto err_free_req;
		}

		/* Wait for the firmware to acknowledge this segment. */
		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
		while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
			ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzbdf",
			    SEC_TO_NSEC(1));
			if (ret) {
				printf("%s: bdf download request timeout\n",
				    sc->sc_dev.dv_xname);
				goto err_free_req;
			}
		}

		if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
			remaining = 0;
		} else {
			remaining -= req->data_len;
			p += req->data_len;
			req->seg_id++;
			DPRINTF("%s: bdf download request remaining %i\n",
			    __func__, remaining);
		}
	}

err_free_req:
	free(req, M_DEVBUF, sizeof(*req));

	return ret;
}
7837 
7838 #define QWZ_ELFMAG	"\177ELF"
7839 #define QWZ_SELFMAG	4
7840 
7841 int
7842 qwz_qmi_load_bdf_qmi(struct qwz_softc *sc, int regdb)
7843 {
7844 	u_char *data = NULL;
7845 	const u_char *boardfw;
7846 	size_t len = 0, boardfw_len;
7847 	uint32_t fw_size;
7848 	int ret = 0, bdf_type;
7849 #ifdef notyet
7850 	const uint8_t *tmp;
7851 	uint32_t file_type;
7852 #endif
7853 
7854 	if (sc->fw_img[QWZ_FW_BOARD].data) {
7855 		boardfw = sc->fw_img[QWZ_FW_BOARD].data;
7856 		boardfw_len = sc->fw_img[QWZ_FW_BOARD].size;
7857 	} else {
7858 		ret = qwz_core_fetch_bdf(sc, &data, &len,
7859 		    &boardfw, &boardfw_len,
7860 		    ATH12K_BOARD_API2_FILE);
7861 		if (ret)
7862 			return ret;
7863 
7864 		sc->fw_img[QWZ_FW_BOARD].data = malloc(boardfw_len, M_DEVBUF,
7865 		    M_NOWAIT);
7866 		if (sc->fw_img[QWZ_FW_BOARD].data) {
7867 			memcpy(sc->fw_img[QWZ_FW_BOARD].data, boardfw, boardfw_len);
7868 			sc->fw_img[QWZ_FW_BOARD].size = boardfw_len;
7869 		}
7870 	}
7871 
7872 	if (regdb)
7873 		bdf_type = ATH12K_QMI_BDF_TYPE_REGDB;
7874 	else if (boardfw_len >= QWZ_SELFMAG &&
7875 	    memcmp(boardfw, QWZ_ELFMAG, QWZ_SELFMAG) == 0)
7876 		bdf_type = ATH12K_QMI_BDF_TYPE_ELF;
7877 	else
7878 		bdf_type = ATH12K_QMI_BDF_TYPE_BIN;
7879 
7880 	DPRINTF("%s: bdf_type %d\n", __func__, bdf_type);
7881 
7882 	fw_size = MIN(sc->hw_params.fw.board_size, boardfw_len);
7883 
7884 	ret = qwz_qmi_load_file_target_mem(sc, boardfw, fw_size, bdf_type);
7885 	if (ret) {
7886 		printf("%s: failed to load bdf file\n", __func__);
7887 		goto out;
7888 	}
7889 
7890 	/* QCA6390/WCN6855 does not support cal data, skip it */
7891 	if (bdf_type == ATH12K_QMI_BDF_TYPE_ELF || bdf_type == ATH12K_QMI_BDF_TYPE_REGDB)
7892 		goto out;
7893 #ifdef notyet
7894 	if (ab->qmi.target.eeprom_caldata) {
7895 		file_type = ATH12K_QMI_FILE_TYPE_EEPROM;
7896 		tmp = filename;
7897 		fw_size = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
7898 	} else {
7899 		file_type = ATH12K_QMI_FILE_TYPE_CALDATA;
7900 
7901 		/* cal-<bus>-<id>.bin */
7902 		snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
7903 			 ath12k_bus_str(ab->hif.bus), dev_name(dev));
7904 		fw_entry = ath12k_core_firmware_request(ab, filename);
7905 		if (!IS_ERR(fw_entry))
7906 			goto success;
7907 
7908 		fw_entry = ath12k_core_firmware_request(ab, ATH12K_DEFAULT_CAL_FILE);
7909 		if (IS_ERR(fw_entry)) {
7910 			/* Caldata may not be present during first time calibration in
7911 			 * factory hence allow to boot without loading caldata in ftm mode
7912 			 */
7913 			if (ath12k_ftm_mode) {
7914 				ath12k_info(ab,
7915 					    "Booting without cal data file in factory test mode\n");
7916 				return 0;
7917 			}
7918 			ret = PTR_ERR(fw_entry);
7919 			ath12k_warn(ab,
7920 				    "qmi failed to load CAL data file:%s\n",
7921 				    filename);
7922 			goto out;
7923 		}
7924 success:
7925 		fw_size = MIN(ab->hw_params.fw.board_size, fw_entry->size);
7926 		tmp = fw_entry->data;
7927 	}
7928 
7929 	ret = ath12k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
7930 	if (ret < 0) {
7931 		ath12k_warn(ab, "qmi failed to load caldata\n");
7932 		goto out_qmi_cal;
7933 	}
7934 
7935 	ath12k_dbg(ab, ATH12K_DBG_QMI, "caldata type: %u\n", file_type);
7936 
7937 out_qmi_cal:
7938 	if (!ab->qmi.target.eeprom_caldata)
7939 		release_firmware(fw_entry);
7940 #endif
7941 out:
7942 	free(data, M_DEVBUF, len);
7943 	if (ret == 0)
7944 		DPRINTF("%s: BDF download sequence completed\n", __func__);
7945 
7946 	return ret;
7947 }
7948 
7949 int
7950 qwz_qmi_event_load_bdf(struct qwz_softc *sc)
7951 {
7952 	int ret;
7953 
7954 	ret = qwz_qmi_request_target_cap(sc);
7955 	if (ret < 0) {
7956 		printf("%s: failed to request qmi target capabilities: %d\n",
7957 		    sc->sc_dev.dv_xname, ret);
7958 		return ret;
7959 	}
7960 
7961 	ret = qwz_qmi_load_bdf_qmi(sc, 1);
7962 	if (ret < 0) {
7963 		printf("%s: failed to load regdb file: %d\n",
7964 		    sc->sc_dev.dv_xname, ret);
7965 		return ret;
7966 	}
7967 
7968 	ret = qwz_qmi_load_bdf_qmi(sc, 0);
7969 	if (ret < 0) {
7970 		printf("%s: failed to load board data file: %d\n",
7971 		    sc->sc_dev.dv_xname, ret);
7972 		return ret;
7973 	}
7974 
7975 	return 0;
7976 }
7977 
/*
 * Load the M3 firmware image into a DMA buffer for the target to fetch.
 * The image is cached in sc->fw_img[QWZ_FW_M3] after the first load so
 * device resets avoid re-reading it from disk.  Returns 0 on success
 * or a non-zero error.
 */
int
qwz_qmi_m3_load(struct qwz_softc *sc)
{
	u_char *data;
	size_t len;
	char path[PATH_MAX];
	int ret;

	if (sc->fw_img[QWZ_FW_M3].data) {
		data = sc->fw_img[QWZ_FW_M3].data;
		len = sc->fw_img[QWZ_FW_M3].size;
	} else {
		ret = snprintf(path, sizeof(path), "%s-%s-%s",
		    ATH12K_FW_DIR, sc->hw_params.fw.dir, ATH12K_M3_FILE);
		if (ret < 0 || ret >= sizeof(path))
			return ENOSPC;

		ret = loadfirmware(path, &data, &len);
		if (ret) {
			printf("%s: could not read %s (error %d)\n",
			    sc->sc_dev.dv_xname, path, ret);
			return ret;
		}

		/* The cache takes ownership of the loadfirmware() buffer. */
		sc->fw_img[QWZ_FW_M3].data = data;
		sc->fw_img[QWZ_FW_M3].size = len;
	}

	/* (Re-)allocate the DMA buffer if missing or too small. */
	if (sc->m3_mem == NULL || QWZ_DMA_LEN(sc->m3_mem) < len) {
		if (sc->m3_mem)
			qwz_dmamem_free(sc->sc_dmat, sc->m3_mem);
		sc->m3_mem = qwz_dmamem_alloc(sc->sc_dmat, len, 65536);
		if (sc->m3_mem == NULL) {
			printf("%s: failed to allocate %zu bytes of DMA "
			    "memory for M3 firmware\n", sc->sc_dev.dv_xname,
			    len);
			return ENOMEM;
		}
	}

	memcpy(QWZ_DMA_KVA(sc->m3_mem), data, len);
	return 0;
}
8021 
8022 int
8023 qwz_qmi_wlanfw_m3_info_send(struct qwz_softc *sc)
8024 {
8025 	struct qmi_wlanfw_m3_info_req_msg_v01 req;
8026 	int ret = 0;
8027 	uint64_t paddr;
8028 	uint32_t size;
8029 
8030 	memset(&req, 0, sizeof(req));
8031 
8032 	ret = qwz_qmi_m3_load(sc);
8033 	if (ret) {
8034 		printf("%s: failed to load m3 firmware: %d",
8035 		    sc->sc_dev.dv_xname, ret);
8036 		return ret;
8037 	}
8038 
8039 	paddr = QWZ_DMA_DVA(sc->m3_mem);
8040 	size = QWZ_DMA_LEN(sc->m3_mem);
8041 	req.addr = htole64(paddr);
8042 	req.size = htole32(size);
8043 
8044 	ret = qwz_qmi_send_request(sc, QMI_WLANFW_M3_INFO_REQ_V01,
8045 	    QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
8046 	    qmi_wlanfw_m3_info_req_msg_v01_ei, &req, sizeof(req));
8047 	if (ret) {
8048 		printf("%s: failed to send m3 information request: %d\n",
8049 		    sc->sc_dev.dv_xname, ret);
8050 		return ret;
8051 	}
8052 
8053 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
8054 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
8055 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwm3",
8056 		    SEC_TO_NSEC(1));
8057 		if (ret) {
8058 			printf("%s: m3 information request timeout\n",
8059 			    sc->sc_dev.dv_xname);
8060 			return ret;
8061 		}
8062 	}
8063 
8064 	return 0;
8065 }
8066 
/* Dump SRNG debug statistics.  Stub; not implemented on this port. */
void
qwz_hal_dump_srng_stats(struct qwz_softc *sc)
{
	DPRINTF("%s not implemented\n", __func__);
}
8072 
8073 uint16_t
8074 qwz_hal_srng_get_entrysize(struct qwz_softc *sc, uint32_t ring_type)
8075 {
8076 	struct hal_srng_config *srng_config;
8077 
8078 	KASSERT(ring_type < HAL_MAX_RING_TYPES);
8079 
8080 	srng_config = &sc->hal.srng_config[ring_type];
8081 	return (srng_config->entry_size << 2);
8082 }
8083 
8084 uint32_t
8085 qwz_hal_srng_get_max_entries(struct qwz_softc *sc, uint32_t ring_type)
8086 {
8087 	struct hal_srng_config *srng_config;
8088 
8089 	KASSERT(ring_type < HAL_MAX_RING_TYPES);
8090 
8091 	srng_config = &sc->hal.srng_config[ring_type];
8092 	return (srng_config->max_size / srng_config->entry_size);
8093 }
8094 
/*
 * Pop the next descriptor from a destination ring and advance the tail
 * pointer, wrapping at the end of the ring.  Returns NULL when the
 * ring is empty (tail has caught up with the cached head).  The caller
 * is expected to hold srng->lock (see the lockdep assertion).
 */
uint32_t *
qwz_hal_srng_dst_get_next_entry(struct qwz_softc *sc, struct hal_srng *srng)
{
	uint32_t *desc;
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
		return NULL;

	/* tp is an offset in 32-bit words from the ring base. */
	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;

	srng->u.dst_ring.tp += srng->entry_size;

	/* wrap around to start of ring */
	if (srng->u.dst_ring.tp == srng->ring_size)
		srng->u.dst_ring.tp = 0;
#ifdef notyet
	/* Try to prefetch the next descriptor in the ring */
	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
		ath12k_hal_srng_prefetch_desc(ab, srng);
#endif
	return desc;
}
8119 
8120 int
8121 qwz_hal_srng_dst_num_free(struct qwz_softc *sc, struct hal_srng *srng,
8122     int sync_hw_ptr)
8123 {
8124 	uint32_t tp, hp;
8125 #ifdef notyet
8126 	lockdep_assert_held(&srng->lock);
8127 #endif
8128 	tp = srng->u.dst_ring.tp;
8129 
8130 	if (sync_hw_ptr) {
8131 		hp = *srng->u.dst_ring.hp_addr;
8132 		srng->u.dst_ring.cached_hp = hp;
8133 	} else {
8134 		hp = srng->u.dst_ring.cached_hp;
8135 	}
8136 
8137 	if (hp >= tp)
8138 		return (hp - tp) / srng->entry_size;
8139 	else
8140 		return (srng->ring_size - tp + hp) / srng->entry_size;
8141 }
8142 
/*
 * Advance the source ring's head pointer over an entry that has
 * already been reaped (reap_hp was advanced earlier) and return that
 * entry.  Returns NULL when there are no reaped entries pending.
 */
uint32_t *
qwz_hal_srng_src_get_next_reaped(struct qwz_softc *sc, struct hal_srng *srng)
{
	uint32_t *desc;
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	/* Advance the head with wrap-around. */
	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			      srng->ring_size;

	return desc;
}
8159 
/*
 * Return a pointer to the next free entry of a source ring without
 * advancing the head pointer, or NULL if the ring is full (advancing
 * the head would collide with the cached tail).
 */
uint32_t *
qwz_hal_srng_src_peek(struct qwz_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
	    srng->u.src_ring.cached_tp)
		return NULL;

	return srng->ring_base_vaddr + srng->u.src_ring.hp;
}
8172 
/* Copy the device's MSI target address (low/high 32 bits) to the caller. */
void
qwz_get_msi_address(struct qwz_softc *sc, uint32_t *addr_lo,
    uint32_t *addr_hi)
{
	*addr_lo = sc->msi_addr_lo;
	*addr_hi = sc->msi_addr_hi;
}
8180 
8181 int
8182 qwz_dp_srng_find_ring_in_mask(int ring_num, const uint8_t *grp_mask)
8183 {
8184 	int ext_group_num;
8185 	uint8_t mask = 1 << ring_num;
8186 
8187 	for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
8188 	     ext_group_num++) {
8189 		if (mask & grp_mask[ext_group_num])
8190 			return ext_group_num;
8191 	}
8192 
8193 	return -1;
8194 }
8195 
/*
 * Map a ring type/number to the external interrupt group that services
 * it, using the per-hardware ring masks.  Returns the group index, or
 * -1 for ring types which are not serviced via an ext group (CE rings,
 * command/status rings, etc.).
 */
int
qwz_dp_srng_calculate_msi_group(struct qwz_softc *sc, enum hal_ring_type type,
    int ring_num)
{
	const uint8_t *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		/* One WBM2SW ring is dedicated to RX buffer release. */
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &sc->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &sc->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &sc->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &sc->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &sc->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &sc->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_TX_MONITOR_DST:
		grp_mask = &sc->hw_params.ring_mask->tx_mon_dest[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &sc->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &sc->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		/* These ring types have no ext interrupt group. */
		return -1;
	}

	return qwz_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
8250 
/*
 * Fill in the MSI address and data for a DP ring's SRNG parameters.
 * If the bus has no user MSI vectors, or the ring is not serviced by
 * an external interrupt group, the ring is left without MSI (addr and
 * data zeroed) and will not raise MSI interrupts.
 */
void
qwz_dp_srng_msi_setup(struct qwz_softc *sc, struct hal_srng_params *ring_params,
    enum hal_ring_type type, int ring_num)
{
	int msi_group_number;
	uint32_t msi_data_start = 0;
	uint32_t msi_data_count = 1;
	uint32_t msi_irq_start = 0;
	uint32_t addr_lo;
	uint32_t addr_hi;
	int ret;

	ret = sc->ops.get_user_msi_vector(sc, "DP",
	    &msi_data_count, &msi_data_start, &msi_irq_start);
	if (ret)
		return;

	msi_group_number = qwz_dp_srng_calculate_msi_group(sc, type,
	    ring_num);
	if (msi_group_number < 0) {
		DPRINTF("%s ring not part of an ext_group; ring_type %d,"
		    "ring_num %d\n", __func__, type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	qwz_get_msi_address(sc, &addr_lo, &addr_hi);

	/* Groups share vectors when there are more groups than vectors. */
	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count) +
	    msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
8286 
/*
 * Allocate DMA memory for a DP ring and configure it via the HAL:
 * clamps the entry count to the ring type's maximum, sets up MSI
 * parameters, and applies per-ring-type interrupt thresholds.
 * On success stores the HAL ring id in ring->ring_id and returns 0;
 * returns a non-zero errno on failure.
 */
int
qwz_dp_srng_setup(struct qwz_softc *sc, struct dp_srng *ring,
    enum hal_ring_type type, int ring_num, int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	uint16_t entry_sz = qwz_hal_srng_get_entrysize(sc, type);
	uint32_t max_entries = qwz_hal_srng_get_max_entries(sc, type);
	int ret;
	int cached = 0;

	if (num_entries > max_entries)
		num_entries = max_entries;

	/* Extra bytes allow the HAL to align the ring base address. */
	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

#ifdef notyet
	if (sc->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
		}
		if (!ring->vaddr_unaligned)
			return -ENOMEM;
	}
#endif
	if (!cached) {
		ring->mem = qwz_dmamem_alloc(sc->sc_dmat, ring->size,
		    PAGE_SIZE);
		if (ring->mem == NULL) {
			printf("%s: could not allocate DP SRNG DMA memory\n",
			    sc->sc_dev.dv_xname);
			return ENOMEM;

		}
	}

	ring->vaddr = QWZ_DMA_KVA(ring->mem);
	ring->paddr = QWZ_DMA_DVA(ring->mem);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	qwz_dp_srng_msi_setup(sc, &params, type, ring_num + mac_id);

	/* Per-ring-type interrupt batching/timer thresholds. */
	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
		    HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_TX_MONITOR_DST:
		params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
			    HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
			    HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* follow through when ring_num >= 3 */
		/* FALLTHROUGH */
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
		    HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		printf("%s: Not a valid ring type in dp :%d\n",
		    sc->sc_dev.dv_xname, type);
		return EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = qwz_hal_srng_setup(sc, type, ring_num, mac_id, &params);
	if (ret < 0) {
		printf("%s: failed to setup srng: %d ring_id %d\n",
		    sc->sc_dev.dv_xname, ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;
	return 0;
}
8410 
/*
 * Begin a ring access section: snapshot the hardware-owned pointer
 * (tail for source rings, head for destination rings) into the cached
 * copy used during the access.
 */
void
qwz_hal_srng_access_begin(struct qwz_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)srng->u.src_ring.tp_addr;
	} else {
		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
	}
}
8424 
/*
 * End a ring access section: publish the software-owned pointer (head
 * for source rings, tail for destination rings) to the hardware, either
 * through a shared memory location (LMAC rings, consumed by firmware)
 * or via a register write.
 */
void
qwz_hal_srng_access_end(struct qwz_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
			/* Register offset relative to the mapped BAR. */
			sc->ops.write32(sc,
			    (unsigned long)srng->u.src_ring.hp_addr -
			    (unsigned long)sc->mem, srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			sc->ops.write32(sc,
			    (unsigned long)srng->u.dst_ring.tp_addr -
			    (unsigned long)sc->mem, srng->u.dst_ring.tp);
		}
	}
#ifdef notyet
	srng->timestamp = jiffies;
#endif
}
8462 
/*
 * Compute the number of link descriptors needed for MPDU/MSDU queues
 * and set up the WBM idle link ring with that many entries.  The total
 * is rounded up to a power of two.  Returns 0 on success and stores
 * the count in *n_link_desc.
 */
int
qwz_wbm_idle_ring_setup(struct qwz_softc *sc, uint32_t *n_link_desc)
{
	struct qwz_dp *dp = &sc->dp;
	uint32_t n_mpdu_link_desc, n_mpdu_queue_desc;
	uint32_t n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	/* Round up to the next power of two if not one already. */
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = qwz_dp_srng_setup(sc, &dp->wbm_idle_ring,
	    HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		printf("%s: failed to setup wbm_idle_ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
	}

	return ret;
}
8500 
8501 void
8502 qwz_dp_link_desc_bank_free(struct qwz_softc *sc,
8503     struct dp_link_desc_bank *link_desc_banks)
8504 {
8505 	int i;
8506 
8507 	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
8508 		if (link_desc_banks[i].mem) {
8509 			qwz_dmamem_free(sc->sc_dmat, link_desc_banks[i].mem);
8510 			link_desc_banks[i].mem = NULL;
8511 		}
8512 	}
8513 }
8514 
8515 int
8516 qwz_dp_link_desc_bank_alloc(struct qwz_softc *sc,
8517     struct dp_link_desc_bank *desc_bank, int n_link_desc_bank,
8518     int last_bank_sz)
8519 {
8520 	struct qwz_dp *dp = &sc->dp;
8521 	int i;
8522 	int ret = 0;
8523 	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
8524 
8525 	for (i = 0; i < n_link_desc_bank; i++) {
8526 		if (i == (n_link_desc_bank - 1) && last_bank_sz)
8527 			desc_sz = last_bank_sz;
8528 
8529 		desc_bank[i].mem = qwz_dmamem_alloc(sc->sc_dmat, desc_sz,
8530 		    PAGE_SIZE);
8531 		if (!desc_bank[i].mem) {
8532 			ret = ENOMEM;
8533 			goto err;
8534 		}
8535 
8536 		desc_bank[i].vaddr = QWZ_DMA_KVA(desc_bank[i].mem);
8537 		desc_bank[i].paddr = QWZ_DMA_DVA(desc_bank[i].mem);
8538 		desc_bank[i].size = desc_sz;
8539 	}
8540 
8541 	return 0;
8542 
8543 err:
8544 	qwz_dp_link_desc_bank_free(sc, dp->link_desc_banks);
8545 
8546 	return ret;
8547 }
8548 
/*
 * Program the WBM idle-link scatter list into hardware: chain the scatter
 * buffers together via next-buffer pointers embedded at the end of each
 * buffer, then write base address, size, and head/tail pointer registers
 * so the hardware can walk the idle link descriptor list on its own.
 */
void
qwz_hal_setup_link_idle_list(struct qwz_softc *sc,
    struct hal_wbm_idle_scatter_list *sbuf,
    uint32_t nsbufs, uint32_t tot_link_desc, uint32_t end_offset)
{
	struct ath12k_buffer_addr *link_addr;
	int i;
	uint32_t reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
	uint32_t val;

	/*
	 * The chaining pointer lives immediately past the usable area of
	 * each scatter buffer; start with buffer 0's slot.
	 */
	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;

	/* Link buffer i-1 to buffer i by storing i's DMA address. */
	for (i = 1; i < nsbufs; i++) {
		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
		link_addr->info1 = FIELD_PREP(
		    HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
		    (uint64_t)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
		    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
		    BASE_ADDR_MATCH_TAG_VAL);

		link_addr = (void *)sbuf[i].vaddr +
		    HAL_WBM_IDLE_SCATTER_BUF_SIZE;
	}

	/* Scatter buffer size (in 64-byte units) and idle-list mode. */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(sc),
	    FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
	    FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(sc),
	    FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
	    reg_scatter_buf_sz * nsbufs));
	/* Base address (low and high parts) of the first scatter buffer. */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_LSB(sc),
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
	    sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_RING_BASE_MSB(sc),
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    (uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
	    BASE_ADDR_MATCH_TAG_VAL));

	/* Setup head and tail pointers for the idle list */
	/*
	 * NOTE(review): HEAD_INFO_IX0 is written twice, first with the last
	 * buffer's address and then with the first buffer's; this matches
	 * the upstream Linux driver's sequence — confirm intended.
	 */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(sc),
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[nsbufs - 1].paddr));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(sc),
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    ((uint64_t)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
	    (end_offset >> 2)));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(sc),
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));

	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(sc),
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(sc),
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    ((uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1, 0));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(sc),
	    2 * tot_link_desc);

	/* Enable the SRNG */
	val = HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE;
	val |= HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE;
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(sc),
	    val);
}
8627 
8628 void
8629 qwz_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, uint32_t cookie,
8630     bus_addr_t paddr)
8631 {
8632 	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
8633 	    (paddr & HAL_ADDR_LSB_REG_MASK));
8634 	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
8635 	    ((uint64_t)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
8636 	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
8637 	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
8638 }
8639 
8640 void
8641 qwz_dp_scatter_idle_link_desc_cleanup(struct qwz_softc *sc)
8642 {
8643 	struct qwz_dp *dp = &sc->dp;
8644 	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
8645 	int i;
8646 
8647 	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
8648 		if (slist[i].mem == NULL)
8649 			continue;
8650 
8651 		qwz_dmamem_free(sc->sc_dmat, slist[i].mem);
8652 		slist[i].mem = NULL;
8653 		slist[i].vaddr = NULL;
8654 		slist[i].paddr = 0L;
8655 	}
8656 }
8657 
/*
 * Allocate scatter buffers for the WBM idle-link list and populate them
 * with link descriptors pointing into the link descriptor banks, then
 * hand the finished list to the hardware. Returns 0 or an errno; on
 * failure all scatter buffers allocated so far are released.
 */
int
qwz_dp_scatter_idle_link_desc_setup(struct qwz_softc *sc, int size,
    uint32_t n_link_desc_bank, uint32_t n_link_desc, uint32_t last_bank_sz)
{
	struct qwz_dp *dp = &sc->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	uint32_t n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int n_entries;
	bus_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	uint32_t end_offset;
	uint32_t cookie;

	/* How many link descriptor entries fit into one scatter buffer. */
	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
	    qwz_hal_srng_get_entrysize(sc, HAL_WBM_IDLE_LINK);
	num_scatter_buf = howmany(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return EINVAL;

	/* Allocate DMA memory for each scatter buffer. */
	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].mem = qwz_dmamem_alloc(sc->sc_dmat,
		    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX, PAGE_SIZE);
		if (slist[i].mem == NULL) {
			ret = ENOMEM;
			goto err;
		}

		slist[i].vaddr = QWZ_DMA_KVA(slist[i].mem);
		slist[i].paddr = QWZ_DMA_DVA(slist[i].mem);
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	/*
	 * Walk every link descriptor bank and write one link descriptor
	 * per HAL_LINK_DESC_SIZE chunk into the scatter buffers, moving to
	 * the next scatter buffer whenever the current one fills up.
	 */
	for (i = 0; i < n_link_desc_bank; i++) {
		n_entries = DP_LINK_DESC_ALLOC_SIZE_THRESH / HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			/* Cookie encodes entry index and bank number. */
			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
			qwz_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			/* Current scatter buffer full; advance to next. */
			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	/* Byte offset of the last used entry in the final buffer. */
	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
	    sizeof(struct hal_wbm_link_desc);
	qwz_hal_setup_link_idle_list(sc, slist, num_scatter_buf,
	    n_link_desc, end_offset);

	return 0;

err:
	qwz_dp_scatter_idle_link_desc_cleanup(sc);

	return ret;
}
8731 
8732 uint32_t *
8733 qwz_hal_srng_src_get_next_entry(struct qwz_softc *sc, struct hal_srng *srng)
8734 {
8735 	uint32_t *desc;
8736 	uint32_t next_hp;
8737 #ifdef notyet
8738 	lockdep_assert_held(&srng->lock);
8739 #endif
8740 
8741 	/* TODO: Using % is expensive, but we have to do this since size of some
8742 	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
8743 	 * if separate function is defined for rings having power of 2 ring size
8744 	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
8745 	 * overhead of % by using mask (with &).
8746 	 */
8747 	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
8748 
8749 	if (next_hp == srng->u.src_ring.cached_tp)
8750 		return NULL;
8751 
8752 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
8753 	srng->u.src_ring.hp = next_hp;
8754 
8755 	/* TODO: Reap functionality is not used by all rings. If particular
8756 	 * ring does not use reap functionality, we need not update reap_hp
8757 	 * with next_hp pointer. Need to make sure a separate function is used
8758 	 * before doing any optimization by removing below code updating
8759 	 * reap_hp.
8760 	 */
8761 	srng->u.src_ring.reap_hp = next_hp;
8762 
8763 	return desc;
8764 }
8765 
8766 uint32_t *
8767 qwz_hal_srng_src_reap_next(struct qwz_softc *sc, struct hal_srng *srng)
8768 {
8769 	uint32_t *desc;
8770 	uint32_t next_reap_hp;
8771 #ifdef notyet
8772 	lockdep_assert_held(&srng->lock);
8773 #endif
8774 	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
8775 	    srng->ring_size;
8776 
8777 	if (next_reap_hp == srng->u.src_ring.cached_tp)
8778 		return NULL;
8779 
8780 	desc = srng->ring_base_vaddr + next_reap_hp;
8781 	srng->u.src_ring.reap_hp = next_reap_hp;
8782 
8783 	return desc;
8784 }
8785 
8786 int
8787 qwz_dp_link_desc_setup(struct qwz_softc *sc,
8788     struct dp_link_desc_bank *link_desc_banks, uint32_t ring_type,
8789     struct hal_srng *srng, uint32_t n_link_desc)
8790 {
8791 	uint32_t tot_mem_sz;
8792 	uint32_t n_link_desc_bank, last_bank_sz;
8793 	uint32_t entry_sz, n_entries;
8794 	uint64_t paddr;
8795 	uint32_t *desc;
8796 	int i, ret;
8797 
8798 	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
8799 	tot_mem_sz += HAL_LINK_DESC_ALIGN;
8800 
8801 	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
8802 		n_link_desc_bank = 1;
8803 		last_bank_sz = tot_mem_sz;
8804 	} else {
8805 		n_link_desc_bank = tot_mem_sz /
8806 		    (DP_LINK_DESC_ALLOC_SIZE_THRESH - HAL_LINK_DESC_ALIGN);
8807 		last_bank_sz = tot_mem_sz % (DP_LINK_DESC_ALLOC_SIZE_THRESH -
8808 		    HAL_LINK_DESC_ALIGN);
8809 
8810 		if (last_bank_sz)
8811 			n_link_desc_bank += 1;
8812 	}
8813 
8814 	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
8815 		return EINVAL;
8816 
8817 	ret = qwz_dp_link_desc_bank_alloc(sc, link_desc_banks,
8818 	    n_link_desc_bank, last_bank_sz);
8819 	if (ret)
8820 		return ret;
8821 
8822 	/* Setup link desc idle list for HW internal usage */
8823 	entry_sz = qwz_hal_srng_get_entrysize(sc, ring_type);
8824 	tot_mem_sz = entry_sz * n_link_desc;
8825 
8826 	/* Setup scatter desc list when the total memory requirement is more */
8827 	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
8828 	    ring_type != HAL_RXDMA_MONITOR_DESC) {
8829 		ret = qwz_dp_scatter_idle_link_desc_setup(sc, tot_mem_sz,
8830 		    n_link_desc_bank, n_link_desc, last_bank_sz);
8831 		if (ret) {
8832 			printf("%s: failed to setup scatting idle list "
8833 			    "descriptor :%d\n",
8834 			    sc->sc_dev.dv_xname, ret);
8835 			goto fail_desc_bank_free;
8836 		}
8837 
8838 		return 0;
8839 	}
8840 #if 0
8841 	spin_lock_bh(&srng->lock);
8842 #endif
8843 	qwz_hal_srng_access_begin(sc, srng);
8844 
8845 	for (i = 0; i < n_link_desc_bank; i++) {
8846 		n_entries = (link_desc_banks[i].size) / HAL_LINK_DESC_SIZE;
8847 		paddr = link_desc_banks[i].paddr;
8848 		while (n_entries &&
8849 		    (desc = qwz_hal_srng_src_get_next_entry(sc, srng))) {
8850 			qwz_hal_set_link_desc_addr(
8851 			    (struct hal_wbm_link_desc *) desc, i, paddr);
8852 			n_entries--;
8853 			paddr += HAL_LINK_DESC_SIZE;
8854 		}
8855 	}
8856 
8857 	qwz_hal_srng_access_end(sc, srng);
8858 #if 0
8859 	spin_unlock_bh(&srng->lock);
8860 #endif
8861 
8862 	return 0;
8863 
8864 fail_desc_bank_free:
8865 	qwz_dp_link_desc_bank_free(sc, link_desc_banks);
8866 
8867 	return ret;
8868 }
8869 
/*
 * Release the DMA memory backing an SRNG and clear the ring's pointers.
 * No-op if the ring was never allocated.
 */
void
qwz_dp_srng_cleanup(struct qwz_softc *sc, struct dp_srng *ring)
{
	if (ring->mem == NULL)
		return;

	/*
	 * The disabled branch is the Linux "cached ring" path; when it is
	 * enabled its else-arm pairs with the qwz_dmamem_free() call below.
	 */
#if 0
	if (ring->cached)
		kfree(ring->vaddr_unaligned);
	else
#endif
		qwz_dmamem_free(sc->sc_dmat, ring->mem);

	ring->mem = NULL;
	ring->vaddr = NULL;
	ring->paddr = 0;
}
8887 
8888 void
8889 qwz_dp_shadow_stop_timer(struct qwz_softc *sc,
8890     struct qwz_hp_update_timer *update_timer)
8891 {
8892 	if (!sc->hw_params.supports_shadow_regs)
8893 		return;
8894 
8895 	timeout_del(&update_timer->timer);
8896 }
8897 
8898 void
8899 qwz_dp_shadow_start_timer(struct qwz_softc *sc, struct hal_srng *srng,
8900     struct qwz_hp_update_timer *update_timer)
8901 {
8902 #ifdef notyet
8903 	lockdep_assert_held(&srng->lock);
8904 #endif
8905 	if (!sc->hw_params.supports_shadow_regs)
8906 		return;
8907 
8908 	update_timer->tx_num++;
8909 	if (update_timer->started)
8910 		return;
8911 
8912 	update_timer->started = 1;
8913 	update_timer->timer_tx_num = update_timer->tx_num;
8914 
8915 	timeout_add_msec(&update_timer->timer, update_timer->interval);
8916 }
8917 
8918 void
8919 qwz_dp_shadow_timer_handler(void *arg)
8920 {
8921 	struct qwz_hp_update_timer *update_timer = arg;
8922 	struct qwz_softc *sc = update_timer->sc;
8923 	struct hal_srng	*srng = &sc->hal.srng_list[update_timer->ring_id];
8924 	int s;
8925 
8926 #ifdef notyet
8927 	spin_lock_bh(&srng->lock);
8928 #endif
8929 	s = splnet();
8930 
8931 	/*
8932 	 * Update HP if there were no TX operations during the timeout interval,
8933 	 * and stop the timer. Timer will be restarted if more TX happens.
8934 	 */
8935 	if (update_timer->timer_tx_num != update_timer->tx_num) {
8936 		update_timer->timer_tx_num = update_timer->tx_num;
8937 		timeout_add_msec(&update_timer->timer, update_timer->interval);
8938 	} else {
8939 		update_timer->started = 0;
8940 		qwz_hal_srng_shadow_update_hp_tp(sc, srng);
8941 	}
8942 #ifdef notyet
8943 	spin_unlock_bh(&srng->lock);
8944 #endif
8945 	splx(s);
8946 }
8947 
8948 void
8949 qwz_dp_stop_shadow_timers(struct qwz_softc *sc)
8950 {
8951 	int i;
8952 
8953 	for (i = 0; i < sc->hw_params.max_tx_ring; i++)
8954 		qwz_dp_shadow_stop_timer(sc, &sc->dp.tx_ring_timer[i]);
8955 
8956 	qwz_dp_shadow_stop_timer(sc, &sc->dp.reo_cmd_timer);
8957 }
8958 
8959 void
8960 qwz_dp_srng_common_cleanup(struct qwz_softc *sc)
8961 {
8962 	struct qwz_dp *dp = &sc->dp;
8963 	int i;
8964 
8965 	qwz_dp_srng_cleanup(sc, &dp->wbm_desc_rel_ring);
8966 	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
8967 		qwz_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_data_ring);
8968 		qwz_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_comp_ring);
8969 	}
8970 	qwz_dp_srng_cleanup(sc, &dp->reo_reinject_ring);
8971 	qwz_dp_srng_cleanup(sc, &dp->rx_rel_ring);
8972 	qwz_dp_srng_cleanup(sc, &dp->reo_except_ring);
8973 	qwz_dp_srng_cleanup(sc, &dp->reo_cmd_ring);
8974 	qwz_dp_srng_cleanup(sc, &dp->reo_status_ring);
8975 }
8976 
8977 void
8978 qwz_hal_srng_get_params(struct qwz_softc *sc, struct hal_srng *srng,
8979     struct hal_srng_params *params)
8980 {
8981 	params->ring_base_paddr = srng->ring_base_paddr;
8982 	params->ring_base_vaddr = srng->ring_base_vaddr;
8983 	params->num_entries = srng->num_entries;
8984 	params->intr_timer_thres_us = srng->intr_timer_thres_us;
8985 	params->intr_batch_cntr_thres_entries =
8986 		srng->intr_batch_cntr_thres_entries;
8987 	params->low_threshold = srng->u.src_ring.low_threshold;
8988 	params->msi_addr = srng->msi_addr;
8989 	params->msi_data = srng->msi_data;
8990 	params->flags = srng->flags;
8991 }
8992 
8993 void
8994 qwz_hal_tx_init_data_ring(struct qwz_softc *sc, struct hal_srng *srng)
8995 {
8996 	struct hal_srng_params params;
8997 	struct hal_tlv_hdr *tlv;
8998 	int i, entry_size;
8999 	uint8_t *desc;
9000 
9001 	memset(&params, 0, sizeof(params));
9002 
9003 	entry_size = qwz_hal_srng_get_entrysize(sc, HAL_TCL_DATA);
9004 	qwz_hal_srng_get_params(sc, srng, &params);
9005 	desc = (uint8_t *)params.ring_base_vaddr;
9006 
9007 	for (i = 0; i < params.num_entries; i++) {
9008 		tlv = (struct hal_tlv_hdr *)desc;
9009 		tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
9010 		    FIELD_PREP(HAL_TLV_HDR_LEN,
9011 		    sizeof(struct hal_tcl_data_cmd));
9012 		desc += entry_size;
9013 	}
9014 }
9015 
9016 #define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
9017 
9018 /* dscp_tid_map - Default DSCP-TID mapping
9019  *
9020  * DSCP        TID
9021  * 000000      0
9022  * 001000      1
9023  * 010000      2
9024  * 011000      3
9025  * 100000      4
9026  * 101000      5
9027  * 110000      6
9028  * 111000      7
9029  */
static const uint8_t dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
	0, 0, 0, 0, 0, 0, 0, 0,	/* DSCP  0- 7 -> TID 0 */
	1, 1, 1, 1, 1, 1, 1, 1,	/* DSCP  8-15 -> TID 1 */
	2, 2, 2, 2, 2, 2, 2, 2,	/* DSCP 16-23 -> TID 2 */
	3, 3, 3, 3, 3, 3, 3, 3,	/* DSCP 24-31 -> TID 3 */
	4, 4, 4, 4, 4, 4, 4, 4,	/* DSCP 32-39 -> TID 4 */
	5, 5, 5, 5, 5, 5, 5, 5,	/* DSCP 40-47 -> TID 5 */
	6, 6, 6, 6, 6, 6, 6, 6,	/* DSCP 48-55 -> TID 6 */
	7, 7, 7, 7, 7, 7, 7, 7,	/* DSCP 56-63 -> TID 7 */
};
9040 
9041 void
9042 qwz_hal_tx_set_dscp_tid_map(struct qwz_softc *sc, int id)
9043 {
9044 	uint32_t ctrl_reg_val;
9045 	uint32_t addr;
9046 	uint8_t hw_map_val[HAL_DSCP_TID_TBL_SIZE];
9047 	int i;
9048 	uint32_t value;
9049 	int cnt = 0;
9050 
9051 	ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
9052 	    HAL_TCL1_RING_CMN_CTRL_REG);
9053 
9054 	/* Enable read/write access */
9055 	ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
9056 	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
9057 	    HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
9058 
9059 	addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
9060 	       (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
9061 
9062 	/* Configure each DSCP-TID mapping in three bits there by configure
9063 	 * three bytes in an iteration.
9064 	 */
9065 	for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
9066 		value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
9067 				   dscp_tid_map[i]) |
9068 			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
9069 				   dscp_tid_map[i + 1]) |
9070 			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
9071 				   dscp_tid_map[i + 2]) |
9072 			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
9073 				   dscp_tid_map[i + 3]) |
9074 			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
9075 				   dscp_tid_map[i + 4]) |
9076 			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
9077 				   dscp_tid_map[i + 5]) |
9078 			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
9079 				   dscp_tid_map[i + 6]) |
9080 			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
9081 				   dscp_tid_map[i + 7]);
9082 		memcpy(&hw_map_val[cnt], (uint8_t *)&value, 3);
9083 		cnt += 3;
9084 	}
9085 
9086 	for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
9087 		sc->ops.write32(sc, addr, *(uint32_t *)&hw_map_val[i]);
9088 		addr += 4;
9089 	}
9090 
9091 	/* Disable read/write access */
9092 	ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
9093 	    HAL_TCL1_RING_CMN_CTRL_REG);
9094 	ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
9095 	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
9096 	    HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
9097 }
9098 
9099 void
9100 qwz_dp_shadow_init_timer(struct qwz_softc *sc,
9101     struct qwz_hp_update_timer *update_timer,
9102     uint32_t interval, uint32_t ring_id)
9103 {
9104 	if (!sc->hw_params.supports_shadow_regs)
9105 		return;
9106 
9107 	update_timer->tx_num = 0;
9108 	update_timer->timer_tx_num = 0;
9109 	update_timer->sc = sc;
9110 	update_timer->ring_id = ring_id;
9111 	update_timer->interval = interval;
9112 	update_timer->init = 1;
9113 	timeout_set(&update_timer->timer, qwz_dp_shadow_timer_handler,
9114 	    update_timer);
9115 }
9116 
9117 void
9118 qwz_hal_reo_init_cmd_ring(struct qwz_softc *sc, struct hal_srng *srng)
9119 {
9120 	struct hal_srng_params params;
9121 	struct hal_tlv_hdr *tlv;
9122 	struct hal_reo_get_queue_stats *desc;
9123 	int i, cmd_num = 1;
9124 	int entry_size;
9125 	uint8_t *entry;
9126 
9127 	memset(&params, 0, sizeof(params));
9128 
9129 	entry_size = qwz_hal_srng_get_entrysize(sc, HAL_REO_CMD);
9130 	qwz_hal_srng_get_params(sc, srng, &params);
9131 	entry = (uint8_t *)params.ring_base_vaddr;
9132 
9133 	for (i = 0; i < params.num_entries; i++) {
9134 		tlv = (struct hal_tlv_hdr *)entry;
9135 		desc = (struct hal_reo_get_queue_stats *)tlv->value;
9136 		desc->cmd.info0 = FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER,
9137 		    cmd_num++);
9138 		entry += entry_size;
9139 	}
9140 }
9141 
9142 int
9143 qwz_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv, struct ath12k_hal_reo_cmd *cmd)
9144 {
9145 	struct hal_reo_get_queue_stats *desc;
9146 
9147 	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
9148 	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
9149 
9150 	desc = (struct hal_reo_get_queue_stats *)tlv->value;
9151 
9152 	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
9153 	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
9154 		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
9155 
9156 	desc->queue_addr_lo = cmd->addr_lo;
9157 	desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
9158 	    cmd->addr_hi);
9159 	if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
9160 		desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
9161 
9162 	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
9163 }
9164 
/*
 * Build a REO FLUSH_CACHE command in the given ring entry, translating
 * the caller's flags into descriptor bits. May claim a cache blocking
 * resource slot; returns ENOSPC when none is free, otherwise the entry's
 * pre-assigned command number.
 */
int
qwz_hal_reo_cmd_flush_cache(struct ath12k_hal *hal, struct hal_tlv_hdr *tlv,
    struct ath12k_hal_reo_cmd *cmd)
{
	struct hal_reo_flush_cache *desc;
	/* First zero bit = first free block resource slot. */
	uint8_t avail_slot = ffz(hal->avail_blk_resource);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
			return ENOSPC;

		hal->current_blk_index = avail_slot;
	}

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_flush_cache *)tlv->value;

	/* Clear, then set STATUS_REQUIRED only if the caller wants one. */
	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	desc->cache_addr_lo = cmd->addr_lo;
	desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
	    cmd->addr_hi);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
		desc->info0 |=
		    FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
		    avail_slot);
	}

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
9210 
/*
 * Build a REO UPDATE_RX_REO_QUEUE command in the given ring entry.
 * info0 carries the "which fields to update" bits (from cmd->upd0),
 * info1/info2 carry the new field values (from cmd->upd1/upd2). Note
 * that cmd->pn_size and cmd->ba_window_size are normalized in place.
 * Returns the entry's pre-assigned command number.
 */
int
qwz_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
    struct ath12k_hal_reo_cmd *cmd)
{
	struct hal_reo_update_rx_queue *desc;

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_update_rx_queue *)tlv->value;

	/* Request a completion status only if the caller asked for one. */
	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	desc->queue_addr_lo = cmd->addr_lo;
	/* Update-enable bits: one per field the hardware should touch. */
	desc->info0 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
		    cmd->addr_hi) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));

	/* New values for the fields enabled above. */
	desc->info1 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
		    cmd->rx_queue_num) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
		    FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
		    FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));

	/* Translate PN size in bits to the hardware enum value. */
	if (cmd->pn_size == 24)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
	else if (cmd->pn_size == 48)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
	else if (cmd->pn_size == 128)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;

	if (cmd->ba_window_size < 1)
		cmd->ba_window_size = 1;

	if (cmd->ba_window_size == 1)
		cmd->ba_window_size++;

	/* The descriptor stores the BA window size minus one. */
	desc->info2 = FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
	    cmd->ba_window_size - 1) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
	        FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
9334 
9335 int
9336 qwz_hal_reo_cmd_send(struct qwz_softc *sc, struct hal_srng *srng,
9337     enum hal_reo_cmd_type type, struct ath12k_hal_reo_cmd *cmd)
9338 {
9339 	struct hal_tlv_hdr *reo_desc;
9340 	int ret;
9341 #ifdef notyet
9342 	spin_lock_bh(&srng->lock);
9343 #endif
9344 	qwz_hal_srng_access_begin(sc, srng);
9345 	reo_desc = (struct hal_tlv_hdr *)qwz_hal_srng_src_get_next_entry(sc, srng);
9346 	if (!reo_desc) {
9347 		ret = ENOBUFS;
9348 		goto out;
9349 	}
9350 
9351 	switch (type) {
9352 	case HAL_REO_CMD_GET_QUEUE_STATS:
9353 		ret = qwz_hal_reo_cmd_queue_stats(reo_desc, cmd);
9354 		break;
9355 	case HAL_REO_CMD_FLUSH_CACHE:
9356 		ret = qwz_hal_reo_cmd_flush_cache(&sc->hal, reo_desc, cmd);
9357 		break;
9358 	case HAL_REO_CMD_UPDATE_RX_QUEUE:
9359 		ret = qwz_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
9360 		break;
9361 	case HAL_REO_CMD_FLUSH_QUEUE:
9362 	case HAL_REO_CMD_UNBLOCK_CACHE:
9363 	case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
9364 		printf("%s: unsupported reo command %d\n",
9365 		   sc->sc_dev.dv_xname, type);
9366 		ret = ENOTSUP;
9367 		break;
9368 	default:
9369 		printf("%s: unknown reo command %d\n",
9370 		    sc->sc_dev.dv_xname, type);
9371 		ret = EINVAL;
9372 		break;
9373 	}
9374 
9375 out:
9376 	qwz_hal_srng_access_end(sc, srng);
9377 #ifdef notyet
9378 	spin_unlock_bh(&srng->lock);
9379 #endif
9380 	return ret;
9381 }
9382 
/*
 * Set up the data-path rings shared by all pdevs: WBM release, the
 * per-TX-ring TCL data/completion pairs, and the REO reinject,
 * release, exception, command and status rings.  On any failure all
 * rings set up so far are torn down again via
 * qwz_dp_srng_common_cleanup().  Returns 0 or a (positive) errno.
 */
int
qwz_dp_srng_common_setup(struct qwz_softc *sc)
{
	struct qwz_dp *dp = &sc->dp;
	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
	struct hal_srng *srng;
	int i, ret;
	uint8_t tx_comp_ring_num;

	ret = qwz_dp_srng_setup(sc, &dp->wbm_desc_rel_ring, HAL_SW2WBM_RELEASE,
	    0, 0, DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up wbm2sw_release ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		/* Each TCL ring completes into a hw-specific WBM ring. */
		map = sc->hw_params.hal_ops->tcl_to_wbm_rbm_map;
		tx_comp_ring_num = map[i].wbm_ring_num;

		ret = qwz_dp_srng_setup(sc, &dp->tx_ring[i].tcl_data_ring,
		    HAL_TCL_DATA, i, 0, DP_TCL_DATA_RING_SIZE);
		if (ret) {
			printf("%s: failed to set up tcl_data ring (%d) :%d\n",
			    sc->sc_dev.dv_xname, i, ret);
			goto err;
		}

		ret = qwz_dp_srng_setup(sc, &dp->tx_ring[i].tcl_comp_ring,
		    HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0, DP_TX_COMP_RING_SIZE);
		if (ret) {
			printf("%s: failed to set up tcl_comp ring (%d) :%d\n",
			    sc->sc_dev.dv_xname, i, ret);
			goto err;
		}

		srng = &sc->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		qwz_hal_tx_init_data_ring(sc, srng);
	}

	ret = qwz_dp_srng_setup(sc, &dp->reo_reinject_ring, HAL_REO_REINJECT,
	    0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_reinject ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwz_dp_srng_setup(sc, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
	    DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up rx_rel ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwz_dp_srng_setup(sc, &dp->reo_except_ring, HAL_REO_EXCEPTION,
	    0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_exception ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwz_dp_srng_setup(sc, &dp->reo_cmd_ring, HAL_REO_CMD, 0, 0,
	    DP_REO_CMD_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_cmd ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* The command ring needs its descriptors pre-initialized. */
	srng = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
	qwz_hal_reo_init_cmd_ring(sc, srng);

	ret = qwz_dp_srng_setup(sc, &dp->reo_status_ring, HAL_REO_STATUS,
	    0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_status ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured.
	 * Each hash entry uses four bits to map to a particular ring. */
	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
	    HAL_HASH_ROUTING_RING_SW2 << 4 |
	    HAL_HASH_ROUTING_RING_SW3 << 8 |
	    HAL_HASH_ROUTING_RING_SW4 << 12 |
	    HAL_HASH_ROUTING_RING_SW1 << 16 |
	    HAL_HASH_ROUTING_RING_SW2 << 20 |
	    HAL_HASH_ROUTING_RING_SW3 << 24 |
	    HAL_HASH_ROUTING_RING_SW4 << 28;

	qwz_hal_reo_hw_setup(sc, ring_hash_map);
	return 0;

err:
	qwz_dp_srng_common_cleanup(sc);

	return ret;
}
9487 
9488 void
9489 qwz_dp_link_desc_cleanup(struct qwz_softc *sc,
9490     struct dp_link_desc_bank *desc_bank, uint32_t ring_type,
9491     struct dp_srng *ring)
9492 {
9493 	qwz_dp_link_desc_bank_free(sc, desc_bank);
9494 
9495 	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
9496 		qwz_dp_srng_cleanup(sc, ring);
9497 		qwz_dp_scatter_idle_link_desc_cleanup(sc);
9498 	}
9499 }
9500 
9501 void
9502 qwz_dp_tx_ring_free_tx_data(struct qwz_softc *sc, struct dp_tx_ring *tx_ring)
9503 {
9504 	int i;
9505 
9506 	if (tx_ring->data == NULL)
9507 		return;
9508 
9509 	for (i = 0; i < sc->hw_params.tx_ring_size; i++) {
9510 		struct qwz_tx_data *tx_data = &tx_ring->data[i];
9511 
9512 		if (tx_data->map) {
9513 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
9514 			bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
9515 		}
9516 
9517 		m_freem(tx_data->m);
9518 	}
9519 
9520 	free(tx_ring->data, M_DEVBUF,
9521 	    sc->hw_params.tx_ring_size * sizeof(struct qwz_tx_data));
9522 	tx_ring->data = NULL;
9523 }
9524 
9525 int
9526 qwz_dp_tx_ring_alloc_tx_data(struct qwz_softc *sc, struct dp_tx_ring *tx_ring)
9527 {
9528 	int i, ret;
9529 
9530 	tx_ring->data = mallocarray(sc->hw_params.tx_ring_size,
9531 	   sizeof(struct qwz_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO);
9532 	if (tx_ring->data == NULL)
9533 		return ENOMEM;
9534 
9535 	for (i = 0; i < sc->hw_params.tx_ring_size; i++) {
9536 		struct qwz_tx_data *tx_data = &tx_ring->data[i];
9537 
9538 		ret = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
9539 		    BUS_DMA_NOWAIT, &tx_data->map);
9540 		if (ret)
9541 			return ret;
9542 	}
9543 
9544 	return 0;
9545 }
9546 
/* Selects which SPT page range qwz_dp_cmem_init() programs: TX or RX. */
enum ath12k_dp_desc_type {
	ATH12K_DP_TX_DESC,
	ATH12K_DP_RX_DESC,
};
9551 
9552 int
9553 qwz_dp_cmem_init(struct qwz_softc *sc, struct qwz_dp *dp,
9554     enum ath12k_dp_desc_type type)
9555 {
9556 	uint32_t cmem_base;
9557 	int i, start, end;
9558 
9559 	cmem_base = sc->qmi_dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
9560 
9561 	switch (type) {
9562 	case ATH12K_DP_TX_DESC:
9563 		start = ATH12K_TX_SPT_PAGE_OFFSET;
9564 		end = start + ATH12K_NUM_TX_SPT_PAGES;
9565 		break;
9566 	case ATH12K_DP_RX_DESC:
9567 		start = ATH12K_RX_SPT_PAGE_OFFSET;
9568 		end = start + ATH12K_NUM_RX_SPT_PAGES;
9569 		break;
9570 	default:
9571 		printf("%s: invalid descriptor type %d in cmem init\n",
9572 		    sc->sc_dev.dv_xname, type);
9573 		return EINVAL;
9574 	}
9575 
9576 	/* Write to PPT in CMEM */
9577 	for (i = start; i < end; i++)
9578 		sc->ops.write32(sc, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
9579 		    QWZ_DMA_DVA(dp->spt_info[i].mem) >> ATH12K_SPT_4K_ALIGN_OFFSET);
9580 
9581 	return 0;
9582 }
9583 
9584 uint32_t qwz_dp_cc_cookie_gen(uint16_t ppt_idx, uint16_t spt_idx)
9585 {
9586 	return (uint32_t)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
9587 }
9588 
9589 void *ath12k_dp_cc_get_desc_addr_ptr(struct qwz_softc *sc,
9590     uint16_t ppt_idx, uint16_t spt_idx)
9591 {
9592 	struct qwz_dp *dp = &sc->dp;
9593 
9594 	return QWZ_DMA_KVA(dp->spt_info[ppt_idx].mem) + spt_idx;
9595 }
9596 
/*
 * Populate the cookie-conversion descriptor pools: allocate RX and TX
 * software descriptors, assign each a cookie encoding its (PPT, SPT)
 * position, link them on the free lists, and record each descriptor's
 * virtual address in the SPT so the hardware cookie can be converted
 * back to the descriptor later.  Returns 0 or ENOMEM; on failure the
 * caller is expected to clean up both pools.
 */
int
qwz_dp_cc_desc_init(struct qwz_softc *sc)
{
	struct qwz_dp *dp = &sc->dp;
	struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
	struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
	uint32_t i, j, pool_id, tx_spt_page;
	uint32_t ppt_idx;

#ifdef notyet
	spin_lock_bh(&dp->rx_desc_lock);
#endif

	/* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		rx_descs = mallocarray(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
		    M_DEVBUF, M_NOWAIT | M_ZERO);

		if (!rx_descs) {
#ifdef notyet
			spin_unlock_bh(&dp->rx_desc_lock);
#endif
			return ENOMEM;
		}

		ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
		/* Base address kept for freeing the pool later.
		 * NOTE(review): rxbaddr lives on spt_info[0] only —
		 * confirm this matches the teardown code. */
		dp->spt_info->rxbaddr[i] = &rx_descs[0];

		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
			rx_descs[j].cookie = qwz_dp_cc_cookie_gen(ppt_idx, j);
			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
			TAILQ_INSERT_TAIL(&dp->rx_desc_free_list,
			    &rx_descs[j], entry);

			/* Update descriptor VA in SPT */
			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(sc, ppt_idx, j);
			*rx_desc_addr = &rx_descs[j];
		}
	}

#ifdef notyet
	spin_unlock_bh(&dp->rx_desc_lock);
#endif

	/* TX descriptors are split into one pool per hardware queue. */
	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
#ifdef notyet
		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
#endif
		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
			tx_descs = mallocarray(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
			    M_DEVBUF, M_NOWAIT | M_ZERO);

			if (!tx_descs) {
#ifdef notyet
				spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
#endif
				/* Caller takes care of TX pending and RX desc cleanup */
				return ENOMEM;
			}

			/* TX pages follow the RX pages in the SPT. */
			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
			ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;

			dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];

			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
				tx_descs[j].desc_id = qwz_dp_cc_cookie_gen(ppt_idx, j);
				tx_descs[j].pool_id = pool_id;
				TAILQ_INSERT_TAIL(&dp->tx_desc_free_list[pool_id],
				    &tx_descs[j], entry);

				/* Update descriptor VA in SPT */
				tx_desc_addr =
					ath12k_dp_cc_get_desc_addr_ptr(sc, ppt_idx, j);
				*tx_desc_addr = &tx_descs[j];
			}
		}
#ifdef notyet
		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
#endif
	}
	return 0;
}
9680 
/*
 * Release the cookie-conversion state set up by qwz_dp_cc_init()
 * (SPT pages, descriptor pools).  Not implemented yet, so a failed
 * or torn-down init currently leaks these allocations.
 */
void
qwz_dp_cc_cleanup(struct qwz_softc *sc)
{
	// FIXME
}
9686 
/*
 * Initialize hardware cookie conversion: set up the descriptor free
 * lists, allocate the (4K-aligned) SPT pages, program their addresses
 * into device CMEM for both TX and RX, and populate the descriptor
 * pools.  Returns 0 or a (positive) errno; on failure the partial
 * state is handed to qwz_dp_cc_cleanup().
 */
int
qwz_dp_cc_init(struct qwz_softc *sc)
{
	struct qwz_dp *dp = &sc->dp;
	int i, ret = 0;

	TAILQ_INIT(&dp->rx_desc_free_list);
#ifdef notyet
	spin_lock_init(&dp->rx_desc_lock);
#endif

	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
		TAILQ_INIT(&dp->tx_desc_free_list[i]);
		TAILQ_INIT(&dp->tx_desc_used_list[i]);
#ifdef notyet
		spin_lock_init(&dp->tx_desc_lock[i]);
#endif
	}

	/* The PPT in CMEM bounds how many SPT pages we can use. */
	dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
	if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
		dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;

	dp->spt_info = mallocarray(dp->num_spt_pages,
	    sizeof(struct ath12k_spt_info),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!dp->spt_info) {
		printf("%s: SPT page allocation failure\n",
		    sc->sc_dev.dv_xname);
		return ENOMEM;
	}

	for (i = 0; i < dp->num_spt_pages; i++) {
		dp->spt_info[i].mem = qwz_dmamem_alloc(sc->sc_dmat,
		    ATH12K_PAGE_SIZE, PAGE_SIZE);
		if (!dp->spt_info[i].mem) {
			ret = ENOMEM;
			goto free;
		}

		/* Hardware requires 4K-aligned SPT pages. */
		if (QWZ_DMA_DVA(dp->spt_info[i].mem) & ATH12K_SPT_4K_ALIGN_CHECK) {
			printf("%s: SPT allocated memory is not 4K aligned\n",
			    sc->sc_dev.dv_xname);
			ret = EINVAL;
			goto free;
		}
	}

	ret = qwz_dp_cmem_init(sc, dp, ATH12K_DP_TX_DESC);
	if (ret) {
		printf("%s: HW CC Tx cmem init failed: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto free;
	}

	ret = qwz_dp_cmem_init(sc, dp, ATH12K_DP_RX_DESC);
	if (ret) {
		printf("%s: HW CC Rx cmem init failed: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto free;
	}

	ret = qwz_dp_cc_desc_init(sc);
	if (ret) {
		printf("%s: HW CC desc init failed: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto free;
	}

	return 0;
free:
	qwz_dp_cc_cleanup(sc);
	return ret;
}
9761 
/*
 * Set up TCL bank profiles.  Stub for now; always succeeds so the
 * data-path bring-up in qwz_dp_alloc() can proceed.
 */
int
qwz_dp_init_bank_profiles(struct qwz_softc *sc)
{
	return 0;
}
9767 
/*
 * Tear down TCL bank profiles.  Stub, matching the stubbed
 * qwz_dp_init_bank_profiles() above.
 */
void
qwz_dp_deinit_bank_profiles(struct qwz_softc *sc)
{
	// FIXME
}
9773 
9774 int qwz_dp_rxdma_ring_buf_setup(struct qwz_softc *, struct dp_rxdma_ring *, uint32_t);
9775 
9776 int
9777 qwz_dp_rxdma_buf_setup(struct qwz_softc *sc)
9778 {
9779 	struct qwz_pdev_dp *dp = &sc->pdev_dp;
9780 	struct dp_rxdma_ring *rx_ring;
9781 	int ret;
9782 
9783 	rx_ring = &dp->rx_refill_buf_ring;
9784 	ret = qwz_dp_rxdma_ring_buf_setup(sc, rx_ring, HAL_RXDMA_BUF);
9785 	if (ret)
9786 		return ret;
9787 
9788 	return 0;
9789 }
9790 
9791 int
9792 qwz_dp_rx_alloc(struct qwz_softc *sc)
9793 {
9794 	struct qwz_pdev_dp *dp = &sc->pdev_dp;
9795 	int i, ret;
9796 
9797 #if notyet
9798 	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
9799 	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
9800 
9801 	idr_init(&dp->tx_mon_buf_ring.bufs_idr);
9802 	spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
9803 #endif
9804 
9805 	ret = qwz_dp_srng_setup(sc, &dp->rx_refill_buf_ring.refill_buf_ring,
9806 	    HAL_RXDMA_BUF, 0, dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
9807 	if (ret) {
9808 		printf("%s: failed to setup rx_refill_buf_ring\n",
9809 		    sc->sc_dev.dv_xname);
9810 		return ret;
9811 	}
9812 
9813 	if (sc->hw_params.rx_mac_buf_ring) {
9814 		for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
9815 			ret = qwz_dp_srng_setup(sc, &dp->rx_mac_buf_ring[i],
9816 			    HAL_RXDMA_BUF, 1, dp->mac_id + i, 2048);
9817 			if (ret) {
9818 				printf("%s: failed to setup "
9819 				    "rx_mac_buf_ring %d\n",
9820 				    sc->sc_dev.dv_xname, i);
9821 				return ret;
9822 			}
9823 		}
9824 	}
9825 
9826 	for (i = 0; i < sc->hw_params.num_rxdma_dst_ring; i++) {
9827 		ret = qwz_dp_srng_setup(sc, &dp->rxdma_err_dst_ring[i],
9828 		    HAL_RXDMA_BUF, 0, dp->mac_id + i,
9829 		    DP_RXDMA_ERR_DST_RING_SIZE);
9830 		if (ret) {
9831 			printf("%s: failed to setup "
9832 			    "rxdma_err_dst_Ring %d\n",
9833 			    sc->sc_dev.dv_xname, i);
9834 			return ret;
9835 		}
9836 	}
9837 
9838 	ret = qwz_dp_rxdma_buf_setup(sc);
9839 	if (ret) {
9840 		printf("%s: failed to setup rxdma ring\n",
9841 		    sc->sc_dev.dv_xname);
9842 		return ret;
9843 	}
9844 
9845 	return 0;
9846 }
9847 
/*
 * Release the RX data-path state allocated by qwz_dp_rx_alloc().
 * Not implemented yet; the error path in qwz_dp_alloc() calls this,
 * so RX rings currently leak on failure.
 */
void
qwz_dp_rx_free(struct qwz_softc *sc)
{
	/* FIXME */
}
9853 
/*
 * Top-level data-path allocation: WBM idle ring and link descriptors,
 * cookie conversion, bank profiles, the common srngs, per-TX-ring
 * software state and status buffers, DSCP-TID maps, and the RX rings.
 * Each stage that fails unwinds all previous stages through the
 * layered goto labels at the bottom.  Returns 0 or a (positive) errno.
 */
int
qwz_dp_alloc(struct qwz_softc *sc)
{
	struct qwz_dp *dp = &sc->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	uint32_t n_link_desc = 0;
	int ret;
	int i;

	dp->sc = sc;

	TAILQ_INIT(&dp->reo_cmd_list);
	TAILQ_INIT(&dp->reo_cmd_cache_flush_list);
#if 0
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);
#endif

	dp->reo_cmd_cache_flush_count = 0;

	ret = qwz_wbm_idle_ring_setup(sc, &n_link_desc);
	if (ret) {
		printf("%s: failed to setup wbm_idle_ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	srng = &sc->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = qwz_dp_link_desc_setup(sc, dp->link_desc_banks,
	    HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		printf("%s: failed to setup link desc: %d\n",
		   sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwz_dp_cc_init(sc);
	if (ret)
		goto fail_link_desc_cleanup;

	ret = qwz_dp_init_bank_profiles(sc);
	if (ret)
		goto fail_hw_cc_cleanup;

	ret = qwz_dp_srng_common_setup(sc);
	if (ret)
		goto fail_dp_bank_profiles_cleanup;

	/* One status buffer per TX completion ring entry. */
	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
#if 0
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
#endif
		ret = qwz_dp_tx_ring_alloc_tx_data(sc, &dp->tx_ring[i]);
		if (ret)
			goto fail_cmn_srng_cleanup;

		dp->tx_ring[i].cur = 0;
		dp->tx_ring[i].queued = 0;
		dp->tx_ring[i].tcl_data_ring_id = i;
		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = malloc(size, M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (!dp->tx_ring[i].tx_status) {
			ret = ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		qwz_hal_tx_set_dscp_tid_map(sc, i);

	ret = qwz_dp_rx_alloc(sc);
	if (ret)
		goto fail_dp_rx_free;

	/* Init any SOC level resource for DP */

	return 0;
fail_dp_rx_free:
	qwz_dp_rx_free(sc);
fail_cmn_srng_cleanup:
	qwz_dp_srng_common_cleanup(sc);
fail_dp_bank_profiles_cleanup:
	qwz_dp_deinit_bank_profiles(sc);
fail_hw_cc_cleanup:
	qwz_dp_cc_cleanup(sc);
fail_link_desc_cleanup:
	qwz_dp_link_desc_cleanup(sc, dp->link_desc_banks, HAL_WBM_IDLE_LINK,
	    &dp->wbm_idle_ring);

	return ret;
}
9952 
9953 void
9954 qwz_dp_reo_cmd_list_cleanup(struct qwz_softc *sc)
9955 {
9956 	struct qwz_dp *dp = &sc->dp;
9957 	struct dp_reo_cmd *cmd, *tmp;
9958 	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
9959 	struct dp_rx_tid *rx_tid;
9960 #ifdef notyet
9961 	spin_lock_bh(&dp->reo_cmd_lock);
9962 #endif
9963 	TAILQ_FOREACH_SAFE(cmd, &dp->reo_cmd_list, entry, tmp) {
9964 		TAILQ_REMOVE(&dp->reo_cmd_list, cmd, entry);
9965 		rx_tid = &cmd->data;
9966 		if (rx_tid->mem) {
9967 			qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
9968 			rx_tid->mem = NULL;
9969 			rx_tid->vaddr = NULL;
9970 			rx_tid->paddr = 0ULL;
9971 			rx_tid->size = 0;
9972 		}
9973 		free(cmd, M_DEVBUF, sizeof(*cmd));
9974 	}
9975 
9976 	TAILQ_FOREACH_SAFE(cmd_cache, &dp->reo_cmd_cache_flush_list,
9977 	    entry, tmp_cache) {
9978 		TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, cmd_cache, entry);
9979 		dp->reo_cmd_cache_flush_count--;
9980 		rx_tid = &cmd_cache->data;
9981 		if (rx_tid->mem) {
9982 			qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
9983 			rx_tid->mem = NULL;
9984 			rx_tid->vaddr = NULL;
9985 			rx_tid->paddr = 0ULL;
9986 			rx_tid->size = 0;
9987 		}
9988 		free(cmd_cache, M_DEVBUF, sizeof(*cmd_cache));
9989 	}
9990 #ifdef notyet
9991 	spin_unlock_bh(&dp->reo_cmd_lock);
9992 #endif
9993 }
9994 
/*
 * Tear down the data-path state built by qwz_dp_alloc(): link
 * descriptors, common srngs, outstanding REO commands, and the
 * per-TX-ring software state and status buffers.
 */
void
qwz_dp_free(struct qwz_softc *sc)
{
	struct qwz_dp *dp = &sc->dp;
	int i;

	qwz_dp_link_desc_cleanup(sc, dp->link_desc_banks,
	    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	qwz_dp_srng_common_cleanup(sc);
	qwz_dp_reo_cmd_list_cleanup(sc);
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
#if 0
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath12k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
#endif
		qwz_dp_tx_ring_free_tx_data(sc, &dp->tx_ring[i]);
		/* Size must match the allocation in qwz_dp_alloc(). */
		free(dp->tx_ring[i].tx_status, M_DEVBUF,
		    sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE);
		dp->tx_ring[i].tx_status = NULL;
	}

	/* Deinit any SOC level resource */
}
10022 
10023 int
10024 qwz_qmi_wlanfw_wlan_ini_send(struct qwz_softc *sc)
10025 {
10026 	int ret;
10027 	struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
10028 
10029 	req.enablefwlog_valid = 1;
10030 	req.enablefwlog = 1;
10031 
10032 	ret = qwz_qmi_send_request(sc, QMI_WLANFW_WLAN_INI_REQ_V01,
10033 	    QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
10034 	    qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req, sizeof(req));
10035 	if (ret) {
10036 		printf("%s: failed to send wlan ini request, err = %d\n",
10037 		    sc->sc_dev.dv_xname, ret);
10038 		return ret;
10039 	}
10040 
10041 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
10042 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
10043 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzini",
10044 		    SEC_TO_NSEC(1));
10045 		if (ret) {
10046 			printf("%s: wlan ini request timeout\n",
10047 			    sc->sc_dev.dv_xname);
10048 			return ret;
10049 		}
10050 	}
10051 
10052 	return 0;
10053 }
10054 
/*
 * Send the QMI WLAN config request describing the copy-engine pipes,
 * the service-to-pipe map, and (if supported) the shadow register v3
 * configuration, then wait for the firmware's QMI response.
 * Returns 0 or a (positive) errno.
 */
int
qwz_qmi_wlanfw_wlan_cfg_send(struct qwz_softc *sc)
{
	struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
	const struct ce_pipe_config *ce_cfg;
	const struct service_to_pipe *svc_cfg;
	int ret = 0, pipe_num;

	ce_cfg	= sc->hw_params.target_ce_config;
	svc_cfg	= sc->hw_params.svc_to_ce_map;

	/* Too large for the stack; allocated and freed below. */
	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!req)
		return ENOMEM;

	req->host_version_valid = 1;
	strlcpy(req->host_version, ATH12K_HOST_VERSION_STRING,
	    sizeof(req->host_version));

	req->tgt_cfg_valid = 1;
	/* This is number of CE configs */
	req->tgt_cfg_len = sc->hw_params.target_ce_count;
	for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
		req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
		req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
		req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
		req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
		req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
	}

	req->svc_cfg_valid = 1;
	/* This is number of Service/CE configs */
	req->svc_cfg_len = sc->hw_params.svc_to_ce_map_len;
	for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
		req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
		req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
		req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
	}

	/* set shadow v3 configuration */
	if (sc->hw_params.supports_shadow_regs) {
		req->shadow_reg_v3_valid = 1;
		req->shadow_reg_v3_len = MIN(sc->qmi_ce_cfg.shadow_reg_v3_len,
		    QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01);
		memcpy(&req->shadow_reg_v3, sc->qmi_ce_cfg.shadow_reg_v3,
		       sizeof(uint32_t) * req->shadow_reg_v3_len);
	} else {
		req->shadow_reg_v3_valid = 0;
	}

	DNPRINTF(QWZ_D_QMI, "%s: wlan cfg req\n", __func__);

	ret = qwz_qmi_send_request(sc, QMI_WLANFW_WLAN_CFG_REQ_V01,
	    QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
	    qmi_wlanfw_wlan_cfg_req_msg_v01_ei,
	    req, sizeof(*req));
	if (ret) {
		printf("%s: failed to send wlan config request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/* Wait for the QMI response; 1s timeout per wakeup. */
	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzwlancfg",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: wlan config request failed\n",
			    sc->sc_dev.dv_xname);
			goto out;
		}
	}
out:
	free(req, M_DEVBUF, sizeof(*req));
	return ret;
}
10131 
10132 int
10133 qwz_qmi_wlanfw_mode_send(struct qwz_softc *sc, enum ath12k_firmware_mode mode)
10134 {
10135 	int ret;
10136 	struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {};
10137 
10138 	req.mode = mode;
10139 	req.hw_debug_valid = 1;
10140 	req.hw_debug = 0;
10141 
10142 	ret = qwz_qmi_send_request(sc, QMI_WLANFW_WLAN_MODE_REQ_V01,
10143 	    QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
10144 	    qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req, sizeof(req));
10145 	if (ret) {
10146 		printf("%s: failed to send wlan mode request, err = %d\n",
10147 		    sc->sc_dev.dv_xname, ret);
10148 		return ret;
10149 	}
10150 
10151 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
10152 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
10153 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwmode",
10154 		    SEC_TO_NSEC(1));
10155 		if (ret) {
10156 			if (mode == ATH12K_FIRMWARE_MODE_OFF)
10157 				return 0;
10158 			printf("%s: wlan mode request timeout\n",
10159 			    sc->sc_dev.dv_xname);
10160 			return ret;
10161 		}
10162 	}
10163 
10164 	return 0;
10165 }
10166 
10167 int
10168 qwz_qmi_firmware_start(struct qwz_softc *sc, enum ath12k_firmware_mode mode)
10169 {
10170 	int ret;
10171 
10172 	DPRINTF("%s: firmware start\n", sc->sc_dev.dv_xname);
10173 
10174 	ret = qwz_qmi_wlanfw_wlan_ini_send(sc);
10175 	if (ret < 0) {
10176 		printf("%s: qmi failed to send wlan fw ini: %d\n",
10177 		    sc->sc_dev.dv_xname, ret);
10178 		return ret;
10179 	}
10180 
10181 	ret = qwz_qmi_wlanfw_wlan_cfg_send(sc);
10182 	if (ret) {
10183 		printf("%s: qmi failed to send wlan cfg: %d\n",
10184 		    sc->sc_dev.dv_xname, ret);
10185 		return ret;
10186 	}
10187 
10188 	ret = qwz_qmi_wlanfw_mode_send(sc, mode);
10189 	if (ret) {
10190 		printf("%s: qmi failed to send wlan fw mode: %d\n",
10191 		    sc->sc_dev.dv_xname, ret);
10192 		return ret;
10193 	}
10194 
10195 	return 0;
10196 }
10197 
10198 void
10199 qwz_qmi_firmware_stop(struct qwz_softc *sc)
10200 {
10201 	int ret;
10202 
10203 	ret = qwz_qmi_wlanfw_mode_send(sc, ATH12K_FIRMWARE_MODE_OFF);
10204 	if (ret) {
10205 		printf("%s: qmi failed to send wlan mode off: %d\n",
10206 		    sc->sc_dev.dv_xname, ret);
10207 	}
10208 }
10209 
10210 int
10211 qwz_core_start_firmware(struct qwz_softc *sc, enum ath12k_firmware_mode mode)
10212 {
10213 	int ret;
10214 
10215 	qwz_ce_get_shadow_config(sc, &sc->qmi_ce_cfg.shadow_reg_v3,
10216 	    &sc->qmi_ce_cfg.shadow_reg_v3_len);
10217 
10218 	ret = qwz_qmi_firmware_start(sc, mode);
10219 	if (ret) {
10220 		printf("%s: failed to send firmware start: %d\n",
10221 		    sc->sc_dev.dv_xname, ret);
10222 		return ret;
10223 	}
10224 
10225 	return ret;
10226 }
10227 
10228 int
10229 qwz_wmi_pdev_attach(struct qwz_softc *sc, uint8_t pdev_id)
10230 {
10231 	struct qwz_pdev_wmi *wmi_handle;
10232 
10233 	if (pdev_id >= sc->hw_params.max_radios)
10234 		return EINVAL;
10235 
10236 	wmi_handle = &sc->wmi.wmi[pdev_id];
10237 	wmi_handle->wmi = &sc->wmi;
10238 
10239 	wmi_handle->tx_ce_desc = 1;
10240 
10241 	return 0;
10242 }
10243 
/*
 * Undo qwz_wmi_attach(): release the cached DB ring capability data.
 */
void
qwz_wmi_detach(struct qwz_softc *sc)
{
	qwz_wmi_free_dbring_caps(sc);
}
10249 
10250 int
10251 qwz_wmi_attach(struct qwz_softc *sc)
10252 {
10253 	int ret;
10254 
10255 	ret = qwz_wmi_pdev_attach(sc, 0);
10256 	if (ret)
10257 		return ret;
10258 
10259 	sc->wmi.sc = sc;
10260 	sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
10261 	sc->wmi.tx_credits = 1;
10262 
10263 	/* It's overwritten when service_ext_ready is handled */
10264 	if (sc->hw_params.single_pdev_only &&
10265 	    sc->hw_params.num_rxmda_per_pdev > 1)
10266 		sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
10267 
10268 	return 0;
10269 }
10270 
10271 void
10272 qwz_wmi_htc_tx_complete(struct qwz_softc *sc, struct mbuf *m)
10273 {
10274 	struct qwz_pdev_wmi *wmi = NULL;
10275 	uint32_t i;
10276 	uint8_t wmi_ep_count;
10277 	uint8_t eid;
10278 
10279 	eid = (uintptr_t)m->m_pkthdr.ph_cookie;
10280 	m_freem(m);
10281 
10282 	if (eid >= ATH12K_HTC_EP_COUNT)
10283 		return;
10284 
10285 	wmi_ep_count = sc->htc.wmi_ep_count;
10286 	if (wmi_ep_count > sc->hw_params.max_radios)
10287 		return;
10288 
10289 	for (i = 0; i < sc->htc.wmi_ep_count; i++) {
10290 		if (sc->wmi.wmi[i].eid == eid) {
10291 			wmi = &sc->wmi.wmi[i];
10292 			break;
10293 		}
10294 	}
10295 
10296 	if (wmi)
10297 		wakeup(&wmi->tx_ce_desc);
10298 }
10299 
/*
 * TLV iterator callback for WMI "service available" events.  Each
 * event carries bitmaps of services the firmware supports; set the
 * corresponding bits in sc->wmi.svc_map.  The base bitmap maps onto
 * service ids starting at WMI_MAX_SERVICE, the ext2 bitmap (sent as a
 * plain uint32 array) onto ids starting at WMI_MAX_EXT_SERVICE.
 * Always returns 0 (unknown tags are ignored).
 */
int
qwz_wmi_tlv_services_parser(struct qwz_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	const struct wmi_service_available_event *ev;
	uint32_t *wmi_ext2_service_bitmap;
	int i, j;

	switch (tag) {
	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
		ev = (struct wmi_service_available_event *)ptr;
		/* j advances across the whole loop; the inner do-while
		 * consumes one 32-bit bitmap word per outer iteration. */
		for (i = 0, j = WMI_MAX_SERVICE;
		    i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
		    j < WMI_MAX_EXT_SERVICE;
		    i++) {
			do {
				if (ev->wmi_service_segment_bitmap[i] &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					setbit(sc->wmi.svc_map, j);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		DNPRINTF(QWZ_D_WMI,
		    "%s: wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, "
		    "2:0x%04x, 3:0x%04x\n", __func__,
		    ev->wmi_service_segment_bitmap[0],
		    ev->wmi_service_segment_bitmap[1],
		    ev->wmi_service_segment_bitmap[2],
		    ev->wmi_service_segment_bitmap[3]);
		break;
	case WMI_TAG_ARRAY_UINT32:
		wmi_ext2_service_bitmap = (uint32_t *)ptr;
		for (i = 0, j = WMI_MAX_EXT_SERVICE;
		    i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
		    j < WMI_MAX_EXT2_SERVICE;
		    i++) {
			do {
				if (wmi_ext2_service_bitmap[i] &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					setbit(sc->wmi.svc_map, j);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		DNPRINTF(QWZ_D_WMI,
		    "%s: wmi_ext2_service__bitmap  0:0x%04x, 1:0x%04x, "
		    "2:0x%04x, 3:0x%04x\n", __func__,
		    wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
		    wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
		break;
	}

	return 0;
}
10353 
/*
 * Minimum acceptable payload length per WMI TLV tag, indexed by tag.
 * qwz_wmi_tlv_iter() rejects a TLV whose length is below its entry
 * here; tags with min_len 0 (or no entry) are not length-checked.
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE]
		= { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32]
		= { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT]
		= { .min_len =  sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
		= { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT]
		= { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT]
		= { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT]
		= { .min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct wmi_ready_event_min) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT]
		= {.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT]
		= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_STATS_EVENT]
		= { .min_len = sizeof(struct wmi_stats_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
		.min_len = sizeof(struct wmi_obss_color_collision_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_ev) },
	[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
		.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
	[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
10420 
/*
 * Walk a buffer of WMI TLVs, validating each header and payload
 * length (including the per-tag minimum from wmi_tlv_policies), and
 * invoke the given callback for every TLV.  Stops with the callback's
 * return value on first error, EINVAL on malformed input, 0 on
 * success.
 */
int
qwz_wmi_tlv_iter(struct qwz_softc *sc, const void *ptr, size_t len,
    int (*iter)(struct qwz_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data), void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	uint16_t tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* Must have room for a complete TLV header. */
		if (len < sizeof(*tlv)) {
			printf("%s: wmi tlv parse failure at byte %zd "
			    "(%zu bytes left, %zu expected)\n", __func__,
			    ptr - begin, len, sizeof(*tlv));
			return EINVAL;
		}

		tlv = ptr;
		tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* Payload must fit in what remains of the buffer. */
		if (tlv_len > len) {
			printf("%s: wmi tlv parse failure of tag %u "
			    "at byte %zd (%zu bytes left, %u expected)\n",
			    __func__, tlv_tag, ptr - begin, len, tlv_len);
			return EINVAL;
		}

		/* Enforce the per-tag minimum payload length, if any. */
		if (tlv_tag < nitems(wmi_tlv_policies) &&
		    wmi_tlv_policies[tlv_tag].min_len &&
		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			printf("%s: wmi tlv parse failure of tag %u "
			    "at byte %zd (%u bytes is less than "
			    "min length %zu)\n", __func__,
			    tlv_tag, ptr - begin, tlv_len,
			    wmi_tlv_policies[tlv_tag].min_len);
			return EINVAL;
		}

		ret = iter(sc, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
10473 
10474 int
10475 qwz_pull_service_ready_tlv(struct qwz_softc *sc, const void *evt_buf,
10476     struct ath12k_targ_cap *cap)
10477 {
10478 	const struct wmi_service_ready_event *ev = evt_buf;
10479 
10480 	if (!ev)
10481 		return EINVAL;
10482 
10483 	cap->phy_capability = ev->phy_capability;
10484 	cap->max_frag_entry = ev->max_frag_entry;
10485 	cap->num_rf_chains = ev->num_rf_chains;
10486 	cap->ht_cap_info = ev->ht_cap_info;
10487 	cap->vht_cap_info = ev->vht_cap_info;
10488 	cap->vht_supp_mcs = ev->vht_supp_mcs;
10489 	cap->hw_min_tx_power = ev->hw_min_tx_power;
10490 	cap->hw_max_tx_power = ev->hw_max_tx_power;
10491 	cap->sys_cap_info = ev->sys_cap_info;
10492 	cap->min_pkt_size_enable = ev->min_pkt_size_enable;
10493 	cap->max_bcn_ie_size = ev->max_bcn_ie_size;
10494 	cap->max_num_scan_channels = ev->max_num_scan_channels;
10495 	cap->max_supported_macs = ev->max_supported_macs;
10496 	cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
10497 	cap->txrx_chainmask = ev->txrx_chainmask;
10498 	cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
10499 	cap->num_msdu_desc = ev->num_msdu_desc;
10500 
10501 	return 0;
10502 }
10503 
10504 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
10505  * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
10506  * 4-byte word.
10507  */
10508 void
10509 qwz_wmi_service_bitmap_copy(struct qwz_pdev_wmi *wmi,
10510     const uint32_t *wmi_svc_bm)
10511 {
10512 	int i, j = 0;
10513 
10514 	for (i = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
10515 		do {
10516 			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
10517 				setbit(wmi->wmi->svc_map, j);
10518 		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
10519 	}
10520 }
10521 
10522 int
10523 qwz_wmi_tlv_svc_rdy_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
10524     const void *ptr, void *data)
10525 {
10526 	struct wmi_tlv_svc_ready_parse *svc_ready = data;
10527 	struct qwz_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
10528 	uint16_t expect_len;
10529 
10530 	switch (tag) {
10531 	case WMI_TAG_SERVICE_READY_EVENT:
10532 		if (qwz_pull_service_ready_tlv(sc, ptr, &sc->target_caps))
10533 			return EINVAL;
10534 		break;
10535 
10536 	case WMI_TAG_ARRAY_UINT32:
10537 		if (!svc_ready->wmi_svc_bitmap_done) {
10538 			expect_len = WMI_SERVICE_BM_SIZE * sizeof(uint32_t);
10539 			if (len < expect_len) {
10540 				printf("%s: invalid len %d for the tag 0x%x\n",
10541 				    __func__, len, tag);
10542 				return EINVAL;
10543 			}
10544 
10545 			qwz_wmi_service_bitmap_copy(wmi_handle, ptr);
10546 
10547 			svc_ready->wmi_svc_bitmap_done = 1;
10548 		}
10549 		break;
10550 	default:
10551 		break;
10552 	}
10553 
10554 	return 0;
10555 }
10556 
10557 void
10558 qwz_service_ready_event(struct qwz_softc *sc, struct mbuf *m)
10559 {
10560 	struct wmi_tlv_svc_ready_parse svc_ready = { };
10561 	int ret;
10562 
10563 	ret = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
10564 	    qwz_wmi_tlv_svc_rdy_parse, &svc_ready);
10565 	if (ret) {
10566 		printf("%s: failed to parse tlv %d\n", __func__, ret);
10567 		return;
10568 	}
10569 
10570 	DNPRINTF(QWZ_D_WMI, "%s: event service ready\n", __func__);
10571 }
10572 
10573 int
10574 qwz_pull_svc_ready_ext(struct qwz_pdev_wmi *wmi_handle, const void *ptr,
10575     struct ath12k_service_ext_param *param)
10576 {
10577 	const struct wmi_service_ready_ext_event *ev = ptr;
10578 
10579 	if (!ev)
10580 		return EINVAL;
10581 
10582 	/* Move this to host based bitmap */
10583 	param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
10584 	param->default_fw_config_bits =	ev->default_fw_config_bits;
10585 	param->he_cap_info = ev->he_cap_info;
10586 	param->mpdu_density = ev->mpdu_density;
10587 	param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
10588 	memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
10589 
10590 	return 0;
10591 }
10592 
/*
 * Extract the MAC/PHY capabilities of one PHY (identified by hw_mode_id
 * and phy_id) from the service-ready-ext TLV data and populate both
 * the given pdev's capability structure and the softc's per-target
 * pdev id table.
 *
 * Returns EINVAL on NULL input, an unknown hw_mode_id, an out-of-range
 * phy_id, or a PHY that supports neither the 2 GHz nor the 5 GHz band.
 */
int
qwz_pull_mac_phy_cap_svc_ready_ext(struct qwz_pdev_wmi *wmi_handle,
    struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
    struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
    struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
    struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
    uint8_t hw_mode_id, uint8_t phy_id, struct qwz_pdev *pdev)
{
	struct wmi_mac_phy_capabilities *mac_phy_caps;
	struct qwz_softc *sc = wmi_handle->wmi->sc;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	uint32_t phy_map;
	uint32_t hw_idx, phy_idx = 0;

	if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
		return EINVAL;

	/*
	 * Find the requested hw mode; phy_idx accumulates the number of
	 * PHYs belonging to all modes listed before it, i.e. the offset
	 * of this mode's first entry in wmi_mac_phy_caps[].
	 */
	for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
		if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
			break;

		phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
		while (phy_map) {
			phy_map >>= 1;
			phy_idx++;
		}
	}

	if (hw_idx == hw_caps->num_hw_modes)
		return EINVAL;

	/*
	 * NOTE(review): phy_id is checked against num_phy but the final
	 * phy_idx is not bounds-checked against the size of the
	 * wmi_mac_phy_caps array itself; assumed consistent with the
	 * tot_phy_id count from hw mode parsing — verify against caller.
	 */
	phy_idx += phy_id;
	if (phy_id >= hal_reg_caps->num_phy)
		return EINVAL;

	mac_phy_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = mac_phy_caps->pdev_id;
	pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
	pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
	sc->target_pdev_ids[sc->target_pdev_count].supported_bands =
	    mac_phy_caps->supported_bands;
	sc->target_pdev_ids[sc->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
	sc->target_pdev_count++;

	if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
	    !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
		return EINVAL;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
	}

	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
		pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
		pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
		pdev_cap->nss_ratio_enabled =
		    WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
		pdev_cap->nss_ratio_info =
		    WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift = ffs(pdev_cap->tx_chain_mask);
	pdev_cap->rx_chain_mask_shift = ffs(pdev_cap->rx_chain_mask);

	/* Per-band capability slots: band[0] for 2 GHz, band[1] for 5 GHz. */
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[0];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
		memcpy(cap_band->he_cap_phy_info,
		    &mac_phy_caps->he_cap_phy_info_2g,
		    sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
		    sizeof(struct ath12k_ppe_threshold));
	}

	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[1];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		    sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		    sizeof(struct ath12k_ppe_threshold));
#if 0
		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		       sizeof(struct ath12k_ppe_threshold));
#endif
	}

	return 0;
}
10718 
/*
 * Parse the WMI_TAG_SOC_HAL_REG_CAPABILITIES TLV of the service-ready-ext
 * event: record the number of PHYs and extract the MAC/PHY capabilities
 * for every PHY of the preferred hw mode into sc->pdevs[].
 *
 * Returns the first error from qwz_pull_mac_phy_cap_svc_ready_ext(),
 * or 0 on success.
 */
int
qwz_wmi_tlv_ext_soc_hal_reg_caps_parse(struct qwz_softc *sc, uint16_t len,
    const void *ptr, void *data)
{
	struct qwz_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	uint8_t hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
	uint32_t phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
	svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;

	sc->num_radios = 0;
	sc->target_pdev_count = 0;
	phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;

	/* One radio per bit set in the preferred mode's PHY id map. */
	while (phy_id_map && sc->num_radios < MAX_RADIOS) {
		ret = qwz_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
		    svc_rdy_ext->hw_caps,
		    svc_rdy_ext->hw_mode_caps,
		    svc_rdy_ext->soc_hal_reg_caps,
		    svc_rdy_ext->mac_phy_caps,
		    hw_mode_id, sc->num_radios, &sc->pdevs[pdev_index]);
		if (ret) {
			printf("%s: failed to extract mac caps, idx: %d\n",
			    __func__, sc->num_radios);
			return ret;
		}

		sc->num_radios++;

		/* For QCA6390, save mac_phy capability in the same pdev */
		if (sc->hw_params.single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = sc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	/* For QCA6390, set num_radios to 1 because host manages
	 * both 2G and 5G radio in one pdev.
	 * Set pdev_id = 0 and 0 means soc level.
	 */
	if (sc->hw_params.single_pdev_only) {
		sc->num_radios = 1;
		sc->pdevs[0].pdev_id = 0;
	}

	return 0;
}
10773 
10774 int
10775 qwz_wmi_tlv_hw_mode_caps_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
10776     const void *ptr, void *data)
10777 {
10778 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
10779 	struct wmi_hw_mode_capabilities *hw_mode_cap;
10780 	uint32_t phy_map = 0;
10781 
10782 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
10783 		return EPROTO;
10784 
10785 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
10786 		return ENOBUFS;
10787 
10788 	hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
10789 	    hw_mode_id);
10790 	svc_rdy_ext->n_hw_mode_caps++;
10791 
10792 	phy_map = hw_mode_cap->phy_id_map;
10793 	while (phy_map) {
10794 		svc_rdy_ext->tot_phy_id++;
10795 		phy_map = phy_map >> 1;
10796 	}
10797 
10798 	return 0;
10799 }
10800 
/*
 * Map each WMI host hw mode to its selection priority. Lower values
 * win: qwz_wmi_tlv_hw_mode_caps() picks the mode whose priority is
 * smaller than the current preference.
 */
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int qwz_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
10814 
10815 int
10816 qwz_wmi_tlv_hw_mode_caps(struct qwz_softc *sc, uint16_t len,
10817     const void *ptr, void *data)
10818 {
10819 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
10820 	struct wmi_hw_mode_capabilities *hw_mode_caps;
10821 	enum wmi_host_hw_mode_config_type mode, pref;
10822 	uint32_t i;
10823 	int ret;
10824 
10825 	svc_rdy_ext->n_hw_mode_caps = 0;
10826 	svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
10827 
10828 	ret = qwz_wmi_tlv_iter(sc, ptr, len,
10829 	    qwz_wmi_tlv_hw_mode_caps_parse, svc_rdy_ext);
10830 	if (ret) {
10831 		printf("%s: failed to parse tlv %d\n", __func__, ret);
10832 		return ret;
10833 	}
10834 
10835 	i = 0;
10836 	while (i < svc_rdy_ext->n_hw_mode_caps) {
10837 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
10838 		mode = hw_mode_caps->hw_mode_id;
10839 		pref = sc->wmi.preferred_hw_mode;
10840 
10841 		if (qwz_hw_mode_pri_map[mode] < qwz_hw_mode_pri_map[pref]) {
10842 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
10843 			sc->wmi.preferred_hw_mode = mode;
10844 		}
10845 		i++;
10846 	}
10847 
10848 	DNPRINTF(QWZ_D_WMI, "%s: preferred_hw_mode: %d\n", __func__,
10849 	    sc->wmi.preferred_hw_mode);
10850 	if (sc->wmi.preferred_hw_mode >= WMI_HOST_HW_MODE_MAX)
10851 		return EINVAL;
10852 
10853 	return 0;
10854 }
10855 
10856 int
10857 qwz_wmi_tlv_mac_phy_caps_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
10858     const void *ptr, void *data)
10859 {
10860 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
10861 
10862 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
10863 		return EPROTO;
10864 
10865 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
10866 		return ENOBUFS;
10867 
10868 	len = MIN(len, sizeof(struct wmi_mac_phy_capabilities));
10869 	if (!svc_rdy_ext->n_mac_phy_caps) {
10870 		svc_rdy_ext->mac_phy_caps = mallocarray(
10871 		    svc_rdy_ext->tot_phy_id,
10872 		    sizeof(struct wmi_mac_phy_capabilities),
10873 		    M_DEVBUF, M_NOWAIT | M_ZERO);
10874 		if (!svc_rdy_ext->mac_phy_caps)
10875 			return ENOMEM;
10876 		svc_rdy_ext->mac_phy_caps_size = len * svc_rdy_ext->tot_phy_id;
10877 	}
10878 
10879 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps,
10880 	    ptr, len);
10881 	svc_rdy_ext->n_mac_phy_caps++;
10882 	return 0;
10883 }
10884 
10885 int
10886 qwz_wmi_tlv_ext_hal_reg_caps_parse(struct qwz_softc *sc,
10887     uint16_t tag, uint16_t len, const void *ptr, void *data)
10888 {
10889 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
10890 
10891 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
10892 		return EPROTO;
10893 
10894 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
10895 		return ENOBUFS;
10896 
10897 	svc_rdy_ext->n_ext_hal_reg_caps++;
10898 	return 0;
10899 }
10900 
10901 int
10902 qwz_pull_reg_cap_svc_rdy_ext(struct qwz_pdev_wmi *wmi_handle,
10903     struct wmi_soc_hal_reg_capabilities *reg_caps,
10904     struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
10905     uint8_t phy_idx, struct ath12k_hal_reg_capabilities_ext *param)
10906 {
10907 	struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
10908 
10909 	if (!reg_caps || !wmi_ext_reg_cap)
10910 		return EINVAL;
10911 
10912 	if (phy_idx >= reg_caps->num_phy)
10913 		return EINVAL;
10914 
10915 	ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
10916 
10917 	param->phy_id = ext_reg_cap->phy_id;
10918 	param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
10919 	param->eeprom_reg_domain_ext = ext_reg_cap->eeprom_reg_domain_ext;
10920 	param->regcap1 = ext_reg_cap->regcap1;
10921 	param->regcap2 = ext_reg_cap->regcap2;
10922 	/* check if param->wireless_mode is needed */
10923 	param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
10924 	param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
10925 	param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
10926 	param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
10927 
10928 	return 0;
10929 }
10930 
10931 int
10932 qwz_wmi_tlv_ext_hal_reg_caps(struct qwz_softc *sc, uint16_t len,
10933     const void *ptr, void *data)
10934 {
10935 	struct qwz_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
10936 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
10937 	struct ath12k_hal_reg_capabilities_ext reg_cap;
10938 	int ret;
10939 	uint32_t i;
10940 
10941 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
10942 	svc_rdy_ext->ext_hal_reg_caps =
10943 	    (struct wmi_hal_reg_capabilities_ext *)ptr;
10944 	ret = qwz_wmi_tlv_iter(sc, ptr, len,
10945 	    qwz_wmi_tlv_ext_hal_reg_caps_parse, svc_rdy_ext);
10946 	if (ret) {
10947 		printf("%s: failed to parse tlv %d\n", __func__, ret);
10948 		return ret;
10949 	}
10950 
10951 	for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
10952 		ret = qwz_pull_reg_cap_svc_rdy_ext(wmi_handle,
10953 		    svc_rdy_ext->soc_hal_reg_caps,
10954 		    svc_rdy_ext->ext_hal_reg_caps, i, &reg_cap);
10955 		if (ret) {
10956 			printf("%s: failed to extract reg cap %d\n",
10957 			    __func__, i);
10958 			return ret;
10959 		}
10960 
10961 		memcpy(&sc->hal_reg_cap[reg_cap.phy_id], &reg_cap,
10962 		    sizeof(sc->hal_reg_cap[0]));
10963 	}
10964 
10965 	return 0;
10966 }
10967 
10968 int
10969 qwz_wmi_tlv_dma_ring_caps_parse(struct qwz_softc *sc, uint16_t tag,
10970     uint16_t len, const void *ptr, void *data)
10971 {
10972 	struct wmi_tlv_dma_ring_caps_parse *parse = data;
10973 
10974 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
10975 		return EPROTO;
10976 
10977 	parse->n_dma_ring_caps++;
10978 	return 0;
10979 }
10980 
10981 int
10982 qwz_wmi_alloc_dbring_caps(struct qwz_softc *sc, uint32_t num_cap)
10983 {
10984 	void *ptr;
10985 
10986 	ptr = mallocarray(num_cap, sizeof(struct qwz_dbring_cap),
10987 	    M_DEVBUF, M_NOWAIT | M_ZERO);
10988 	if (!ptr)
10989 		return ENOMEM;
10990 
10991 	sc->db_caps = ptr;
10992 	sc->num_db_cap = num_cap;
10993 
10994 	return 0;
10995 }
10996 
10997 void
10998 qwz_wmi_free_dbring_caps(struct qwz_softc *sc)
10999 {
11000 	free(sc->db_caps, M_DEVBUF,
11001 	    sc->num_db_cap * sizeof(struct qwz_dbring_cap));
11002 	sc->db_caps = NULL;
11003 	sc->num_db_cap = 0;
11004 }
11005 
/*
 * Parse a TLV array of DMA ring capabilities and populate the softc's
 * direct-buffer ring capability table. A second occurrence of the
 * array (e.g. in the ext2 event after ext already provided one) is
 * ignored. On any per-entry validation error the whole table is freed.
 */
int
qwz_wmi_tlv_dma_ring_caps(struct qwz_softc *sc, uint16_t len,
    const void *ptr, void *data)
{
	struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
	struct wmi_dma_ring_capabilities *dma_caps;
	struct qwz_dbring_cap *dir_buff_caps;
	int ret;
	uint32_t i;

	dma_caps_parse->n_dma_ring_caps = 0;
	dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
	ret = qwz_wmi_tlv_iter(sc, ptr, len,
	    qwz_wmi_tlv_dma_ring_caps_parse, dma_caps_parse);
	if (ret) {
		printf("%s: failed to parse dma ring caps tlv %d\n",
		    __func__, ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	/* Keep the first table we successfully processed. */
	if (sc->num_db_cap) {
		DNPRINTF(QWZ_D_WMI,
		    "%s: Already processed, so ignoring dma ring caps\n",
		    __func__);
		return 0;
	}

	ret = qwz_wmi_alloc_dbring_caps(sc, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = sc->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		/* module_id is firmware-provided; reject invalid values. */
		if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
			printf("%s: Invalid module id %d\n", __func__,
			    dma_caps[i].module_id);
			ret = EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = dma_caps[i].module_id;
		dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
		dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
		dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
		dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
	}

	return 0;

free_dir_buff:
	qwz_wmi_free_dbring_caps(sc);
	return ret;
}
11062 
/*
 * Top-level TLV iterator callback for the WMI "service ready ext"
 * event. Dispatches on the TLV tag; WMI_TAG_ARRAY_STRUCT occurs
 * several times and the arrays arrive in a fixed order, so the
 * *_done flags in svc_rdy_ext act as a simple state machine that
 * routes each array to the right sub-parser.
 */
int
qwz_wmi_tlv_svc_rdy_ext_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	struct qwz_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		/* Fixed parameters of the event. */
		ret = qwz_pull_svc_ready_ext(wmi_handle, ptr,
		    &svc_rdy_ext->param);
		if (ret) {
			printf("%s: unable to extract ext params\n", __func__);
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
		svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = qwz_wmi_tlv_ext_soc_hal_reg_caps_parse(sc, len, ptr,
		    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		/* Arrays arrive in this order: hw mode caps, mac/phy caps,
		 * ext hal reg caps, two chainmask arrays, oem dma ring
		 * caps, dma ring caps. */
		if (!svc_rdy_ext->hw_mode_done) {
			ret = qwz_wmi_tlv_hw_mode_caps(sc, len, ptr,
			    svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = 1;
		} else if (!svc_rdy_ext->mac_phy_done) {
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = qwz_wmi_tlv_iter(sc, ptr, len,
			    qwz_wmi_tlv_mac_phy_caps_parse, svc_rdy_ext);
			if (ret) {
				printf("%s: failed to parse tlv %d\n",
				    __func__, ret);
				return ret;
			}

			svc_rdy_ext->mac_phy_done = 1;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			ret = qwz_wmi_tlv_ext_hal_reg_caps(sc, len, ptr,
			    svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = 1;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			/* Chainmask combo array: currently skipped. */
			svc_rdy_ext->mac_phy_chainmask_combo_done = 1;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			/* Chainmask capability array: currently skipped. */
			svc_rdy_ext->mac_phy_chainmask_cap_done = 1;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			/* OEM DMA ring caps: currently skipped. */
			svc_rdy_ext->oem_dma_ring_cap_done = 1;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			ret = qwz_wmi_tlv_dma_ring_caps(sc, len, ptr,
			    &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = 1;
		}
		break;

	default:
		break;
	}

	return 0;
}
11141 
11142 void
11143 qwz_service_ready_ext_event(struct qwz_softc *sc, struct mbuf *m)
11144 {
11145 	struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
11146 	int ret;
11147 
11148 	ret = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11149 	    qwz_wmi_tlv_svc_rdy_ext_parse, &svc_rdy_ext);
11150 	if (ret) {
11151 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11152 		qwz_wmi_free_dbring_caps(sc);
11153 		return;
11154 	}
11155 
11156 	DNPRINTF(QWZ_D_WMI, "%s: event service ready ext\n", __func__);
11157 
11158 	if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_EXT2_MSG))
11159 		wakeup(&sc->wmi.service_ready);
11160 
11161 	free(svc_rdy_ext.mac_phy_caps, M_DEVBUF,
11162 	    svc_rdy_ext.mac_phy_caps_size);
11163 }
11164 
11165 int
11166 qwz_wmi_tlv_svc_rdy_ext2_parse(struct qwz_softc *sc,
11167     uint16_t tag, uint16_t len, const void *ptr, void *data)
11168 {
11169 	struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
11170 	int ret;
11171 
11172 	switch (tag) {
11173 	case WMI_TAG_ARRAY_STRUCT:
11174 		if (!parse->dma_ring_cap_done) {
11175 			ret = qwz_wmi_tlv_dma_ring_caps(sc, len, ptr,
11176 			    &parse->dma_caps_parse);
11177 			if (ret)
11178 				return ret;
11179 
11180 			parse->dma_ring_cap_done = 1;
11181 		}
11182 		break;
11183 	default:
11184 		break;
11185 	}
11186 
11187 	return 0;
11188 }
11189 
11190 void
11191 qwz_service_ready_ext2_event(struct qwz_softc *sc, struct mbuf *m)
11192 {
11193 	struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
11194 	int ret;
11195 
11196 	ret = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11197 	    qwz_wmi_tlv_svc_rdy_ext2_parse, &svc_rdy_ext2);
11198 	if (ret) {
11199 		printf("%s: failed to parse ext2 event tlv %d\n",
11200 		    __func__, ret);
11201 		qwz_wmi_free_dbring_caps(sc);
11202 		return;
11203 	}
11204 
11205 	DNPRINTF(QWZ_D_WMI, "%s: event service ready ext2\n", __func__);
11206 
11207 	sc->wmi.service_ready = 1;
11208 	wakeup(&sc->wmi.service_ready);
11209 }
11210 
11211 void
11212 qwz_service_available_event(struct qwz_softc *sc, struct mbuf *m)
11213 {
11214 	int ret;
11215 
11216 	ret = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11217 	    qwz_wmi_tlv_services_parser, NULL);
11218 	if (ret)
11219 		printf("%s: failed to parse services available tlv %d\n",
11220 		    sc->sc_dev.dv_xname, ret);
11221 
11222 	DNPRINTF(QWZ_D_WMI, "%s: event service available\n", __func__);
11223 }
11224 
11225 int
11226 qwz_pull_peer_assoc_conf_ev(struct qwz_softc *sc, struct mbuf *m,
11227     struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
11228 {
11229 	const void **tb;
11230 	const struct wmi_peer_assoc_conf_event *ev;
11231 	int ret;
11232 
11233 	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11234 	if (tb == NULL) {
11235 		ret = ENOMEM;
11236 		printf("%s: failed to parse tlv: %d\n",
11237 		    sc->sc_dev.dv_xname, ret);
11238 		return ret;
11239 	}
11240 
11241 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
11242 	if (!ev) {
11243 		printf("%s: failed to fetch peer assoc conf ev\n",
11244 		    sc->sc_dev.dv_xname);
11245 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11246 		return EPROTO;
11247 	}
11248 
11249 	peer_assoc_conf->vdev_id = ev->vdev_id;
11250 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
11251 
11252 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11253 	return 0;
11254 }
11255 
11256 void
11257 qwz_peer_assoc_conf_event(struct qwz_softc *sc, struct mbuf *m)
11258 {
11259 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
11260 
11261 	if (qwz_pull_peer_assoc_conf_ev(sc, m, &peer_assoc_conf) != 0) {
11262 		printf("%s: failed to extract peer assoc conf event\n",
11263 		   sc->sc_dev.dv_xname);
11264 		return;
11265 	}
11266 
11267 	DNPRINTF(QWZ_D_WMI, "%s: event peer assoc conf ev vdev id %d "
11268 	    "macaddr %s\n", __func__, peer_assoc_conf.vdev_id,
11269 	    ether_sprintf((u_char *)peer_assoc_conf.macaddr));
11270 
11271 	sc->peer_assoc_done = 1;
11272 	wakeup(&sc->peer_assoc_done);
11273 }
11274 
/*
 * TLV iterator callback for the WMI "ready" event. Extracts the
 * firmware init status and SoC MAC address from the fixed parameters,
 * and optional per-pdev MAC addresses from the trailing fixed-struct
 * array (only used on multi-radio devices).
 */
int
qwz_wmi_tlv_rdy_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	struct wmi_tlv_rdy_parse *rdy_parse = data;
	struct wmi_ready_event fixed_param;
	struct wmi_mac_addr *addr_list;
	struct qwz_pdev *pdev;
	uint32_t num_mac_addr;
	int i;

	switch (tag) {
	case WMI_TAG_READY_EVENT:
		/* Copy at most len bytes; older firmware may send a
		 * shorter (min) version of this structure. */
		memset(&fixed_param, 0, sizeof(fixed_param));
		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
		       MIN(sizeof(fixed_param), len));
		sc->wlan_init_status = fixed_param.ready_event_min.status;
		rdy_parse->num_extra_mac_addr =
			fixed_param.ready_event_min.num_extra_mac_addr;

		IEEE80211_ADDR_COPY(sc->mac_addr,
		    fixed_param.ready_event_min.mac_addr.addr);
		sc->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
		sc->wmi_ready = 1;
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		addr_list = (struct wmi_mac_addr *)ptr;
		num_mac_addr = rdy_parse->num_extra_mac_addr;

		/* Per-pdev addresses only apply on multi-radio devices
		 * and only when firmware supplied enough of them. */
		if (!(sc->num_radios > 1 && num_mac_addr >= sc->num_radios))
			break;

		for (i = 0; i < sc->num_radios; i++) {
			pdev = &sc->pdevs[i];
			IEEE80211_ADDR_COPY(pdev->mac_addr, addr_list[i].addr);
		}
		sc->pdevs_macaddr_valid = 1;
		break;
	default:
		break;
	}

	return 0;
}
11319 
11320 void
11321 qwz_ready_event(struct qwz_softc *sc, struct mbuf *m)
11322 {
11323 	struct wmi_tlv_rdy_parse rdy_parse = { };
11324 	int ret;
11325 
11326 	ret = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11327 	    qwz_wmi_tlv_rdy_parse, &rdy_parse);
11328 	if (ret) {
11329 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11330 		return;
11331 	}
11332 
11333 	DNPRINTF(QWZ_D_WMI, "%s: event ready", __func__);
11334 
11335 	sc->wmi.unified_ready = 1;
11336 	wakeup(&sc->wmi.unified_ready);
11337 }
11338 
11339 int
11340 qwz_pull_peer_del_resp_ev(struct qwz_softc *sc, struct mbuf *m,
11341     struct wmi_peer_delete_resp_event *peer_del_resp)
11342 {
11343 	const void **tb;
11344 	const struct wmi_peer_delete_resp_event *ev;
11345 	int ret;
11346 
11347 	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11348 	if (tb == NULL) {
11349 		ret = ENOMEM;
11350 		printf("%s: failed to parse tlv: %d\n",
11351 		    sc->sc_dev.dv_xname, ret);
11352 		return ret;
11353 	}
11354 
11355 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
11356 	if (!ev) {
11357 		printf("%s: failed to fetch peer delete resp ev\n",
11358 		    sc->sc_dev.dv_xname);
11359 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11360 		return EPROTO;
11361 	}
11362 
11363 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
11364 
11365 	peer_del_resp->vdev_id = ev->vdev_id;
11366 	IEEE80211_ADDR_COPY(peer_del_resp->peer_macaddr.addr,
11367 	    ev->peer_macaddr.addr);
11368 
11369 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11370 	return 0;
11371 }
11372 
11373 void
11374 qwz_peer_delete_resp_event(struct qwz_softc *sc, struct mbuf *m)
11375 {
11376 	struct wmi_peer_delete_resp_event peer_del_resp;
11377 
11378 	if (qwz_pull_peer_del_resp_ev(sc, m, &peer_del_resp) != 0) {
11379 		printf("%s: failed to extract peer delete resp",
11380 		    sc->sc_dev.dv_xname);
11381 		return;
11382 	}
11383 
11384 	sc->peer_delete_done = 1;
11385 	wakeup(&sc->peer_delete_done);
11386 
11387 	DNPRINTF(QWZ_D_WMI, "%s: peer delete resp for vdev id %d addr %s\n",
11388 	    __func__, peer_del_resp.vdev_id,
11389 	    ether_sprintf(peer_del_resp.peer_macaddr.addr));
11390 }
11391 
11392 const char *
11393 qwz_wmi_vdev_resp_print(uint32_t vdev_resp_status)
11394 {
11395 	switch (vdev_resp_status) {
11396 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
11397 		return "invalid vdev id";
11398 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
11399 		return "not supported";
11400 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
11401 		return "dfs violation";
11402 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
11403 		return "invalid regdomain";
11404 	default:
11405 		return "unknown";
11406 	}
11407 }
11408 
11409 int
11410 qwz_pull_vdev_start_resp_tlv(struct qwz_softc *sc, struct mbuf *m,
11411     struct wmi_vdev_start_resp_event *vdev_rsp)
11412 {
11413 	const void **tb;
11414 	const struct wmi_vdev_start_resp_event *ev;
11415 	int ret;
11416 
11417 	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11418 	if (tb == NULL) {
11419 		ret = ENOMEM;
11420 		printf("%s: failed to parse tlv: %d\n",
11421 		    sc->sc_dev.dv_xname, ret);
11422 		return ret;
11423 	}
11424 
11425 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
11426 	if (!ev) {
11427 		printf("%s: failed to fetch vdev start resp ev\n",
11428 		    sc->sc_dev.dv_xname);
11429 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11430 		return EPROTO;
11431 	}
11432 
11433 	memset(vdev_rsp, 0, sizeof(*vdev_rsp));
11434 
11435 	vdev_rsp->vdev_id = ev->vdev_id;
11436 	vdev_rsp->requestor_id = ev->requestor_id;
11437 	vdev_rsp->resp_type = ev->resp_type;
11438 	vdev_rsp->status = ev->status;
11439 	vdev_rsp->chain_mask = ev->chain_mask;
11440 	vdev_rsp->smps_mode = ev->smps_mode;
11441 	vdev_rsp->mac_id = ev->mac_id;
11442 	vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
11443 	vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
11444 
11445 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11446 	return 0;
11447 }
11448 
11449 void
11450 qwz_vdev_start_resp_event(struct qwz_softc *sc, struct mbuf *m)
11451 {
11452 	struct wmi_vdev_start_resp_event vdev_start_resp;
11453 	uint32_t status;
11454 
11455 	if (qwz_pull_vdev_start_resp_tlv(sc, m, &vdev_start_resp) != 0) {
11456 		printf("%s: failed to extract vdev start resp",
11457 		    sc->sc_dev.dv_xname);
11458 		return;
11459 	}
11460 
11461 	status = vdev_start_resp.status;
11462 	if (status) {
11463 		printf("%s: vdev start resp error status %d (%s)\n",
11464 		    sc->sc_dev.dv_xname, status,
11465 		   qwz_wmi_vdev_resp_print(status));
11466 	}
11467 
11468 	sc->vdev_setup_done = 1;
11469 	wakeup(&sc->vdev_setup_done);
11470 
11471 	DNPRINTF(QWZ_D_WMI, "%s: vdev start resp for vdev id %d", __func__,
11472 	    vdev_start_resp.vdev_id);
11473 }
11474 
11475 int
11476 qwz_pull_vdev_stopped_param_tlv(struct qwz_softc *sc, struct mbuf *m,
11477     uint32_t *vdev_id)
11478 {
11479 	const void **tb;
11480 	const struct wmi_vdev_stopped_event *ev;
11481 	int ret;
11482 
11483 	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11484 	if (tb == NULL) {
11485 		ret = ENOMEM;
11486 		printf("%s: failed to parse tlv: %d\n",
11487 		    sc->sc_dev.dv_xname, ret);
11488 		return ret;
11489 	}
11490 
11491 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
11492 	if (!ev) {
11493 		printf("%s: failed to fetch vdev stop ev\n",
11494 		    sc->sc_dev.dv_xname);
11495 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11496 		return EPROTO;
11497 	}
11498 
11499 	*vdev_id = ev->vdev_id;
11500 
11501 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11502 	return 0;
11503 }
11504 
11505 void
11506 qwz_vdev_stopped_event(struct qwz_softc *sc, struct mbuf *m)
11507 {
11508 	uint32_t vdev_id = 0;
11509 
11510 	if (qwz_pull_vdev_stopped_param_tlv(sc, m, &vdev_id) != 0) {
11511 		printf("%s: failed to extract vdev stopped event\n",
11512 		    sc->sc_dev.dv_xname);
11513 		return;
11514 	}
11515 
11516 	sc->vdev_setup_done = 1;
11517 	wakeup(&sc->vdev_setup_done);
11518 
11519 	DNPRINTF(QWZ_D_WMI, "%s: vdev stopped for vdev id %d", __func__,
11520 	    vdev_id);
11521 }
11522 
11523 int
11524 qwz_wmi_tlv_iter_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
11525     const void *ptr, void *data)
11526 {
11527 	const void **tb = data;
11528 
11529 	if (tag < WMI_TAG_MAX)
11530 		tb[tag] = ptr;
11531 
11532 	return 0;
11533 }
11534 
11535 int
11536 qwz_wmi_tlv_parse(struct qwz_softc *sc, const void **tb,
11537     const void *ptr, size_t len)
11538 {
11539 	return qwz_wmi_tlv_iter(sc, ptr, len, qwz_wmi_tlv_iter_parse,
11540 	    (void *)tb);
11541 }
11542 
11543 const void **
11544 qwz_wmi_tlv_parse_alloc(struct qwz_softc *sc, const void *ptr, size_t len)
11545 {
11546 	const void **tb;
11547 	int ret;
11548 
11549 	tb = mallocarray(WMI_TAG_MAX, sizeof(*tb), M_DEVBUF, M_NOWAIT | M_ZERO);
11550 	if (!tb)
11551 		return NULL;
11552 
11553 	ret = qwz_wmi_tlv_parse(sc, tb, ptr, len);
11554 	if (ret) {
11555 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11556 		return NULL;
11557 	}
11558 
11559 	return tb;
11560 }
11561 
11562 static void
11563 qwz_print_reg_rule(struct qwz_softc *sc, const char *band,
11564     uint32_t num_reg_rules, struct cur_reg_rule *reg_rule_ptr)
11565 {
11566 	struct cur_reg_rule *reg_rule = reg_rule_ptr;
11567 	uint32_t count;
11568 
11569 	DNPRINTF(QWZ_D_WMI, "%s: number of reg rules in %s band: %d\n",
11570 	    __func__, band, num_reg_rules);
11571 
11572 	for (count = 0; count < num_reg_rules; count++) {
11573 		DNPRINTF(QWZ_D_WMI,
11574 		    "%s: reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n",
11575 		    __func__, count + 1, reg_rule->start_freq,
11576 		    reg_rule->end_freq, reg_rule->max_bw, reg_rule->ant_gain,
11577 		    reg_rule->reg_power, reg_rule->flags);
11578 		reg_rule++;
11579 	}
11580 }
11581 
11582 struct cur_reg_rule *
11583 qwz_create_reg_rules_from_wmi(uint32_t num_reg_rules,
11584     struct wmi_regulatory_rule_struct *wmi_reg_rule)
11585 {
11586 	struct cur_reg_rule *reg_rule_ptr;
11587 	uint32_t count;
11588 
11589 	reg_rule_ptr = mallocarray(num_reg_rules, sizeof(*reg_rule_ptr),
11590 	    M_DEVBUF, M_NOWAIT | M_ZERO);
11591 	if (!reg_rule_ptr)
11592 		return NULL;
11593 
11594 	for (count = 0; count < num_reg_rules; count++) {
11595 		reg_rule_ptr[count].start_freq = FIELD_GET(REG_RULE_START_FREQ,
11596 		    wmi_reg_rule[count].freq_info);
11597 		reg_rule_ptr[count].end_freq = FIELD_GET(REG_RULE_END_FREQ,
11598 		    wmi_reg_rule[count].freq_info);
11599 		reg_rule_ptr[count].max_bw = FIELD_GET(REG_RULE_MAX_BW,
11600 		    wmi_reg_rule[count].bw_pwr_info);
11601 		reg_rule_ptr[count].reg_power = FIELD_GET(REG_RULE_REG_PWR,
11602 		    wmi_reg_rule[count].bw_pwr_info);
11603 		reg_rule_ptr[count].ant_gain = FIELD_GET(REG_RULE_ANT_GAIN,
11604 		    wmi_reg_rule[count].bw_pwr_info);
11605 		reg_rule_ptr[count].flags = FIELD_GET(REG_RULE_FLAGS,
11606 		    wmi_reg_rule[count].flag_info);
11607 	}
11608 
11609 	return reg_rule_ptr;
11610 }
11611 
11612 int
11613 qwz_pull_reg_chan_list_update_ev(struct qwz_softc *sc, struct mbuf *m,
11614     struct cur_regulatory_info *reg_info)
11615 {
11616 	const void **tb;
11617 	const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
11618 	struct wmi_regulatory_rule_struct *wmi_reg_rule;
11619 	uint32_t num_2ghz_reg_rules, num_5ghz_reg_rules;
11620 	int ret;
11621 
11622 	DNPRINTF(QWZ_D_WMI, "%s: processing regulatory channel list\n",
11623 	    __func__);
11624 
11625 	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11626 	if (tb == NULL) {
11627 		ret = ENOMEM; /* XXX allocation failure or parsing failure? */
11628 		printf("%s: failed to parse tlv: %d\n", __func__, ret);
11629 		return ENOMEM;
11630 	}
11631 
11632 	chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
11633 	if (!chan_list_event_hdr) {
11634 		printf("%s: failed to fetch reg chan list update ev\n",
11635 		    __func__);
11636 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11637 		return EPROTO;
11638 	}
11639 
11640 	reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules;
11641 	reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules;
11642 
11643 	if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) {
11644 		printf("%s: No regulatory rules available in the event info\n",
11645 		    __func__);
11646 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11647 		return EINVAL;
11648 	}
11649 
11650 	memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2, REG_ALPHA2_LEN);
11651 	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
11652 	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
11653 	reg_info->num_phy = chan_list_event_hdr->num_phy;
11654 	reg_info->phy_id = chan_list_event_hdr->phy_id;
11655 	reg_info->ctry_code = chan_list_event_hdr->country_id;
11656 	reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
11657 
11658 	DNPRINTF(QWZ_D_WMI, "%s: CC status_code %s\n", __func__,
11659 	    qwz_cc_status_to_str(reg_info->status_code));
11660 
11661 	reg_info->status_code =
11662 		qwz_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code);
11663 
11664 	reg_info->is_ext_reg_event = false;
11665 
11666 	reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz;
11667 	reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz;
11668 	reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz;
11669 	reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz;
11670 
11671 	num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
11672 	num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
11673 
11674 	DNPRINTF(QWZ_D_WMI,
11675 	    "%s: cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d "
11676 	    "max_5ghz %d\n", __func__, reg_info->alpha2, reg_info->dfs_region,
11677 	    reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
11678 	    reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
11679 
11680 	DNPRINTF(QWZ_D_WMI,
11681 	    "%s: num_2ghz_reg_rules %d num_5ghz_reg_rules %d\n", __func__,
11682 	    num_2ghz_reg_rules, num_5ghz_reg_rules);
11683 
11684 	wmi_reg_rule = (struct wmi_regulatory_rule_struct *)
11685 	    ((uint8_t *)chan_list_event_hdr + sizeof(*chan_list_event_hdr)
11686 	    + sizeof(struct wmi_tlv));
11687 
11688 	if (num_2ghz_reg_rules) {
11689 		reg_info->reg_rules_2ghz_ptr = qwz_create_reg_rules_from_wmi(
11690 		    num_2ghz_reg_rules, wmi_reg_rule);
11691 		if (!reg_info->reg_rules_2ghz_ptr) {
11692 			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11693 			printf("%s: Unable to allocate memory for "
11694 			    "2 GHz rules\n", __func__);
11695 			return ENOMEM;
11696 		}
11697 
11698 		qwz_print_reg_rule(sc, "2 GHz", num_2ghz_reg_rules,
11699 		    reg_info->reg_rules_2ghz_ptr);
11700 	}
11701 
11702 	if (num_5ghz_reg_rules) {
11703 		wmi_reg_rule += num_2ghz_reg_rules;
11704 		reg_info->reg_rules_5ghz_ptr = qwz_create_reg_rules_from_wmi(
11705 		    num_5ghz_reg_rules, wmi_reg_rule);
11706 		if (!reg_info->reg_rules_5ghz_ptr) {
11707 			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11708 			printf("%s: Unable to allocate memory for "
11709 			    "5 GHz rules\n", __func__);
11710 			return ENOMEM;
11711 		}
11712 
11713 		qwz_print_reg_rule(sc, "5 GHz", num_5ghz_reg_rules,
11714 		    reg_info->reg_rules_5ghz_ptr);
11715 	}
11716 
11717 	DNPRINTF(QWZ_D_WMI, "%s: processed regulatory channel list\n",
11718 	    __func__);
11719 
11720 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11721 	return 0;
11722 }
11723 
11724 int
11725 qwz_pull_reg_chan_list_ext_update_ev(struct qwz_softc *sc, struct mbuf *m,
11726     struct cur_regulatory_info *reg_info)
11727 {
11728 	printf("%s: not implemented\n", __func__);
11729 	return ENOTSUP;
11730 }
11731 
/*
 * Apply regulatory rules to the net80211 channel table. For every 2 GHz
 * and 5 GHz rule, enable or disable the ieee80211 channels covered by
 * the rule's frequency range, setting the appropriate channel flags.
 * Rules with implausible frequency ranges are logged and skipped.
 */
void
qwz_init_channels(struct qwz_softc *sc, struct cur_regulatory_info *reg_info)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *chan;
	struct cur_reg_rule *rule;
	int i, chnum;
	uint16_t freq;

	for (i = 0; i < reg_info->num_2ghz_reg_rules; i++) {
		rule = &reg_info->reg_rules_2ghz_ptr[i];
		/* Sanity-check the rule against the 2 GHz band edges. */
		if (rule->start_freq < 2402 ||
		    rule->start_freq > 2500 ||
		    rule->start_freq > rule->end_freq) {
			DPRINTF("%s: bad regulatory rule: start freq %u, "
			    "end freq %u\n", __func__, rule->start_freq,
			    rule->end_freq);
			continue;
		}

		/* Rule edges are band edges; +10 MHz lands on a center. */
		freq = rule->start_freq + 10;
		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_2GHZ);
		if (chnum < 1 || chnum > 14) {
			DPRINTF("%s: bad regulatory rule: freq %u, "
			    "channel %u\n", __func__, freq, chnum);
			continue;
		}
		/* Walk consecutive 2 GHz channels covered by this rule. */
		while (freq <= rule->end_freq && chnum <= 14) {
			chan = &ic->ic_channels[chnum];
			if (rule->flags & REGULATORY_CHAN_DISABLED) {
				chan->ic_freq = 0;
				chan->ic_flags = 0;
			} else {
				chan->ic_freq = freq;
				chan->ic_flags = IEEE80211_CHAN_CCK |
				    IEEE80211_CHAN_OFDM |
				    IEEE80211_CHAN_DYN |
				    IEEE80211_CHAN_2GHZ;
			}
			chnum++;
			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_2GHZ);
		}
	}

	for (i = 0; i < reg_info->num_5ghz_reg_rules; i++) {
		rule = &reg_info->reg_rules_5ghz_ptr[i];
		/* Sanity-check the rule against the 5 GHz band edges. */
		if (rule->start_freq < 5170 ||
		    rule->start_freq > 6000 ||
		    rule->start_freq > rule->end_freq) {
			DPRINTF("%s: bad regulatory rule: start freq %u, "
			    "end freq %u\n", __func__, rule->start_freq,
			    rule->end_freq);
			continue;
		}

		freq = rule->start_freq + 10;
		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_5GHZ);
		if (chnum < 36 || chnum > IEEE80211_CHAN_MAX) {
			DPRINTF("%s: bad regulatory rule: freq %u, "
			    "channel %u\n", __func__, freq, chnum);
			continue;
		}
		/* 5 GHz channels are spaced 20 MHz apart (chnum += 4). */
		while (freq <= rule->end_freq && freq <= 5885 &&
		    chnum <= IEEE80211_CHAN_MAX) {
			chan = &ic->ic_channels[chnum];
			if (rule->flags & (REGULATORY_CHAN_DISABLED |
			    REGULATORY_CHAN_NO_OFDM)) {
				chan->ic_freq = 0;
				chan->ic_flags = 0;
			} else {
				chan->ic_freq = freq;
				chan->ic_flags = IEEE80211_CHAN_A;
				/* Radar/no-IR/indoor channels need passive scan. */
				if (rule->flags & (REGULATORY_CHAN_RADAR |
				    REGULATORY_CHAN_NO_IR |
				    REGULATORY_CHAN_INDOOR_ONLY)) {
					chan->ic_flags |=
					    IEEE80211_CHAN_PASSIVE;
				}
			}
			chnum += 4;
			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_5GHZ);
		}
	}
}
11816 
/*
 * Handle a WMI regulatory channel list event. Extracts regulatory info
 * from the event and, on success, applies it to the net80211 channel
 * table via qwz_init_channels(). The #if 0 sections preserve upstream
 * ath12k regdomain logic that has not been ported. Returns 0 on success
 * or an errno value.
 */
int
qwz_reg_chan_list_event(struct qwz_softc *sc, struct mbuf *m,
    enum wmi_reg_chan_list_cmd_type id)
{
	struct cur_regulatory_info *reg_info = NULL;
	int ret = 0;
#if 0
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int pdev_idx, i, j;
	struct ath12k *ar;
#endif

	reg_info = malloc(sizeof(*reg_info), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!reg_info) {
		ret = ENOMEM;
		goto fallback;
	}

	/* The extended (ext) variant currently returns ENOTSUP. */
	if (id == WMI_REG_CHAN_LIST_CC_ID)
		ret = qwz_pull_reg_chan_list_update_ev(sc, m, reg_info);
	else
		ret = qwz_pull_reg_chan_list_ext_update_ev(sc, m, reg_info);

	if (ret) {
		printf("%s: failed to extract regulatory info from "
		    "received event\n", sc->sc_dev.dv_xname);
		goto fallback;
	}

	DNPRINTF(QWZ_D_WMI, "%s: event reg chan list id %d\n", __func__, id);

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested ctry,
		 * fw retains the current regd. We print a failure info
		 * and return from here.
		 */
		printf("%s: Failed to set the requested Country "
		    "regulatory setting\n", __func__);
		goto mem_free;
	}

	qwz_init_channels(sc, reg_info);
#if 0
	pdev_idx = reg_info->phy_id;

	/* Avoid default reg rule updates sent during FW recovery if
	 * it is already available
	 */
	spin_lock(&ab->base_lock);
	if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags) &&
	    ab->default_regd[pdev_idx]) {
		spin_unlock(&ab->base_lock);
		goto mem_free;
	}
	spin_unlock(&ab->base_lock);

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, it goes to fallback.
		 */
		if (ab->hw_params.single_pdev_only &&
		    pdev_idx < ab->hw_params.num_rxmda_per_pdev)
			goto mem_free;
		else
			goto fallback;
	}

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
		    (char *)reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with default regd if a new country setting was
	 * requested, i.e a default regd was already set during initialization
	 * and the regd coming from this event has a valid country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath12k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;

	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath12k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	spin_lock(&ab->base_lock);
	if (ab->default_regd[pdev_idx]) {
		/* The initial rules from FW after WMI Init is to build
		 * the default regd. From then on, any rules updated for
		 * the pdev could be due to user reg changes.
		 * Free previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling will be
		 * taken care by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		queue_work(ab->workqueue, &ar->regd_update_work);
	} else {
		/* This regd would be applied during mac registration and is
		 * held constant throughout for regd intersection purpose
		 */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock(&ab->base_lock);
#endif
	goto mem_free;

fallback:
	/* Fallback to older reg (by sending previous country setting
	 * again if fw has succeeded and we failed to process here.
	 * The Regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
mem_free:
	/* Sized frees must match the allocations in
	 * qwz_create_reg_rules_from_wmi(); free(9) ignores NULL pointers. */
	if (reg_info) {
		free(reg_info->reg_rules_2ghz_ptr, M_DEVBUF,
		    reg_info->num_2ghz_reg_rules *
		    sizeof(*reg_info->reg_rules_2ghz_ptr));
		free(reg_info->reg_rules_5ghz_ptr, M_DEVBUF,
		    reg_info->num_5ghz_reg_rules *
		    sizeof(*reg_info->reg_rules_5ghz_ptr));
#if 0
		if (reg_info->is_ext_reg_event) {
			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
				kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);

			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
					kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
		}
#endif
		free(reg_info, M_DEVBUF, sizeof(*reg_info));
	}
	return ret;
}
11965 
11966 const char *
11967 qwz_wmi_event_scan_type_str(enum wmi_scan_event_type type,
11968     enum wmi_scan_completion_reason reason)
11969 {
11970 	switch (type) {
11971 	case WMI_SCAN_EVENT_STARTED:
11972 		return "started";
11973 	case WMI_SCAN_EVENT_COMPLETED:
11974 		switch (reason) {
11975 		case WMI_SCAN_REASON_COMPLETED:
11976 			return "completed";
11977 		case WMI_SCAN_REASON_CANCELLED:
11978 			return "completed [cancelled]";
11979 		case WMI_SCAN_REASON_PREEMPTED:
11980 			return "completed [preempted]";
11981 		case WMI_SCAN_REASON_TIMEDOUT:
11982 			return "completed [timedout]";
11983 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
11984 			return "completed [internal err]";
11985 		case WMI_SCAN_REASON_MAX:
11986 			break;
11987 		}
11988 		return "completed [unknown]";
11989 	case WMI_SCAN_EVENT_BSS_CHANNEL:
11990 		return "bss channel";
11991 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
11992 		return "foreign channel";
11993 	case WMI_SCAN_EVENT_DEQUEUED:
11994 		return "dequeued";
11995 	case WMI_SCAN_EVENT_PREEMPTED:
11996 		return "preempted";
11997 	case WMI_SCAN_EVENT_START_FAILED:
11998 		return "start failed";
11999 	case WMI_SCAN_EVENT_RESTARTED:
12000 		return "restarted";
12001 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
12002 		return "foreign channel exit";
12003 	default:
12004 		return "unknown";
12005 	}
12006 }
12007 
12008 const char *
12009 qwz_scan_state_str(enum ath12k_scan_state state)
12010 {
12011 	switch (state) {
12012 	case ATH12K_SCAN_IDLE:
12013 		return "idle";
12014 	case ATH12K_SCAN_STARTING:
12015 		return "starting";
12016 	case ATH12K_SCAN_RUNNING:
12017 		return "running";
12018 	case ATH12K_SCAN_ABORTING:
12019 		return "aborting";
12020 	}
12021 
12022 	return "unknown";
12023 }
12024 
12025 int
12026 qwz_pull_scan_ev(struct qwz_softc *sc, struct mbuf *m,
12027     struct wmi_scan_event *scan_evt_param)
12028 {
12029 	const void **tb;
12030 	const struct wmi_scan_event *ev;
12031 
12032 	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12033 	if (tb == NULL) {
12034 		DPRINTF("%s: failed to parse tlv\n", __func__);
12035 		return EINVAL;
12036 	}
12037 
12038 	ev = tb[WMI_TAG_SCAN_EVENT];
12039 	if (!ev) {
12040 		DPRINTF("%s: failed to fetch scan ev\n", __func__);
12041 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12042 		return EPROTO;
12043 	}
12044 
12045 	scan_evt_param->event_type = ev->event_type;
12046 	scan_evt_param->reason = ev->reason;
12047 	scan_evt_param->channel_freq = ev->channel_freq;
12048 	scan_evt_param->scan_req_id = ev->scan_req_id;
12049 	scan_evt_param->scan_id = ev->scan_id;
12050 	scan_evt_param->vdev_id = ev->vdev_id;
12051 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
12052 
12053 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12054 	return 0;
12055 }
12056 
12057 void
12058 qwz_wmi_event_scan_started(struct qwz_softc *sc)
12059 {
12060 #ifdef notyet
12061 	lockdep_assert_held(&ar->data_lock);
12062 #endif
12063 	switch (sc->scan.state) {
12064 	case ATH12K_SCAN_IDLE:
12065 	case ATH12K_SCAN_RUNNING:
12066 	case ATH12K_SCAN_ABORTING:
12067 		printf("%s: received scan started event in an invalid "
12068 		"scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12069 		qwz_scan_state_str(sc->scan.state), sc->scan.state);
12070 		break;
12071 	case ATH12K_SCAN_STARTING:
12072 		sc->scan.state = ATH12K_SCAN_RUNNING;
12073 #if 0
12074 		if (ar->scan.is_roc)
12075 			ieee80211_ready_on_channel(ar->hw);
12076 #endif
12077 		wakeup(&sc->scan.state);
12078 		break;
12079 	}
12080 }
12081 
12082 void
12083 qwz_wmi_event_scan_completed(struct qwz_softc *sc)
12084 {
12085 #ifdef notyet
12086 	lockdep_assert_held(&ar->data_lock);
12087 #endif
12088 	switch (sc->scan.state) {
12089 	case ATH12K_SCAN_IDLE:
12090 	case ATH12K_SCAN_STARTING:
12091 		/* One suspected reason scan can be completed while starting is
12092 		 * if firmware fails to deliver all scan events to the host,
12093 		 * e.g. when transport pipe is full. This has been observed
12094 		 * with spectral scan phyerr events starving wmi transport
12095 		 * pipe. In such case the "scan completed" event should be (and
12096 		 * is) ignored by the host as it may be just firmware's scan
12097 		 * state machine recovering.
12098 		 */
12099 		printf("%s: received scan completed event in an invalid "
12100 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12101 		    qwz_scan_state_str(sc->scan.state), sc->scan.state);
12102 		break;
12103 	case ATH12K_SCAN_RUNNING:
12104 	case ATH12K_SCAN_ABORTING:
12105 		qwz_mac_scan_finish(sc);
12106 		break;
12107 	}
12108 }
12109 
12110 void
12111 qwz_wmi_event_scan_bss_chan(struct qwz_softc *sc)
12112 {
12113 #ifdef notyet
12114 	lockdep_assert_held(&ar->data_lock);
12115 #endif
12116 	switch (sc->scan.state) {
12117 	case ATH12K_SCAN_IDLE:
12118 	case ATH12K_SCAN_STARTING:
12119 		printf("%s: received scan bss chan event in an invalid "
12120 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12121 		    qwz_scan_state_str(sc->scan.state), sc->scan.state);
12122 		break;
12123 	case ATH12K_SCAN_RUNNING:
12124 	case ATH12K_SCAN_ABORTING:
12125 		sc->scan_channel = 0;
12126 		break;
12127 	}
12128 }
12129 
12130 void
12131 qwz_wmi_event_scan_foreign_chan(struct qwz_softc *sc, uint32_t freq)
12132 {
12133 #ifdef notyet
12134 	lockdep_assert_held(&ar->data_lock);
12135 #endif
12136 	switch (sc->scan.state) {
12137 	case ATH12K_SCAN_IDLE:
12138 	case ATH12K_SCAN_STARTING:
12139 		printf("%s: received scan foreign chan event in an invalid "
12140 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12141 		    qwz_scan_state_str(sc->scan.state), sc->scan.state);
12142 		break;
12143 	case ATH12K_SCAN_RUNNING:
12144 	case ATH12K_SCAN_ABORTING:
12145 		sc->scan_channel = ieee80211_mhz2ieee(freq, 0);
12146 #if 0
12147 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
12148 			complete(&ar->scan.on_channel);
12149 #endif
12150 		break;
12151 	}
12152 }
12153 
12154 void
12155 qwz_wmi_event_scan_start_failed(struct qwz_softc *sc)
12156 {
12157 #ifdef notyet
12158 	lockdep_assert_held(&ar->data_lock);
12159 #endif
12160 	switch (sc->scan.state) {
12161 	case ATH12K_SCAN_IDLE:
12162 	case ATH12K_SCAN_RUNNING:
12163 	case ATH12K_SCAN_ABORTING:
12164 		printf("%s: received scan start failed event in an invalid "
12165 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12166 		    qwz_scan_state_str(sc->scan.state), sc->scan.state);
12167 		break;
12168 	case ATH12K_SCAN_STARTING:
12169 		wakeup(&sc->scan.state);
12170 		qwz_mac_scan_finish(sc);
12171 		break;
12172 	}
12173 }
12174 
12175 
12176 void
12177 qwz_scan_event(struct qwz_softc *sc, struct mbuf *m)
12178 {
12179 	struct wmi_scan_event scan_ev = { 0 };
12180 	struct qwz_vif *arvif;
12181 
12182 	if (qwz_pull_scan_ev(sc, m, &scan_ev) != 0) {
12183 		printf("%s: failed to extract scan event",
12184 		    sc->sc_dev.dv_xname);
12185 		return;
12186 	}
12187 #ifdef notyet
12188 	rcu_read_lock();
12189 #endif
12190 	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
12191 		if (arvif->vdev_id == scan_ev.vdev_id)
12192 			break;
12193 	}
12194 
12195 	if (!arvif) {
12196 		printf("%s: received scan event for unknown vdev\n",
12197 		    sc->sc_dev.dv_xname);
12198 #if 0
12199 		rcu_read_unlock();
12200 #endif
12201 		return;
12202 	}
12203 #if 0
12204 	spin_lock_bh(&ar->data_lock);
12205 #endif
12206 	DNPRINTF(QWZ_D_WMI,
12207 	    "%s: event scan %s type %d reason %d freq %d req_id %d scan_id %d "
12208 	    "vdev_id %d state %s (%d)\n", __func__,
12209 	    qwz_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
12210 	    scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
12211 	    scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
12212 	    qwz_scan_state_str(sc->scan.state), sc->scan.state);
12213 
12214 	switch (scan_ev.event_type) {
12215 	case WMI_SCAN_EVENT_STARTED:
12216 		qwz_wmi_event_scan_started(sc);
12217 		break;
12218 	case WMI_SCAN_EVENT_COMPLETED:
12219 		qwz_wmi_event_scan_completed(sc);
12220 		break;
12221 	case WMI_SCAN_EVENT_BSS_CHANNEL:
12222 		qwz_wmi_event_scan_bss_chan(sc);
12223 		break;
12224 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
12225 		qwz_wmi_event_scan_foreign_chan(sc, scan_ev.channel_freq);
12226 		break;
12227 	case WMI_SCAN_EVENT_START_FAILED:
12228 		printf("%s: received scan start failure event\n",
12229 		    sc->sc_dev.dv_xname);
12230 		qwz_wmi_event_scan_start_failed(sc);
12231 		break;
12232 	case WMI_SCAN_EVENT_DEQUEUED:
12233 		qwz_mac_scan_finish(sc);
12234 		break;
12235 	case WMI_SCAN_EVENT_PREEMPTED:
12236 	case WMI_SCAN_EVENT_RESTARTED:
12237 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
12238 	default:
12239 		break;
12240 	}
12241 #if 0
12242 	spin_unlock_bh(&ar->data_lock);
12243 
12244 	rcu_read_unlock();
12245 #endif
12246 }
12247 
12248 int
12249 qwz_pull_chan_info_ev(struct qwz_softc *sc, uint8_t *evt_buf, uint32_t len,
12250     struct wmi_chan_info_event *ch_info_ev)
12251 {
12252 	const void **tb;
12253 	const struct wmi_chan_info_event *ev;
12254 
12255 	tb = qwz_wmi_tlv_parse_alloc(sc, evt_buf, len);
12256 	if (tb == NULL) {
12257 		printf("%s: failed to parse tlv\n", sc->sc_dev.dv_xname);
12258 		return EINVAL;
12259 	}
12260 
12261 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
12262 	if (!ev) {
12263 		printf("%s: failed to fetch chan info ev\n",
12264 		    sc->sc_dev.dv_xname);
12265 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12266 		return EPROTO;
12267 	}
12268 
12269 	ch_info_ev->err_code = ev->err_code;
12270 	ch_info_ev->freq = ev->freq;
12271 	ch_info_ev->cmd_flags = ev->cmd_flags;
12272 	ch_info_ev->noise_floor = ev->noise_floor;
12273 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
12274 	ch_info_ev->cycle_count = ev->cycle_count;
12275 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
12276 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
12277 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
12278 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
12279 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
12280 	ch_info_ev->vdev_id = ev->vdev_id;
12281 
12282 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12283 	return 0;
12284 }
12285 
/*
 * Handle a WMI channel info event. During a scan these events carry
 * per-channel counters which are folded into sc->survey[] statistics.
 * Events without a matching vif or outside an active scan are ignored.
 */
void
qwz_chan_info_event(struct qwz_softc *sc, struct mbuf *m)
{
	struct qwz_vif *arvif;
	struct wmi_chan_info_event ch_info_ev = {0};
	struct qwz_survey_info *survey;
	int idx;
	/* HW channel counters frequency value in hertz */
	uint32_t cc_freq_hz = sc->cc_freq_hz;

	if (qwz_pull_chan_info_ev(sc, mtod(m, void *), m->m_pkthdr.len,
	    &ch_info_ev) != 0) {
		printf("%s: failed to extract chan info event\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	DNPRINTF(QWZ_D_WMI, "%s: event chan info vdev_id %d err_code %d "
	    "freq %d cmd_flags %d noise_floor %d rx_clear_count %d "
	    "cycle_count %d mac_clk_mhz %d\n", __func__,
	    ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
	    ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
	    ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
	    ch_info_ev.mac_clk_mhz);

	/* An end-response marks the end of a channel's report; no data. */
	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
		DNPRINTF(QWZ_D_WMI, "chan info report completed\n");
		return;
	}
#ifdef notyet
	rcu_read_lock();
#endif
	/* Find the vif whose vdev id matches the event's vdev id. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->vdev_id == ch_info_ev.vdev_id)
			break;
	}
	if (!arvif) {
		printf("%s: invalid vdev id in chan info ev %d\n",
		   sc->sc_dev.dv_xname, ch_info_ev.vdev_id);
#ifdef notyet
		rcu_read_unlock();
#endif
		return;
	}
#ifdef notyet
	spin_lock_bh(&ar->data_lock);
#endif
	/* Channel info is only meaningful while a scan is in progress. */
	switch (sc->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_STARTING:
		printf("%s: received chan info event without a scan request, "
		    "ignoring\n", sc->sc_dev.dv_xname);
		goto exit;
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		break;
	}

	/* The survey table is indexed by ieee channel number. */
	idx = ieee80211_mhz2ieee(ch_info_ev.freq, 0);
	if (idx >= nitems(sc->survey)) {
		printf("%s: invalid frequency %d (idx %d out of bounds)\n",
		    sc->sc_dev.dv_xname, ch_info_ev.freq, idx);
		goto exit;
	}

	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
	 * HW channel counters frequency value
	 */
	if (ch_info_ev.mac_clk_mhz)
		cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);

	/* A start-response resets this channel's survey statistics. */
	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
		survey = &sc->survey[idx];
		memset(survey, 0, sizeof(*survey));
		survey->noise = ch_info_ev.noise_floor;
		survey->time = ch_info_ev.cycle_count / cc_freq_hz;
		survey->time_busy = ch_info_ev.rx_clear_count / cc_freq_hz;
	}
exit:
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
#else
	return;
#endif
}
12372 
12373 int
12374 qwz_wmi_tlv_mgmt_rx_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
12375     const void *ptr, void *data)
12376 {
12377 	struct wmi_tlv_mgmt_rx_parse *parse = data;
12378 
12379 	switch (tag) {
12380 	case WMI_TAG_MGMT_RX_HDR:
12381 		parse->fixed = ptr;
12382 		break;
12383 	case WMI_TAG_ARRAY_BYTE:
12384 		if (!parse->frame_buf_done) {
12385 			parse->frame_buf = ptr;
12386 			parse->frame_buf_done = 1;
12387 		}
12388 		break;
12389 	}
12390 	return 0;
12391 }
12392 
/*
 * Extract management RX parameters from a WMI event mbuf.
 * On success, fills in `hdr` and trims `m` in place so that it begins at
 * the received 802.11 frame and is exactly hdr->buf_len bytes long.
 * Returns 0 on success or EPROTO/other errno on malformed input; on
 * failure `m` is left untouched and the caller must free it.
 */
int
qwz_pull_mgmt_rx_params_tlv(struct qwz_softc *sc, struct mbuf *m,
    struct mgmt_rx_event_params *hdr)
{
	struct wmi_tlv_mgmt_rx_parse parse = { 0 };
	const struct wmi_mgmt_rx_hdr *ev;
	const uint8_t *frame;
	int ret;
	size_t totlen, hdrlen;

	/* Walk the TLVs; collects the fixed header and frame pointer. */
	ret = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
	    qwz_wmi_tlv_mgmt_rx_parse, &parse);
	if (ret) {
		printf("%s: failed to parse mgmt rx tlv %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ev = parse.fixed;
	frame = parse.frame_buf;

	if (!ev || !frame) {
		printf("%s: failed to fetch mgmt rx hdr\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}

	hdr->pdev_id =  ev->pdev_id;
	hdr->chan_freq = le32toh(ev->chan_freq);
	hdr->channel = le32toh(ev->channel);
	hdr->snr = le32toh(ev->snr);
	hdr->rate = le32toh(ev->rate);
	hdr->phy_mode = le32toh(ev->phy_mode);
	hdr->buf_len = le32toh(ev->buf_len);
	hdr->status = le32toh(ev->status);
	hdr->flags = le32toh(ev->flags);
	hdr->rssi = le32toh(ev->rssi);
	hdr->tsf_delta = le32toh(ev->tsf_delta);
	memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));

	/* The frame pointer must lie inside the mbuf's data area. */
	if (frame < mtod(m, uint8_t *) ||
	    frame >= mtod(m, uint8_t *) + m->m_pkthdr.len) {
		printf("%s: invalid mgmt rx frame pointer\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}
	hdrlen = frame - mtod(m, uint8_t *);

	/* Guard against size_t wrap-around when adding buf_len. */
	if (hdrlen + hdr->buf_len < hdr->buf_len) {
		printf("%s: length overflow in mgmt rx hdr ev\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}
	totlen = hdrlen + hdr->buf_len;
	if (m->m_pkthdr.len < totlen) {
		printf("%s: invalid length in mgmt rx hdr ev\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}

	/* shift the mbuf to point at `frame` */
	/* Trim trailing bytes first, then strip the WMI header in front. */
	m->m_len = m->m_pkthdr.len = totlen;
	m_adj(m, hdrlen);

#if 0 /* Not needed on OpenBSD? */
	ath12k_ce_byte_swap(skb->data, hdr->buf_len);
#endif
	return 0;
}
12462 
/*
 * Handle a WMI management RX event and feed the contained 802.11 frame
 * into the net80211 input path. This function takes ownership of `m`:
 * on every path the mbuf is either freed here or consumed by
 * ieee80211_input(), so the caller must not touch it afterwards.
 */
void
qwz_mgmt_rx_event(struct qwz_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct mgmt_rx_event_params rx_ev = {0};
	struct ieee80211_rxinfo rxi;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	/* On success this trims `m` down to the bare 802.11 frame. */
	if (qwz_pull_mgmt_rx_params_tlv(sc, m, &rx_ev) != 0) {
		printf("%s: failed to extract mgmt rx event\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return;
	}

	memset(&rxi, 0, sizeof(rxi));

	DNPRINTF(QWZ_D_MGMT, "%s: event mgmt rx status %08x\n", __func__,
	    rx_ev.status);
#ifdef notyet
	rcu_read_lock();
#endif
	if (rx_ev.pdev_id >= nitems(sc->pdevs)) {
		printf("%s: invalid pdev_id %d in mgmt_rx_event\n",
		    sc->sc_dev.dv_xname, rx_ev.pdev_id);
		m_freem(m);
		goto exit;
	}

	/* Drop frames received during CAC or flagged bad by firmware. */
	if ((test_bit(ATH12K_CAC_RUNNING, sc->sc_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
		m_freem(m);
		goto exit;
	}

	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) {
		ic->ic_stats.is_ccmp_dec_errs++;
		m_freem(m);
		goto exit;
	}

	rxi.rxi_chan = rx_ev.channel;
	/* snr is relative to the noise floor; convert to absolute RSSI. */
	rxi.rxi_rssi = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
#if 0
	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
#endif

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);
#if 0
	/* In case of PMF, FW delivers decrypted frames with Protected Bit set.
	 * Don't clear that. Also, FW delivers broadcast management frames
	 * (ex: group privacy action frames in mesh) as encrypted payload.
	 */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	if (ieee80211_is_beacon(hdr->frame_control))
		ath12k_mac_handle_beacon(ar, skb);
#endif

	DNPRINTF(QWZ_D_MGMT,
	    "%s: event mgmt rx skb %p len %d ftype %02x stype %02x\n",
	    __func__, m, m->m_pkthdr.len,
	    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK,
	    wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);

	DNPRINTF(QWZ_D_MGMT, "%s: event mgmt rx freq %d chan %d snr %d\n",
	    __func__, rx_ev.chan_freq, rx_ev.channel, rx_ev.snr);

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct qwz_rx_radiotap_header *tap = &sc->sc_rxtap;

		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
		    m, BPF_DIRECTION_IN);
	}
#endif
	/* ieee80211_input() consumes the mbuf. */
	ieee80211_input(ifp, m, ni, &rxi);
	ieee80211_release_node(ic, ni);
exit:
#ifdef notyet
	rcu_read_unlock();
#else
	return;
#endif
}
12562 
12563 int
12564 qwz_pull_mgmt_tx_compl_param_tlv(struct qwz_softc *sc, struct mbuf *m,
12565     struct wmi_mgmt_tx_compl_event *param)
12566 {
12567 	const void **tb;
12568 	const struct wmi_mgmt_tx_compl_event *ev;
12569 	int ret = 0;
12570 
12571 	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12572 	if (tb == NULL) {
12573 		ret = ENOMEM;
12574 		printf("%s: failed to parse tlv: %d\n",
12575 		    sc->sc_dev.dv_xname, ret);
12576 		return ENOMEM;
12577 	}
12578 
12579 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
12580 	if (!ev) {
12581 		printf("%s: failed to fetch mgmt tx compl ev\n",
12582 		    sc->sc_dev.dv_xname);
12583 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12584 		return EPROTO;
12585 	}
12586 
12587 	param->pdev_id = ev->pdev_id;
12588 	param->desc_id = ev->desc_id;
12589 	param->status = ev->status;
12590 	param->ack_rssi = ev->ack_rssi;
12591 
12592 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12593 	return 0;
12594 }
12595 
12596 void
12597 qwz_wmi_process_mgmt_tx_comp(struct qwz_softc *sc,
12598     struct wmi_mgmt_tx_compl_event *tx_compl_param)
12599 {
12600 	struct ieee80211com *ic = &sc->sc_ic;
12601 	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
12602 	struct ifnet *ifp = &ic->ic_if;
12603 	struct qwz_tx_data *tx_data;
12604 
12605 	if (tx_compl_param->desc_id >= nitems(arvif->txmgmt.data)) {
12606 		printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
12607 		    sc->sc_dev.dv_xname, tx_compl_param->desc_id);
12608 		return;
12609 	}
12610 
12611 	tx_data = &arvif->txmgmt.data[tx_compl_param->desc_id];
12612 	if (tx_data->m == NULL) {
12613 		printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
12614 		    sc->sc_dev.dv_xname, tx_compl_param->desc_id);
12615 		return;
12616 	}
12617 
12618 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
12619 	m_freem(tx_data->m);
12620 	tx_data->m = NULL;
12621 
12622 	ieee80211_release_node(ic, tx_data->ni);
12623 	tx_data->ni = NULL;
12624 
12625 	if (arvif->txmgmt.queued > 0)
12626 		arvif->txmgmt.queued--;
12627 
12628 	if (tx_compl_param->status != 0)
12629 		ifp->if_oerrors++;
12630 
12631 	if (arvif->txmgmt.queued < nitems(arvif->txmgmt.data) - 1) {
12632 		sc->qfullmsk &= ~(1U << QWZ_MGMT_QUEUE_ID);
12633 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
12634 			ifq_clr_oactive(&ifp->if_snd);
12635 			(*ifp->if_start)(ifp);
12636 		}
12637 	}
12638 }
12639 
12640 void
12641 qwz_mgmt_tx_compl_event(struct qwz_softc *sc, struct mbuf *m)
12642 {
12643 	struct wmi_mgmt_tx_compl_event tx_compl_param = { 0 };
12644 
12645 	if (qwz_pull_mgmt_tx_compl_param_tlv(sc, m, &tx_compl_param) != 0) {
12646 		printf("%s: failed to extract mgmt tx compl event\n",
12647 		    sc->sc_dev.dv_xname);
12648 		return;
12649 	}
12650 
12651 	qwz_wmi_process_mgmt_tx_comp(sc, &tx_compl_param);
12652 
12653 	DNPRINTF(QWZ_D_MGMT, "%s: event mgmt tx compl ev pdev_id %d, "
12654 	    "desc_id %d, status %d ack_rssi %d", __func__,
12655 	    tx_compl_param.pdev_id, tx_compl_param.desc_id,
12656 	    tx_compl_param.status, tx_compl_param.ack_rssi);
12657 }
12658 
12659 int
12660 qwz_pull_roam_ev(struct qwz_softc *sc, struct mbuf *m,
12661     struct wmi_roam_event *roam_ev)
12662 {
12663 	const void **tb;
12664 	const struct wmi_roam_event *ev;
12665 	int ret;
12666 
12667 	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12668 	if (tb == NULL) {
12669 		ret = ENOMEM;
12670 		printf("%s: failed to parse tlv: %d\n",
12671 		    sc->sc_dev.dv_xname, ret);
12672 		return ret;
12673 	}
12674 
12675 	ev = tb[WMI_TAG_ROAM_EVENT];
12676 	if (!ev) {
12677 		printf("%s: failed to fetch roam ev\n",
12678 		    sc->sc_dev.dv_xname);
12679 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12680 		return EPROTO;
12681 	}
12682 
12683 	roam_ev->vdev_id = ev->vdev_id;
12684 	roam_ev->reason = ev->reason;
12685 	roam_ev->rssi = ev->rssi;
12686 
12687 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12688 	return 0;
12689 }
12690 
12691 void
12692 qwz_mac_handle_beacon_miss(struct qwz_softc *sc, uint32_t vdev_id)
12693 {
12694 	struct ieee80211com *ic = &sc->sc_ic;
12695 
12696 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
12697 	    (ic->ic_state != IEEE80211_S_RUN))
12698 		return;
12699 
12700 	if (ic->ic_mgt_timer == 0) {
12701 		if (ic->ic_if.if_flags & IFF_DEBUG)
12702 			printf("%s: receiving no beacons from %s; checking if "
12703 			    "this AP is still responding to probe requests\n",
12704 			    sc->sc_dev.dv_xname,
12705 			    ether_sprintf(ic->ic_bss->ni_macaddr));
12706 		/*
12707 		 * Rather than go directly to scan state, try to send a
12708 		 * directed probe request first. If that fails then the
12709 		 * state machine will drop us into scanning after timing
12710 		 * out waiting for a probe response.
12711 		 */
12712 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
12713 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
12714 	}
12715 }
12716 
12717 void
12718 qwz_roam_event(struct qwz_softc *sc, struct mbuf *m)
12719 {
12720 	struct wmi_roam_event roam_ev = {};
12721 
12722 	if (qwz_pull_roam_ev(sc, m, &roam_ev) != 0) {
12723 		printf("%s: failed to extract roam event\n",
12724 		    sc->sc_dev.dv_xname);
12725 		return;
12726 	}
12727 
12728 	DNPRINTF(QWZ_D_WMI, "%s: event roam vdev %u reason 0x%08x rssi %d\n",
12729 	    __func__, roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
12730 
12731 	if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
12732 		return;
12733 
12734 	switch (roam_ev.reason) {
12735 	case WMI_ROAM_REASON_BEACON_MISS:
12736 		qwz_mac_handle_beacon_miss(sc, roam_ev.vdev_id);
12737 		break;
12738 	case WMI_ROAM_REASON_BETTER_AP:
12739 	case WMI_ROAM_REASON_LOW_RSSI:
12740 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
12741 	case WMI_ROAM_REASON_HO_FAILED:
12742 		break;
12743 	}
12744 }
12745 
12746 int
12747 qwz_pull_vdev_install_key_compl_ev(struct qwz_softc *sc, struct mbuf *m,
12748     struct wmi_vdev_install_key_complete_arg *arg)
12749 {
12750 	const void **tb;
12751 	const struct wmi_vdev_install_key_compl_event *ev;
12752 	int ret;
12753 
12754 	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12755 	if (tb == NULL) {
12756 		ret = ENOMEM;
12757 		printf("%s: failed to parse tlv: %d\n",
12758 		    sc->sc_dev.dv_xname, ret);
12759 		return ret;
12760 	}
12761 
12762 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
12763 	if (!ev) {
12764 		printf("%s: failed to fetch vdev install key compl ev\n",
12765 		    sc->sc_dev.dv_xname);
12766 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12767 		return EPROTO;
12768 	}
12769 
12770 	arg->vdev_id = ev->vdev_id;
12771 	arg->macaddr = ev->peer_macaddr.addr;
12772 	arg->key_idx = ev->key_idx;
12773 	arg->key_flags = ev->key_flags;
12774 	arg->status = ev->status;
12775 
12776 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12777 	return 0;
12778 }
12779 
/*
 * Handle a vdev install-key completion event. Records the outcome in
 * sc->install_key_status and wakes the thread sleeping on
 * sc->install_key_done (the synchronous key-install path).
 * The mbuf is not freed here; the caller owns it.
 */
void
qwz_vdev_install_key_compl_event(struct qwz_softc *sc, struct mbuf *m)
{
	struct wmi_vdev_install_key_complete_arg install_key_compl = { 0 };
	struct qwz_vif *arvif;

	if (qwz_pull_vdev_install_key_compl_ev(sc, m,
	    &install_key_compl) != 0) {
		printf("%s: failed to extract install key compl event\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	DNPRINTF(QWZ_D_WMI, "%s: event vdev install key ev idx %d flags %08x "
	    "macaddr %s status %d\n", __func__, install_key_compl.key_idx,
	    install_key_compl.key_flags,
	    ether_sprintf((u_char *)install_key_compl.macaddr),
	    install_key_compl.status);

	/* Validate the vdev id against our configured interfaces. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->vdev_id == install_key_compl.vdev_id)
			break;
	}
	if (!arvif) {
		printf("%s: invalid vdev id in install key compl ev %d\n",
		    sc->sc_dev.dv_xname, install_key_compl.vdev_id);
		return;
	}

	/* Assume success; overwritten below on failure. */
	sc->install_key_status = 0;

	if (install_key_compl.status !=
	    WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
		printf("%s: install key failed for %s status %d\n",
		    sc->sc_dev.dv_xname,
		    ether_sprintf((u_char *)install_key_compl.macaddr),
		    install_key_compl.status);
		sc->install_key_status = install_key_compl.status;
	}

	/* Set the flag before waking the sleeper so it observes it. */
	sc->install_key_done = 1;
	wakeup(&sc->install_key_done);
}
12823 
/*
 * Top-level WMI event dispatcher. Strips the WMI command header from
 * the mbuf, then routes the event to its handler based on the command
 * id. The mbuf is freed here after dispatch, with one exception:
 * WMI_MGMT_RX_EVENTID, whose handler takes ownership of the mbuf.
 */
void
qwz_wmi_tlv_op_rx(struct qwz_softc *sc, struct mbuf *m)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = mtod(m, struct wmi_cmd_hdr *);
	id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));

	/* Strip the WMI header so handlers see only the event payload. */
	m_adj(m, sizeof(struct wmi_cmd_hdr));

	switch (id) {
		/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		qwz_service_ready_event(sc, m);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		qwz_service_ready_ext_event(sc, m);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		qwz_service_ready_ext2_event(sc, m);
		break;
	case WMI_REG_CHAN_LIST_CC_EVENTID:
		qwz_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_ID);
		break;
	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
		qwz_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_EXT_ID);
		break;
	case WMI_READY_EVENTID:
		qwz_ready_event(sc, m);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		qwz_peer_delete_resp_event(sc, m);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		qwz_vdev_start_resp_event(sc, m);
		break;
#if 0
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath12k_bcn_tx_status_event(ab, skb);
		break;
#endif
	case WMI_VDEV_STOPPED_EVENTID:
		qwz_vdev_stopped_event(sc, m);
		break;
	case WMI_MGMT_RX_EVENTID:
		qwz_mgmt_rx_event(sc, m);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		qwz_mgmt_tx_compl_event(sc, m);
		break;
	case WMI_SCAN_EVENTID:
		qwz_scan_event(sc, m);
		break;
#if 0
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath12k_peer_sta_kickout_event(ab, skb);
		break;
#endif
	case WMI_ROAM_EVENTID:
		qwz_roam_event(sc, m);
		break;
	case WMI_CHAN_INFO_EVENTID:
		qwz_chan_info_event(sc, m);
		break;
#if 0
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath12k_pdev_bss_chan_info_event(ab, skb);
		break;
#endif
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		qwz_vdev_install_key_compl_event(sc, m);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		qwz_service_available_event(sc, m);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		qwz_peer_assoc_conf_event(sc, m);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		/* ignore */
		break;
#if 0
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_UTF_EVENTID:
		ath12k_tm_wmi_event(ab, id, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath12k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath12k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath12k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
		ath12k_wmi_obss_color_collision_event(ab, skb);
		break;
	case WMI_TWT_ADD_DIALOG_EVENTID:
		ath12k_wmi_twt_add_dialog_event(ab, skb);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath12k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath12k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	case WMI_11D_NEW_COUNTRY_EVENTID:
		ath12k_reg_11d_new_cc_event(ab, skb);
		break;
#endif
	case WMI_DIAG_EVENTID:
		/* Ignore. These events trigger tracepoints in Linux. */
		break;
#if 0
	case WMI_PEER_STA_PS_STATECHG_EVENTID:
		ath12k_wmi_event_peer_sta_ps_state_chg(ab, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath12k_wmi_gtk_offload_status_event(ab, skb);
		break;
#endif
	case WMI_UPDATE_FW_MEM_DUMP_EVENTID:
		DPRINTF("%s: 0x%x: update fw mem dump\n", __func__, id);
		break;
	case WMI_PDEV_SET_HW_MODE_RESP_EVENTID:
		DPRINTF("%s: 0x%x: set HW mode response event\n", __func__, id);
		break;
	case WMI_WLAN_FREQ_AVOID_EVENTID:
		DPRINTF("%s: 0x%x: wlan freq avoid event\n", __func__, id);
		break;
	default:
		DPRINTF("%s: unsupported event id 0x%x\n", __func__, id);
		break;
	}

	/* All handlers above (except mgmt rx) borrowed the mbuf. */
	m_freem(m);
}
12975 
12976 void
12977 qwz_wmi_op_ep_tx_credits(struct qwz_softc *sc)
12978 {
12979 	struct qwz_htc *htc = &sc->htc;
12980 	int i;
12981 
12982 	/* try to send pending beacons first. they take priority */
12983 	sc->wmi.tx_credits = 1;
12984 	wakeup(&sc->wmi.tx_credits);
12985 
12986 	if (!sc->hw_params.credit_flow)
12987 		return;
12988 
12989 	for (i = ATH12K_HTC_EP_0; i < ATH12K_HTC_EP_COUNT; i++) {
12990 		struct qwz_htc_ep *ep = &htc->endpoint[i];
12991 		if (ep->tx_credit_flow_enabled && ep->tx_credits > 0)
12992 			wakeup(&ep->tx_credits);
12993 	}
12994 }
12995 
12996 int
12997 qwz_connect_pdev_htc_service(struct qwz_softc *sc, uint32_t pdev_idx)
12998 {
12999 	int status;
13000 	uint32_t svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
13001 	    ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
13002 	    ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
13003 	struct qwz_htc_svc_conn_req conn_req;
13004 	struct qwz_htc_svc_conn_resp conn_resp;
13005 
13006 	memset(&conn_req, 0, sizeof(conn_req));
13007 	memset(&conn_resp, 0, sizeof(conn_resp));
13008 
13009 	/* these fields are the same for all service endpoints */
13010 	conn_req.ep_ops.ep_tx_complete = qwz_wmi_htc_tx_complete;
13011 	conn_req.ep_ops.ep_rx_complete = qwz_wmi_tlv_op_rx;
13012 	conn_req.ep_ops.ep_tx_credits = qwz_wmi_op_ep_tx_credits;
13013 
13014 	/* connect to control service */
13015 	conn_req.service_id = svc_id[pdev_idx];
13016 
13017 	status = qwz_htc_connect_service(&sc->htc, &conn_req, &conn_resp);
13018 	if (status) {
13019 		printf("%s: failed to connect to WMI CONTROL service "
13020 		    "status: %d\n", sc->sc_dev.dv_xname, status);
13021 		return status;
13022 	}
13023 
13024 	sc->wmi.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
13025 	sc->wmi.wmi[pdev_idx].eid = conn_resp.eid;
13026 	sc->wmi.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
13027 	sc->wmi.wmi[pdev_idx].tx_ce_desc = 0;
13028 
13029 	return 0;
13030 }
13031 
13032 int
13033 qwz_wmi_connect(struct qwz_softc *sc)
13034 {
13035 	uint32_t i;
13036 	uint8_t wmi_ep_count;
13037 
13038 	wmi_ep_count = sc->htc.wmi_ep_count;
13039 	if (wmi_ep_count > sc->hw_params.max_radios)
13040 		return -1;
13041 
13042 	for (i = 0; i < wmi_ep_count; i++)
13043 		qwz_connect_pdev_htc_service(sc, i);
13044 
13045 	return 0;
13046 }
13047 
13048 void
13049 qwz_htc_reset_endpoint_states(struct qwz_htc *htc)
13050 {
13051 	struct qwz_htc_ep *ep;
13052 	int i;
13053 
13054 	for (i = ATH12K_HTC_EP_0; i < ATH12K_HTC_EP_COUNT; i++) {
13055 		ep = &htc->endpoint[i];
13056 		ep->service_id = ATH12K_HTC_SVC_ID_UNUSED;
13057 		ep->max_ep_message_len = 0;
13058 		ep->max_tx_queue_depth = 0;
13059 		ep->eid = i;
13060 		ep->htc = htc;
13061 		ep->tx_credit_flow_enabled = 1;
13062 	}
13063 }
13064 
/*
 * HTC control endpoint TX-completion callback. Currently a stub that
 * logs and frees the mbuf; no completion bookkeeping is performed.
 */
void
qwz_htc_control_tx_complete(struct qwz_softc *sc, struct mbuf *m)
{
	printf("%s: not implemented\n", __func__);

	m_freem(m);
}
13072 
/*
 * HTC control endpoint RX-completion callback. Currently a stub that
 * logs and frees the mbuf; incoming control messages are discarded.
 */
void
qwz_htc_control_rx_complete(struct qwz_softc *sc, struct mbuf *m)
{
	printf("%s: not implemented\n", __func__);

	m_freem(m);
}
13080 
13081 uint8_t
13082 qwz_htc_get_credit_allocation(struct qwz_htc *htc, uint16_t service_id)
13083 {
13084 	uint8_t i, allocation = 0;
13085 
13086 	for (i = 0; i < ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
13087 		if (htc->service_alloc_table[i].service_id == service_id) {
13088 			allocation =
13089 			    htc->service_alloc_table[i].credit_allocation;
13090 		}
13091 	}
13092 
13093 	return allocation;
13094 }
13095 
13096 const char *
13097 qwz_htc_service_name(enum ath12k_htc_svc_id id)
13098 {
13099 	switch (id) {
13100 	case ATH12K_HTC_SVC_ID_RESERVED:
13101 		return "Reserved";
13102 	case ATH12K_HTC_SVC_ID_RSVD_CTRL:
13103 		return "Control";
13104 	case ATH12K_HTC_SVC_ID_WMI_CONTROL:
13105 		return "WMI";
13106 	case ATH12K_HTC_SVC_ID_WMI_DATA_BE:
13107 		return "DATA BE";
13108 	case ATH12K_HTC_SVC_ID_WMI_DATA_BK:
13109 		return "DATA BK";
13110 	case ATH12K_HTC_SVC_ID_WMI_DATA_VI:
13111 		return "DATA VI";
13112 	case ATH12K_HTC_SVC_ID_WMI_DATA_VO:
13113 		return "DATA VO";
13114 	case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1:
13115 		return "WMI MAC1";
13116 	case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2:
13117 		return "WMI MAC2";
13118 	case ATH12K_HTC_SVC_ID_NMI_CONTROL:
13119 		return "NMI Control";
13120 	case ATH12K_HTC_SVC_ID_NMI_DATA:
13121 		return "NMI Data";
13122 	case ATH12K_HTC_SVC_ID_HTT_DATA_MSG:
13123 		return "HTT Data";
13124 	case ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS:
13125 		return "RAW";
13126 	case ATH12K_HTC_SVC_ID_IPA_TX:
13127 		return "IPA TX";
13128 	case ATH12K_HTC_SVC_ID_PKT_LOG:
13129 		return "PKT LOG";
13130 	case ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG:
13131 		return "WMI DIAG";
13132 	}
13133 
13134 	return "Unknown";
13135 }
13136 
13137 struct mbuf *
13138 qwz_htc_alloc_mbuf(size_t payload_size)
13139 {
13140 	struct mbuf *m;
13141 	size_t size = sizeof(struct ath12k_htc_hdr) + payload_size;
13142 
13143 	m = m_gethdr(M_DONTWAIT, MT_DATA);
13144 	if (m == NULL)
13145 		return NULL;
13146 
13147 	if (size <= MCLBYTES)
13148 		MCLGET(m, M_DONTWAIT);
13149 	else
13150 		MCLGETL(m, M_DONTWAIT, size);
13151 	if ((m->m_flags & M_EXT) == 0) {
13152 		m_freem(m);
13153 		return NULL;
13154 	}
13155 
13156 	m->m_len = m->m_pkthdr.len = size;
13157 	memset(mtod(m, void *), 0, size);
13158 
13159 	return m;
13160 }
13161 
13162 struct mbuf *
13163 qwz_htc_build_tx_ctrl_mbuf(void)
13164 {
13165 	size_t size;
13166 
13167 	size = ATH12K_HTC_CONTROL_BUFFER_SIZE - sizeof(struct ath12k_htc_hdr);
13168 
13169 	return qwz_htc_alloc_mbuf(size);
13170 }
13171 
/*
 * Fill in the HTC header at the front of an outgoing mbuf: endpoint id,
 * payload length, the credit-update flag when flow control is enabled,
 * and the endpoint's monotonically increasing sequence number.
 * The caller must have reserved sizeof(struct ath12k_htc_hdr) at the
 * head of the mbuf (qwz_htc_alloc_mbuf() does this).
 */
void
qwz_htc_prepare_tx_mbuf(struct qwz_htc_ep *ep, struct mbuf *m)
{
	struct ath12k_htc_hdr *hdr;

	hdr = mtod(m, struct ath12k_htc_hdr *);

	memset(hdr, 0, sizeof(*hdr));
	/* Pack endpoint id and payload length into htc_info. */
	hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
	    FIELD_PREP(HTC_HDR_PAYLOADLEN, (m->m_pkthdr.len - sizeof(*hdr)));

	/* Ask firmware to report credit consumption for this endpoint. */
	if (ep->tx_credit_flow_enabled)
		hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS,
		    ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE);
#ifdef notyet
	spin_lock_bh(&ep->htc->tx_lock);
#endif
	/* Per-endpoint sequence number; wraps naturally. */
	hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
#ifdef notyet
	spin_unlock_bh(&ep->htc->tx_lock);
#endif
}
13194 
13195 int
13196 qwz_htc_send(struct qwz_htc *htc, enum ath12k_htc_ep_id eid, struct mbuf *m)
13197 {
13198 	struct qwz_htc_ep *ep = &htc->endpoint[eid];
13199 	struct qwz_softc *sc = htc->sc;
13200 	struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[ep->ul_pipe_id];
13201 	void *ctx;
13202 	struct qwz_tx_data *tx_data;
13203 	int credits = 0;
13204 	int ret;
13205 	int credit_flow_enabled = (sc->hw_params.credit_flow &&
13206 	    ep->tx_credit_flow_enabled);
13207 
13208 	if (eid >= ATH12K_HTC_EP_COUNT) {
13209 		printf("%s: Invalid endpoint id: %d\n", __func__, eid);
13210 		return ENOENT;
13211 	}
13212 
13213 	if (credit_flow_enabled) {
13214 		credits = howmany(m->m_pkthdr.len, htc->target_credit_size);
13215 #ifdef notyet
13216 		spin_lock_bh(&htc->tx_lock);
13217 #endif
13218 		if (ep->tx_credits < credits) {
13219 			DNPRINTF(QWZ_D_HTC,
13220 			    "%s: ep %d insufficient credits required %d "
13221 			    "total %d\n", __func__, eid, credits,
13222 			    ep->tx_credits);
13223 #ifdef notyet
13224 			spin_unlock_bh(&htc->tx_lock);
13225 #endif
13226 			return EAGAIN;
13227 		}
13228 		ep->tx_credits -= credits;
13229 		DNPRINTF(QWZ_D_HTC, "%s: ep %d credits consumed %d total %d\n",
13230 		    __func__, eid, credits, ep->tx_credits);
13231 #ifdef notyet
13232 		spin_unlock_bh(&htc->tx_lock);
13233 #endif
13234 	}
13235 
13236 	qwz_htc_prepare_tx_mbuf(ep, m);
13237 
13238 	ctx = pipe->src_ring->per_transfer_context[pipe->src_ring->write_index];
13239 	tx_data = (struct qwz_tx_data *)ctx;
13240 
13241 	tx_data->eid = eid;
13242 	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
13243 	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
13244 	if (ret) {
13245 		printf("%s: can't map mbuf (error %d)\n",
13246 		    sc->sc_dev.dv_xname, ret);
13247 		if (ret != ENOBUFS)
13248 			m_freem(m);
13249 		goto err_credits;
13250 	}
13251 
13252 	DNPRINTF(QWZ_D_HTC, "%s: tx mbuf %p eid %d paddr %lx\n",
13253 	    __func__, m, tx_data->eid, tx_data->map->dm_segs[0].ds_addr);
13254 #ifdef QWZ_DEBUG
13255 	{
13256 		int i;
13257 		uint8_t *p = mtod(m, uint8_t *);
13258 		DNPRINTF(QWZ_D_HTC, "%s message buffer:", __func__);
13259 		for (i = 0; i < m->m_pkthdr.len; i++) {
13260 			DNPRINTF(QWZ_D_HTC, "%s %.2x",
13261 			    i % 16 == 0 ? "\n" : "", p[i]);
13262 		}
13263 		if (i % 16)
13264 			DNPRINTF(QWZ_D_HTC, "\n");
13265 	}
13266 #endif
13267 	ret = qwz_ce_send(htc->sc, m, ep->ul_pipe_id, ep->eid);
13268 	if (ret)
13269 		goto err_unmap;
13270 
13271 	return 0;
13272 
13273 err_unmap:
13274 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
13275 err_credits:
13276 	if (credit_flow_enabled) {
13277 #ifdef notyet
13278 		spin_lock_bh(&htc->tx_lock);
13279 #endif
13280 		ep->tx_credits += credits;
13281 		DNPRINTF(QWZ_D_HTC, "%s: ep %d credits reverted %d total %d\n",
13282 		    __func__, eid, credits, ep->tx_credits);
13283 #ifdef notyet
13284 		spin_unlock_bh(&htc->tx_lock);
13285 #endif
13286 
13287 		if (ep->ep_ops.ep_tx_credits)
13288 			ep->ep_ops.ep_tx_credits(htc->sc);
13289 	}
13290 	return ret;
13291 }
13292 
13293 int
13294 qwz_htc_connect_service(struct qwz_htc *htc,
13295     struct qwz_htc_svc_conn_req *conn_req,
13296     struct qwz_htc_svc_conn_resp *conn_resp)
13297 {
13298 	struct qwz_softc *sc = htc->sc;
13299 	struct ath12k_htc_conn_svc *req_msg;
13300 	struct ath12k_htc_conn_svc_resp resp_msg_dummy;
13301 	struct ath12k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
13302 	enum ath12k_htc_ep_id assigned_eid = ATH12K_HTC_EP_COUNT;
13303 	struct qwz_htc_ep *ep;
13304 	struct mbuf *m;
13305 	unsigned int max_msg_size = 0;
13306 	int length, status = 0;
13307 	int disable_credit_flow_ctrl = 0;
13308 	uint16_t flags = 0;
13309 	uint16_t message_id, service_id;
13310 	uint8_t tx_alloc = 0;
13311 
13312 	/* special case for HTC pseudo control service */
13313 	if (conn_req->service_id == ATH12K_HTC_SVC_ID_RSVD_CTRL) {
13314 		disable_credit_flow_ctrl = 1;
13315 		assigned_eid = ATH12K_HTC_EP_0;
13316 		max_msg_size = ATH12K_HTC_MAX_CTRL_MSG_LEN;
13317 		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
13318 		goto setup;
13319 	}
13320 
13321 	tx_alloc = qwz_htc_get_credit_allocation(htc, conn_req->service_id);
13322 	if (!tx_alloc)
13323 		DNPRINTF(QWZ_D_HTC,
13324 		    "%s: htc service %s does not allocate target credits\n",
13325 		    sc->sc_dev.dv_xname,
13326 		    qwz_htc_service_name(conn_req->service_id));
13327 
13328 	m = qwz_htc_build_tx_ctrl_mbuf();
13329 	if (!m) {
13330 		printf("%s: Failed to allocate HTC packet\n",
13331 		    sc->sc_dev.dv_xname);
13332 		return ENOMEM;
13333 	}
13334 
13335 	length = sizeof(*req_msg);
13336 	m->m_len = m->m_pkthdr.len = sizeof(struct ath12k_htc_hdr) + length;
13337 
13338 	req_msg = (struct ath12k_htc_conn_svc *)(mtod(m, uint8_t *) +
13339 	    sizeof(struct ath12k_htc_hdr));
13340 	memset(req_msg, 0, length);
13341 	req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
13342 	    ATH12K_HTC_MSG_CONNECT_SERVICE_ID);
13343 
13344 	flags |= FIELD_PREP(ATH12K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
13345 
13346 	/* Only enable credit flow control for WMI ctrl service */
13347 	if (!(conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL ||
13348 	      conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
13349 	      conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
13350 		flags |= ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
13351 		disable_credit_flow_ctrl = 1;
13352 	}
13353 
13354 	if (!sc->hw_params.credit_flow) {
13355 		flags |= ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
13356 		disable_credit_flow_ctrl = 1;
13357 	}
13358 
13359 	req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
13360 	req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
13361 	    conn_req->service_id);
13362 
13363 	sc->ctl_resp = 0;
13364 
13365 	status = qwz_htc_send(htc, ATH12K_HTC_EP_0, m);
13366 	if (status) {
13367 		if (status != ENOBUFS)
13368 			m_freem(m);
13369 		return status;
13370 	}
13371 
13372 	while (!sc->ctl_resp) {
13373 		int ret = tsleep_nsec(&sc->ctl_resp, 0, "qwzhtcinit",
13374 		    SEC_TO_NSEC(1));
13375 		if (ret) {
13376 			printf("%s: Service connect timeout\n",
13377 			    sc->sc_dev.dv_xname);
13378 			return ret;
13379 		}
13380 	}
13381 
13382 	/* we controlled the buffer creation, it's aligned */
13383 	resp_msg = (struct ath12k_htc_conn_svc_resp *)htc->control_resp_buffer;
13384 	message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
13385 	service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
13386 			       resp_msg->msg_svc_id);
13387 	if ((message_id != ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
13388 	    (htc->control_resp_len < sizeof(*resp_msg))) {
13389 		printf("%s: Invalid resp message ID 0x%x", __func__,
13390 		    message_id);
13391 		return EPROTO;
13392 	}
13393 
13394 	DNPRINTF(QWZ_D_HTC, "%s: service %s connect response status 0x%lx "
13395 	    "assigned ep 0x%lx\n", __func__, qwz_htc_service_name(service_id),
13396 	    FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
13397 	    FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
13398 
13399 	conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
13400 	    resp_msg->flags_len);
13401 
13402 	/* check response status */
13403 	if (conn_resp->connect_resp_code !=
13404 	    ATH12K_HTC_CONN_SVC_STATUS_SUCCESS) {
13405 		printf("%s: HTC Service %s connect request failed: 0x%x)\n",
13406 		    __func__, qwz_htc_service_name(service_id),
13407 		    conn_resp->connect_resp_code);
13408 		return EPROTO;
13409 	}
13410 
13411 	assigned_eid = (enum ath12k_htc_ep_id)FIELD_GET(
13412 	    HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len);
13413 
13414 	max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
13415 	    resp_msg->flags_len);
13416 setup:
13417 	if (assigned_eid >= ATH12K_HTC_EP_COUNT)
13418 		return EPROTO;
13419 
13420 	if (max_msg_size == 0)
13421 		return EPROTO;
13422 
13423 	ep = &htc->endpoint[assigned_eid];
13424 	ep->eid = assigned_eid;
13425 
13426 	if (ep->service_id != ATH12K_HTC_SVC_ID_UNUSED)
13427 		return EPROTO;
13428 
13429 	/* return assigned endpoint to caller */
13430 	conn_resp->eid = assigned_eid;
13431 	conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
13432 	    resp_msg->flags_len);
13433 
13434 	/* setup the endpoint */
13435 	ep->service_id = conn_req->service_id;
13436 	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
13437 	ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
13438 	    resp_msg->flags_len);
13439 	ep->tx_credits = tx_alloc;
13440 
13441 	/* copy all the callbacks */
13442 	ep->ep_ops = conn_req->ep_ops;
13443 
13444 	status = sc->ops.map_service_to_pipe(htc->sc, ep->service_id,
13445 	    &ep->ul_pipe_id, &ep->dl_pipe_id);
13446 	if (status)
13447 		return status;
13448 
13449 	DNPRINTF(QWZ_D_HTC,
13450 	    "%s: htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
13451 	    __func__, qwz_htc_service_name(ep->service_id), ep->ul_pipe_id,
13452 	    ep->dl_pipe_id, ep->eid);
13453 
13454 	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
13455 		ep->tx_credit_flow_enabled = 0;
13456 		DNPRINTF(QWZ_D_HTC,
13457 		    "%s: htc service '%s' eid %d tx flow control disabled\n",
13458 		    __func__, qwz_htc_service_name(ep->service_id),
13459 		    assigned_eid);
13460 	}
13461 
13462 	return status;
13463 }
13464 
13465 int
13466 qwz_htc_start(struct qwz_htc *htc)
13467 {
13468 	struct mbuf *m;
13469 	int status = 0;
13470 	struct qwz_softc *sc = htc->sc;
13471 	struct ath12k_htc_setup_complete_extended *msg;
13472 
13473 	m = qwz_htc_build_tx_ctrl_mbuf();
13474 	if (!m)
13475 		return ENOMEM;
13476 
13477 	m->m_len = m->m_pkthdr.len = sizeof(struct ath12k_htc_hdr) +
13478 	    sizeof(*msg);
13479 
13480 	msg = (struct ath12k_htc_setup_complete_extended *)(mtod(m, uint8_t *) +
13481 	    sizeof(struct ath12k_htc_hdr));
13482 	msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
13483 	    ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID);
13484 
13485 	if (sc->hw_params.credit_flow)
13486 		DNPRINTF(QWZ_D_HTC, "%s: using tx credit flow control\n",
13487 		    __func__);
13488 	else
13489 		msg->flags |= ATH12K_GLOBAL_DISABLE_CREDIT_FLOW;
13490 
13491 	status = qwz_htc_send(htc, ATH12K_HTC_EP_0, m);
13492 	if (status) {
13493 		m_freem(m);
13494 		return status;
13495 	}
13496 
13497 	return 0;
13498 }
13499 
13500 int
13501 qwz_htc_init(struct qwz_softc *sc)
13502 {
13503 	struct qwz_htc *htc = &sc->htc;
13504 	struct qwz_htc_svc_conn_req conn_req;
13505 	struct qwz_htc_svc_conn_resp conn_resp;
13506 	int ret;
13507 #ifdef notyet
13508 	spin_lock_init(&htc->tx_lock);
13509 #endif
13510 	qwz_htc_reset_endpoint_states(htc);
13511 
13512 	htc->sc = sc;
13513 
13514 	switch (sc->wmi.preferred_hw_mode) {
13515 	case WMI_HOST_HW_MODE_SINGLE:
13516 		htc->wmi_ep_count = 1;
13517 		break;
13518 	case WMI_HOST_HW_MODE_DBS:
13519 	case WMI_HOST_HW_MODE_DBS_OR_SBS:
13520 		htc->wmi_ep_count = 2;
13521 		break;
13522 	case WMI_HOST_HW_MODE_DBS_SBS:
13523 		htc->wmi_ep_count = 3;
13524 		break;
13525 	default:
13526 		htc->wmi_ep_count = sc->hw_params.max_radios;
13527 		break;
13528 	}
13529 
13530 	/* setup our pseudo HTC control endpoint connection */
13531 	memset(&conn_req, 0, sizeof(conn_req));
13532 	memset(&conn_resp, 0, sizeof(conn_resp));
13533 	conn_req.ep_ops.ep_tx_complete = qwz_htc_control_tx_complete;
13534 	conn_req.ep_ops.ep_rx_complete = qwz_htc_control_rx_complete;
13535 	conn_req.max_send_queue_depth = ATH12K_NUM_CONTROL_TX_BUFFERS;
13536 	conn_req.service_id = ATH12K_HTC_SVC_ID_RSVD_CTRL;
13537 
13538 	/* connect fake service */
13539 	ret = qwz_htc_connect_service(htc, &conn_req, &conn_resp);
13540 	if (ret) {
13541 		printf("%s: could not connect to htc service (%d)\n",
13542 		    sc->sc_dev.dv_xname, ret);
13543 		return ret;
13544 	}
13545 
13546 	return 0;
13547 }
13548 
13549 int
13550 qwz_htc_setup_target_buffer_assignments(struct qwz_htc *htc)
13551 {
13552 	struct qwz_htc_svc_tx_credits *serv_entry;
13553 	uint32_t svc_id[] = {
13554 		ATH12K_HTC_SVC_ID_WMI_CONTROL,
13555 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
13556 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2,
13557 	};
13558 	int i, credits;
13559 
13560 	credits =  htc->total_transmit_credits;
13561 	serv_entry = htc->service_alloc_table;
13562 
13563 	if ((htc->wmi_ep_count == 0) ||
13564 	    (htc->wmi_ep_count > nitems(svc_id)))
13565 		return EINVAL;
13566 
13567 	/* Divide credits among number of endpoints for WMI */
13568 	credits = credits / htc->wmi_ep_count;
13569 	for (i = 0; i < htc->wmi_ep_count; i++) {
13570 		serv_entry[i].service_id = svc_id[i];
13571 		serv_entry[i].credit_allocation = credits;
13572 	}
13573 
13574 	return 0;
13575 }
13576 
/*
 * Wait for the target's initial HTC ready message on the control
 * endpoint and record the advertised transmit credit count and credit
 * size.  If no wakeup arrives within one second, fall back to manually
 * servicing the copy engines once (interrupts may not be working yet).
 * Returns 0 on success or an errno value.
 */
int
qwz_htc_wait_target(struct qwz_softc *sc)
{
	struct qwz_htc *htc = &sc->htc;
	int polling = 0, ret;
	uint16_t i;
	struct ath12k_htc_ready *ready;
	uint16_t message_id;
	uint16_t credit_count;
	uint16_t credit_size;

	sc->ctl_resp = 0;
	while (!sc->ctl_resp) {
		/* The control-endpoint rx path posts the wakeup. */
		ret = tsleep_nsec(&sc->ctl_resp, 0, "qwzhtcinit",
		    SEC_TO_NSEC(1));
		if (ret) {
			if (ret != EWOULDBLOCK)
				return ret;

			/* Second timeout (after polling) is fatal. */
			if (polling) {
				printf("%s: failed to receive control response "
				    "completion\n", sc->sc_dev.dv_xname);
				return ret;
			}

			printf("%s: failed to receive control response "
			    "completion, polling...\n", sc->sc_dev.dv_xname);
			polling = 1;

			/* Drive all copy engines by hand and retry. */
			for (i = 0; i < sc->hw_params.ce_count; i++)
				qwz_ce_per_engine_service(sc, i);
		}
	}

	/* Validate the response length before reading any fields. */
	if (htc->control_resp_len < sizeof(*ready)) {
		printf("%s: Invalid HTC ready msg len:%d\n", __func__,
		    htc->control_resp_len);
		return EINVAL;
	}

	ready = (struct ath12k_htc_ready *)htc->control_resp_buffer;
	message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
	credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
	    ready->id_credit_count);
	credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);

	if (message_id != ATH12K_HTC_MSG_READY_ID) {
		printf("%s: Invalid HTC ready msg: 0x%x\n", __func__,
		    message_id);
		return EINVAL;
	}

	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	DNPRINTF(QWZ_D_HTC, "%s: target ready total_transmit_credits %d "
	    "target_credit_size %d\n", __func__,
	    htc->total_transmit_credits, htc->target_credit_size);

	/* Both values must be non-zero for credit accounting to work. */
	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		printf("%s: Invalid credit size received\n", __func__);
		return EINVAL;
	}

	qwz_htc_setup_target_buffer_assignments(htc);

	return 0;
}
13646 
/* HTT tx completion callback: no status to process, just release the mbuf. */
void
qwz_dp_htt_htc_tx_complete(struct qwz_softc *sc, struct mbuf *m)
{
	m_freem(m);
}
13653 
13654 static inline void
13655 qwz_dp_get_mac_addr(uint32_t addr_l32, uint16_t addr_h16, uint8_t *addr)
13656 {
13657 #if 0 /* Not needed on OpenBSD? We do swapping in sofware... */
13658 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
13659 		addr_l32 = swab32(addr_l32);
13660 		addr_h16 = swab16(addr_h16);
13661 	}
13662 #endif
13663 	uint32_t val32;
13664 	uint16_t val16;
13665 
13666 	val32 = le32toh(addr_l32);
13667 	memcpy(addr, &val32, 4);
13668 	val16 = le16toh(addr_h16);
13669 	memcpy(addr + 4, &val16, IEEE80211_ADDR_LEN - 4);
13670 }
13671 
/*
 * Handle an HTT peer-map event from firmware: record the firmware's
 * peer/vdev identifiers in the net80211 node matching the given MAC
 * address and wake up any thread waiting for the mapping to complete.
 * Events for MAC addresses without a matching node are ignored.
 */
void
qwz_peer_map_event(struct qwz_softc *sc, uint8_t vdev_id, uint16_t peer_id,
    uint8_t *mac_addr, uint16_t ast_hash, uint16_t hw_peer_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct qwz_node *nq;
	struct ath12k_peer *peer;
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
	ni = ieee80211_find_node(ic, mac_addr);
	if (ni == NULL)
		return;
	nq = (struct qwz_node *)ni;
	peer = &nq->peer;

	/* Cache the firmware's identifiers in our per-node peer state. */
	peer->vdev_id = vdev_id;
	peer->peer_id = peer_id;
	peer->ast_hash = ast_hash;
	peer->hw_peer_id = hw_peer_id;
#if 0
	ether_addr_copy(peer->addr, mac_addr);
	list_add(&peer->list, &ab->peers);
#endif
	/* Notify waiters that the peer mapping has arrived. */
	sc->peer_mapped = 1;
	wakeup(&sc->peer_mapped);

	DNPRINTF(QWZ_D_HTT, "%s: peer map vdev %d peer %s id %d\n",
	    __func__, vdev_id, ether_sprintf(mac_addr), peer_id);
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
}
13706 
13707 struct ieee80211_node *
13708 qwz_peer_find_by_id(struct qwz_softc *sc, uint16_t peer_id)
13709 {
13710 	struct ieee80211com *ic = &sc->sc_ic;
13711 	struct ieee80211_node *ni = NULL;
13712 	int s;
13713 
13714 	s = splnet();
13715 	RBT_FOREACH(ni, ieee80211_tree, &ic->ic_tree) {
13716 		struct qwz_node *nq = (struct qwz_node *)ni;
13717 		if (nq->peer.peer_id == peer_id)
13718 			break;
13719 	}
13720 	splx(s);
13721 
13722 	return ni;
13723 }
13724 
13725 void
13726 qwz_peer_unmap_event(struct qwz_softc *sc, uint16_t peer_id)
13727 {
13728 	struct ieee80211_node *ni;
13729 #ifdef notyet
13730 	spin_lock_bh(&ab->base_lock);
13731 #endif
13732 	ni = qwz_peer_find_by_id(sc, peer_id);
13733 	if (!ni) {
13734 		printf("%s: peer-unmap-event: unknown peer id %d\n",
13735 		    sc->sc_dev.dv_xname, peer_id);
13736 		goto exit;
13737 	}
13738 
13739 	DNPRINTF(QWZ_D_HTT, "%s: peer unmap peer %s id %d\n",
13740 	    __func__, ether_sprintf(ni->ni_macaddr), peer_id);
13741 #if 0
13742 	list_del(&peer->list);
13743 	kfree(peer);
13744 #endif
13745 	sc->peer_mapped = 1;
13746 	wakeup(&sc->peer_mapped);
13747 exit:
13748 #ifdef notyet
13749 	spin_unlock_bh(&ab->base_lock);
13750 #endif
13751 	return;
13752 }
13753 
/*
 * Dispatch an HTT target-to-host message received on the HTT endpoint.
 * Handles version negotiation and peer map/unmap events; all other
 * message types are logged and dropped.  Consumes the mbuf.
 */
void
qwz_dp_htt_htc_t2h_msg_handler(struct qwz_softc *sc, struct mbuf *m)
{
	struct qwz_dp *dp = &sc->dp;
	struct htt_resp_msg *resp = mtod(m, struct htt_resp_msg *);
	/* The message type lives in the first word of every HTT message. */
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE,
	    *(uint32_t *)resp);
	uint16_t peer_id;
	uint8_t vdev_id;
	uint8_t mac_addr[IEEE80211_ADDR_LEN];
	uint16_t peer_mac_h16;
	uint16_t ast_hash;
	uint16_t hw_peer_id;

	DPRINTF("%s: dp_htt rx msg type: 0x%0x\n", __func__, type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		/* Record the target's HTT version and wake waiters. */
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
		    resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
		    resp->version_msg.version);
		dp->htt_tgt_version_received = 1;
		wakeup(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		/* Legacy map event: no AST hash or hw peer id provided. */
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
		    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
		    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
		    resp->peer_map_ev.info1);
		qwz_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
		    peer_mac_h16, mac_addr);
		qwz_peer_map_event(sc, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		/* Extended map event additionally carries the AST hash
		 * and hardware peer id. */
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
		    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
		    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
		    resp->peer_map_ev.info1);
		qwz_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
		    peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
		    resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		qwz_peer_map_event(sc, vdev_id, peer_id, mac_addr, ast_hash,
		    hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
		    resp->peer_unmap_ev.info);
		qwz_peer_unmap_event(sc, peer_id);
		break;
#if 0
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath12k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath12k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath12k_htt_backpressure_event_handler(ab, skb);
		break;
#endif
	default:
		printf("%s: htt event %d not handled\n", __func__, type);
		break;
	}

	m_freem(m);
}
13833 
13834 int
13835 qwz_dp_htt_connect(struct qwz_dp *dp)
13836 {
13837 	struct qwz_htc_svc_conn_req conn_req;
13838 	struct qwz_htc_svc_conn_resp conn_resp;
13839 	int status;
13840 
13841 	memset(&conn_req, 0, sizeof(conn_req));
13842 	memset(&conn_resp, 0, sizeof(conn_resp));
13843 
13844 	conn_req.ep_ops.ep_tx_complete = qwz_dp_htt_htc_tx_complete;
13845 	conn_req.ep_ops.ep_rx_complete = qwz_dp_htt_htc_t2h_msg_handler;
13846 
13847 	/* connect to control service */
13848 	conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
13849 
13850 	status = qwz_htc_connect_service(&dp->sc->htc, &conn_req, &conn_resp);
13851 
13852 	if (status)
13853 		return status;
13854 
13855 	dp->eid = conn_resp.eid;
13856 
13857 	return 0;
13858 }
13859 
13860 void
13861 qwz_dp_pdev_reo_cleanup(struct qwz_softc *sc)
13862 {
13863 	struct qwz_dp *dp = &sc->dp;
13864 	int i;
13865 
13866 	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
13867 		qwz_dp_srng_cleanup(sc, &dp->reo_dst_ring[i]);
13868 }
13869 
13870 int
13871 qwz_dp_pdev_reo_setup(struct qwz_softc *sc)
13872 {
13873 	struct qwz_dp *dp = &sc->dp;
13874 	int ret;
13875 	int i;
13876 
13877 	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
13878 		ret = qwz_dp_srng_setup(sc, &dp->reo_dst_ring[i],
13879 		    HAL_REO_DST, i, 0, DP_REO_DST_RING_SIZE);
13880 		if (ret) {
13881 			printf("%s: failed to setup reo_dst_ring\n", __func__);
13882 			qwz_dp_pdev_reo_cleanup(sc);
13883 			return ret;
13884 		}
13885 	}
13886 
13887 	return 0;
13888 }
13889 
13890 void
13891 qwz_dp_rx_pdev_srng_free(struct qwz_softc *sc, int mac_id)
13892 {
13893 	struct qwz_pdev_dp *dp = &sc->pdev_dp;
13894 	int i;
13895 
13896 	qwz_dp_srng_cleanup(sc, &dp->rx_refill_buf_ring.refill_buf_ring);
13897 
13898 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
13899 		if (sc->hw_params.rx_mac_buf_ring)
13900 			qwz_dp_srng_cleanup(sc, &dp->rx_mac_buf_ring[i]);
13901 
13902 		qwz_dp_srng_cleanup(sc, &dp->rxdma_err_dst_ring[i]);
13903 		qwz_dp_srng_cleanup(sc,
13904 		    &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
13905 	}
13906 
13907 	qwz_dp_srng_cleanup(sc, &dp->rxdma_mon_buf_ring.refill_buf_ring);
13908 }
13909 
/*
 * Allocate the per-pdev rx/tx monitor destination SRNGs.  The monitor
 * status refill rings and the rxdma1 monitor buf/dst/desc rings from
 * the Linux driver are not yet ported (kept under #if 0 below).
 * Returns 0 on success or an errno value.
 */
int
qwz_dp_rx_pdev_srng_alloc(struct qwz_softc *sc)
{
	struct qwz_pdev_dp *dp = &sc->pdev_dp;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	/* One monitor destination ring pair per rxdma instance. */
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rxdma_mon_dst_ring[i];
		ret = qwz_dp_srng_setup(sc, srng, HAL_RXDMA_MONITOR_DST, 0,
		    dp->mac_id + i, DP_RXDMA_MONITOR_DST_RING_SIZE);
		if (ret) {
			printf("%s: failed to setup "
			    "rxdma_mon_dst_ring %d\n",
			    sc->sc_dev.dv_xname, i);
			return ret;
		}

		srng = &dp->tx_mon_dst_ring[i];
		ret = qwz_dp_srng_setup(sc, srng, HAL_TX_MONITOR_DST, 0,
		    dp->mac_id + i, DP_TX_MONITOR_DEST_RING_SIZE);
		if (ret) {
			printf("%s: failed to setup "
			    "tx_mon_dst_ring %d\n",
			    sc->sc_dev.dv_xname, i);
			return ret;
		}
	}

#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = qwz_dp_srng_setup(sc, srng, HAL_RXDMA_MONITOR_STATUS, 0,
		    dp->mac_id + i, DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			printf("%s: failed to setup "
			    "rx_mon_status_refill_ring %d\n",
			    sc->sc_dev.dv_xname, i);
			return ret;
		}
	}
	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!sc->hw_params.rxdma1_enable) {
		timeout_set(&sc->mon_reap_timer, qwz_dp_service_mon_ring, sc);
		return 0;
	}

	ret = ath12k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath12k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath12k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}
#endif
	return 0;
}
13992 
13993 void
13994 qwz_dp_rxdma_buf_ring_free(struct qwz_softc *sc, struct dp_rxdma_ring *rx_ring)
13995 {
13996 	int i;
13997 
13998 	for (i = 0; i < rx_ring->bufs_max; i++) {
13999 		struct qwz_rx_data *rx_data = &rx_ring->rx_data[i];
14000 
14001 		if (rx_data->map == NULL)
14002 			continue;
14003 
14004 		if (rx_data->m) {
14005 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
14006 			m_free(rx_data->m);
14007 			rx_data->m = NULL;
14008 		}
14009 
14010 		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
14011 		rx_data->map = NULL;
14012 	}
14013 
14014 	free(rx_ring->rx_data, M_DEVBUF,
14015 	    sizeof(rx_ring->rx_data[0]) * rx_ring->bufs_max);
14016 	rx_ring->rx_data = NULL;
14017 	rx_ring->bufs_max = 0;
14018 	memset(rx_ring->freemap, 0xff, sizeof(rx_ring->freemap));
14019 }
14020 
14021 void
14022 qwz_dp_rxdma_pdev_buf_free(struct qwz_softc *sc, int mac_id)
14023 {
14024 	struct qwz_pdev_dp *dp = &sc->pdev_dp;
14025 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
14026 	int i;
14027 
14028 	qwz_dp_rxdma_buf_ring_free(sc, rx_ring);
14029 
14030 	rx_ring = &dp->rxdma_mon_buf_ring;
14031 	qwz_dp_rxdma_buf_ring_free(sc, rx_ring);
14032 
14033 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
14034 		rx_ring = &dp->rx_mon_status_refill_ring[i];
14035 		qwz_dp_rxdma_buf_ring_free(sc, rx_ring);
14036 	}
14037 }
14038 
14039 void
14040 qwz_hal_rx_buf_addr_info_set(void *desc, uint64_t paddr, uint32_t cookie,
14041     uint8_t manager)
14042 {
14043 	struct ath12k_buffer_addr *binfo = (struct ath12k_buffer_addr *)desc;
14044 	uint32_t paddr_lo, paddr_hi;
14045 
14046 	paddr_lo = paddr & 0xffffffff;
14047 	paddr_hi = paddr >> 32;
14048 	binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
14049 	binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
14050 	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
14051 	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
14052 }
14053 
14054 void
14055 qwz_hal_rx_buf_addr_info_get(void *desc, uint64_t *paddr, uint32_t *cookie,
14056     uint8_t *rbm)
14057 {
14058 	struct ath12k_buffer_addr *binfo = (struct ath12k_buffer_addr *)desc;
14059 
14060 	*paddr = (((uint64_t)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
14061 	    binfo->info1)) << 32) |
14062 	    FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
14063 	*cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
14064 	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
14065 }
14066 
14067 int
14068 qwz_next_free_rxbuf_idx(struct dp_rxdma_ring *rx_ring)
14069 {
14070 	int i, idx;
14071 
14072 	for (i = 0; i < nitems(rx_ring->freemap); i++) {
14073 		idx = ffs(rx_ring->freemap[i]);
14074 		if (idx > 0)
14075 			return ((idx - 1) + (i * 8));
14076 	}
14077 
14078 	return -1;
14079 }
14080 
/*
 * Refill a rxdma ring with freshly allocated, DMA-mapped mbuf clusters.
 * Each buffer is handed to hardware via a source ring descriptor that
 * carries the DMA address, a cookie encoding (mac_id, slot index), and
 * the return-buffer manager.  If req_entries is 0 and the ring is more
 * than 3/4 empty, the ring is topped up completely.
 * Returns 0 on success or ENOBUFS when allocation/mapping/ring space
 * runs out.
 */
int
qwz_dp_rxbufs_replenish(struct qwz_softc *sc, int mac_id,
    struct dp_rxdma_ring *rx_ring, int req_entries,
    enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	uint32_t *desc;
	struct mbuf *m;
	int num_free;
	int num_remain;
	int ret, idx;
	uint32_t cookie;
	uint64_t paddr;
	struct qwz_rx_data *rx_data;

	req_entries = MIN(req_entries, rx_ring->bufs_max);

	srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);

	/* Opportunistically top up when the ring is mostly empty. */
	num_free = qwz_hal_srng_src_num_free(sc, srng, 1);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = MIN(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		const size_t size = DP_RX_BUFFER_SIZE;

		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			goto fail_free_mbuf;

		if (size <= MCLBYTES)
			MCLGET(m, M_DONTWAIT);
		else
			MCLGETL(m, M_DONTWAIT, size);
		if ((m->m_flags & M_EXT) == 0)
			goto fail_free_mbuf;

		m->m_len = m->m_pkthdr.len = size;

		idx = qwz_next_free_rxbuf_idx(rx_ring);
		if (idx == -1)
			goto fail_free_mbuf;

		/* DMA maps are created lazily and reused across refills. */
		rx_data = &rx_ring->rx_data[idx];
		if (rx_data->map == NULL) {
			ret = bus_dmamap_create(sc->sc_dmat, size, 1,
			    size, 0, BUS_DMA_NOWAIT, &rx_data->map);
			if (ret)
				goto fail_free_mbuf;
		}

		ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (ret) {
			printf("%s: can't map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, ret);
			goto fail_free_mbuf;
		}

		desc = qwz_hal_srng_src_get_next_entry(sc, srng);
		if (!desc)
			goto fail_dma_unmap;

		/* Ownership of the mbuf moves to the ring slot. */
		rx_data->m = m;
		m = NULL;

		/* Cookie lets the rx path find the slot for this buffer. */
		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
		    FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, idx);

		clrbit(rx_ring->freemap, idx);
		num_remain--;

		paddr = rx_data->map->dm_segs[0].ds_addr;
		qwz_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	qwz_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return 0;

fail_dma_unmap:
	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
fail_free_mbuf:
	/*
	 * NOTE(review): m is NULL here when m_gethdr() failed; confirm
	 * m_free() tolerates NULL (m_freem() explicitly does).
	 */
	m_free(m);

	qwz_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ENOBUFS;
}
14181 
14182 int
14183 qwz_dp_rxdma_ring_buf_setup(struct qwz_softc *sc,
14184     struct dp_rxdma_ring *rx_ring, uint32_t ringtype)
14185 {
14186 	struct qwz_pdev_dp *dp = &sc->pdev_dp;
14187 	int num_entries;
14188 
14189 	num_entries = rx_ring->refill_buf_ring.size /
14190 	    qwz_hal_srng_get_entrysize(sc, ringtype);
14191 
14192 	KASSERT(rx_ring->rx_data == NULL);
14193 	rx_ring->rx_data = mallocarray(num_entries, sizeof(rx_ring->rx_data[0]),
14194 	    M_DEVBUF, M_NOWAIT | M_ZERO);
14195 	if (rx_ring->rx_data == NULL)
14196 		return ENOMEM;
14197 
14198 	rx_ring->bufs_max = num_entries;
14199 	memset(rx_ring->freemap, 0xff, sizeof(rx_ring->freemap));
14200 
14201 	return qwz_dp_rxbufs_replenish(sc, dp->mac_id, rx_ring, num_entries,
14202 	    sc->hw_params.hal_params->rx_buf_rbm);
14203 }
14204 
14205 int
14206 qwz_dp_rxdma_pdev_buf_setup(struct qwz_softc *sc)
14207 {
14208 	struct qwz_pdev_dp *dp = &sc->pdev_dp;
14209 	struct dp_rxdma_ring *rx_ring;
14210 	int ret;
14211 #if 0
14212 	int i;
14213 #endif
14214 
14215 	rx_ring = &dp->rx_refill_buf_ring;
14216 	ret = qwz_dp_rxdma_ring_buf_setup(sc, rx_ring, HAL_RXDMA_BUF);
14217 	if (ret)
14218 		return ret;
14219 
14220 	if (sc->hw_params.rxdma1_enable) {
14221 		rx_ring = &dp->rxdma_mon_buf_ring;
14222 		ret = qwz_dp_rxdma_ring_buf_setup(sc, rx_ring,
14223 		    HAL_RXDMA_MONITOR_BUF);
14224 		if (ret)
14225 			return ret;
14226 	}
14227 #if 0
14228 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
14229 		rx_ring = &dp->rx_mon_status_refill_ring[i];
14230 		ret = qwz_dp_rxdma_ring_buf_setup(sc, rx_ring,
14231 		    HAL_RXDMA_MONITOR_STATUS);
14232 		if (ret)
14233 			return ret;
14234 	}
14235 #endif
14236 	return 0;
14237 }
14238 
/* Release all per-pdev rx state: rings first, then the buffers. */
void
qwz_dp_rx_pdev_free(struct qwz_softc *sc, int mac_id)
{
	qwz_dp_rx_pdev_srng_free(sc, mac_id);
	qwz_dp_rxdma_pdev_buf_free(sc, mac_id);
}
14245 
14246 bus_addr_t
14247 qwz_hal_srng_get_hp_addr(struct qwz_softc *sc, struct hal_srng *srng)
14248 {
14249 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
14250 		return 0;
14251 
14252 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
14253 		return sc->hal.wrp.paddr +
14254 		    ((unsigned long)srng->u.src_ring.hp_addr -
14255 		    (unsigned long)sc->hal.wrp.vaddr);
14256 	} else {
14257 		return sc->hal.rdp.paddr +
14258 		    ((unsigned long)srng->u.dst_ring.hp_addr -
14259 		    (unsigned long)sc->hal.rdp.vaddr);
14260 	}
14261 }
14262 
14263 bus_addr_t
14264 qwz_hal_srng_get_tp_addr(struct qwz_softc *sc, struct hal_srng *srng)
14265 {
14266 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
14267 		return 0;
14268 
14269 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
14270 		return sc->hal.rdp.paddr +
14271 		    ((unsigned long)srng->u.src_ring.tp_addr -
14272 		    (unsigned long)sc->hal.rdp.vaddr);
14273 	} else {
14274 		return sc->hal.wrp.paddr +
14275 		    ((unsigned long)srng->u.dst_ring.tp_addr -
14276 		    (unsigned long)sc->hal.wrp.vaddr);
14277 	}
14278 }
14279 
14280 int
14281 qwz_dp_tx_get_ring_id_type(struct qwz_softc *sc, int mac_id, uint32_t ring_id,
14282     enum hal_ring_type ring_type, enum htt_srng_ring_type *htt_ring_type,
14283     enum htt_srng_ring_id *htt_ring_id)
14284 {
14285 	switch (ring_type) {
14286 	case HAL_RXDMA_BUF:
14287 		/* for QCA6390, host fills rx buffer to fw and fw fills to
14288 		 * rxbuf ring for each rxdma
14289 		 */
14290 		if (!sc->hw_params.rx_mac_buf_ring) {
14291 			if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
14292 			    ring_id == HAL_SRNG_SW2RXDMA_BUF1))
14293 				return EINVAL;
14294 			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
14295 			*htt_ring_type = HTT_SW_TO_HW_RING;
14296 		} else {
14297 			if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
14298 				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
14299 				*htt_ring_type = HTT_SW_TO_SW_RING;
14300 			} else {
14301 				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
14302 				*htt_ring_type = HTT_SW_TO_HW_RING;
14303 			}
14304 		}
14305 		break;
14306 	case HAL_RXDMA_DST:
14307 		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
14308 		*htt_ring_type = HTT_HW_TO_SW_RING;
14309 		break;
14310 	case HAL_RXDMA_MONITOR_BUF:
14311 		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
14312 		*htt_ring_type = HTT_SW_TO_HW_RING;
14313 		break;
14314 	case HAL_RXDMA_MONITOR_STATUS:
14315 		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
14316 		*htt_ring_type = HTT_SW_TO_HW_RING;
14317 		break;
14318 	case HAL_RXDMA_MONITOR_DST:
14319 		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
14320 		*htt_ring_type = HTT_HW_TO_SW_RING;
14321 		break;
14322 	case HAL_RXDMA_MONITOR_DESC:
14323 		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
14324 		*htt_ring_type = HTT_SW_TO_HW_RING;
14325 		break;
14326 	default:
14327 		printf("%s: Unsupported ring type in DP :%d\n",
14328 		    sc->sc_dev.dv_xname, ring_type);
14329 		return EINVAL;
14330 	}
14331 
14332 	return 0;
14333 }
14334 
/*
 * Configure one host SRNG in the target via an HTT "srng setup" command.
 * The ring's DMA parameters (base address, entry size, head/tail pointer
 * locations, MSI settings, interrupt moderation) are read from the local
 * hal_srng state and sent to firmware over the HTC control endpoint.
 * Returns 0 on success or an errno value; the mbuf is freed on error.
 */
int
qwz_dp_tx_htt_srng_setup(struct qwz_softc *sc, uint32_t ring_id, int mac_id,
    enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &sc->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct mbuf *m;
	uint32_t ring_entry_sz;
	uint64_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	m = qwz_htc_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	memset(&params, 0, sizeof(params));
	qwz_hal_srng_get_params(sc, srng, &params);

	/* Locations of the ring's head and tail pointers in host memory. */
	hp_addr = qwz_hal_srng_get_hp_addr(sc, srng);
	tp_addr = qwz_hal_srng_get_tp_addr(sc, srng);

	/* Map the HAL ring id/type to the HTT identifiers firmware expects. */
	ret = qwz_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
	    ring_type, &htt_ring_type, &htt_ring_id);
	if (ret)
		goto err_free;

	/* The command payload sits directly behind the HTC header. */
	cmd = (struct htt_srng_setup_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr));
	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
	    HTT_H2T_MSG_TYPE_SRING_SETUP);
	/* Rings shared with hardware are addressed by the hardware MAC id. */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
		    DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
		    mac_id);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
	    htt_ring_type);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);

	cmd->ring_base_addr_lo = params.ring_base_paddr & HAL_ADDR_LSB_REG_MASK;

	cmd->ring_base_addr_hi = (uint64_t)params.ring_base_paddr >>
	    HAL_ADDR_MSB_REG_SHIFT;

	ring_entry_sz = qwz_hal_srng_get_entrysize(sc, ring_type);

	/* HTT expects entry and ring sizes in 32-bit words, not bytes. */
	ring_entry_sz >>= 2;
	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
	    ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
	    params.num_entries * ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
	cmd->info1 |= FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;

	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_head_off32_remote_addr_hi = hp_addr >> HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_tail_off32_remote_addr_hi = tp_addr >> HAL_ADDR_MSB_REG_SHIFT;

	/* NOTE(review): only the low 32 bits of the MSI address are passed
	 * on; presumably MSI target addresses fit in 32 bits here — confirm. */
	cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
	cmd->ring_msi_addr_hi = 0;
	cmd->msi_data = params.msi_data;

	/* Interrupt moderation: batch threshold in words, timer in 8us units
	 * (the microsecond value is divided by 8 below). */
	cmd->intr_info = FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
	    params.intr_batch_cntr_thres_entries * ring_entry_sz);
	cmd->intr_info |= FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
	    params.intr_timer_thres_us >> 3);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = FIELD_PREP(
		    HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
		    params.low_threshold);
	}

	DNPRINTF(QWZ_D_HTT, "%s: htt srng setup msi_addr_lo 0x%x "
	    "msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d "
	    "intr_info 0x%x flags 0x%x\n", __func__, cmd->ring_msi_addr_lo,
	    cmd->ring_msi_addr_hi, cmd->msi_data, ring_id, ring_type,
	    cmd->intr_info, cmd->info2);

	ret = qwz_htc_send(&sc->htc, sc->dp.eid, m);
	if (ret)
		goto err_free;

	return 0;

err_free:
	m_freem(m);

	return ret;
}
14442 
14443 int
14444 qwz_dp_tx_htt_h2t_ppdu_stats_req(struct qwz_softc *sc, uint32_t mask,
14445     uint8_t pdev_id)
14446 {
14447 	struct qwz_dp *dp = &sc->dp;
14448 	struct mbuf *m;
14449 	struct htt_ppdu_stats_cfg_cmd *cmd;
14450 	int len = sizeof(*cmd);
14451 	uint8_t pdev_mask;
14452 	int ret;
14453 	int i;
14454 
14455 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
14456 		m = qwz_htc_alloc_mbuf(len);
14457 		if (!m)
14458 			return ENOMEM;
14459 
14460 		cmd = (struct htt_ppdu_stats_cfg_cmd *)(mtod(m, uint8_t *) +
14461 		    sizeof(struct ath12k_htc_hdr));
14462 		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
14463 				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
14464 
14465 		pdev_mask = 1 << (pdev_id + i);
14466 		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
14467 		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK,
14468 		    mask);
14469 
14470 		ret = qwz_htc_send(&sc->htc, dp->eid, m);
14471 		if (ret) {
14472 			m_freem(m);
14473 			return ret;
14474 		}
14475 	}
14476 
14477 	return 0;
14478 }
14479 
/*
 * Program the rx packet/TLV filter for a ring via an HTT "rx ring
 * selection config" command: tells firmware which frame types and
 * status TLVs to deliver on the ring and the rx buffer size in use.
 * Returns 0 on success or an errno value; the mbuf is freed on error.
 */
int
qwz_dp_tx_htt_rx_filter_setup(struct qwz_softc *sc, uint32_t ring_id,
    int mac_id, enum hal_ring_type ring_type, size_t rx_buf_size,
    struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &sc->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct mbuf *m;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	m = qwz_htc_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	memset(&params, 0, sizeof(params));
	qwz_hal_srng_get_params(sc, srng, &params);

	/* Map the HAL ring id/type to the HTT identifiers firmware expects. */
	ret = qwz_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
	    ring_type, &htt_ring_type, &htt_ring_id);
	if (ret)
		goto err_free;

	/* The command payload sits directly behind the HTC header. */
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr));
	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
	    HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
	/* Rings shared with hardware are addressed by the hardware MAC id. */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING) {
		cmd->info0 |=
		    FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
		    DP_SW2HW_MACID(mac_id));
	} else {
		cmd->info0 |=
		    FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
		    mac_id);
	}
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
	    htt_ring_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
	    !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
	    !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));

	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
	    rx_buf_size);
	/* Copy the caller-provided packet-type and TLV filter masks. */
	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
	cmd->rx_filter_tlv = tlv_filter->rx_filter;

	ret = qwz_htc_send(&sc->htc, sc->dp.eid, m);
	if (ret)
		goto err_free;

	return 0;

err_free:
	m_freem(m);

	return ret;
}
14546 
/*
 * Allocate and configure the per-pdev rx datapath: allocate host SRNGs
 * and announce the rx refill ring and rxdma error destination rings to
 * firmware. Monitor-mode ring setup is not wired up yet (the disabled
 * blocks below are kept as a porting reference from ath12k).
 * Returns 0 on success or an errno value.
 */
int
qwz_dp_rx_pdev_alloc(struct qwz_softc *sc, int mac_id)
{
	struct qwz_pdev_dp *dp = &sc->pdev_dp;
	uint32_t ring_id;
	int i;
	int ret;

	ret = qwz_dp_rx_pdev_srng_alloc(sc);
	if (ret) {
		printf("%s: failed to setup rx srngs: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

#if 0
	ret = qwz_dp_rxdma_pdev_buf_setup(sc);
	if (ret) {
		printf("%s: failed to setup rxdma ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}
#endif

	/* Tell firmware about the rx buffer refill ring. */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = qwz_dp_tx_htt_srng_setup(sc, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		printf("%s: failed to configure rx_refill_buf_ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* One rxdma error destination ring per rxdma instance. */
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = qwz_dp_tx_htt_srng_setup(sc, ring_id, mac_id + i,
		    HAL_RXDMA_DST);
		if (ret) {
			printf("%s: failed to configure "
			    "rxdma_err_dest_ring%d %d\n",
			    sc->sc_dev.dv_xname, i, ret);
			return ret;
		}
	}

	/* Monitor rings only exist on hardware with a second rxdma engine. */
	if (!sc->hw_params.rxdma1_enable)
		goto config_refill_ring;
#if 0
	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath12k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath12k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath12k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		ath12k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
#endif
config_refill_ring:
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ret = qwz_dp_tx_htt_srng_setup(sc,
		    dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id,
		    mac_id + i, HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			printf("%s: failed to configure "
			    "mon_status_refill_ring%d %d\n",
			    sc->sc_dev.dv_xname, i, ret);
			return ret;
		}
	}
#endif
	return 0;
}
14635 
14636 void
14637 qwz_dp_pdev_free(struct qwz_softc *sc)
14638 {
14639 	int i;
14640 
14641 	timeout_del(&sc->mon_reap_timer);
14642 
14643 	for (i = 0; i < sc->num_radios; i++)
14644 		qwz_dp_rx_pdev_free(sc, i);
14645 }
14646 
14647 int
14648 qwz_dp_pdev_alloc(struct qwz_softc *sc)
14649 {
14650 	int ret;
14651 	int i;
14652 
14653 	for (i = 0; i < sc->num_radios; i++) {
14654 		ret = qwz_dp_rx_pdev_alloc(sc, i);
14655 		if (ret) {
14656 			printf("%s: failed to allocate pdev rx "
14657 			    "for pdev_id %d\n", sc->sc_dev.dv_xname, i);
14658 			goto err;
14659 		}
14660 	}
14661 
14662 	return 0;
14663 
14664 err:
14665 	qwz_dp_pdev_free(sc);
14666 
14667 	return ret;
14668 }
14669 
14670 int
14671 qwz_dp_tx_htt_h2t_ver_req_msg(struct qwz_softc *sc)
14672 {
14673 	struct qwz_dp *dp = &sc->dp;
14674 	struct mbuf *m;
14675 	struct htt_ver_req_cmd *cmd;
14676 	int len = sizeof(*cmd);
14677 	int ret;
14678 
14679 	dp->htt_tgt_version_received = 0;
14680 
14681 	m = qwz_htc_alloc_mbuf(len);
14682 	if (!m)
14683 		return ENOMEM;
14684 
14685 	cmd = (struct htt_ver_req_cmd *)(mtod(m, uint8_t *) +
14686 	    sizeof(struct ath12k_htc_hdr));
14687 	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
14688 	    HTT_H2T_MSG_TYPE_VERSION_REQ);
14689 
14690 	ret = qwz_htc_send(&sc->htc, dp->eid, m);
14691 	if (ret) {
14692 		m_freem(m);
14693 		return ret;
14694 	}
14695 
14696 	while (!dp->htt_tgt_version_received) {
14697 		ret = tsleep_nsec(&dp->htt_tgt_version_received, 0,
14698 		    "qwztgtver", SEC_TO_NSEC(3));
14699 		if (ret)
14700 			return ETIMEDOUT;
14701 	}
14702 
14703 	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
14704 		printf("%s: unsupported htt major version %d "
14705 		    "supported version is %d\n", __func__,
14706 		    dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
14707 		return ENOTSUP;
14708 	}
14709 
14710 	return 0;
14711 }
14712 
14713 void
14714 qwz_dp_update_vdev_search(struct qwz_softc *sc, struct qwz_vif *arvif)
14715 {
14716 	 /* When v2_map_support is true:for STA mode, enable address
14717 	  * search index, tcl uses ast_hash value in the descriptor.
14718 	  * When v2_map_support is false: for STA mode, don't enable
14719 	  * address search index.
14720 	  */
14721 	switch (arvif->vdev_type) {
14722 	case WMI_VDEV_TYPE_STA:
14723 		if (sc->hw_params.htt_peer_map_v2) {
14724 			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
14725 			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
14726 		} else {
14727 			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
14728 			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
14729 		}
14730 		break;
14731 	case WMI_VDEV_TYPE_AP:
14732 	case WMI_VDEV_TYPE_IBSS:
14733 		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
14734 		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
14735 		break;
14736 	case WMI_VDEV_TYPE_MONITOR:
14737 	default:
14738 		return;
14739 	}
14740 }
14741 
14742 void
14743 qwz_dp_vdev_tx_attach(struct qwz_softc *sc, struct qwz_pdev *pdev,
14744     struct qwz_vif *arvif)
14745 {
14746 	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
14747 	    FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID, arvif->vdev_id) |
14748 	    FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID, pdev->pdev_id);
14749 
14750 	/* set HTT extension valid bit to 0 by default */
14751 	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
14752 
14753 	qwz_dp_update_vdev_search(sc, arvif);
14754 }
14755 
14756 void
14757 qwz_dp_tx_status_parse(struct qwz_softc *sc, struct hal_wbm_release_ring *desc,
14758     struct hal_tx_status *ts)
14759 {
14760 	ts->buf_rel_source = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
14761 	    desc->info0);
14762 	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
14763 	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
14764 		return;
14765 
14766 	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
14767 		return;
14768 
14769 	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
14770 	    desc->info0);
14771 	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
14772 	    desc->info1);
14773 	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
14774 	    desc->info1);
14775 	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
14776 	    desc->info2);
14777 	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
14778 	    ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
14779 	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
14780 	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
14781 	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
14782 		ts->rate_stats = desc->rate_stats.info0;
14783 	else
14784 		ts->rate_stats = 0;
14785 }
14786 
14787 void
14788 qwz_dp_tx_free_txbuf(struct qwz_softc *sc, int msdu_id,
14789     struct dp_tx_ring *tx_ring)
14790 {
14791 	struct qwz_tx_data *tx_data;
14792 
14793 	if (msdu_id >= sc->hw_params.tx_ring_size)
14794 		return;
14795 
14796 	tx_data = &tx_ring->data[msdu_id];
14797 
14798 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
14799 	m_freem(tx_data->m);
14800 	tx_data->m = NULL;
14801 
14802 	if (tx_ring->queued > 0)
14803 		tx_ring->queued--;
14804 }
14805 
/*
 * Handle a firmware-reported tx completion (OK/drop/TTL). The status
 * fields in 'ts' (acked, ack_rssi, peer_id) are currently unused; only
 * the buffer identified by ts->msdu_id is reclaimed.
 */
void
qwz_dp_tx_htt_tx_complete_buf(struct qwz_softc *sc, struct dp_tx_ring *tx_ring,
    struct qwz_dp_htt_wbm_tx_status *ts)
{
	/* Not using Tx status info for now. Just free the buffer. */
	qwz_dp_tx_free_txbuf(sc, ts->msdu_id, tx_ring);
}
14813 
/*
 * Process a tx completion that firmware (rather than TQM) released
 * through the WBM ring. The HTT completion status structure is
 * embedded in the WBM descriptor at a fixed offset.
 */
void
qwz_dp_tx_process_htt_tx_complete(struct qwz_softc *sc, void *desc,
    uint8_t mac_id, uint32_t msdu_id, struct dp_tx_ring *tx_ring)
{
	struct htt_tx_wbm_completion *status_desc;
	struct qwz_dp_htt_wbm_tx_status ts = {0};
	enum hal_wbm_htt_tx_comp_status wbm_status;

	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;

	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
	    status_desc->info0);

	switch (wbm_status) {
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
		/* Frame was sent (OK) or dropped/expired by firmware. */
		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
		ts.msdu_id = msdu_id;
		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
		    status_desc->info1);

		/* The peer id field is only meaningful when marked valid. */
		if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
			ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
			    status_desc->info2);
		else
			ts.peer_id = HTT_INVALID_PEER_ID;

		qwz_dp_tx_htt_tx_complete_buf(sc, tx_ring, &ts);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
		/* Firmware kept the frame; just reclaim our buffer. */
		qwz_dp_tx_free_txbuf(sc, msdu_id, tx_ring);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
		/* This event is to be handled only when the driver decides to
		 * use WDS offload functionality.
		 */
		break;
	default:
		printf("%s: Unknown htt tx status %d\n",
		    sc->sc_dev.dv_xname, wbm_status);
		break;
	}
}
14859 
14860 int
14861 qwz_mac_hw_ratecode_to_legacy_rate(struct ieee80211_node *ni, uint8_t hw_rc,
14862     uint8_t preamble, uint8_t *rateidx, uint16_t *rate)
14863 {
14864 	struct ieee80211_rateset *rs = &ni->ni_rates;
14865 	int i;
14866 
14867 	if (preamble == WMI_RATE_PREAMBLE_CCK) {
14868 		hw_rc &= ~ATH12k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
14869 		switch (hw_rc) {
14870 			case ATH12K_HW_RATE_CCK_LP_1M:
14871 				*rate = 2;
14872 				break;
14873 			case ATH12K_HW_RATE_CCK_LP_2M:
14874 			case ATH12K_HW_RATE_CCK_SP_2M:
14875 				*rate = 4;
14876 				break;
14877 			case ATH12K_HW_RATE_CCK_LP_5_5M:
14878 			case ATH12K_HW_RATE_CCK_SP_5_5M:
14879 				*rate = 11;
14880 				break;
14881 			case ATH12K_HW_RATE_CCK_LP_11M:
14882 			case ATH12K_HW_RATE_CCK_SP_11M:
14883 				*rate = 22;
14884 				break;
14885 			default:
14886 				return EINVAL;
14887 		}
14888 	} else {
14889 		switch (hw_rc) {
14890 			case ATH12K_HW_RATE_OFDM_6M:
14891 				*rate = 12;
14892 				break;
14893 			case ATH12K_HW_RATE_OFDM_9M:
14894 				*rate = 18;
14895 				break;
14896 			case ATH12K_HW_RATE_OFDM_12M:
14897 				*rate = 24;
14898 				break;
14899 			case ATH12K_HW_RATE_OFDM_18M:
14900 				*rate = 36;
14901 				break;
14902 			case ATH12K_HW_RATE_OFDM_24M:
14903 				*rate = 48;
14904 				break;
14905 			case ATH12K_HW_RATE_OFDM_36M:
14906 				*rate = 72;
14907 				break;
14908 			case ATH12K_HW_RATE_OFDM_48M:
14909 				*rate = 96;
14910 				break;
14911 			case ATH12K_HW_RATE_OFDM_54M:
14912 				*rate = 104;
14913 				break;
14914 			default:
14915 				return EINVAL;
14916 		}
14917 	}
14918 
14919 	for (i = 0; i < rs->rs_nrates; i++) {
14920 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
14921 		if (rval == *rate) {
14922 			*rateidx = i;
14923 			return 0;
14924 		}
14925 	}
14926 
14927 	return EINVAL;
14928 }
14929 
14930 void
14931 qwz_dp_tx_complete_msdu(struct qwz_softc *sc, struct dp_tx_ring *tx_ring,
14932     uint32_t msdu_id, struct hal_tx_status *ts)
14933 {
14934 	struct ieee80211com *ic = &sc->sc_ic;
14935 	struct qwz_tx_data *tx_data = &tx_ring->data[msdu_id];
14936 	uint8_t pkt_type, mcs, rateidx;
14937 	uint16_t rate;
14938 
14939 	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM) {
14940 		/* Must not happen */
14941 		return;
14942 	}
14943 
14944 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
14945 	m_freem(tx_data->m);
14946 	tx_data->m = NULL;
14947 
14948 	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, ts->rate_stats);
14949 	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, ts->rate_stats);
14950 	if (qwz_mac_hw_ratecode_to_legacy_rate(tx_data->ni, mcs, pkt_type,
14951 	    &rateidx, &rate) == 0)
14952 		tx_data->ni->ni_txrate = rateidx;
14953 
14954 	ieee80211_release_node(ic, tx_data->ni);
14955 	tx_data->ni = NULL;
14956 
14957 	if (tx_ring->queued > 0)
14958 		tx_ring->queued--;
14959 }
14960 
/* Advance a tx-completion FIFO index, wrapping at the FIFO size. */
#define QWZ_TX_COMPL_NEXT(x)	(((x) + 1) % DP_TX_COMP_RING_SIZE)

/*
 * Reap the WBM tx completion ring belonging to a TCL data ring:
 * copy fresh completion descriptors into a local FIFO while the
 * SRNG is held, then process the buffered completions, reclaiming
 * the transmitted buffers. Restarts the interface send queue once
 * the tx ring has drained below its limit. Always returns 0.
 */
int
qwz_dp_tx_completion_handler(struct qwz_softc *sc, int ring_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct qwz_dp *dp = &sc->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &sc->hal.srng_list[hal_ring_id];
	struct hal_tx_status ts = { 0 };
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	uint32_t *desc;
	uint32_t msdu_id;
	uint8_t mac_id;
#ifdef notyet
	spin_lock_bh(&status_ring->lock);
#endif
	qwz_hal_srng_access_begin(sc, status_ring);

	/* Drain new completions from hardware into the local FIFO while
	 * there is room between head and tail. */
	while ((QWZ_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
		tx_ring->tx_status_tail) &&
	       (desc = qwz_hal_srng_dst_get_next_entry(sc, status_ring))) {
		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head], desc,
		    sizeof(struct hal_wbm_release_ring));
		tx_ring->tx_status_head =
		    QWZ_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}
#if 0
	if (unlikely((qwz_hal_srng_dst_peek(ab, status_ring) != NULL) &&
		     (QWZ_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
		      tx_ring->tx_status_tail))) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}
#endif
	qwz_hal_srng_access_end(sc, status_ring);
#ifdef notyet
	spin_unlock_bh(&status_ring->lock);
#endif
	/* Process the buffered completions outside of SRNG access. */
	while (QWZ_TX_COMPL_NEXT(tx_ring->tx_status_tail) !=
	    tx_ring->tx_status_head) {
		struct hal_wbm_release_ring *tx_status;
		uint32_t desc_id;

		tx_ring->tx_status_tail =
		   QWZ_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		qwz_dp_tx_status_parse(sc, tx_status, &ts);

		/* Sanity-check the ids decoded from the sw cookie. */
		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
		    tx_status->buf_addr_info.info1);
		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
		if (mac_id >= MAX_RADIOS)
			continue;
		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
		if (msdu_id >= sc->hw_params.tx_ring_size)
			continue;

		/* Firmware-released descriptors take the HTT path. */
		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
			qwz_dp_tx_process_htt_tx_complete(sc,
			    (void *)tx_status, mac_id, msdu_id, tx_ring);
			continue;
		}
#if 0
		spin_lock(&tx_ring->tx_idr_lock);
		msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
		if (unlikely(!msdu)) {
			ath12k_warn(ab, "tx completion for unknown msdu_id %d\n",
				    msdu_id);
			spin_unlock(&tx_ring->tx_idr_lock);
			continue;
		}

		spin_unlock(&tx_ring->tx_idr_lock);
		ar = ab->pdevs[mac_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);
#endif
		qwz_dp_tx_complete_msdu(sc, tx_ring, msdu_id, &ts);
	}

	/* Resume transmission once the ring has room again. */
	if (tx_ring->queued < sc->hw_params.tx_ring_size - 1) {
		sc->qfullmsk &= ~(1 << ring_id);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			(*ifp->if_start)(ifp);
		}
	}

	return 0;
}
15054 
15055 void
15056 qwz_hal_rx_reo_ent_paddr_get(struct qwz_softc *sc, void *desc, uint64_t *paddr,
15057     uint32_t *desc_bank)
15058 {
15059 	struct ath12k_buffer_addr *buff_addr = desc;
15060 
15061 	*paddr = ((uint64_t)(FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
15062 	    buff_addr->info1)) << 32) |
15063 	    FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);
15064 
15065 	*desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
15066 }
15067 
15068 int
15069 qwz_hal_desc_reo_parse_err(struct qwz_softc *sc, uint32_t *rx_desc,
15070     uint64_t *paddr, uint32_t *desc_bank)
15071 {
15072 	struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
15073 	enum hal_reo_dest_ring_push_reason push_reason;
15074 	enum hal_reo_dest_ring_error_code err_code;
15075 
15076 	push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
15077 	    desc->info0);
15078 	err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
15079 	    desc->info0);
15080 #if 0
15081 	ab->soc_stats.reo_error[err_code]++;
15082 #endif
15083 	if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
15084 	    push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
15085 		printf("%s: expected error push reason code, received %d\n",
15086 		    sc->sc_dev.dv_xname, push_reason);
15087 		return EINVAL;
15088 	}
15089 
15090 	if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
15091 	    HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
15092 		printf("%s: expected buffer type link_desc",
15093 		    sc->sc_dev.dv_xname);
15094 		return EINVAL;
15095 	}
15096 
15097 	qwz_hal_rx_reo_ent_paddr_get(sc, rx_desc, paddr, desc_bank);
15098 
15099 	return 0;
15100 }
15101 
15102 void
15103 qwz_hal_rx_msdu_link_info_get(void *link_desc, uint32_t *num_msdus,
15104     uint32_t *msdu_cookies, enum hal_rx_buf_return_buf_manager *rbm)
15105 {
15106 	struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc;
15107 	struct hal_rx_msdu_details *msdu;
15108 	int i;
15109 
15110 	*num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
15111 
15112 	msdu = &link->msdu_link[0];
15113 	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
15114 	    msdu->buf_addr_info.info1);
15115 
15116 	for (i = 0; i < *num_msdus; i++) {
15117 		msdu = &link->msdu_link[i];
15118 
15119 		if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
15120 		    msdu->buf_addr_info.info0)) {
15121 			*num_msdus = i;
15122 			break;
15123 		}
15124 		*msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
15125 		    msdu->buf_addr_info.info1);
15126 		msdu_cookies++;
15127 	}
15128 }
15129 
15130 void
15131 qwz_hal_rx_msdu_link_desc_set(struct qwz_softc *sc, void *desc,
15132     void *link_desc, enum hal_wbm_rel_bm_act action)
15133 {
15134 	struct hal_wbm_release_ring *dst_desc = desc;
15135 	struct hal_wbm_release_ring *src_desc = link_desc;
15136 
15137 	dst_desc->buf_addr_info = src_desc->buf_addr_info;
15138 	dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
15139 	    HAL_WBM_REL_SRC_MODULE_SW) |
15140 	    FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
15141 	    FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
15142 	    HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
15143 }
15144 
15145 int
15146 qwz_dp_rx_link_desc_return(struct qwz_softc *sc, uint32_t *link_desc,
15147     enum hal_wbm_rel_bm_act action)
15148 {
15149 	struct qwz_dp *dp = &sc->dp;
15150 	struct hal_srng *srng;
15151 	uint32_t *desc;
15152 	int ret = 0;
15153 
15154 	srng = &sc->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
15155 #ifdef notyet
15156 	spin_lock_bh(&srng->lock);
15157 #endif
15158 	qwz_hal_srng_access_begin(sc, srng);
15159 
15160 	desc = qwz_hal_srng_src_get_next_entry(sc, srng);
15161 	if (!desc) {
15162 		ret = ENOBUFS;
15163 		goto exit;
15164 	}
15165 
15166 	qwz_hal_rx_msdu_link_desc_set(sc, (void *)desc, (void *)link_desc,
15167 	    action);
15168 
15169 exit:
15170 	qwz_hal_srng_access_end(sc, srng);
15171 #ifdef notyet
15172 	spin_unlock_bh(&srng->lock);
15173 #endif
15174 	return ret;
15175 }
15176 
/*
 * Reassemble a fragmented rx MPDU. Not implemented yet; callers treat
 * the ENOTSUP return as "drop the frame and recycle the descriptor".
 */
int
qwz_dp_rx_frag_h_mpdu(struct qwz_softc *sc, struct mbuf *m,
    uint32_t *ring_desc)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
15184 
/*
 * Read the MSDU length from a hardware rx descriptor. The field layout
 * is chip-specific, so defer to the per-hardware accessor.
 */
static inline uint16_t
qwz_dp_rx_h_msdu_start_msdu_len(struct qwz_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}
15190 
/*
 * Reclaim one rx buffer referenced by an error-path descriptor: unmap
 * it, mark its slot free for replenishment, then either drop the frame
 * or hand it to fragment reassembly. Since reassembly is not
 * implemented, the mbuf is always freed in the end.
 */
void
qwz_dp_process_rx_err_buf(struct qwz_softc *sc, uint32_t *ring_desc,
    int buf_id, int drop)
{
	struct qwz_pdev_dp *dp = &sc->pdev_dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct mbuf *m;
	struct qwz_rx_data *rx_data;
	struct hal_rx_desc *rx_desc;
	uint16_t msdu_len;
	uint32_t hal_rx_desc_sz = sc->hw_params.hal_desc_sz;

	/* Ignore ids that are out of range or whose slot is already free. */
	if (buf_id >= rx_ring->bufs_max || isset(rx_ring->freemap, buf_id))
		return;

	/* Take ownership of the mbuf and release the ring slot. */
	rx_data = &rx_ring->rx_data[buf_id];
	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
	m = rx_data->m;
	rx_data->m = NULL;
	setbit(rx_ring->freemap, buf_id);

	if (drop) {
		m_freem(m);
		return;
	}

	/* Drop frames whose reported length cannot fit in an rx buffer. */
	rx_desc = mtod(m, struct hal_rx_desc *);
	msdu_len = qwz_dp_rx_h_msdu_start_msdu_len(sc, rx_desc);
	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
#if 0
		uint8_t *hdr_status = ath12k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
		ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", hdr_status,
				sizeof(struct ieee80211_hdr));
		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
				sizeof(struct hal_rx_desc));
#endif
		m_freem(m);
		return;
	}

	/* If reassembly fails, return the link descriptor to the idle list. */
	if (qwz_dp_rx_frag_h_mpdu(sc, m, ring_desc)) {
		qwz_dp_rx_link_desc_return(sc, ring_desc,
		    HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	m_freem(m);
}
15239 
/*
 * Service the REO exception ring: for each error descriptor, resolve
 * the MSDU link descriptor it points at, reclaim the rx buffers it
 * references, and replenish the refill ring with fresh buffers.
 * Returns the total number of buffers reclaimed.
 */
int
qwz_dp_process_rx_err(struct qwz_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	uint32_t msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	uint32_t desc_bank, num_msdus;
	struct hal_srng *srng;
	struct qwz_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	uint64_t paddr;
	uint32_t *desc;
	int is_frag;
	uint8_t drop = 0;

	tot_n_bufs_reaped = 0;

	dp = &sc->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &sc->hal.srng_list[reo_except->ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);

	while ((desc = qwz_hal_srng_dst_get_next_entry(sc, srng))) {
		struct hal_reo_dest_ring *reo_desc =
		    (struct hal_reo_dest_ring *)desc;
#if 0
		ab->soc_stats.err_ring_pkts++;
#endif
		ret = qwz_hal_desc_reo_parse_err(sc, desc, &paddr, &desc_bank);
		if (ret) {
			printf("%s: failed to parse error reo desc %d\n",
			    sc->sc_dev.dv_xname, ret);
			continue;
		}
		/* Translate the link descriptor's physical address into a
		 * virtual address within its descriptor bank. */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
		    (paddr - link_desc_banks[desc_bank].paddr);
		qwz_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
		    msdu_cookies, &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
#if 0
			ab->soc_stats.invalid_rbm++;
#endif
			printf("%s: invalid return buffer manager %d\n",
			    sc->sc_dev.dv_xname, rbm);
			qwz_dp_rx_link_desc_return(sc, desc,
			    HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 &
		    RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc below,
		 * and drop msdu's indicated due to error reasons.
		 * NOTE(review): 'drop' is never reset between loop iterations,
		 * so once set it also drops later fragments — confirm intended.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = 1;
			/* Return the link desc back to wbm idle list */
			qwz_dp_rx_link_desc_return(sc, desc,
			   HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
			    msdu_cookies[i]);

			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
			    msdu_cookies[i]);

			qwz_dp_process_rx_err_buf(sc, desc, buf_id, drop);
			n_bufs_reaped[mac_id]++;
			tot_n_bufs_reaped++;
		}
	}

	qwz_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	/* Refill the rx buffer ring for each radio we reaped buffers from. */
	for (i = 0; i < sc->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

		qwz_dp_rxbufs_replenish(sc, i, rx_ring, n_bufs_reaped[i],
		    sc->hw_params.hal_params->rx_buf_rbm);
	}

	ifp->if_ierrors += tot_n_bufs_reaped;

	return tot_n_bufs_reaped;
}
15346 
15347 int
15348 qwz_hal_wbm_desc_parse_err(void *desc, struct hal_rx_wbm_rel_info *rel_info)
15349 {
15350 	struct hal_wbm_release_ring *wbm_desc = desc;
15351 	enum hal_wbm_rel_desc_type type;
15352 	enum hal_wbm_rel_src_module rel_src;
15353 	enum hal_rx_buf_return_buf_manager ret_buf_mgr;
15354 
15355 	type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE, wbm_desc->info0);
15356 
15357 	/* We expect only WBM_REL buffer type */
15358 	if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU)
15359 		return -EINVAL;
15360 
15361 	rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
15362 	    wbm_desc->info0);
15363 	if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
15364 	    rel_src != HAL_WBM_REL_SRC_MODULE_REO)
15365 		return EINVAL;
15366 
15367 	ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
15368 	    wbm_desc->buf_addr_info.info1);
15369 	if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
15370 #if 0
15371 		ab->soc_stats.invalid_rbm++;
15372 #endif
15373 		return EINVAL;
15374 	}
15375 
15376 	rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
15377 	    wbm_desc->buf_addr_info.info1);
15378 	rel_info->err_rel_src = rel_src;
15379 	if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
15380 		rel_info->push_reason = FIELD_GET(
15381 		    HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON, wbm_desc->info0);
15382 		rel_info->err_code = FIELD_GET(
15383 		    HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE, wbm_desc->info0);
15384 	} else {
15385 		rel_info->push_reason = FIELD_GET(
15386 		    HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON, wbm_desc->info0);
15387 		rel_info->err_code = FIELD_GET(
15388 		    HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE, wbm_desc->info0);
15389 	}
15390 
15391 	rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
15392 	    wbm_desc->info2);
15393 	rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
15394 	    wbm_desc->info2);
15395 
15396 	return 0;
15397 }
15398 
/*
 * Handle an MSDU released due to a NULL queue descriptor REO error.
 * Not yet ported from Linux ath12k; always fails so the caller drops
 * the frame.
 */
int
qwz_dp_rx_h_null_q_desc(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
    struct qwz_rx_msdu_list *msdu_list)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
15406 
15407 int
15408 qwz_dp_rx_h_reo_err(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
15409     struct qwz_rx_msdu_list *msdu_list)
15410 {
15411 	int drop = 0;
15412 #if 0
15413 	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
15414 #endif
15415 	switch (msdu->err_code) {
15416 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
15417 		if (qwz_dp_rx_h_null_q_desc(sc, msdu, msdu_list))
15418 			drop = 1;
15419 		break;
15420 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
15421 		/* TODO: Do not drop PN failed packets in the driver;
15422 		 * instead, it is good to drop such packets in mac80211
15423 		 * after incrementing the replay counters.
15424 		 */
15425 		/* fallthrough */
15426 	default:
15427 		/* TODO: Review other errors and process them to mac80211
15428 		 * as appropriate.
15429 		 */
15430 		drop = 1;
15431 		break;
15432 	}
15433 
15434 	return drop;
15435 }
15436 
15437 int
15438 qwz_dp_rx_h_rxdma_err(struct qwz_softc *sc, struct qwz_rx_msdu *msdu)
15439 {
15440 	struct ieee80211com *ic = &sc->sc_ic;
15441 	int drop = 0;
15442 #if 0
15443 	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
15444 #endif
15445 	switch (msdu->err_code) {
15446 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
15447 		ic->ic_stats.is_rx_locmicfail++;
15448 		drop = 1;
15449 		break;
15450 	default:
15451 		/* TODO: Review other rxdma error code to check if anything is
15452 		 * worth reporting to mac80211
15453 		 */
15454 		drop = 1;
15455 		break;
15456 	}
15457 
15458 	return drop;
15459 }
15460 
15461 void
15462 qwz_dp_rx_wbm_err(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
15463     struct qwz_rx_msdu_list *msdu_list)
15464 {
15465 	int drop = 1;
15466 
15467 	switch (msdu->err_rel_src) {
15468 	case HAL_WBM_REL_SRC_MODULE_REO:
15469 		drop = qwz_dp_rx_h_reo_err(sc, msdu, msdu_list);
15470 		break;
15471 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
15472 		drop = qwz_dp_rx_h_rxdma_err(sc, msdu);
15473 		break;
15474 	default:
15475 		/* msdu will get freed */
15476 		break;
15477 	}
15478 
15479 	if (drop) {
15480 		m_freem(msdu->m);
15481 		msdu->m = NULL;
15482 		return;
15483 	}
15484 
15485 	qwz_dp_rx_deliver_msdu(sc, msdu);
15486 }
15487 
15488 int
15489 qwz_dp_rx_process_wbm_err(struct qwz_softc *sc)
15490 {
15491 	struct ieee80211com *ic = &sc->sc_ic;
15492 	struct ifnet *ifp = &ic->ic_if;
15493 	struct qwz_dp *dp = &sc->dp;
15494 	struct dp_rxdma_ring *rx_ring;
15495 	struct hal_rx_wbm_rel_info err_info;
15496 	struct hal_srng *srng;
15497 	struct qwz_rx_msdu_list msdu_list[MAX_RADIOS];
15498 	struct qwz_rx_msdu *msdu;
15499 	struct mbuf *m;
15500 	struct qwz_rx_data *rx_data;
15501 	uint32_t *rx_desc;
15502 	int idx, mac_id;
15503 	int num_buffs_reaped[MAX_RADIOS] = {0};
15504 	int total_num_buffs_reaped = 0;
15505 	int ret, i;
15506 
15507 	for (i = 0; i < sc->num_radios; i++)
15508 		TAILQ_INIT(&msdu_list[i]);
15509 
15510 	srng = &sc->hal.srng_list[dp->rx_rel_ring.ring_id];
15511 #ifdef notyet
15512 	spin_lock_bh(&srng->lock);
15513 #endif
15514 	qwz_hal_srng_access_begin(sc, srng);
15515 
15516 	while ((rx_desc = qwz_hal_srng_dst_get_next_entry(sc, srng))) {
15517 		ret = qwz_hal_wbm_desc_parse_err(rx_desc, &err_info);
15518 		if (ret) {
15519 			printf("%s: failed to parse rx error in wbm_rel "
15520 			    "ring desc %d\n", sc->sc_dev.dv_xname, ret);
15521 			continue;
15522 		}
15523 
15524 		idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
15525 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
15526 
15527 		if (mac_id >= MAX_RADIOS)
15528 			continue;
15529 
15530 		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
15531 		if (idx >= rx_ring->bufs_max || isset(rx_ring->freemap, idx))
15532 			continue;
15533 
15534 		rx_data = &rx_ring->rx_data[idx];
15535 		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
15536 		m = rx_data->m;
15537 		rx_data->m = NULL;
15538 		setbit(rx_ring->freemap, idx);
15539 
15540 		num_buffs_reaped[mac_id]++;
15541 		total_num_buffs_reaped++;
15542 
15543 		if (err_info.push_reason !=
15544 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
15545 			m_freem(m);
15546 			continue;
15547 		}
15548 
15549 		msdu = &rx_data->rx_msdu;
15550 		memset(&msdu->rxi, 0, sizeof(msdu->rxi));
15551 		msdu->m = m;
15552 		msdu->err_rel_src = err_info.err_rel_src;
15553 		msdu->err_code = err_info.err_code;
15554 		msdu->rx_desc = mtod(m, struct hal_rx_desc *);
15555 		TAILQ_INSERT_TAIL(&msdu_list[mac_id], msdu, entry);
15556 	}
15557 
15558 	qwz_hal_srng_access_end(sc, srng);
15559 #ifdef notyet
15560 	spin_unlock_bh(&srng->lock);
15561 #endif
15562 	if (!total_num_buffs_reaped)
15563 		goto done;
15564 
15565 	for (i = 0; i < sc->num_radios; i++) {
15566 		if (!num_buffs_reaped[i])
15567 			continue;
15568 
15569 		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
15570 		qwz_dp_rxbufs_replenish(sc, i, rx_ring, num_buffs_reaped[i],
15571 		    sc->hw_params.hal_params->rx_buf_rbm);
15572 	}
15573 
15574 	for (i = 0; i < sc->num_radios; i++) {
15575 		while ((msdu = TAILQ_FIRST(msdu_list))) {
15576 			TAILQ_REMOVE(msdu_list, msdu, entry);
15577 			if (test_bit(ATH12K_CAC_RUNNING, sc->sc_flags)) {
15578 				m_freem(msdu->m);
15579 				msdu->m = NULL;
15580 				continue;
15581 			}
15582 			qwz_dp_rx_wbm_err(sc, msdu, &msdu_list[i]);
15583 			msdu->m = NULL;
15584 		}
15585 	}
15586 done:
15587 	ifp->if_ierrors += total_num_buffs_reaped;
15588 
15589 	return total_num_buffs_reaped;
15590 }
15591 
15592 struct qwz_rx_msdu *
15593 qwz_dp_rx_get_msdu_last_buf(struct qwz_rx_msdu_list *msdu_list,
15594     struct qwz_rx_msdu *first)
15595 {
15596 	struct qwz_rx_msdu *msdu;
15597 
15598 	if (!first->is_continuation)
15599 		return first;
15600 
15601 	TAILQ_FOREACH(msdu, msdu_list, entry) {
15602 		if (!msdu->is_continuation)
15603 			return msdu;
15604 	}
15605 
15606 	return NULL;
15607 }
15608 
/* Return a pointer to the attention tlvs within a hardware rx descriptor. */
static inline void *
qwz_dp_rx_get_attention(struct qwz_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_attention(desc);
}
15614 
15615 int
15616 qwz_dp_rx_h_attn_is_mcbc(struct qwz_softc *sc, struct hal_rx_desc *desc)
15617 {
15618 	struct rx_attention *attn = qwz_dp_rx_get_attention(sc, desc);
15619 
15620 	return qwz_dp_rx_h_msdu_end_first_msdu(sc, desc) &&
15621 		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
15622 		 le32toh(attn->info1)));
15623 }
15624 
/* Return the number of L3 padding bytes recorded in the rx descriptor. */
static inline uint8_t
qwz_dp_rx_h_msdu_end_l3pad(struct qwz_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}
15630 
/* Return non-zero if the MSDU_DONE bit is set in the attention tlvs. */
static inline int
qwz_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, le32toh(attn->info2));
}
15636 
/*
 * Return the frequency metadata from the MSDU start tlv; the low byte
 * carries the channel number (see qwz_dp_rx_h_ppdu()).
 */
static inline uint32_t
qwz_dp_rx_h_msdu_start_freq(struct qwz_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}
15642 
15643 uint32_t
15644 qwz_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
15645 {
15646 	uint32_t info = le32toh(attn->info1);
15647 	uint32_t errmap = 0;
15648 
15649 	if (info & RX_ATTENTION_INFO1_FCS_ERR)
15650 		errmap |= DP_RX_MPDU_ERR_FCS;
15651 
15652 	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
15653 		errmap |= DP_RX_MPDU_ERR_DECRYPT;
15654 
15655 	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
15656 		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
15657 
15658 	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
15659 		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
15660 
15661 	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
15662 		errmap |= DP_RX_MPDU_ERR_OVERFLOW;
15663 
15664 	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
15665 		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
15666 
15667 	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
15668 		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
15669 
15670 	return errmap;
15671 }
15672 
15673 int
15674 qwz_dp_rx_h_attn_msdu_len_err(struct qwz_softc *sc, struct hal_rx_desc *desc)
15675 {
15676 	struct rx_attention *rx_attention;
15677 	uint32_t errmap;
15678 
15679 	rx_attention = qwz_dp_rx_get_attention(sc, desc);
15680 	errmap = qwz_dp_rx_h_attn_mpdu_err(rx_attention);
15681 
15682 	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
15683 }
15684 
/*
 * Return non-zero if the attention tlvs report that the hardware
 * decrypted this frame successfully.
 */
int
qwz_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
	    le32toh(attn->info2)) == RX_DESC_DECRYPT_STATUS_CODE_OK);
}
15691 
/*
 * Coalesce an MSDU spanning multiple rx buffers into a single mbuf.
 * Not yet ported from Linux ath12k; always fails so the caller drops
 * the frame.
 */
int
qwz_dp_rx_msdu_coalesce(struct qwz_softc *sc, struct qwz_rx_msdu_list *msdu_list,
    struct qwz_rx_msdu *first, struct qwz_rx_msdu *last, uint8_t l3pad_bytes,
    int msdu_len)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
15700 
/*
 * Extract rate information from the rx descriptor into rxi.
 * TODO: not yet implemented; rxi is left unchanged.
 */
void
qwz_dp_rx_h_rate(struct qwz_softc *sc, struct hal_rx_desc *rx_desc,
    struct ieee80211_rxinfo *rxi)
{
	/* TODO */
}
15707 
15708 void
15709 qwz_dp_rx_h_ppdu(struct qwz_softc *sc, struct hal_rx_desc *rx_desc,
15710     struct ieee80211_rxinfo *rxi)
15711 {
15712 	uint8_t channel_num;
15713 	uint32_t meta_data;
15714 
15715 	meta_data = qwz_dp_rx_h_msdu_start_freq(sc, rx_desc);
15716 	channel_num = meta_data & 0xff;
15717 
15718 	rxi->rxi_chan = channel_num;
15719 
15720 	qwz_dp_rx_h_rate(sc, rx_desc, rxi);
15721 }
15722 
/*
 * Undo native-wifi decapsulation.
 * This function will need to do some work once we are receiving
 * aggregated frames.  For now, it needs to do nothing.
 */
void
qwz_dp_rx_h_undecap_nwifi(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
    uint8_t *first_hdr, enum hal_encrypt_type enctype)
{
	/* Complain if a non-first subframe shows up; not yet handled. */
	if (!msdu->is_first_msdu)
		printf("%s: not implemented\n", __func__);
}
15735 
15736 void
15737 qwz_dp_rx_h_undecap_raw(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
15738     enum hal_encrypt_type enctype, int decrypted)
15739 {
15740 #if 0
15741 	struct ieee80211_hdr *hdr;
15742 	size_t hdr_len;
15743 	size_t crypto_len;
15744 #endif
15745 
15746 	if (!msdu->is_first_msdu ||
15747 	    !(msdu->is_first_msdu && msdu->is_last_msdu))
15748 		return;
15749 
15750 	m_adj(msdu->m, -IEEE80211_CRC_LEN);
15751 #if 0
15752 	if (!decrypted)
15753 		return;
15754 
15755 	hdr = (void *)msdu->data;
15756 
15757 	/* Tail */
15758 	if (status->flag & RX_FLAG_IV_STRIPPED) {
15759 		skb_trim(msdu, msdu->len -
15760 			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
15761 
15762 		skb_trim(msdu, msdu->len -
15763 			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
15764 	} else {
15765 		/* MIC */
15766 		if (status->flag & RX_FLAG_MIC_STRIPPED)
15767 			skb_trim(msdu, msdu->len -
15768 				 ath12k_dp_rx_crypto_mic_len(ar, enctype));
15769 
15770 		/* ICV */
15771 		if (status->flag & RX_FLAG_ICV_STRIPPED)
15772 			skb_trim(msdu, msdu->len -
15773 				 ath12k_dp_rx_crypto_icv_len(ar, enctype));
15774 	}
15775 
15776 	/* MMIC */
15777 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
15778 	    !ieee80211_has_morefrags(hdr->frame_control) &&
15779 	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
15780 		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
15781 
15782 	/* Head */
15783 	if (status->flag & RX_FLAG_IV_STRIPPED) {
15784 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
15785 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
15786 
15787 		memmove((void *)msdu->data + crypto_len,
15788 			(void *)msdu->data, hdr_len);
15789 		skb_pull(msdu, crypto_len);
15790 	}
15791 #endif
15792 }
15793 
/* Return the 802.11 header status area recorded in the rx descriptor. */
static inline uint8_t *
qwz_dp_rx_h_80211_hdr(struct qwz_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}
15799 
15800 static inline enum hal_encrypt_type
15801 qwz_dp_rx_h_mpdu_start_enctype(struct qwz_softc *sc, struct hal_rx_desc *desc)
15802 {
15803 	if (!sc->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
15804 		return HAL_ENCRYPT_TYPE_OPEN;
15805 
15806 	return sc->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
15807 }
15808 
/* Return the decapsulation mode (DP_RX_DECAP_TYPE_*) of the received MSDU. */
static inline uint8_t
qwz_dp_rx_h_msdu_start_decap_type(struct qwz_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}
15814 
/*
 * Undo hardware decapsulation of a received MSDU according to the decap
 * mode recorded in the rx descriptor.  Native-wifi and raw modes are
 * handled; Ethernet and 802.3 decap are not yet ported from Linux
 * ath12k (see the disabled code below).
 */
void
qwz_dp_rx_h_undecap(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
    struct hal_rx_desc *rx_desc, enum hal_encrypt_type enctype,
    int decrypted)
{
	uint8_t *first_hdr;
	uint8_t decap;

	first_hdr = qwz_dp_rx_h_80211_hdr(sc, rx_desc);
	decap = qwz_dp_rx_h_msdu_start_decap_type(sc, rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		qwz_dp_rx_h_undecap_nwifi(sc, msdu, first_hdr, enctype);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		qwz_dp_rx_h_undecap_raw(sc, msdu, enctype, decrypted);
		break;
#if 0
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		ehdr = (struct ethhdr *)msdu->data;

		/* mac80211 allows fast path only for authorized STA */
		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
			ath12k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
			break;
		}

		/* PN for mcast packets will be validated in mac80211;
		 * remove eth header and add 802.11 header.
		 */
		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
			ath12k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
#endif
	}
}
15858 
/*
 * Per-MPDU receive processing: determine the encryption type, map
 * hardware error flags to net80211 statistics, mark hardware-decrypted
 * frames, undo decapsulation, and clear the protected bit when the
 * hardware has stripped the IV.  Returns 0 on success or EIO if the
 * attention tlvs flagged any MPDU error.
 */
int
qwz_dp_rx_h_mpdu(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
    struct hal_rx_desc *rx_desc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int fill_crypto_hdr = 0;
	enum hal_encrypt_type enctype;
	int is_decrypted = 0;
#if 0
	struct ath12k_skb_rxcb *rxcb;
#endif
	struct ieee80211_frame *wh;
#if 0
	struct ath12k_peer *peer;
#endif
	struct rx_attention *rx_attention;
	uint32_t err_bitmap;

	/* PN for multicast packets will be checked in net80211 */
	fill_crypto_hdr = qwz_dp_rx_h_attn_is_mcbc(sc, rx_desc);
	msdu->is_mcbc = fill_crypto_hdr;
#if 0
	if (rxcb->is_mcbc) {
		rxcb->peer_id = ath12k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
		rxcb->seq_no = ath12k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
	}

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer) {
		if (rxcb->is_mcbc)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
#endif
		enctype = qwz_dp_rx_h_mpdu_start_enctype(sc, rx_desc);
#if 0
	}
	spin_unlock_bh(&ar->ab->base_lock);
#endif
	/* Only consider the decrypt status if no MPDU errors were flagged. */
	rx_attention = qwz_dp_rx_get_attention(sc, rx_desc);
	err_bitmap = qwz_dp_rx_h_attn_mpdu_err(rx_attention);
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = qwz_dp_rx_h_attn_is_decrypted(rx_attention);
#if 0
	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

#endif
	/* Map hardware error flags onto net80211 receive statistics. */
	if (err_bitmap & DP_RX_MPDU_ERR_FCS) {
		if (ic->ic_flags & IEEE80211_F_RSNON)
			ic->ic_stats.is_rx_decryptcrc++;
		else
			ic->ic_stats.is_rx_decap++;
	}

	/* XXX Trusting firmware to handle Michael MIC counter-measures... */
	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		ic->ic_stats.is_rx_locmicfail++;

	if (err_bitmap & DP_RX_MPDU_ERR_DECRYPT)
		ic->ic_stats.is_rx_wepfail++;

	if (is_decrypted) {
#if 0
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
#endif
		/* Tell net80211 the frame was already decrypted in hardware. */
		msdu->rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
	}
#if 0
	ath12k_dp_rx_h_csum_offload(ar, msdu);
#endif
	qwz_dp_rx_h_undecap(sc, msdu, rx_desc, enctype, is_decrypted);

	if (is_decrypted && !fill_crypto_hdr &&
	    qwz_dp_rx_h_msdu_start_decap_type(sc, rx_desc) !=
	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
		/* Hardware has stripped the IV. */
		wh = mtod(msdu->m, struct ieee80211_frame *);
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
	}

	return err_bitmap ? EIO : 0;
}
15955 
/*
 * Process one received MSDU: validate the rx descriptor tlvs, trim the
 * hardware descriptor (and padding) from the mbuf, or coalesce a
 * multi-buffer MSDU, then run per-PPDU and per-MPDU processing.
 * Returns 0 on success or an errno; on failure the caller drops the
 * frame.
 */
int
qwz_dp_rx_process_msdu(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
    struct qwz_rx_msdu_list *msdu_list)
{
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct rx_attention *rx_attention;
	struct qwz_rx_msdu *last_buf;
	uint8_t l3_pad_bytes;
	uint16_t msdu_len;
	int ret;
	uint32_t hal_rx_desc_sz = sc->hw_params.hal_desc_sz;

	/* The end-of-MSDU tlvs live in the last buffer of the MSDU. */
	last_buf = qwz_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		DPRINTF("%s: No valid Rx buffer to access "
		    "Atten/MSDU_END/MPDU_END tlvs\n", __func__);
		return EIO;
	}

	rx_desc = mtod(msdu->m, struct hal_rx_desc *);
	if (qwz_dp_rx_h_attn_msdu_len_err(sc, rx_desc)) {
		DPRINTF("%s: msdu len not valid\n", __func__);
		return EIO;
	}

	lrx_desc = mtod(last_buf->m, struct hal_rx_desc *);
	rx_attention = qwz_dp_rx_get_attention(sc, lrx_desc);
	if (!qwz_dp_rx_h_attn_msdu_done(rx_attention)) {
		DPRINTF("%s: msdu_done bit in attention is not set\n",
		    __func__);
		return EIO;
	}

	msdu->rx_desc = rx_desc;
	msdu_len = qwz_dp_rx_h_msdu_start_msdu_len(sc, rx_desc);
	l3_pad_bytes = qwz_dp_rx_h_msdu_end_l3pad(sc, lrx_desc);

	if (msdu->is_frag) {
		/* Fragment: strip only the hardware descriptor. */
		m_adj(msdu->m, hal_rx_desc_sz);
		msdu->m->m_len = msdu->m->m_pkthdr.len = msdu_len;
	} else if (!msdu->is_continuation) {
		/* Single-buffer MSDU: strip descriptor plus L3 padding. */
		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
#if 0
			uint8_t *hdr_status;

			hdr_status = ath12k_dp_rx_h_80211_hdr(ab, rx_desc);
#endif
			DPRINTF("%s: invalid msdu len %u\n",
			    __func__, msdu_len);
#if 0
			ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", hdr_status,
					sizeof(struct ieee80211_hdr));
			ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
					sizeof(struct hal_rx_desc));
#endif
			return EINVAL;
		}
		m_adj(msdu->m, hal_rx_desc_sz + l3_pad_bytes);
		msdu->m->m_len = msdu->m->m_pkthdr.len = msdu_len;
	} else {
		/* MSDU spans several buffers; merge them into one mbuf. */
		ret = qwz_dp_rx_msdu_coalesce(sc, msdu_list, msdu, last_buf,
		    l3_pad_bytes, msdu_len);
		if (ret) {
			DPRINTF("%s: failed to coalesce msdu rx buffer%d\n",
			    __func__, ret);
			return ret;
		}
	}

	memset(&msdu->rxi, 0, sizeof(msdu->rxi));
	qwz_dp_rx_h_ppdu(sc, rx_desc, &msdu->rxi);

	return qwz_dp_rx_h_mpdu(sc, msdu, rx_desc);
}
16030 
/*
 * Hand a fully processed MSDU to net80211, after passing a copy to any
 * attached radiotap BPF listener.  Consumes msdu->m.
 */
void
qwz_dp_rx_deliver_msdu(struct qwz_softc *sc, struct qwz_rx_msdu *msdu)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	wh = mtod(msdu->m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct qwz_rx_radiotap_header *tap = &sc->sc_rxtap;

		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
		    msdu->m, BPF_DIRECTION_IN);
	}
#endif
	ieee80211_input(ifp, msdu->m, ni, &msdu->rxi);
	ieee80211_release_node(ic, ni);
}
16053 
16054 void
16055 qwz_dp_rx_process_received_packets(struct qwz_softc *sc,
16056     struct qwz_rx_msdu_list *msdu_list, int mac_id)
16057 {
16058 	struct qwz_rx_msdu *msdu;
16059 	int ret;
16060 
16061 	while ((msdu = TAILQ_FIRST(msdu_list))) {
16062 		TAILQ_REMOVE(msdu_list, msdu, entry);
16063 		ret = qwz_dp_rx_process_msdu(sc, msdu, msdu_list);
16064 		if (ret) {
16065 			DNPRINTF(QWZ_D_MAC, "Unable to process msdu: %d", ret);
16066 			m_freem(msdu->m);
16067 			msdu->m = NULL;
16068 			continue;
16069 		}
16070 
16071 		qwz_dp_rx_deliver_msdu(sc, msdu);
16072 		msdu->m = NULL;
16073 	}
16074 }
16075 
/*
 * Process one REO destination ring: reap completed rx descriptors into
 * per-radio MSDU lists, retry once if the hardware advanced the head
 * pointer mid-MPDU, then process the reaped MSDUs and replenish the
 * rx refill ring.  Returns the number of complete MSDUs reaped.
 */
int
qwz_dp_process_rx(struct qwz_softc *sc, int ring_id)
{
	struct qwz_dp *dp = &sc->dp;
	struct qwz_pdev_dp *pdev_dp = &sc->pdev_dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct qwz_rx_msdu_list msdu_list[MAX_RADIOS];
	struct qwz_rx_msdu *msdu;
	struct mbuf *m;
	struct qwz_rx_data *rx_data;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	int done = 0;
	int idx;
	unsigned int mac_id;
	struct hal_reo_dest_ring *desc;
	enum hal_reo_dest_ring_push_reason push_reason;
	uint32_t cookie;
	int i;

	for (i = 0; i < MAX_RADIOS; i++)
		TAILQ_INIT(&msdu_list[i]);

	srng = &sc->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
try_again:
	qwz_hal_srng_access_begin(sc, srng);

	while ((desc = (struct hal_reo_dest_ring *)
	    qwz_hal_srng_dst_get_next_entry(sc, srng))) {
		/* The cookie encodes the rx slot index and the radio id. */
		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
		    desc->buf_addr_info.info1);
		idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		if (mac_id >= MAX_RADIOS)
			continue;

		/* Sanity-check the cookie before touching the rx slot. */
		rx_ring = &pdev_dp->rx_refill_buf_ring;
		if (idx >= rx_ring->bufs_max || isset(rx_ring->freemap, idx))
			continue;

		/* Take ownership of the mbuf and mark the slot free. */
		rx_data = &rx_ring->rx_data[idx];
		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
		m = rx_data->m;
		rx_data->m = NULL;
		setbit(rx_ring->freemap, idx);

		num_buffs_reaped[mac_id]++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
		    desc->info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			m_freem(m);
#if 0
			sc->soc_stats.hal_reo_error[
			    dp->reo_dst_ring[ring_id].ring_id]++;
#endif
			continue;
		}

		/* Populate per-MSDU state from the REO descriptor. */
		msdu = &rx_data->rx_msdu;
		msdu->m = m;
		msdu->is_first_msdu = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		msdu->is_last_msdu = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		msdu->is_continuation = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		msdu->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
		    desc->rx_mpdu_info.meta_data);
		msdu->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
		    desc->rx_mpdu_info.info0);
		msdu->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
		    desc->info0);

		msdu->mac_id = mac_id;
		TAILQ_INSERT_TAIL(&msdu_list[mac_id], msdu, entry);

		/* Only count MSDUs whose final buffer has been seen. */
		if (msdu->is_continuation) {
			done = 0;
		} else {
			total_msdu_reaped++;
			done = 1;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && qwz_hal_srng_dst_num_free(sc, srng, 1)) {
		qwz_hal_srng_access_end(sc, srng);
		goto try_again;
	}

	qwz_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	if (!total_msdu_reaped)
		goto exit;

	for (i = 0; i < sc->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		qwz_dp_rx_process_received_packets(sc, &msdu_list[i], i);

		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

		qwz_dp_rxbufs_replenish(sc, i, rx_ring, num_buffs_reaped[i],
		    sc->hw_params.hal_params->rx_buf_rbm);
	}
exit:
	return total_msdu_reaped;
}
16199 
/*
 * Allocate and DMA-map a cluster mbuf for the monitor status ring and
 * store it in a free slot of the given rx ring.  On success, return the
 * mbuf and set *buf_idx to the slot index; on failure return NULL
 * (the mbuf, if any, is freed and the slot remains free).
 */
struct mbuf *
qwz_dp_rx_alloc_mon_status_buf(struct qwz_softc *sc,
    struct dp_rxdma_ring *rx_ring, int *buf_idx)
{
	struct mbuf *m;
	struct qwz_rx_data *rx_data;
	const size_t size = DP_RX_BUFFER_SIZE;
	int ret, idx;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;

	/* Attach cluster storage large enough for DP_RX_BUFFER_SIZE. */
	if (size <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, size);
	if ((m->m_flags & M_EXT) == 0)
		goto fail_free_mbuf;

	m->m_len = m->m_pkthdr.len = size;
	idx = qwz_next_free_rxbuf_idx(rx_ring);
	if (idx == -1)
		goto fail_free_mbuf;

	rx_data = &rx_ring->rx_data[idx];
	if (rx_data->m != NULL)
		goto fail_free_mbuf;

	/* DMA maps are created lazily and reused across buffers. */
	if (rx_data->map == NULL) {
		ret = bus_dmamap_create(sc->sc_dmat, size, 1,
		    size, 0, BUS_DMA_NOWAIT, &rx_data->map);
		if (ret)
			goto fail_free_mbuf;
	}

	ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (ret) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail_free_mbuf;
	}

	/* Commit: publish the mbuf and mark the slot as in use. */
	*buf_idx = idx;
	rx_data->m = m;
	clrbit(rx_ring->freemap, idx);
	return m;

fail_free_mbuf:
	m_freem(m);
	return NULL;
}
16253 
/*
 * Reap completed buffers from the monitor status refill ring for the
 * given radio: collect finished status buffers onto ml, immediately
 * replace each reaped buffer with a freshly allocated one, and update
 * the monitor buffer state machine.  Returns the number of ring entries
 * processed.
 */
int
qwz_dp_rx_reap_mon_status_ring(struct qwz_softc *sc, int mac_id,
    struct mbuf_list *ml)
{
	const struct ath12k_hw_hal_params *hal_params;
	struct qwz_pdev_dp *dp;
	struct dp_rxdma_ring *rx_ring;
	struct qwz_mon_data *pmon;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct mbuf *m;
	struct qwz_rx_data *rx_data;
	struct hal_tlv_hdr *tlv;
	uint32_t cookie;
	int buf_idx, srng_id;
	uint64_t paddr;
	uint8_t rbm;
	int num_buffs_reaped = 0;

	dp = &sc->pdev_dp;
	pmon = &dp->mon_data;

	srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params,
	    mac_id);
	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];

	srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);
	while (1) {
		/* Peek without advancing; we may need to retry this entry. */
		rx_mon_status_desc = qwz_hal_srng_src_peek(sc, srng);
		if (!rx_mon_status_desc) {
			pmon->buf_state = DP_MON_STATUS_REPLINISH;
			break;
		}

		qwz_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
		    &cookie, &rbm);
		if (paddr) {
			buf_idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
			if (buf_idx >= rx_ring->bufs_max ||
			    isset(rx_ring->freemap, buf_idx)) {
				/* Stale or invalid cookie; just refill. */
				pmon->buf_state = DP_MON_STATUS_REPLINISH;
				goto move_next;
			}

			rx_data = &rx_ring->rx_data[buf_idx];

			bus_dmamap_sync(sc->sc_dmat, rx_data->map, 0,
			    rx_data->m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);

			tlv = mtod(rx_data->m, struct hal_tlv_hdr *);
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
			    HAL_RX_STATUS_BUFFER_DONE) {
				/* If done status is missing, hold onto status
				 * ring until status is done for this status
				 * ring buffer.
				 * Keep HP in mon_status_ring unchanged,
				 * and break from here.
				 * Check status for same buffer for next time
				 */
				pmon->buf_state = DP_MON_STATUS_NO_DMA;
				break;
			}

			/* Take ownership of the mbuf and free the slot. */
			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m = rx_data->m;
			rx_data->m = NULL;
			setbit(rx_ring->freemap, buf_idx);
#if 0
			if (ab->hw_params.full_monitor_mode) {
				ath12k_dp_rx_mon_update_status_buf_state(pmon, tlv);
				if (paddr == pmon->mon_status_paddr)
					pmon->buf_state = DP_MON_STATUS_MATCH;
			}
#endif
			ml_enqueue(ml, m);
		} else {
			pmon->buf_state = DP_MON_STATUS_REPLINISH;
		}
move_next:
		/* Replace the reaped buffer with a fresh one. */
		m = qwz_dp_rx_alloc_mon_status_buf(sc, rx_ring, &buf_idx);
		if (!m) {
			/* Allocation failed; leave an empty ring entry. */
			hal_params = sc->hw_params.hal_params;
			qwz_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
			    hal_params->rx_buf_rbm);
			num_buffs_reaped++;
			break;
		}
		rx_data = &rx_ring->rx_data[buf_idx];

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
		    FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_idx);

		paddr = rx_data->map->dm_segs[0].ds_addr;
		qwz_hal_rx_buf_addr_info_set(rx_mon_status_desc, paddr,
		    cookie, sc->hw_params.hal_params->rx_buf_rbm);
		qwz_hal_srng_src_get_next_entry(sc, srng);
		num_buffs_reaped++;
	}
	qwz_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return num_buffs_reaped;
}
16361 }
16362 
/*
 * Parse one monitor-mode status buffer into ppdu_info.
 * Not yet implemented: always reports the PPDU as incomplete, which
 * makes qwz_dp_rx_process_mon_status() free the mbuf and move on.
 */
enum hal_rx_mon_status
qwz_hal_rx_parse_mon_status(struct qwz_softc *sc,
    struct hal_rx_mon_ppdu_info *ppdu_info, struct mbuf *m)
{
	/* TODO */
	return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
16370 
/*
 * Process the monitor status ring for one mac: reap completed status
 * buffers, parse each into a PPDU info structure and release the mbuf.
 * Per-peer statistics handling from the Linux driver is still disabled
 * (#if 0). Returns the number of status buffers reaped.
 */
int
qwz_dp_rx_process_mon_status(struct qwz_softc *sc, int mac_id)
{
	enum hal_rx_mon_status hal_status;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
#if 0
	struct ath12k_peer *peer;
	struct ath12k_sta *arsta;
#endif
	int num_buffs_reaped = 0;
#if 0
	uint32_t rx_buf_sz;
	uint16_t log_type;
#endif
	struct qwz_mon_data *pmon = (struct qwz_mon_data *)&sc->pdev_dp.mon_data;
#if  0
	struct qwz_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
#endif
	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;

	/* Collect completed status buffers into a local mbuf list. */
	num_buffs_reaped = qwz_dp_rx_reap_mon_status_ring(sc, mac_id, &ml);
	if (!num_buffs_reaped)
		goto exit;

	memset(ppdu_info, 0, sizeof(*ppdu_info));
	ppdu_info->peer_id = HAL_INVALID_PEERID;

	while ((m = ml_dequeue(&ml))) {
#if 0
		if (ath12k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
			log_type = ATH12K_PKTLOG_TYPE_LITE_RX;
			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
		} else if (ath12k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
			log_type = ATH12K_PKTLOG_TYPE_RX_STATBUF;
			rx_buf_sz = DP_RX_BUFFER_SIZE;
		} else {
			log_type = ATH12K_PKTLOG_TYPE_INVALID;
			rx_buf_sz = 0;
		}

		if (log_type != ATH12K_PKTLOG_TYPE_INVALID)
			trace_ath12k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
#endif

		/* Start each status buffer from a clean PPDU state. */
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
		hal_status = qwz_hal_rx_parse_mon_status(sc, ppdu_info, m);
#if 0
		if (test_bit(ATH12K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath12k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
#endif
		/* Nothing more to do unless a complete PPDU was parsed. */
		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			m_freem(m);
			continue;
		}
#if 0
		rcu_read_lock();
		spin_lock_bh(&ab->base_lock);
		peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);

		if (!peer || !peer->sta) {
			ath12k_dbg(ab, ATH12K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info->peer_id);
			goto next_skb;
		}

		arsta = (struct ath12k_sta *)peer->sta->drv_priv;
		ath12k_dp_rx_update_peer_stats(arsta, ppdu_info);

		if (ath12k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath12k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);

next_skb:
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
#endif
	}
exit:
	return num_buffs_reaped;
}
16464 
/*
 * Service the monitor rings of one mac. Full-monitor mode is not
 * ported yet (#if 0), so only the status ring is processed.
 * Returns non-zero if any work was done.
 */
int
qwz_dp_rx_process_mon_rings(struct qwz_softc *sc, int mac_id)
{
	int ret = 0;
#if 0
	if (test_bit(ATH12K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
	    ab->hw_params.full_monitor_mode)
		ret = ath12k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
	else
#endif
		ret = qwz_dp_rx_process_mon_status(sc, mac_id);

	return ret;
}
16479 
16480 void
16481 qwz_dp_service_mon_ring(void *arg)
16482 {
16483 	struct qwz_softc *sc = arg;
16484 	int i;
16485 
16486 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++)
16487 		qwz_dp_rx_process_mon_rings(sc, i);
16488 
16489 	timeout_add(&sc->mon_reap_timer, ATH12K_MON_TIMER_INTERVAL);
16490 }
16491 
/*
 * Drain the RXDMA error destination ring for the given mac.
 * Each ring entry points at a link descriptor; the MSDU buffers it
 * references are freed and their slots marked available, then the
 * link descriptor is returned to the idle list. Freed slots are
 * replenished and counted as input errors.
 * Returns the number of rx buffers freed.
 */
int
qwz_dp_process_rxdma_err(struct qwz_softc *sc, int mac_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct dp_srng *err_ring;
	struct dp_rxdma_ring *rx_ring;
	struct dp_link_desc_bank *link_desc_banks = sc->dp.link_desc_banks;
	struct hal_srng *srng;
	uint32_t msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct qwz_rx_data *rx_data;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	uint64_t paddr;
	uint32_t cookie;
	uint32_t desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i, idx, srng_id;

	srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params,
	    mac_id);
	err_ring = &sc->pdev_dp.rxdma_err_dst_ring[srng_id];
	rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

	srng = &sc->hal.srng_list[err_ring->ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);

	while ((desc = qwz_hal_srng_dst_get_next_entry(sc, srng))) {
		qwz_hal_rx_reo_ent_paddr_get(sc, desc, &paddr, &cookie);
		desc_bank = FIELD_GET(DP_LINK_DESC_BANK_MASK,
		    cookie);

		/* Error code is only used by disabled statistics code. */
		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code = FIELD_GET(
		    HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
		    entr_ring->info1);
#if 0
		ab->soc_stats.rxdma_error[rxdma_err_code]++;
#endif
		/* Translate the link descriptor bus address to a KVA. */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
		     (paddr - link_desc_banks[desc_bank].paddr);
		qwz_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
		    msdu_cookies, &rbm);

		/* Release every MSDU buffer this link descriptor holds. */
		for (i = 0; i < num_msdus; i++) {
			idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
			    msdu_cookies[i]);
			/* Skip bogus cookies and already-freed slots. */
			if (idx >= rx_ring->bufs_max ||
			    isset(rx_ring->freemap, idx))
				continue;

			rx_data = &rx_ring->rx_data[idx];

			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
			setbit(rx_ring->freemap, idx);

			num_buf_freed++;
		}

		qwz_dp_rx_link_desc_return(sc, desc,
		    HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	qwz_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	/* Hand fresh buffers back to the refill ring. */
	if (num_buf_freed)
		qwz_dp_rxbufs_replenish(sc, mac_id, rx_ring, num_buf_freed,
		    sc->hw_params.hal_params->rx_buf_rbm);

	ifp->if_ierrors += num_buf_freed;

	return num_buf_freed;
}
16576 
/*
 * Decode a REO "get queue stats" status descriptor. Only the uniform
 * header (command number and execution status) is extracted; the
 * detailed statistics dump from the Linux driver remains disabled.
 */
void
qwz_hal_reo_status_queue_stats(struct qwz_softc *sc, uint32_t *reo_desc,
    struct hal_reo_status *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_get_queue_stats_status *desc =
	    (struct hal_reo_get_queue_stats_status *)tlv->value;

	status->uniform_hdr.cmd_num =
	    FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
	status->uniform_hdr.cmd_status =
	    FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
#if 0
	ath12k_dbg(ab, ATH12K_DBG_HAL, "Queue stats status:\n");
	ath12k_dbg(ab, ATH12K_DBG_HAL, "header: cmd_num %d status %d\n",
		   status->uniform_hdr.cmd_num,
		   status->uniform_hdr.cmd_status);
	ath12k_dbg(ab, ATH12K_DBG_HAL, "ssn %ld cur_idx %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
			     desc->info0),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
			     desc->info0));
	ath12k_dbg(ab, ATH12K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
		   desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
		   desc->last_rx_enqueue_timestamp,
		   desc->last_rx_dequeue_timestamp);
	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
		   desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
		   desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
		   desc->rx_bitmap[6], desc->rx_bitmap[7]);
	ath12k_dbg(ab, ATH12K_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
			     desc->info1),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
			     desc->info1));
	ath12k_dbg(ab, ATH12K_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
			     desc->info2),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
			     desc->info2),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
			     desc->info2));
	ath12k_dbg(ab, ATH12K_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
			     desc->info3),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
			     desc->info3));
	ath12k_dbg(ab, ATH12K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
		   desc->num_mpdu_frames, desc->num_msdu_frames,
		   desc->total_bytes);
	ath12k_dbg(ab, ATH12K_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
			     desc->info4),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
			     desc->info4),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
			     desc->info4));
	ath12k_dbg(ab, ATH12K_DBG_HAL, "looping count %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
			     desc->info5));
#endif
}
16642 
16643 void
16644 qwz_hal_reo_flush_queue_status(struct qwz_softc *sc, uint32_t *reo_desc,
16645     struct hal_reo_status *status)
16646 {
16647 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
16648 	struct hal_reo_flush_queue_status *desc =
16649 	    (struct hal_reo_flush_queue_status *)tlv->value;
16650 
16651 	status->uniform_hdr.cmd_num = FIELD_GET(
16652 	   HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
16653 	status->uniform_hdr.cmd_status = FIELD_GET(
16654 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
16655 	status->u.flush_queue.err_detected = FIELD_GET(
16656 	    HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED, desc->info0);
16657 }
16658 
/*
 * Decode a REO "flush cache" status descriptor. Besides filling in
 * the generic status structure, a successful flush marks the current
 * blocking resource as available again.
 */
void
qwz_hal_reo_flush_cache_status(struct qwz_softc *sc, uint32_t *reo_desc,
    struct hal_reo_status *status)
{
	struct ath12k_hal *hal = &sc->hal;
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_flush_cache_status *desc =
	    (struct hal_reo_flush_cache_status *)tlv->value;

	status->uniform_hdr.cmd_num = FIELD_GET(
	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
	status->uniform_hdr.cmd_status = FIELD_GET(
	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);

	status->u.flush_cache.err_detected = FIELD_GET(
	    HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR, desc->info0);
	status->u.flush_cache.err_code = FIELD_GET(
	    HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE, desc->info0);
	/* No block error: the blocking resource is free again. */
	if (!status->u.flush_cache.err_code)
		hal->avail_blk_resource |= BIT(hal->current_blk_index);

	status->u.flush_cache.cache_controller_flush_status_hit = FIELD_GET(
	    HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT, desc->info0);

	status->u.flush_cache.cache_controller_flush_status_desc_type =
	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
	    desc->info0);
	status->u.flush_cache.cache_controller_flush_status_client_id =
	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
	    desc->info0);
	status->u.flush_cache.cache_controller_flush_status_err =
	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR,
	    desc->info0);
	status->u.flush_cache.cache_controller_flush_status_cnt =
	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT,
	    desc->info0);
}
16696 
/*
 * Decode a REO "unblock cache" status descriptor. A successful
 * unblock of a blocking resource marks that resource as in use.
 */
void
qwz_hal_reo_unblk_cache_status(struct qwz_softc *sc, uint32_t *reo_desc,
    struct hal_reo_status *status)
{
	struct ath12k_hal *hal = &sc->hal;
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_unblock_cache_status *desc =
	   (struct hal_reo_unblock_cache_status *)tlv->value;

	status->uniform_hdr.cmd_num = FIELD_GET(
	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
	status->uniform_hdr.cmd_status = FIELD_GET(
	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);

	status->u.unblock_cache.err_detected = FIELD_GET(
	    HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR, desc->info0);
	status->u.unblock_cache.unblock_type = FIELD_GET(
	    HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE, desc->info0);

	/* Successful resource unblock: mark the resource busy. */
	if (!status->u.unblock_cache.err_detected &&
	    status->u.unblock_cache.unblock_type ==
	    HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
		hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
}
16721 
16722 void
16723 qwz_hal_reo_flush_timeout_list_status(struct qwz_softc *ab, uint32_t *reo_desc,
16724     struct hal_reo_status *status)
16725 {
16726 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
16727 	struct hal_reo_flush_timeout_list_status *desc =
16728 	    (struct hal_reo_flush_timeout_list_status *)tlv->value;
16729 
16730 	status->uniform_hdr.cmd_num = FIELD_GET(
16731 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
16732 	status->uniform_hdr.cmd_status = FIELD_GET(
16733 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
16734 
16735 	status->u.timeout_list.err_detected = FIELD_GET(
16736 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR, desc->info0);
16737 	status->u.timeout_list.list_empty = FIELD_GET(
16738 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY, desc->info0);
16739 
16740 	status->u.timeout_list.release_desc_cnt = FIELD_GET(
16741 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT, desc->info1);
16742 	status->u.timeout_list.fwd_buf_cnt = FIELD_GET(
16743 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT, desc->info1);
16744 }
16745 
/*
 * Decode a REO "descriptor threshold reached" status descriptor into
 * the generic hal_reo_status representation.
 */
void
qwz_hal_reo_desc_thresh_reached_status(struct qwz_softc *sc, uint32_t *reo_desc,
    struct hal_reo_status *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_desc_thresh_reached_status *desc =
	    (struct hal_reo_desc_thresh_reached_status *)tlv->value;

	status->uniform_hdr.cmd_num = FIELD_GET(
	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
	status->uniform_hdr.cmd_status = FIELD_GET(
	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);

	status->u.desc_thresh_reached.threshold_idx = FIELD_GET(
	    HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX, desc->info0);

	status->u.desc_thresh_reached.link_desc_counter0 = FIELD_GET(
	    HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0, desc->info1);

	status->u.desc_thresh_reached.link_desc_counter1 = FIELD_GET(
	    HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1, desc->info2);

	status->u.desc_thresh_reached.link_desc_counter2 = FIELD_GET(
	    HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2, desc->info3);

	status->u.desc_thresh_reached.link_desc_counter_sum = FIELD_GET(
	    HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
	    desc->info4);
}
16775 
16776 void
16777 qwz_hal_reo_update_rx_reo_queue_status(struct qwz_softc *ab, uint32_t *reo_desc,
16778     struct hal_reo_status *status)
16779 {
16780 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
16781 	struct hal_reo_status_hdr *desc =
16782 	    (struct hal_reo_status_hdr *)tlv->value;
16783 
16784 	status->uniform_hdr.cmd_num = FIELD_GET(
16785 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->info0);
16786 	status->uniform_hdr.cmd_status = FIELD_GET(
16787 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->info0);
16788 }
16789 
/*
 * Drain the REO status ring: decode each status descriptor by TLV
 * tag and, if a pending REO command matches the reported command
 * number, invoke its completion handler and free the command.
 * Returns non-zero if at least one descriptor was processed.
 */
int
qwz_dp_process_reo_status(struct qwz_softc *sc)
{
	struct qwz_dp *dp = &sc->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	int found = 0, ret = 0;
	uint32_t *reo_desc;
	uint16_t tag;
	struct hal_reo_status reo_status;

	srng = &sc->hal.srng_list[dp->reo_status_ring.ring_id];
	memset(&reo_status, 0, sizeof(reo_status));
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);

	while ((reo_desc = qwz_hal_srng_dst_get_next_entry(sc, srng))) {
		ret = 1;

		/* Dispatch on the TLV tag of this status descriptor. */
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			qwz_hal_reo_status_queue_stats(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			qwz_hal_reo_flush_queue_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			qwz_hal_reo_flush_cache_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			qwz_hal_reo_unblk_cache_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			qwz_hal_reo_flush_timeout_list_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			qwz_hal_reo_desc_thresh_reached_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			qwz_hal_reo_update_rx_reo_queue_status(sc, reo_desc,
			    &reo_status);
			break;
		default:
			printf("%s: Unknown reo status type %d\n",
			    sc->sc_dev.dv_xname, tag);
			continue;
		}
#ifdef notyet
		spin_lock_bh(&dp->reo_cmd_lock);
#endif
		/* Match the status against a pending REO command. */
		TAILQ_FOREACH_SAFE(cmd, &dp->reo_cmd_list, entry, tmp) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = 1;
				TAILQ_REMOVE(&dp->reo_cmd_list, cmd, entry);
				break;
			}
		}
#ifdef notyet
		spin_unlock_bh(&dp->reo_cmd_lock);
#endif
		/* Complete the command outside the (future) list lock. */
		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
			    reo_status.uniform_hdr.cmd_status);
			free(cmd, M_DEVBUF, sizeof(*cmd));
		}
		found = 0;
	}

	qwz_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
16873 
/*
 * Interrupt service for one DP interrupt group: consult the per-group
 * ring masks and service TX completions, RX error rings, WBM error
 * ring, RX rings, monitor rings, REO status ring, RXDMA error rings
 * and host2rxdma replenishment as indicated.
 * Returns non-zero if any RX-related work was done.
 */
int
qwz_dp_service_srng(struct qwz_softc *sc, int grp_id)
{
	struct qwz_pdev_dp *dp = &sc->pdev_dp;
	int i, j, ret = 0;

	if (sc->hw_params.ring_mask->tx[grp_id]) {
		/* Highest bit set selects the TX ring to service. */
		i = fls(sc->hw_params.ring_mask->tx[grp_id]) - 1;
		qwz_dp_tx_completion_handler(sc, i);
	}

	if (sc->hw_params.ring_mask->rx_err[grp_id] &&
	    qwz_dp_process_rx_err(sc))
		ret = 1;

	if (sc->hw_params.ring_mask->rx_wbm_rel[grp_id] &&
	    qwz_dp_rx_process_wbm_err(sc))
		ret = 1;

	if (sc->hw_params.ring_mask->rx[grp_id]) {
		/* Highest bit set selects the RX ring to service. */
		i = fls(sc->hw_params.ring_mask->rx[grp_id]) - 1;
		if (qwz_dp_process_rx(sc, i))
			ret = 1;
	}

	/* Monitor status rings, one per radio and RXDMA instance. */
	for (i = 0; i < sc->num_radios; i++) {
		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * sc->hw_params.num_rxmda_per_pdev + j;

			if ((sc->hw_params.ring_mask->rx_mon_status[grp_id] &
			   (1 << id)) == 0)
				continue;

			if (qwz_dp_rx_process_mon_rings(sc, id))
				ret = 1;
		}
	}

	if (sc->hw_params.ring_mask->reo_status[grp_id] &&
	    qwz_dp_process_reo_status(sc))
		ret = 1;

	/* RXDMA error handling and buffer replenishment per instance. */
	for (i = 0; i < sc->num_radios; i++) {
		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * sc->hw_params.num_rxmda_per_pdev + j;

			if (sc->hw_params.ring_mask->rxdma2host[grp_id] &
			   (1 << (id))) {
				if (qwz_dp_process_rxdma_err(sc, id))
					ret = 1;
			}

			if (sc->hw_params.ring_mask->host2rxdma[grp_id] &
			    (1 << id)) {
				qwz_dp_rxbufs_replenish(sc, id,
				    &dp->rx_refill_buf_ring, 0,
				    sc->hw_params.hal_params->rx_buf_rbm);
			}
		}
	}

	return ret;
}
16937 
16938 int
16939 qwz_wmi_wait_for_service_ready(struct qwz_softc *sc)
16940 {
16941 	int ret;
16942 
16943 	while (!sc->wmi.service_ready) {
16944 		ret = tsleep_nsec(&sc->wmi.service_ready, 0, "qwzwmirdy",
16945 		    SEC_TO_NSEC(5));
16946 		if (ret)
16947 			return -1;
16948 	}
16949 
16950 	return 0;
16951 }
16952 
16953 void
16954 qwz_fill_band_to_mac_param(struct qwz_softc *sc,
16955     struct wmi_host_pdev_band_to_mac *band_to_mac)
16956 {
16957 	uint8_t i;
16958 	struct ath12k_hal_reg_capabilities_ext *hal_reg_cap;
16959 	struct qwz_pdev *pdev;
16960 
16961 	for (i = 0; i < sc->num_radios; i++) {
16962 		pdev = &sc->pdevs[i];
16963 		hal_reg_cap = &sc->hal_reg_cap[i];
16964 		band_to_mac[i].pdev_id = pdev->pdev_id;
16965 
16966 		switch (pdev->cap.supported_bands) {
16967 		case WMI_HOST_WLAN_2G_5G_CAP:
16968 			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
16969 			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
16970 			break;
16971 		case WMI_HOST_WLAN_2G_CAP:
16972 			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
16973 			band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
16974 			break;
16975 		case WMI_HOST_WLAN_5G_CAP:
16976 			band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
16977 			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
16978 			break;
16979 		default:
16980 			break;
16981 		}
16982 	}
16983 }
16984 
16985 struct mbuf *
16986 qwz_wmi_alloc_mbuf(size_t len)
16987 {
16988 	struct mbuf *m;
16989 	uint32_t round_len = roundup(len, 4);
16990 
16991 	m = qwz_htc_alloc_mbuf(sizeof(struct wmi_cmd_hdr) + round_len);
16992 	if (!m)
16993 		return NULL;
16994 
16995 	return m;
16996 }
16997 
16998 int
16999 qwz_wmi_cmd_send_nowait(struct qwz_pdev_wmi *wmi, struct mbuf *m,
17000     uint32_t cmd_id)
17001 {
17002 	struct qwz_softc *sc = wmi->wmi->sc;
17003 	struct wmi_cmd_hdr *cmd_hdr;
17004 	uint32_t cmd = 0;
17005 
17006 	cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
17007 
17008 	cmd_hdr = (struct wmi_cmd_hdr *)(mtod(m, uint8_t *) +
17009 	    sizeof(struct ath12k_htc_hdr));
17010 	cmd_hdr->cmd_id = htole32(cmd);
17011 
17012 	DNPRINTF(QWZ_D_WMI, "%s: sending WMI command 0x%u\n", __func__, cmd);
17013 	return qwz_htc_send(&sc->htc, wmi->eid, m);
17014 }
17015 
/*
 * Send a WMI command, sleeping until transmit resources become
 * available: HTC TX credits when credit flow is enabled, otherwise a
 * free copy-engine descriptor. Returns 0 on success, ESHUTDOWN after
 * a resource timeout during a crash flush, EAGAIN on other timeouts,
 * or the error from qwz_wmi_cmd_send_nowait().
 */
int
qwz_wmi_cmd_send(struct qwz_pdev_wmi *wmi, struct mbuf *m, uint32_t cmd_id)
{
	struct qwz_wmi_base *wmi_sc = wmi->wmi;
	int ret = EOPNOTSUPP;
	struct qwz_softc *sc = wmi_sc->sc;
#ifdef notyet
	might_sleep();
#endif
	if (sc->hw_params.credit_flow) {
		struct qwz_htc *htc = &sc->htc;
		struct qwz_htc_ep *ep = &htc->endpoint[wmi->eid];

		/* Wait (up to 3s per attempt) for HTC TX credits. */
		while (!ep->tx_credits) {
			ret = tsleep_nsec(&ep->tx_credits, 0, "qwztxcrd",
			    SEC_TO_NSEC(3));
			if (ret) {
				printf("%s: tx credits timeout\n",
				    sc->sc_dev.dv_xname);
				if (test_bit(ATH12K_FLAG_CRASH_FLUSH,
				    sc->sc_flags))
					return ESHUTDOWN;
				else
					return EAGAIN;
			}
		}
	} else {
		/* Wait (up to 3s per attempt) for a free CE descriptor. */
		while (!wmi->tx_ce_desc) {
			ret = tsleep_nsec(&wmi->tx_ce_desc, 0, "qwztxce",
			    SEC_TO_NSEC(3));
			if (ret) {
				printf("%s: tx ce desc timeout\n",
				    sc->sc_dev.dv_xname);
				if (test_bit(ATH12K_FLAG_CRASH_FLUSH,
				    sc->sc_flags))
					return ESHUTDOWN;
				else
					return EAGAIN;
			}
		}
	}

	ret = qwz_wmi_cmd_send_nowait(wmi, m, cmd_id);

	if (ret == EAGAIN)
		printf("%s: wmi command %d timeout\n",
		    sc->sc_dev.dv_xname, cmd_id);

	if (ret == ENOBUFS)
		printf("%s: ce desc not available for wmi command %d\n",
		    sc->sc_dev.dv_xname, cmd_id);

	return ret;
}
17070 
/*
 * Set a pdev-level firmware parameter via WMI_PDEV_SET_PARAM_CMDID.
 * Returns 0 on success or an errno-style error; the mbuf is freed on
 * failure.
 */
int
qwz_wmi_pdev_set_param(struct qwz_softc *sc, uint32_t param_id,
    uint32_t param_value, uint8_t pdev_id)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_pdev_set_param_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	/* Command payload follows the HTC and WMI headers. */
	cmd = (struct wmi_pdev_set_param_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = pdev_id;
	cmd->param_id = param_id;
	cmd->param_value = param_value;

	ret = qwz_wmi_cmd_send(wmi, m, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send WMI_PDEV_SET_PARAM cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWZ_D_WMI, "%s: cmd pdev set param %d pdev id %d value %d\n",
	    __func__, param_id, pdev_id, param_value);

	return 0;
}
17107 
/*
 * Configure LRO (large receive offload) for a pdev, seeding the
 * firmware's IPv4/IPv6 Toeplitz hash keys with random data.
 * Returns 0 on success or an errno-style error; the mbuf is freed on
 * failure.
 */
int
qwz_wmi_pdev_lro_cfg(struct qwz_softc *sc, uint8_t pdev_id)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	/* Command payload follows the HTC and WMI headers. */
	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	/* Random Toeplitz hash seeds for the LRO flow lookup. */
	arc4random_buf(cmd->th_4, sizeof(uint32_t) * ATH12K_IPV4_TH_SEED_SIZE);
	arc4random_buf(cmd->th_6, sizeof(uint32_t) * ATH12K_IPV6_TH_SEED_SIZE);

	cmd->pdev_id = pdev_id;

	ret = qwz_wmi_cmd_send(wmi, m, WMI_LRO_CONFIG_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send lro cfg req wmi cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWZ_D_WMI, "%s: cmd lro config pdev_id 0x%x\n",
	    __func__, pdev_id);

	return 0;
}
17145 
17146 int
17147 qwz_wmi_pdev_set_ps_mode(struct qwz_softc *sc, int vdev_id, uint8_t pdev_id,
17148     enum wmi_sta_ps_mode psmode)
17149 {
17150 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17151 	struct wmi_pdev_set_ps_mode_cmd *cmd;
17152 	struct mbuf *m;
17153 	int ret;
17154 
17155 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
17156 	if (!m)
17157 		return ENOMEM;
17158 
17159 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)(mtod(m, uint8_t *) +
17160 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17161 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
17162 	    WMI_TAG_STA_POWERSAVE_MODE_CMD) |
17163 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17164 	cmd->vdev_id = vdev_id;
17165 	cmd->sta_ps_mode = psmode;
17166 
17167 	ret = qwz_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_MODE_CMDID);
17168 	if (ret) {
17169 		if (ret != ESHUTDOWN) {
17170 			printf("%s: failed to send WMI_PDEV_SET_PARAM cmd\n",
17171 			    sc->sc_dev.dv_xname);
17172 		}
17173 		m_freem(m);
17174 		return ret;
17175 	}
17176 
17177 	DNPRINTF(QWZ_D_WMI, "%s: cmd sta powersave mode psmode %d vdev id %d\n",
17178 	    __func__, psmode, vdev_id);
17179 
17180 	return 0;
17181 }
17182 
/*
 * Tell the firmware which OUI to place in the source address of
 * probe request frames it generates during scans (used for MAC
 * address randomization). The OUI is the first three bytes of
 * mac_addr. Returns 0 on success or an errno-style error; the mbuf
 * is freed on failure.
 */
int
qwz_wmi_scan_prob_req_oui(struct qwz_softc *sc, const uint8_t *mac_addr,
    uint8_t pdev_id)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct mbuf *m;
	struct wmi_scan_prob_req_oui_cmd *cmd;
	uint32_t prob_req_oui;
	int len, ret;

	/* Pack the three OUI bytes into one 24-bit value. */
	prob_req_oui = (((uint32_t)mac_addr[0]) << 16) |
		       (((uint32_t)mac_addr[1]) << 8) | mac_addr[2];

	len = sizeof(*cmd);
	m = qwz_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	/* Command payload follows the HTC and WMI headers. */
	cmd = (struct wmi_scan_prob_req_oui_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->prob_req_oui = prob_req_oui;

	DNPRINTF(QWZ_D_WMI, "%s: scan prob req oui %d\n", __func__,
	    prob_req_oui);

	ret = qwz_wmi_cmd_send(wmi, m, WMI_SCAN_PROB_REQ_OUI_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send WMI_SCAN_PROB_REQ_OUI cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	return 0;
}
17223 
17224 int
17225 qwz_wmi_send_dfs_phyerr_offload_enable_cmd(struct qwz_softc *sc, uint32_t pdev_id)
17226 {
17227 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17228 	struct wmi_dfs_phyerr_offload_cmd *cmd;
17229 	struct mbuf *m;
17230 	int ret;
17231 
17232 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
17233 	if (!m)
17234 		return ENOMEM;
17235 
17236 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)(mtod(m, uint8_t *) +
17237 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17238 
17239 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
17240 	    WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
17241 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17242 
17243 	cmd->pdev_id = pdev_id;
17244 
17245 	ret = qwz_wmi_cmd_send(wmi, m,
17246 	    WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
17247 	if (ret) {
17248 		if (ret != ESHUTDOWN) {
17249 			printf("%s: failed to send "
17250 			    "WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n",
17251 			    sc->sc_dev.dv_xname);
17252 		}
17253 		m_free(m);
17254 		return ret;
17255 	}
17256 
17257 	DNPRINTF(QWZ_D_WMI, "%s: cmd pdev dfs phyerr offload enable "
17258 	    "pdev id %d\n", __func__, pdev_id);
17259 
17260 	return 0;
17261 }
17262 
/*
 * Push the host's channel list to firmware for scanning. Firmware
 * limits the size of a single WMI message, so the list is sent in
 * as many WMI_SCAN_CHAN_LIST commands as needed; chunks after the
 * first carry WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG.
 *
 * Note: this consumes chan_list->nallchans (decremented to zero).
 * Returns 0 on success or a positive errno value.
 */
int
qwz_wmi_send_scan_chan_list_cmd(struct qwz_softc *sc, uint8_t pdev_id,
    struct scan_chan_list_params *chan_list)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_scan_chan_list_cmd *cmd;
	struct mbuf *m;
	struct wmi_channel *chan_info;
	struct channel_param *tchan_info;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	uint16_t num_send_chans, num_sends = 0, max_chan_limit = 0;
	uint32_t *reg1, *reg2;

	tchan_info = chan_list->ch_param;
	/* Send in chunks until every channel has been queued. */
	while (chan_list->nallchans) {
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		/* How many channel entries fit into one WMI message. */
		max_chan_limit = (wmi->wmi->max_msg_len[pdev_id] - len) /
		    sizeof(*chan_info);

		if (chan_list->nallchans > max_chan_limit)
			num_send_chans = max_chan_limit;
		else
			num_send_chans = chan_list->nallchans;

		chan_list->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		m = qwz_wmi_alloc_mbuf(len);
		if (!m)
			return ENOMEM;

		/* Command header follows the HTC and WMI command headers. */
		cmd = (struct wmi_scan_chan_list_cmd *)(mtod(m, uint8_t *) +
		    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
		cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
		    WMI_TAG_SCAN_CHAN_LIST_CMD) |
		    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
		cmd->pdev_id = chan_list->pdev_id;
		cmd->num_scan_chans = num_send_chans;
		/* Chunks after the first append to firmware's list. */
		if (num_sends)
			cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;

		DNPRINTF(QWZ_D_WMI, "%s: no.of chan = %d len = %d "
		    "pdev_id = %d num_sends = %d\n", __func__, num_send_chans,
		    len, cmd->pdev_id, num_sends);

		ptr = (void *)(mtod(m, uint8_t *) +
		    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
		    sizeof(*cmd));

		/* Array-of-structs TLV holding this chunk's channels. */
		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
		ptr += TLV_HDR_SIZE;

		/* Translate each host channel_param into a wmi_channel. */
		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
			    WMI_TAG_CHANNEL) |
			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = tchan_info->mhz;
			chan_info->band_center_freq1 = tchan_info->cfreq1;
			chan_info->band_center_freq2 = tchan_info->cfreq2;

			if (tchan_info->is_chan_passive)
				chan_info->info |= WMI_CHAN_INFO_PASSIVE;
			/* Advertise only the highest supported PHY family. */
			if (tchan_info->allow_he)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
			else if (tchan_info->allow_vht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
			else if (tchan_info->allow_ht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
			if (tchan_info->half_rate)
				chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
			if (tchan_info->quarter_rate)
				chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
			if (tchan_info->psc_channel)
				chan_info->info |= WMI_CHAN_INFO_PSC;
			if (tchan_info->dfs_set)
				chan_info->info |= WMI_CHAN_INFO_DFS;

			chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
			    tchan_info->phy_mode);
			/* Regulatory limits are packed into two words. */
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
			    tchan_info->minpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
			    tchan_info->maxpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
			    tchan_info->maxregpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
			    tchan_info->reg_class_id);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
			    tchan_info->antennamax);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
			    tchan_info->maxregpower);

			DNPRINTF(QWZ_D_WMI, "%s: chan scan list "
			    "chan[%d] = %u, chan_info->info %8x\n",
			    __func__, i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			tchan_info++;
		}

		ret = qwz_wmi_cmd_send(wmi, m, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			if (ret != ESHUTDOWN) {
				printf("%s: failed to send WMI_SCAN_CHAN_LIST "
				    "cmd\n", sc->sc_dev.dv_xname);
			}
			m_freem(m);
			return ret;
		}

		DNPRINTF(QWZ_D_WMI, "%s: cmd scan chan list channels %d\n",
		    __func__, num_send_chans);

		num_sends++;
	}

	return 0;
}
17393 
17394 int
17395 qwz_wmi_send_11d_scan_start_cmd(struct qwz_softc *sc,
17396     struct wmi_11d_scan_start_params *param, uint8_t pdev_id)
17397 {
17398 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17399 	struct wmi_11d_scan_start_cmd *cmd;
17400 	struct mbuf *m;
17401 	int ret;
17402 
17403 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
17404 	if (!m)
17405 		return ENOMEM;
17406 
17407 	cmd = (struct wmi_11d_scan_start_cmd *)(mtod(m, uint8_t *) +
17408 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17409 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
17410 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17411 
17412 	cmd->vdev_id = param->vdev_id;
17413 	cmd->scan_period_msec = param->scan_period_msec;
17414 	cmd->start_interval_msec = param->start_interval_msec;
17415 
17416 	ret = qwz_wmi_cmd_send(wmi, m, WMI_11D_SCAN_START_CMDID);
17417 	if (ret) {
17418 		if (ret != ESHUTDOWN) {
17419 			printf("%s: failed to send WMI_11D_SCAN_START_CMDID: "
17420 			    "%d\n", sc->sc_dev.dv_xname, ret);
17421 		}
17422 		m_freem(m);
17423 		return ret;
17424 	}
17425 
17426 	DNPRINTF(QWZ_D_WMI, "%s: cmd 11d scan start vdev id %d period %d "
17427 	    "ms internal %d ms\n", __func__, cmd->vdev_id,
17428 	    cmd->scan_period_msec, cmd->start_interval_msec);
17429 
17430 	return 0;
17431 }
17432 
17433 static inline void
17434 qwz_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
17435     struct scan_req_params *param)
17436 {
17437 	/* Scan events subscription */
17438 	if (param->scan_ev_started)
17439 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_STARTED;
17440 	if (param->scan_ev_completed)
17441 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_COMPLETED;
17442 	if (param->scan_ev_bss_chan)
17443 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_BSS_CHANNEL;
17444 	if (param->scan_ev_foreign_chan)
17445 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN;
17446 	if (param->scan_ev_dequeued)
17447 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_DEQUEUED;
17448 	if (param->scan_ev_preempted)
17449 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_PREEMPTED;
17450 	if (param->scan_ev_start_failed)
17451 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_START_FAILED;
17452 	if (param->scan_ev_restarted)
17453 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESTARTED;
17454 	if (param->scan_ev_foreign_chn_exit)
17455 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
17456 	if (param->scan_ev_suspended)
17457 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_SUSPENDED;
17458 	if (param->scan_ev_resumed)
17459 		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESUMED;
17460 
17461 	/** Set scan control flags */
17462 	cmd->scan_ctrl_flags = 0;
17463 	if (param->scan_f_passive)
17464 		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_PASSIVE;
17465 	if (param->scan_f_strict_passive_pch)
17466 		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
17467 	if (param->scan_f_promisc_mode)
17468 		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROMISCUOS;
17469 	if (param->scan_f_capture_phy_err)
17470 		cmd->scan_ctrl_flags |=  WMI_SCAN_CAPTURE_PHY_ERROR;
17471 	if (param->scan_f_half_rate)
17472 		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
17473 	if (param->scan_f_quarter_rate)
17474 		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
17475 	if (param->scan_f_cck_rates)
17476 		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_CCK_RATES;
17477 	if (param->scan_f_ofdm_rates)
17478 		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_OFDM_RATES;
17479 	if (param->scan_f_chan_stat_evnt)
17480 		cmd->scan_ctrl_flags |=  WMI_SCAN_CHAN_STAT_EVENT;
17481 	if (param->scan_f_filter_prb_req)
17482 		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROBE_REQ;
17483 	if (param->scan_f_bcast_probe)
17484 		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_BCAST_PROBE_REQ;
17485 	if (param->scan_f_offchan_mgmt_tx)
17486 		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_MGMT_TX;
17487 	if (param->scan_f_offchan_data_tx)
17488 		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_DATA_TX;
17489 	if (param->scan_f_force_active_dfs_chn)
17490 		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
17491 	if (param->scan_f_add_tpc_ie_in_probe)
17492 		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
17493 	if (param->scan_f_add_ds_ie_in_probe)
17494 		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
17495 	if (param->scan_f_add_spoofed_mac_in_probe)
17496 		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
17497 	if (param->scan_f_add_rand_seq_in_probe)
17498 		cmd->scan_ctrl_flags |=  WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
17499 	if (param->scan_f_en_ie_whitelist_in_probe)
17500 		cmd->scan_ctrl_flags |=
17501 			 WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
17502 
17503 	/* for adaptive scan mode using 3 bits (21 - 23 bits) */
17504 	WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
17505 	    param->adaptive_dwell_time_mode);
17506 
17507 	cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext;
17508 }
17509 
17510 int
17511 qwz_wmi_send_scan_start_cmd(struct qwz_softc *sc,
17512     struct scan_req_params *params)
17513 {
17514 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[params->pdev_id];
17515 	struct wmi_start_scan_cmd *cmd;
17516 	struct wmi_ssid *ssid = NULL;
17517 	struct wmi_mac_addr *bssid;
17518 	struct mbuf *m;
17519 	struct wmi_tlv *tlv;
17520 	void *ptr;
17521 	int i, ret, len;
17522 	uint32_t *tmp_ptr;
17523 	uint16_t extraie_len_with_pad = 0;
17524 	struct hint_short_ssid *s_ssid = NULL;
17525 	struct hint_bssid *hint_bssid = NULL;
17526 
17527 	len = sizeof(*cmd);
17528 
17529 	len += TLV_HDR_SIZE;
17530 	if (params->num_chan)
17531 		len += params->num_chan * sizeof(uint32_t);
17532 
17533 	len += TLV_HDR_SIZE;
17534 	if (params->num_ssids)
17535 		len += params->num_ssids * sizeof(*ssid);
17536 
17537 	len += TLV_HDR_SIZE;
17538 	if (params->num_bssid)
17539 		len += sizeof(*bssid) * params->num_bssid;
17540 
17541 	len += TLV_HDR_SIZE;
17542 	if (params->extraie.len && params->extraie.len <= 0xFFFF) {
17543 		extraie_len_with_pad = roundup(params->extraie.len,
17544 		    sizeof(uint32_t));
17545 	}
17546 	len += extraie_len_with_pad;
17547 
17548 	if (params->num_hint_bssid) {
17549 		len += TLV_HDR_SIZE +
17550 		    params->num_hint_bssid * sizeof(struct hint_bssid);
17551 	}
17552 
17553 	if (params->num_hint_s_ssid) {
17554 		len += TLV_HDR_SIZE +
17555 		    params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
17556 	}
17557 
17558 	m = qwz_wmi_alloc_mbuf(len);
17559 	if (!m)
17560 		return ENOMEM;
17561 
17562 	ptr = (void *)(mtod(m, uint8_t *) + sizeof(struct ath12k_htc_hdr) +
17563 	    sizeof(struct wmi_cmd_hdr));
17564 
17565 	cmd = ptr;
17566 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
17567 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17568 
17569 	cmd->scan_id = params->scan_id;
17570 	cmd->scan_req_id = params->scan_req_id;
17571 	cmd->vdev_id = params->vdev_id;
17572 	cmd->scan_priority = params->scan_priority;
17573 	cmd->notify_scan_events = params->notify_scan_events;
17574 
17575 	qwz_wmi_copy_scan_event_cntrl_flags(cmd, params);
17576 
17577 	cmd->dwell_time_active = params->dwell_time_active;
17578 	cmd->dwell_time_active_2g = params->dwell_time_active_2g;
17579 	cmd->dwell_time_passive = params->dwell_time_passive;
17580 	cmd->dwell_time_active_6g = params->dwell_time_active_6g;
17581 	cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
17582 	cmd->min_rest_time = params->min_rest_time;
17583 	cmd->max_rest_time = params->max_rest_time;
17584 	cmd->repeat_probe_time = params->repeat_probe_time;
17585 	cmd->probe_spacing_time = params->probe_spacing_time;
17586 	cmd->idle_time = params->idle_time;
17587 	cmd->max_scan_time = params->max_scan_time;
17588 	cmd->probe_delay = params->probe_delay;
17589 	cmd->burst_duration = params->burst_duration;
17590 	cmd->num_chan = params->num_chan;
17591 	cmd->num_bssid = params->num_bssid;
17592 	cmd->num_ssids = params->num_ssids;
17593 	cmd->ie_len = params->extraie.len;
17594 	cmd->n_probes = params->n_probes;
17595 	IEEE80211_ADDR_COPY(cmd->mac_addr.addr, params->mac_addr.addr);
17596 	IEEE80211_ADDR_COPY(cmd->mac_mask.addr, params->mac_mask.addr);
17597 
17598 	ptr += sizeof(*cmd);
17599 
17600 	len = params->num_chan * sizeof(uint32_t);
17601 
17602 	tlv = ptr;
17603 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
17604 	    FIELD_PREP(WMI_TLV_LEN, len);
17605 	ptr += TLV_HDR_SIZE;
17606 	tmp_ptr = (uint32_t *)ptr;
17607 
17608 	for (i = 0; i < params->num_chan; ++i)
17609 		tmp_ptr[i] = params->chan_list[i];
17610 
17611 	ptr += len;
17612 
17613 	len = params->num_ssids * sizeof(*ssid);
17614 	tlv = ptr;
17615 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
17616 	    FIELD_PREP(WMI_TLV_LEN, len);
17617 
17618 	ptr += TLV_HDR_SIZE;
17619 
17620 	if (params->num_ssids) {
17621 		ssid = ptr;
17622 		for (i = 0; i < params->num_ssids; ++i) {
17623 			ssid->ssid_len = params->ssid[i].length;
17624 			memcpy(ssid->ssid, params->ssid[i].ssid,
17625 			       params->ssid[i].length);
17626 			ssid++;
17627 		}
17628 	}
17629 
17630 	ptr += (params->num_ssids * sizeof(*ssid));
17631 	len = params->num_bssid * sizeof(*bssid);
17632 	tlv = ptr;
17633 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
17634 	    FIELD_PREP(WMI_TLV_LEN, len);
17635 
17636 	ptr += TLV_HDR_SIZE;
17637 	bssid = ptr;
17638 
17639 	if (params->num_bssid) {
17640 		for (i = 0; i < params->num_bssid; ++i) {
17641 			IEEE80211_ADDR_COPY(bssid->addr,
17642 			    params->bssid_list[i].addr);
17643 			bssid++;
17644 		}
17645 	}
17646 
17647 	ptr += params->num_bssid * sizeof(*bssid);
17648 
17649 	len = extraie_len_with_pad;
17650 	tlv = ptr;
17651 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
17652 	    FIELD_PREP(WMI_TLV_LEN, len);
17653 	ptr += TLV_HDR_SIZE;
17654 
17655 	if (extraie_len_with_pad)
17656 		memcpy(ptr, params->extraie.ptr, params->extraie.len);
17657 
17658 	ptr += extraie_len_with_pad;
17659 
17660 	if (params->num_hint_s_ssid) {
17661 		len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
17662 		tlv = ptr;
17663 		tlv->header = FIELD_PREP(WMI_TLV_TAG,
17664 		    WMI_TAG_ARRAY_FIXED_STRUCT) |
17665 		    FIELD_PREP(WMI_TLV_LEN, len);
17666 		ptr += TLV_HDR_SIZE;
17667 		s_ssid = ptr;
17668 		for (i = 0; i < params->num_hint_s_ssid; ++i) {
17669 			s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
17670 			s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
17671 			s_ssid++;
17672 		}
17673 		ptr += len;
17674 	}
17675 
17676 	if (params->num_hint_bssid) {
17677 		len = params->num_hint_bssid * sizeof(struct hint_bssid);
17678 		tlv = ptr;
17679 		tlv->header = FIELD_PREP(WMI_TLV_TAG,
17680 		    WMI_TAG_ARRAY_FIXED_STRUCT) |
17681 		    FIELD_PREP(WMI_TLV_LEN, len);
17682 		ptr += TLV_HDR_SIZE;
17683 		hint_bssid = ptr;
17684 		for (i = 0; i < params->num_hint_bssid; ++i) {
17685 			hint_bssid->freq_flags =
17686 				params->hint_bssid[i].freq_flags;
17687 			IEEE80211_ADDR_COPY(
17688 			    &params->hint_bssid[i].bssid.addr[0],
17689 			    &hint_bssid->bssid.addr[0]);
17690 			hint_bssid++;
17691 		}
17692 	}
17693 
17694 	ret = qwz_wmi_cmd_send(wmi, m, WMI_START_SCAN_CMDID);
17695 	if (ret) {
17696 		if (ret != ESHUTDOWN) {
17697 			printf("%s: failed to send WMI_START_SCAN_CMDID\n",
17698 			    sc->sc_dev.dv_xname);
17699 		}
17700 		m_freem(m);
17701 		return ret;
17702 	}
17703 
17704 	DNPRINTF(QWZ_D_WMI, "%s: cmd start scan", __func__);
17705 
17706 	return 0;
17707 }
17708 
17709 int
17710 qwz_wmi_send_scan_stop_cmd(struct qwz_softc *sc,
17711     struct scan_cancel_param *param)
17712 {
17713 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
17714 	struct wmi_stop_scan_cmd *cmd;
17715 	struct mbuf *m;
17716 	int ret;
17717 
17718 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
17719 	if (!m)
17720 		return ENOMEM;
17721 
17722 	cmd = (struct wmi_stop_scan_cmd *)(mtod(m, uint8_t *) +
17723 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17724 
17725 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
17726 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17727 
17728 	cmd->vdev_id = param->vdev_id;
17729 	cmd->requestor = param->requester;
17730 	cmd->scan_id = param->scan_id;
17731 	cmd->pdev_id = param->pdev_id;
17732 	/* stop the scan with the corresponding scan_id */
17733 	if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
17734 		/* Cancelling all scans */
17735 		cmd->req_type =  WMI_SCAN_STOP_ALL;
17736 	} else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
17737 		/* Cancelling VAP scans */
17738 		cmd->req_type =  WMI_SCN_STOP_VAP_ALL;
17739 	} else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
17740 		/* Cancelling specific scan */
17741 		cmd->req_type =  WMI_SCAN_STOP_ONE;
17742 	} else {
17743 		printf("%s: invalid scan cancel param %d\n",
17744 		    sc->sc_dev.dv_xname, param->req_type);
17745 		m_freem(m);
17746 		return EINVAL;
17747 	}
17748 
17749 	ret = qwz_wmi_cmd_send(wmi, m, WMI_STOP_SCAN_CMDID);
17750 	if (ret) {
17751 		if (ret != ESHUTDOWN) {
17752 			printf("%s: failed to send WMI_STOP_SCAN_CMDID\n",
17753 			    sc->sc_dev.dv_xname);
17754 		}
17755 		m_freem(m);
17756 		return ret;
17757 	}
17758 
17759 	DNPRINTF(QWZ_D_WMI, "%s: cmd stop scan\n", __func__);
17760 	return ret;
17761 }
17762 
17763 int
17764 qwz_wmi_send_peer_create_cmd(struct qwz_softc *sc, uint8_t pdev_id,
17765     struct peer_create_params *param)
17766 {
17767 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17768 	struct wmi_peer_create_cmd *cmd;
17769 	struct mbuf *m;
17770 	int ret;
17771 
17772 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
17773 	if (!m)
17774 		return ENOMEM;
17775 
17776 	cmd = (struct wmi_peer_create_cmd *)(mtod(m, uint8_t *) +
17777 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17778 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
17779 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17780 
17781 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_addr);
17782 	cmd->peer_type = param->peer_type;
17783 	cmd->vdev_id = param->vdev_id;
17784 
17785 	ret = qwz_wmi_cmd_send(wmi, m, WMI_PEER_CREATE_CMDID);
17786 	if (ret) {
17787 		if (ret != ESHUTDOWN) {
17788 			printf("%s: failed to submit WMI_PEER_CREATE cmd\n",
17789 			    sc->sc_dev.dv_xname);
17790 		}
17791 		m_freem(m);
17792 		return ret;
17793 	}
17794 
17795 	DNPRINTF(QWZ_D_WMI, "%s: cmd peer create vdev_id %d peer_addr %s\n",
17796 	    __func__, param->vdev_id, ether_sprintf(param->peer_addr));
17797 
17798 	return ret;
17799 }
17800 
17801 int
17802 qwz_wmi_send_peer_delete_cmd(struct qwz_softc *sc, const uint8_t *peer_addr,
17803     uint8_t vdev_id, uint8_t pdev_id)
17804 {
17805 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17806 	struct wmi_peer_delete_cmd *cmd;
17807 	struct mbuf *m;
17808 	int ret;
17809 
17810 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
17811 	if (!m)
17812 		return ENOMEM;
17813 
17814 	cmd = (struct wmi_peer_delete_cmd *)(mtod(m, uint8_t *) +
17815 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17816 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
17817 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17818 
17819 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
17820 	cmd->vdev_id = vdev_id;
17821 
17822 	ret = qwz_wmi_cmd_send(wmi, m, WMI_PEER_DELETE_CMDID);
17823 	if (ret) {
17824 		if (ret != ESHUTDOWN) {
17825 			printf("%s: failed to send WMI_PEER_DELETE cmd\n",
17826 			    sc->sc_dev.dv_xname);
17827 		}
17828 		m_freem(m);
17829 		return ret;
17830 	}
17831 
17832 	DNPRINTF(QWZ_D_WMI, "%s: cmd peer delete vdev_id %d peer_addr %pM\n",
17833 	    __func__, vdev_id, peer_addr);
17834 
17835 	return 0;
17836 }
17837 
17838 int
17839 qwz_wmi_vdev_install_key(struct qwz_softc *sc,
17840     struct wmi_vdev_install_key_arg *arg, uint8_t pdev_id)
17841 {
17842 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17843 	struct wmi_vdev_install_key_cmd *cmd;
17844 	struct wmi_tlv *tlv;
17845 	struct mbuf *m;
17846 	int ret, len;
17847 	int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
17848 
17849 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
17850 
17851 	m = qwz_wmi_alloc_mbuf(len);
17852 	if (m == NULL)
17853 		return -ENOMEM;
17854 
17855 	cmd = (struct wmi_vdev_install_key_cmd *)(mtod(m, uint8_t *) +
17856 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17857 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
17858 	    WMI_TAG_VDEV_INSTALL_KEY_CMD) |
17859 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17860 	cmd->vdev_id = arg->vdev_id;
17861 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, arg->macaddr);
17862 	cmd->key_idx = arg->key_idx;
17863 	cmd->key_flags = arg->key_flags;
17864 	cmd->key_cipher = arg->key_cipher;
17865 	cmd->key_len = arg->key_len;
17866 	cmd->key_txmic_len = arg->key_txmic_len;
17867 	cmd->key_rxmic_len = arg->key_rxmic_len;
17868 
17869 	if (arg->key_rsc_counter)
17870 		memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
17871 		       sizeof(struct wmi_key_seq_counter));
17872 
17873 	tlv = (struct wmi_tlv *)(mtod(m, uint8_t *) +
17874 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
17875 	    sizeof(*cmd));
17876 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
17877 	    FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
17878 	if (arg->key_data)
17879 		memcpy(tlv->value, (uint8_t *)arg->key_data,
17880 		    key_len_aligned);
17881 
17882 	ret = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_INSTALL_KEY_CMDID);
17883 	if (ret) {
17884 		printf("%s: failed to send WMI_VDEV_INSTALL_KEY cmd\n",
17885 		    sc->sc_dev.dv_xname);
17886 		m_freem(m);
17887 		return ret;
17888 	}
17889 
17890 	DNPRINTF(QWZ_D_WMI,
17891 	    "%s: cmd vdev install key idx %d cipher %d len %d\n",
17892 	    __func__, arg->key_idx, arg->key_cipher, arg->key_len);
17893 
17894 	return ret;
17895 }
17896 
17897 void
17898 qwz_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
17899     struct peer_assoc_params *param, int hw_crypto_disabled)
17900 {
17901 	cmd->peer_flags = 0;
17902 
17903 	if (param->is_wme_set) {
17904 		if (param->qos_flag)
17905 			cmd->peer_flags |= WMI_PEER_QOS;
17906 		if (param->apsd_flag)
17907 			cmd->peer_flags |= WMI_PEER_APSD;
17908 		if (param->ht_flag)
17909 			cmd->peer_flags |= WMI_PEER_HT;
17910 		if (param->bw_40)
17911 			cmd->peer_flags |= WMI_PEER_40MHZ;
17912 		if (param->bw_80)
17913 			cmd->peer_flags |= WMI_PEER_80MHZ;
17914 		if (param->bw_160)
17915 			cmd->peer_flags |= WMI_PEER_160MHZ;
17916 
17917 		/* Typically if STBC is enabled for VHT it should be enabled
17918 		 * for HT as well
17919 		 **/
17920 		if (param->stbc_flag)
17921 			cmd->peer_flags |= WMI_PEER_STBC;
17922 
17923 		/* Typically if LDPC is enabled for VHT it should be enabled
17924 		 * for HT as well
17925 		 **/
17926 		if (param->ldpc_flag)
17927 			cmd->peer_flags |= WMI_PEER_LDPC;
17928 
17929 		if (param->static_mimops_flag)
17930 			cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
17931 		if (param->dynamic_mimops_flag)
17932 			cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
17933 		if (param->spatial_mux_flag)
17934 			cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
17935 		if (param->vht_flag)
17936 			cmd->peer_flags |= WMI_PEER_VHT;
17937 		if (param->he_flag)
17938 			cmd->peer_flags |= WMI_PEER_HE;
17939 		if (param->twt_requester)
17940 			cmd->peer_flags |= WMI_PEER_TWT_REQ;
17941 		if (param->twt_responder)
17942 			cmd->peer_flags |= WMI_PEER_TWT_RESP;
17943 	}
17944 
17945 	/* Suppress authorization for all AUTH modes that need 4-way handshake
17946 	 * (during re-association).
17947 	 * Authorization will be done for these modes on key installation.
17948 	 */
17949 	if (param->auth_flag)
17950 		cmd->peer_flags |= WMI_PEER_AUTH;
17951 	if (param->need_ptk_4_way) {
17952 		cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
17953 		if (!hw_crypto_disabled && param->is_assoc)
17954 			cmd->peer_flags &= ~WMI_PEER_AUTH;
17955 	}
17956 	if (param->need_gtk_2_way)
17957 		cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
17958 	/* safe mode bypass the 4-way handshake */
17959 	if (param->safe_mode_enabled)
17960 		cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
17961 				     WMI_PEER_NEED_GTK_2_WAY);
17962 
17963 	if (param->is_pmf_enabled)
17964 		cmd->peer_flags |= WMI_PEER_PMF;
17965 
17966 	/* Disable AMSDU for station transmit, if user configures it */
17967 	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
17968 	 * it
17969 	 * if (param->amsdu_disable) Add after FW support
17970 	 **/
17971 
17972 	/* Target asserts if node is marked HT and all MCS is set to 0.
17973 	 * Mark the node as non-HT if all the mcs rates are disabled through
17974 	 * iwpriv
17975 	 **/
17976 	if (param->peer_ht_rates.num_rates == 0)
17977 		cmd->peer_flags &= ~WMI_PEER_HT;
17978 }
17979 
/*
 * Send WMI_PEER_ASSOC_CMDID describing a (re)associating peer's
 * capabilities: flags, legacy/HT rate arrays, a VHT rate-set struct,
 * and an array of HE rate-set structs, all appended as TLVs after the
 * fixed command. Returns 0 on success or a positive errno value.
 */
int
qwz_wmi_send_peer_assoc_cmd(struct qwz_softc *sc, uint8_t pdev_id,
    struct peer_assoc_params *param)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct wmi_vht_rate_set *mcs;
	struct wmi_he_rate_set *he_mcs;
	struct mbuf *m;
	struct wmi_tlv *tlv;
	void *ptr;
	uint32_t peer_legacy_rates_align;
	uint32_t peer_ht_rates_align;
	int i, ret, len;

	/* Rate byte-arrays are padded to 32-bit TLV alignment. */
	peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
	    sizeof(uint32_t));
	peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
	    sizeof(uint32_t));

	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(uint8_t)) +
	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(uint8_t)) +
	      sizeof(*mcs) + TLV_HDR_SIZE +
	      (sizeof(*he_mcs) * param->peer_he_mcs_count);

	m = qwz_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	/* The command payload follows the HTC and WMI command headers. */
	ptr = (void *)(mtod(m, uint8_t *) + sizeof(struct ath12k_htc_hdr) +
	    sizeof(struct wmi_cmd_hdr));

	cmd = ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->vdev_id;

	cmd->peer_new_assoc = param->peer_new_assoc;
	cmd->peer_associd = param->peer_associd;

	qwz_wmi_copy_peer_flags(cmd, param,
	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags));

	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_mac);

	cmd->peer_rate_caps = param->peer_rate_caps;
	cmd->peer_caps = param->peer_caps;
	cmd->peer_listen_intval = param->peer_listen_intval;
	cmd->peer_ht_caps = param->peer_ht_caps;
	cmd->peer_max_mpdu = param->peer_max_mpdu;
	cmd->peer_mpdu_density = param->peer_mpdu_density;
	cmd->peer_vht_caps = param->peer_vht_caps;
	cmd->peer_phymode = param->peer_phymode;

	/* Update 11ax capabilities */
	cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
	cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
	cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
	cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
	cmd->peer_he_ops = param->peer_he_ops;
	memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
	       sizeof(param->peer_he_cap_phyinfo));
	memcpy(&cmd->peer_ppet, &param->peer_ppet,
	       sizeof(param->peer_ppet));

	/* Update peer legacy rate information */
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
	    FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);

	ptr += TLV_HDR_SIZE;

	cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
	memcpy(ptr, param->peer_legacy_rates.rates,
	    param->peer_legacy_rates.num_rates);

	/* Update peer HT rate information */
	ptr += peer_legacy_rates_align;

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
	    FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
	ptr += TLV_HDR_SIZE;
	cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
	memcpy(ptr, param->peer_ht_rates.rates,
	    param->peer_ht_rates.num_rates);

	/* VHT Rates */
	ptr += peer_ht_rates_align;

	mcs = ptr;

	mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);

	cmd->peer_nss = param->peer_nss;

	/* Update bandwidth-NSS mapping */
	cmd->peer_bw_rxnss_override = 0;
	cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;

	if (param->vht_capable) {
		mcs->rx_max_rate = param->rx_max_rate;
		mcs->rx_mcs_set = param->rx_mcs_set;
		mcs->tx_max_rate = param->tx_max_rate;
		mcs->tx_mcs_set = param->tx_mcs_set;
	}

	/* HE Rates */
	cmd->peer_he_mcs = param->peer_he_mcs_count;
	cmd->min_data_rate = param->min_data_rate;

	ptr += sizeof(*mcs);

	len = param->peer_he_mcs_count * sizeof(*he_mcs);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, len);
	ptr += TLV_HDR_SIZE;

	/* Loop through the HE rate set */
	for (i = 0; i < param->peer_he_mcs_count; i++) {
		he_mcs = ptr;
		he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
		    WMI_TAG_HE_RATE_SET) |
		    FIELD_PREP(WMI_TLV_LEN, sizeof(*he_mcs) - TLV_HDR_SIZE);

		/*
		 * NOTE(review): rx_mcs_set is filled from the host's tx
		 * set and vice versa. This mirrors the vendor driver, so
		 * it may be intentional (firmware's rx is the peer's tx)
		 * -- confirm against upstream before changing.
		 */
		he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
		he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
		ptr += sizeof(*he_mcs);
	}

	ret = qwz_wmi_cmd_send(wmi, m, WMI_PEER_ASSOC_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send WMI_PEER_ASSOC_CMDID\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWZ_D_WMI, "%s: cmd peer assoc vdev id %d assoc id %d "
	    "peer mac %s peer_flags %x rate_caps %x peer_caps %x "
	    "listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d "
	    "peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x "
	    "he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
	    __func__, cmd->vdev_id, cmd->peer_associd,
	    ether_sprintf(param->peer_mac),
	    cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
	    cmd->peer_listen_intval, cmd->peer_ht_caps,
	    cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
	    cmd->peer_mpdu_density, cmd->peer_vht_caps, cmd->peer_he_cap_info,
	    cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
	    cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
	    cmd->peer_he_cap_phy[2], cmd->peer_bw_rxnss_override);

	return 0;
}
18145 
/*
 * Copy the host's target resource configuration into the WMI resource
 * config structure that is embedded in the WMI_INIT_CMDID message
 * (see qwz_init_cmd_send()).  This is a plain field-by-field copy;
 * no values are translated or byte-swapped here.
 */
void
qwz_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
    struct target_resource_config *tg_cfg)
{
	wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
	wmi_cfg->num_peers = tg_cfg->num_peers;
	wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
	wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
	wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
	wmi_cfg->num_tids = tg_cfg->num_tids;
	wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
	wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
	wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
	wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
	wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
	wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
	wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
	wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
	wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
	wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
	wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
	wmi_cfg->roam_offload_max_ap_profiles =
	    tg_cfg->roam_offload_max_ap_profiles;
	wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
	wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
	wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
	wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
	wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
	wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
	wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
	    tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
	wmi_cfg->vow_config = tg_cfg->vow_config;
	wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
	wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
	wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
	wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
	wmi_cfg->num_tdls_conn_table_entries =
	    tg_cfg->num_tdls_conn_table_entries;
	wmi_cfg->beacon_tx_offload_max_vdev =
	    tg_cfg->beacon_tx_offload_max_vdev;
	wmi_cfg->num_multicast_filter_entries =
	    tg_cfg->num_multicast_filter_entries;
	wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
	wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
	wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
	wmi_cfg->max_tdls_concurrent_sleep_sta =
	    tg_cfg->max_tdls_concurrent_sleep_sta;
	wmi_cfg->max_tdls_concurrent_buffer_sta =
	    tg_cfg->max_tdls_concurrent_buffer_sta;
	wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
	wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
	wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
	wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
	wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
	wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
	wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
	wmi_cfg->flag1 = tg_cfg->flag1;
	wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
	wmi_cfg->sched_params = tg_cfg->sched_params;
	wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
	wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
#ifdef notyet /* 6 GHz support */
	/* Not compiled in yet: advertise extended regulatory (reg CC ext)
	 * support and EMA limits once 6 GHz support lands. */
	wmi_cfg->host_service_flags &=
	    ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
	wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported <<
	    WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
	wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET;
	wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt;
	wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period;
#endif
}
18218 
18219 int
18220 qwz_init_cmd_send(struct qwz_pdev_wmi *wmi, struct wmi_init_cmd_param *param)
18221 {
18222 	struct mbuf *m;
18223 	struct wmi_init_cmd *cmd;
18224 	struct wmi_resource_config *cfg;
18225 	struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
18226 	struct wmi_pdev_band_to_mac *band_to_mac;
18227 	struct wlan_host_mem_chunk *host_mem_chunks;
18228 	struct wmi_tlv *tlv;
18229 	size_t ret, len;
18230 	void *ptr;
18231 	uint32_t hw_mode_len = 0;
18232 	uint16_t idx;
18233 
18234 	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
18235 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
18236 		    (param->num_band_to_mac * sizeof(*band_to_mac));
18237 
18238 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
18239 	    (param->num_mem_chunks ?
18240 	    (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
18241 
18242 	m = qwz_wmi_alloc_mbuf(len);
18243 	if (!m)
18244 		return ENOMEM;
18245 
18246 	cmd = (struct wmi_init_cmd *)(mtod(m, uint8_t *) +
18247 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18248 
18249 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
18250 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18251 
18252 	ptr = mtod(m, uint8_t *) + sizeof(struct ath12k_htc_hdr) +
18253 	   sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
18254 	cfg = ptr;
18255 
18256 	qwz_wmi_copy_resource_config(cfg, param->res_cfg);
18257 
18258 	cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
18259 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
18260 
18261 	ptr += sizeof(*cfg);
18262 	host_mem_chunks = ptr + TLV_HDR_SIZE;
18263 	len = sizeof(struct wlan_host_mem_chunk);
18264 
18265 	for (idx = 0; idx < param->num_mem_chunks; ++idx) {
18266 		host_mem_chunks[idx].tlv_header =
18267 		    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
18268 		    FIELD_PREP(WMI_TLV_LEN, len);
18269 
18270 		host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
18271 		host_mem_chunks[idx].size = param->mem_chunks[idx].len;
18272 		host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
18273 
18274 		DNPRINTF(QWZ_D_WMI,
18275 		    "%s: host mem chunk req_id %d paddr 0x%llx len %d\n",
18276 		    __func__, param->mem_chunks[idx].req_id,
18277 		    (uint64_t)param->mem_chunks[idx].paddr,
18278 		    param->mem_chunks[idx].len);
18279 	}
18280 	cmd->num_host_mem_chunks = param->num_mem_chunks;
18281 	len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
18282 
18283 	/* num_mem_chunks is zero */
18284 	tlv = ptr;
18285 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
18286 	    FIELD_PREP(WMI_TLV_LEN, len);
18287 	ptr += TLV_HDR_SIZE + len;
18288 
18289 	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
18290 		hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
18291 		hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18292 		    WMI_TAG_PDEV_SET_HW_MODE_CMD) |
18293 		    FIELD_PREP(WMI_TLV_LEN, sizeof(*hw_mode) - TLV_HDR_SIZE);
18294 
18295 		hw_mode->hw_mode_index = param->hw_mode_id;
18296 		hw_mode->num_band_to_mac = param->num_band_to_mac;
18297 
18298 		ptr += sizeof(*hw_mode);
18299 
18300 		len = param->num_band_to_mac * sizeof(*band_to_mac);
18301 		tlv = ptr;
18302 		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
18303 		    FIELD_PREP(WMI_TLV_LEN, len);
18304 
18305 		ptr += TLV_HDR_SIZE;
18306 		len = sizeof(*band_to_mac);
18307 
18308 		for (idx = 0; idx < param->num_band_to_mac; idx++) {
18309 			band_to_mac = (void *)ptr;
18310 
18311 			band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18312 			    WMI_TAG_PDEV_BAND_TO_MAC) |
18313 			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
18314 			band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
18315 			band_to_mac->start_freq =
18316 			    param->band_to_mac[idx].start_freq;
18317 			band_to_mac->end_freq =
18318 			    param->band_to_mac[idx].end_freq;
18319 			ptr += sizeof(*band_to_mac);
18320 		}
18321 	}
18322 
18323 	ret = qwz_wmi_cmd_send(wmi, m, WMI_INIT_CMDID);
18324 	if (ret) {
18325 		if (ret != ESHUTDOWN)
18326 			printf("%s: failed to send WMI_INIT_CMDID\n", __func__);
18327 		m_freem(m);
18328 		return ret;
18329 	}
18330 
18331 	DNPRINTF(QWZ_D_WMI, "%s: cmd wmi init\n", __func__);
18332 
18333 	return 0;
18334 }
18335 
18336 int
18337 qwz_wmi_cmd_init(struct qwz_softc *sc)
18338 {
18339 	struct qwz_wmi_base *wmi_sc = &sc->wmi;
18340 	struct wmi_init_cmd_param init_param;
18341 	struct target_resource_config  config;
18342 
18343 	memset(&init_param, 0, sizeof(init_param));
18344 	memset(&config, 0, sizeof(config));
18345 
18346 	sc->hw_params.hw_ops->wmi_init_config(sc, &config);
18347 
18348 	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT))
18349 		config.is_reg_cc_ext_event_supported = 1;
18350 
18351 	memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
18352 
18353 	init_param.res_cfg = &wmi_sc->wlan_resource_config;
18354 	init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
18355 	init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
18356 	init_param.mem_chunks = wmi_sc->mem_chunks;
18357 
18358 	if (sc->hw_params.single_pdev_only)
18359 		init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
18360 
18361 	init_param.num_band_to_mac = sc->num_radios;
18362 	qwz_fill_band_to_mac_param(sc, init_param.band_to_mac);
18363 
18364 	return qwz_init_cmd_send(&wmi_sc->wmi[0], &init_param);
18365 }
18366 
18367 int
18368 qwz_wmi_wait_for_unified_ready(struct qwz_softc *sc)
18369 {
18370 	int ret;
18371 
18372 	while (!sc->wmi.unified_ready) {
18373 		ret = tsleep_nsec(&sc->wmi.unified_ready, 0, "qwzunfrdy",
18374 		    SEC_TO_NSEC(5));
18375 		if (ret)
18376 			return -1;
18377 	}
18378 
18379 	return 0;
18380 }
18381 
18382 int
18383 qwz_wmi_set_hw_mode(struct qwz_softc *sc,
18384     enum wmi_host_hw_mode_config_type mode)
18385 {
18386 	struct wmi_pdev_set_hw_mode_cmd_param *cmd;
18387 	struct mbuf *m;
18388 	struct qwz_wmi_base *wmi = &sc->wmi;
18389 	int len;
18390 	int ret;
18391 
18392 	len = sizeof(*cmd);
18393 
18394 	m = qwz_wmi_alloc_mbuf(len);
18395 	if (!m)
18396 		return ENOMEM;
18397 
18398 	cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)(mtod(m, uint8_t *) +
18399 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18400 
18401 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
18402 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18403 
18404 	cmd->pdev_id = WMI_PDEV_ID_SOC;
18405 	cmd->hw_mode_index = mode;
18406 
18407 	ret = qwz_wmi_cmd_send(&wmi->wmi[0], m, WMI_PDEV_SET_HW_MODE_CMDID);
18408 	if (ret) {
18409 		if (ret != ESHUTDOWN) {
18410 			printf("%s: failed to send "
18411 			    "WMI_PDEV_SET_HW_MODE_CMDID\n", __func__);
18412 		}
18413 		m_freem(m);
18414 		return ret;
18415 	}
18416 
18417 	DNPRINTF(QWZ_D_WMI, "%s: cmd pdev set hw mode %d\n", __func__,
18418 	    cmd->hw_mode_index);
18419 
18420 	return 0;
18421 }
18422 
18423 int
18424 qwz_wmi_set_sta_ps_param(struct qwz_softc *sc, uint32_t vdev_id,
18425      uint8_t pdev_id, uint32_t param, uint32_t param_value)
18426 {
18427 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18428 	struct wmi_sta_powersave_param_cmd *cmd;
18429 	struct mbuf *m;
18430 	int ret;
18431 
18432 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
18433 	if (!m)
18434 		return ENOMEM;
18435 
18436 	cmd = (struct wmi_sta_powersave_param_cmd *)(mtod(m, uint8_t *) +
18437 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18438 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18439 	    WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
18440 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18441 
18442 	cmd->vdev_id = vdev_id;
18443 	cmd->param = param;
18444 	cmd->value = param_value;
18445 
18446 	ret = qwz_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_PARAM_CMDID);
18447 	if (ret) {
18448 		if (ret != ESHUTDOWN) {
18449 			printf("%s: failed to send "
18450 			    "WMI_STA_POWERSAVE_PARAM_CMDID",
18451 			    sc->sc_dev.dv_xname);
18452 		}
18453 		m_freem(m);
18454 		return ret;
18455 	}
18456 
18457 	DNPRINTF(QWZ_D_WMI, "%s: cmd set powersave param vdev_id %d param %d "
18458 	    "value %d\n", __func__, vdev_id, param, param_value);
18459 
18460 	return 0;
18461 }
18462 
/*
 * Submit a management frame to firmware via WMI_MGMT_TX_SEND_CMDID.
 * The frame must already be DMA-mapped (tx_data->map); its physical
 * address is handed to the device, while up to WMI_MGMT_SEND_DOWNLD_LEN
 * bytes of the frame are additionally copied inline into the command.
 * Presumably firmware fetches any remainder via DMA from paddr --
 * TODO confirm against firmware interface docs.
 * On success the frame mbuf is owned by tx_data until tx completion.
 * Returns 0 on success or an errno value on failure.
 */
int
qwz_wmi_mgmt_send(struct qwz_softc *sc, struct qwz_vif *arvif, uint8_t pdev_id,
    uint32_t buf_id, struct mbuf *frame, struct qwz_tx_data *tx_data)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_mgmt_send_cmd *cmd;
	struct wmi_tlv *frame_tlv;
	struct mbuf *m;
	uint32_t buf_len;
	int ret, len;
	uint64_t paddr;

	/* Physical address of the DMA-mapped frame. */
	paddr = tx_data->map->dm_segs[0].ds_addr;

	/* Clamp the inline copy to WMI_MGMT_SEND_DOWNLD_LEN bytes. */
	buf_len = frame->m_pkthdr.len < WMI_MGMT_SEND_DOWNLD_LEN ?
	    frame->m_pkthdr.len : WMI_MGMT_SEND_DOWNLD_LEN;

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	m = qwz_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arvif->vdev_id;
	cmd->desc_id = buf_id;
	cmd->chanfreq = 0;
	cmd->paddr_lo = paddr & 0xffffffff;
	cmd->paddr_hi = paddr >> 32;
	cmd->frame_len = frame->m_pkthdr.len;
	cmd->buf_len = buf_len;
	cmd->tx_params_valid = 0;

	/* Byte-array TLV carrying the inline portion of the frame. */
	frame_tlv = (struct wmi_tlv *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
	    sizeof(*cmd));
	frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
	    FIELD_PREP(WMI_TLV_LEN, buf_len);

	memcpy(frame_tlv->value, mtod(frame, void *), buf_len);
#if 0 /* Not needed on OpenBSD? */
	ath12k_ce_byte_swap(frame_tlv->value, buf_len);
#endif
	ret = qwz_wmi_cmd_send(wmi, m, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit "
			    "WMI_MGMT_TX_SEND_CMDID cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWZ_D_WMI, "%s: cmd mgmt tx send", __func__);

	/* Keep a reference to the frame for the tx-completion path. */
	tx_data->m = frame;
	return 0;
}
18525 
/*
 * Create a firmware vdev via WMI_VDEV_CREATE_CMDID, with the given MAC
 * address and the parameters (vdev id/type/subtype, pdev, MBSSID info)
 * from 'param'.  Two txrx stream records (2 GHz and 5 GHz) are always
 * appended.  Returns 0 on success or an errno value on failure.
 */
int
qwz_wmi_vdev_create(struct qwz_softc *sc, uint8_t *macaddr,
    struct vdev_create_params *param)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
	struct wmi_vdev_create_cmd *cmd;
	struct mbuf *m;
	struct wmi_vdev_txrx_streams *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	m = qwz_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->if_id;
	cmd->vdev_type = param->type;
	cmd->vdev_subtype = param->subtype;
	cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
	cmd->pdev_id = param->pdev_id;
	cmd->mbssid_flags = param->mbssid_flags;
	cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id;

	IEEE80211_ADDR_COPY(cmd->vdev_macaddr.addr, macaddr);

	/* TLV array of per-band txrx stream records follows the command. */
	ptr = (void *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
	    sizeof(*cmd));
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, len);

	/* First record: 2 GHz band chain configuration. */
	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header =
	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
	txrx_streams->supported_tx_streams = param->chains[0].tx;
	txrx_streams->supported_rx_streams = param->chains[0].rx;

	/* Second record: 5 GHz band chain configuration. */
	txrx_streams++;
	txrx_streams->tlv_header =
	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
	txrx_streams->supported_tx_streams = param->chains[1].tx;
	txrx_streams->supported_rx_streams = param->chains[1].rx;

	ret = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit WMI_VDEV_CREATE_CMDID\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWZ_D_WMI, "%s: cmd vdev create id %d type %d subtype %d "
	    "macaddr %s pdevid %d\n", __func__, param->if_id, param->type,
	    param->subtype, ether_sprintf(macaddr), param->pdev_id);

	return ret;
}
18607 
18608 int
18609 qwz_wmi_vdev_set_param_cmd(struct qwz_softc *sc, uint32_t vdev_id,
18610     uint8_t pdev_id, uint32_t param_id, uint32_t param_value)
18611 {
18612 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18613 	struct wmi_vdev_set_param_cmd *cmd;
18614 	struct mbuf *m;
18615 	int ret;
18616 
18617 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
18618 	if (!m)
18619 		return ENOMEM;
18620 
18621 	cmd = (struct wmi_vdev_set_param_cmd *)(mtod(m, uint8_t *) +
18622 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18623 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
18624 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18625 
18626 	cmd->vdev_id = vdev_id;
18627 	cmd->param_id = param_id;
18628 	cmd->param_value = param_value;
18629 
18630 	ret = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_SET_PARAM_CMDID);
18631 	if (ret) {
18632 		if (ret != ESHUTDOWN) {
18633 			printf("%s: failed to send WMI_VDEV_SET_PARAM_CMDID\n",
18634 			    sc->sc_dev.dv_xname);
18635 		}
18636 		m_freem(m);
18637 		return ret;
18638 	}
18639 
18640 	DNPRINTF(QWZ_D_WMI, "%s: cmd vdev set param vdev 0x%x param %d "
18641 	    "value %d\n", __func__, vdev_id, param_id, param_value);
18642 
18643 	return 0;
18644 }
18645 
/*
 * Bring a vdev up via WMI_VDEV_UP_CMDID, binding it to the given BSSID
 * and association ID.  The tx_bssid/nontx_profile arguments carry
 * multiple-BSSID (non-transmitting profile) information; tx_bssid may
 * be NULL when not applicable.
 * Returns 0 on success or an errno value on failure.
 */
int
qwz_wmi_vdev_up(struct qwz_softc *sc, uint32_t vdev_id, uint32_t pdev_id,
    uint32_t aid, const uint8_t *bssid, uint8_t *tx_bssid,
    uint32_t nontx_profile_idx, uint32_t nontx_profile_cnt)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_vdev_up_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->vdev_assoc_id = aid;

	IEEE80211_ADDR_COPY(cmd->vdev_bssid.addr, bssid);

	cmd->nontx_profile_idx = nontx_profile_idx;
	cmd->nontx_profile_cnt = nontx_profile_cnt;
	if (tx_bssid)
		IEEE80211_ADDR_COPY(cmd->tx_vdev_bssid.addr, tx_bssid);
#if 0
	/* Linux reference: derive MBSSID fields from bss_conf instead of
	 * taking them as explicit arguments as done above. */
	if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
		bss_conf = &arvif->vif->bss_conf;

		if (bss_conf->nontransmitted) {
			ether_addr_copy(cmd->tx_vdev_bssid.addr,
					bss_conf->transmitter_bssid);
			cmd->nontx_profile_idx = bss_conf->bssid_index;
			cmd->nontx_profile_cnt = bss_conf->bssid_indicator;
		}
	}
#endif
	ret = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_UP_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit WMI_VDEV_UP cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWZ_D_WMI, "%s: cmd vdev up id 0x%x assoc id %d bssid %s\n",
	    __func__, vdev_id, aid, ether_sprintf((u_char *)bssid));

	return 0;
}
18701 
18702 int
18703 qwz_wmi_vdev_down(struct qwz_softc *sc, uint32_t vdev_id, uint8_t pdev_id)
18704 {
18705 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18706 	struct wmi_vdev_down_cmd *cmd;
18707 	struct mbuf *m;
18708 	int ret;
18709 
18710 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
18711 	if (!m)
18712 		return ENOMEM;
18713 
18714 	cmd = (struct wmi_vdev_down_cmd *)(mtod(m, uint8_t *) +
18715 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18716 
18717 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
18718 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18719 	cmd->vdev_id = vdev_id;
18720 
18721 	ret = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_DOWN_CMDID);
18722 	if (ret) {
18723 		if (ret != ESHUTDOWN) {
18724 			printf("%s: failed to submit WMI_VDEV_DOWN cmd\n",
18725 			    sc->sc_dev.dv_xname);
18726 		}
18727 		m_freem(m);
18728 		return ret;
18729 	}
18730 
18731 	DNPRINTF(QWZ_D_WMI, "%s: cmd vdev down id 0x%x\n", __func__, vdev_id);
18732 
18733 	return 0;
18734 }
18735 
18736 void
18737 qwz_wmi_put_wmi_channel(struct wmi_channel *chan,
18738     struct wmi_vdev_start_req_arg *arg)
18739 {
18740 	uint32_t center_freq1 = arg->channel.band_center_freq1;
18741 
18742 	memset(chan, 0, sizeof(*chan));
18743 
18744 	chan->mhz = arg->channel.freq;
18745 	chan->band_center_freq1 = arg->channel.band_center_freq1;
18746 
18747 	if (arg->channel.mode == MODE_11AX_HE160) {
18748 		if (arg->channel.freq > arg->channel.band_center_freq1)
18749 			chan->band_center_freq1 = center_freq1 + 40;
18750 		else
18751 			chan->band_center_freq1 = center_freq1 - 40;
18752 
18753 		chan->band_center_freq2 = arg->channel.band_center_freq1;
18754 	} else if ((arg->channel.mode == MODE_11AC_VHT80_80) ||
18755 	    (arg->channel.mode == MODE_11AX_HE80_80)) {
18756 		chan->band_center_freq2 = arg->channel.band_center_freq2;
18757 	} else
18758 		chan->band_center_freq2 = 0;
18759 
18760 	chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
18761 	if (arg->channel.passive)
18762 		chan->info |= WMI_CHAN_INFO_PASSIVE;
18763 	if (arg->channel.allow_ibss)
18764 		chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
18765 	if (arg->channel.allow_ht)
18766 		chan->info |= WMI_CHAN_INFO_ALLOW_HT;
18767 	if (arg->channel.allow_vht)
18768 		chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
18769 	if (arg->channel.allow_he)
18770 		chan->info |= WMI_CHAN_INFO_ALLOW_HE;
18771 	if (arg->channel.ht40plus)
18772 		chan->info |= WMI_CHAN_INFO_HT40_PLUS;
18773 	if (arg->channel.chan_radar)
18774 		chan->info |= WMI_CHAN_INFO_DFS;
18775 	if (arg->channel.freq2_radar)
18776 		chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
18777 
18778 	chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
18779 	    arg->channel.max_power) |
18780 	    FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
18781 	    arg->channel.max_reg_power);
18782 
18783 	chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
18784 	    arg->channel.max_antenna_gain) |
18785 	    FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
18786 	    arg->channel.max_power);
18787 }
18788 
18789 int
18790 qwz_wmi_vdev_stop(struct qwz_softc *sc, uint8_t vdev_id, uint8_t pdev_id)
18791 {
18792 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18793 	struct wmi_vdev_stop_cmd *cmd;
18794 	struct mbuf *m;
18795 	int ret;
18796 
18797 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
18798 	if (!m)
18799 		return ENOMEM;
18800 
18801 	cmd = (struct wmi_vdev_stop_cmd *)(mtod(m, uint8_t *) +
18802 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18803 
18804 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
18805 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18806 	cmd->vdev_id = vdev_id;
18807 
18808 	ret = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_STOP_CMDID);
18809 	if (ret) {
18810 		if (ret != ESHUTDOWN) {
18811 			printf("%s: failed to submit WMI_VDEV_STOP cmd\n",
18812 			    sc->sc_dev.dv_xname);
18813 		}
18814 		m_freem(m);
18815 		return ret;
18816 	}
18817 
18818 	DNPRINTF(QWZ_D_WMI, "%s: cmd vdev stop id 0x%x\n", __func__, vdev_id);
18819 
18820 	return ret;
18821 }
18822 
/*
 * Start or restart a vdev on the channel described by 'arg' via
 * WMI_VDEV_START_REQUEST_CMDID or WMI_VDEV_RESTART_REQUEST_CMDID.
 * On a restart, SSID and hidden/PMF flags are not re-sent.
 * Returns 0 on success, EINVAL for an oversized SSID, or another
 * errno value on failure.
 */
int
qwz_wmi_vdev_start(struct qwz_softc *sc, struct wmi_vdev_start_req_arg *arg,
    int pdev_id, int restart)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_vdev_start_request_cmd *cmd;
	struct mbuf *m;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	if (arg->ssid_len > sizeof(cmd->ssid.ssid))
		return EINVAL;

	/* Command + channel struct + empty NoA descriptor array header. */
	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	m = qwz_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_VDEV_START_REQUEST_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	cmd->beacon_interval = arg->bcn_intval;
	cmd->bcn_tx_rate = arg->bcn_tx_rate;
	cmd->dtim_period = arg->dtim_period;
	cmd->num_noa_descriptors = arg->num_noa_descriptors;
	cmd->preferred_rx_streams = arg->pref_rx_streams;
	cmd->preferred_tx_streams = arg->pref_tx_streams;
	cmd->cac_duration_ms = arg->cac_duration_ms;
	cmd->regdomain = arg->regdomain;
	cmd->he_ops = arg->he_ops;
	cmd->mbssid_flags = arg->mbssid_flags;
	cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id;

	/* SSID and start-time-only flags are omitted on restart. */
	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = arg->ssid_len;
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
		if (arg->pmf_enabled)
			cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
	}

	cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
		cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;

	ptr = mtod(m, void *) + sizeof(struct ath12k_htc_hdr) +
	    sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
	chan = ptr;

	qwz_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*chan) - TLV_HDR_SIZE);
	ptr += sizeof(*chan);

	/* Empty (zero-length) NoA descriptor array. */
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	ret = qwz_wmi_cmd_send(wmi, m, restart ?
	    WMI_VDEV_RESTART_REQUEST_CMDID : WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit vdev_%s cmd\n",
			    sc->sc_dev.dv_xname, restart ? "restart" : "start");
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWZ_D_WMI, "%s: cmd vdev %s id 0x%x freq %u mode 0x%x\n",
	   __func__, restart ? "restart" : "start", arg->vdev_id,
	   arg->channel.freq, arg->channel.mode);

	return ret;
}
18914 
/*
 * Bring the core firmware interfaces up: attach WMI and HTC, start the
 * host interface (bus ops), connect HTC/HTT/WMI endpoints, send the WMI
 * init command and wait for the firmware to become ready.  On failure
 * the already-initialized parts are torn down via the goto labels in
 * reverse order.  Returns 0 on success or an errno/-1 value on failure.
 */
int
qwz_core_start(struct qwz_softc *sc)
{
	int ret;

	ret = qwz_wmi_attach(sc);
	if (ret) {
		printf("%s: failed to attach wmi: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwz_htc_init(sc);
	if (ret) {
		printf("%s: failed to init htc: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_wmi_detach;
	}

	/* Start the bus-specific (PCI/AHB) host interface. */
	ret = sc->ops.start(sc);
	if (ret) {
		printf("%s: failed to start host interface: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_wmi_detach;
	}

	ret = qwz_htc_wait_target(sc);
	if (ret) {
		printf("%s: failed to connect to HTC: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}

	ret = qwz_dp_htt_connect(&sc->dp);
	if (ret) {
		printf("%s: failed to connect to HTT: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}

	ret = qwz_wmi_connect(sc);
	if (ret) {
		printf("%s: failed to connect wmi: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}

	/* Cleared here; set again by the service ready event handler. */
	sc->wmi.service_ready = 0;

	ret = qwz_htc_start(&sc->htc);
	if (ret) {
		printf("%s: failed to start HTC: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}

	ret = qwz_wmi_wait_for_service_ready(sc);
	if (ret) {
		printf("%s: failed to receive wmi service ready event: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}
#if 0
	ret = ath12k_mac_allocate(ab);
	if (ret) {
		ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
			   ret);
		goto err_hif_stop;
	}
	ath12k_dp_pdev_pre_alloc(sc);
#endif
	ret = qwz_dp_pdev_reo_setup(sc);
	if (ret) {
		printf("%s: failed to initialize reo destination rings: %d\n",
		    __func__, ret);
		goto err_mac_destroy;
	}

	ret = qwz_wmi_cmd_init(sc);
	if (ret) {
		printf("%s: failed to send wmi init cmd: %d\n", __func__, ret);
		goto err_reo_cleanup;
	}

	ret = qwz_wmi_wait_for_unified_ready(sc);
	if (ret) {
		printf("%s: failed to receive wmi unified ready event: %d\n",
		    __func__, ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (sc->hw_params.single_pdev_only &&
	    sc->hw_params.num_rxmda_per_pdev > 1) {
		ret = qwz_wmi_set_hw_mode(sc, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			printf("%s: failed to send dbs mode: %d\n",
			    __func__, ret);
			goto err_hif_stop;
		}
	}

	/* ENOTSUP is tolerated silently; older firmware may lack this. */
	ret = qwz_dp_tx_htt_h2t_ver_req_msg(sc);
	if (ret) {
		if (ret != ENOTSUP) {
			printf("%s: failed to send htt version "
			    "request message: %d\n", __func__, ret);
		}
		goto err_reo_cleanup;
	}

	return 0;
err_reo_cleanup:
	qwz_dp_pdev_reo_cleanup(sc);
err_mac_destroy:
#if 0
	ath12k_mac_destroy(ab);
#endif
err_hif_stop:
	sc->ops.stop(sc);
err_wmi_detach:
	qwz_wmi_detach(sc);
	return ret;
}
19039 
void
qwz_core_stop(struct qwz_softc *sc)
{
	/*
	 * Tear down the running core: stop firmware, detach WMI and
	 * clean up the REO destination rings (mirrors the error unwind
	 * order of qwz_core_start()).
	 *
	 * Skip the QMI firmware-stop handshake when the device has
	 * crashed (ATH12K_FLAG_CRASH_FLUSH); the firmware would not
	 * respond to it.
	 */
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
		qwz_qmi_firmware_stop(sc);

	sc->ops.stop(sc);	/* bus-specific stop hook */
	qwz_wmi_detach(sc);
	qwz_dp_pdev_reo_cleanup(sc);
}
19050 
void
qwz_core_pdev_destroy(struct qwz_softc *sc)
{
	/* Undo qwz_core_pdev_create(): release per-pdev DP state. */
	qwz_dp_pdev_free(sc);
}
19056 
/*
 * Create the per-pdev (physical device) state: allocate the DP pdev
 * and register the radio with net80211.  The #if 0 sections are
 * thermal/spectral support carried over from Linux ath12k and not
 * (yet) implemented in this driver.
 *
 * Returns 0 on success or a non-zero error code; on failure all
 * resources acquired here are released again.
 */
int
qwz_core_pdev_create(struct qwz_softc *sc)
{
	int ret;

	ret = qwz_dp_pdev_alloc(sc);
	if (ret) {
		printf("%s: failed to attach DP pdev: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwz_mac_register(sc);
	if (ret) {
		printf("%s: failed register the radio with mac80211: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_dp_pdev_free;
	}
#if 0

	ret = ath12k_thermal_register(ab);
	if (ret) {
		ath12k_err(ab, "could not register thermal device: %d\n",
			   ret);
		goto err_mac_unregister;
	}

	ret = ath12k_spectral_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init spectral %d\n", ret);
		goto err_thermal_unregister;
	}
#endif
	return 0;
#if 0
err_thermal_unregister:
	ath12k_thermal_unregister(ab);
err_mac_unregister:
	ath12k_mac_unregister(ab);
#endif
err_dp_pdev_free:
	qwz_dp_pdev_free(sc);
#if 0
err_pdev_debug:
	ath12k_debugfs_pdev_destroy(ab);
#endif
	return ret;
}
19105 
/*
 * Full core teardown, the inverse of qwz_core_init() plus the state
 * built up by qwz_core_qmi_firmware_ready().  Runs at splnet() to
 * block network interrupts during the teardown sequence; the order
 * of the calls below matters (IRQs off first, power down last-ish).
 */
void
qwz_core_deinit(struct qwz_softc *sc)
{
	struct ath12k_hal *hal = &sc->hal;
	int s = splnet();

#ifdef notyet
	mutex_lock(&ab->core_lock);
#endif
	sc->ops.irq_disable(sc);

	qwz_core_stop(sc);
	qwz_core_pdev_destroy(sc);
#ifdef notyet
	mutex_unlock(&ab->core_lock);
#endif
	sc->ops.power_down(sc);
#if 0
	ath12k_mac_destroy(ab);
	ath12k_debugfs_soc_destroy(ab);
#endif
	qwz_dp_free(sc);
#if 0
	ath12k_reg_free(ab);
#endif
	qwz_qmi_deinit_service(sc);

	/* Shadow registers must be re-configured on the next init. */
	hal->num_shadow_reg_configured = 0;

	splx(s);
}
19137 
19138 int
19139 qwz_core_qmi_firmware_ready(struct qwz_softc *sc)
19140 {
19141 	int ret;
19142 
19143 	ret = qwz_core_start_firmware(sc, sc->fw_mode);
19144 	if (ret) {
19145 		printf("%s: failed to start firmware: %d\n",
19146 		    sc->sc_dev.dv_xname, ret);
19147 		return ret;
19148 	}
19149 
19150 	ret = qwz_ce_init_pipes(sc);
19151 	if (ret) {
19152 		printf("%s: failed to initialize CE: %d\n",
19153 		    sc->sc_dev.dv_xname, ret);
19154 		goto err_firmware_stop;
19155 	}
19156 
19157 	ret = qwz_dp_alloc(sc);
19158 	if (ret) {
19159 		printf("%s: failed to init DP: %d\n",
19160 		    sc->sc_dev.dv_xname, ret);
19161 		goto err_firmware_stop;
19162 	}
19163 
19164 	switch (sc->crypto_mode) {
19165 	case ATH12K_CRYPT_MODE_SW:
19166 		set_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
19167 		set_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags);
19168 		break;
19169 	case ATH12K_CRYPT_MODE_HW:
19170 		clear_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
19171 		clear_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags);
19172 		break;
19173 	default:
19174 		printf("%s: invalid crypto_mode: %d\n",
19175 		    sc->sc_dev.dv_xname, sc->crypto_mode);
19176 		return EINVAL;
19177 	}
19178 
19179 	if (sc->frame_mode == ATH12K_HW_TXRX_RAW)
19180 		set_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags);
19181 #if 0
19182 	mutex_lock(&ab->core_lock);
19183 #endif
19184 	ret = qwz_core_start(sc);
19185 	if (ret) {
19186 		printf("%s: failed to start core: %d\n",
19187 		    sc->sc_dev.dv_xname, ret);
19188 		goto err_dp_free;
19189 	}
19190 
19191 	if (!sc->attached) {
19192 		printf("%s: %s fw 0x%x address %s\n", sc->sc_dev.dv_xname,
19193 		    sc->hw_params.name, sc->qmi_target.fw_version,
19194 		    ether_sprintf(sc->mac_addr));
19195 	}
19196 
19197 	ret = qwz_core_pdev_create(sc);
19198 	if (ret) {
19199 		printf("%s: failed to create pdev core: %d\n",
19200 		    sc->sc_dev.dv_xname, ret);
19201 		goto err_core_stop;
19202 	}
19203 
19204 #if 0 /* TODO: Is this in the right spot for OpenBSD? */
19205 	sc->ops.irq_enable(sc);
19206 #endif
19207 
19208 #if 0
19209 	mutex_unlock(&ab->core_lock);
19210 #endif
19211 
19212 	return 0;
19213 err_core_stop:
19214 	qwz_core_stop(sc);
19215 #if 0
19216 	ath12k_mac_destroy(ab);
19217 #endif
19218 err_dp_free:
19219 	qwz_dp_free(sc);
19220 #if 0
19221 	mutex_unlock(&ab->core_lock);
19222 #endif
19223 err_firmware_stop:
19224 	qwz_qmi_firmware_stop(sc);
19225 
19226 	return ret;
19227 }
19228 
19229 void
19230 qwz_qmi_fw_ready(struct qwz_softc *sc)
19231 {
19232 	int ret = 0;
19233 
19234 	clear_bit(ATH12K_FLAG_QMI_FAIL, sc->sc_flags);
19235 
19236 	clear_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags);
19237 	clear_bit(ATH12K_FLAG_RECOVERY, sc->sc_flags);
19238 	ret = qwz_core_qmi_firmware_ready(sc);
19239 	if (ret) {
19240 		set_bit(ATH12K_FLAG_QMI_FAIL, sc->sc_flags);
19241 		return;
19242 	}
19243 }
19244 
19245 int
19246 qwz_qmi_event_server_arrive(struct qwz_softc *sc)
19247 {
19248 	int ret;
19249 
19250 	sc->fw_ready = 0;
19251 	sc->expect_fwmem_req = 1;
19252 
19253 	ret = qwz_qmi_phy_cap_send(sc);
19254 	if (ret < 0) {
19255 		printf("%s: failed to send qmi phy cap: %d\n",
19256 		    sc->sc_dev.dv_xname, ret);
19257 		sc->expect_fwmem_req = 0;
19258 		return ret;
19259 	}
19260 
19261 	ret = qwz_qmi_fw_ind_register_send(sc);
19262 	if (ret < 0) {
19263 		printf("%s: failed to send qmi firmware indication: %d\n",
19264 		    sc->sc_dev.dv_xname, ret);
19265 		sc->expect_fwmem_req = 0;
19266 		return ret;
19267 	}
19268 
19269 	ret = qwz_qmi_host_cap_send(sc);
19270 	if (ret < 0) {
19271 		printf("%s: failed to send qmi host cap: %d\n",
19272 		    sc->sc_dev.dv_xname, ret);
19273 		sc->expect_fwmem_req = 0;
19274 		return ret;
19275 	}
19276 
19277 	ret = qwz_qmi_mem_seg_send(sc);
19278 	if (ret == EBUSY)
19279 		ret = qwz_qmi_mem_seg_send(sc);
19280 	sc->expect_fwmem_req = 0;
19281 	if (ret) {
19282 		printf("%s: failed to send qmi memory segments: %d\n",
19283 		    sc->sc_dev.dv_xname, ret);
19284 		return ret;
19285 	}
19286 
19287 	ret = qwz_qmi_event_load_bdf(sc);
19288 	if (ret < 0) {
19289 		printf("%s: qmi failed to download BDF:%d\n",
19290 		    sc->sc_dev.dv_xname, ret);
19291 		return ret;
19292 	}
19293 
19294 	ret = qwz_qmi_wlanfw_m3_info_send(sc);
19295 	if (ret) {
19296 		printf("%s: qmi m3 info send failed:%d\n",
19297 		    sc->sc_dev.dv_xname, ret);
19298 		return ret;
19299 	}
19300 
19301 	while (!sc->fw_ready) {
19302 		ret = tsleep_nsec(&sc->fw_ready, 0, "qwzfwrdy",
19303 		    SEC_TO_NSEC(10));
19304 		if (ret) {
19305 			printf("%s: fw ready timeout\n", sc->sc_dev.dv_xname);
19306 			return -1;
19307 		}
19308 	}
19309 
19310 	qwz_qmi_fw_ready(sc);
19311 	return 0;
19312 }
19313 
19314 int
19315 qwz_core_init(struct qwz_softc *sc)
19316 {
19317 	int error;
19318 
19319 	error = qwz_qmi_init_service(sc);
19320 	if (error) {
19321 		printf("%s: failed to initialize qmi :%d\n",
19322 		    sc->sc_dev.dv_xname, error);
19323 		return error;
19324 	}
19325 
19326 	error = sc->ops.power_up(sc);
19327 	if (error) {
19328 		printf("%s: failed to power up :%d\n",
19329 		    sc->sc_dev.dv_xname, error);
19330 		qwz_qmi_deinit_service(sc);
19331 	}
19332 
19333 	return error;
19334 }
19335 
19336 int
19337 qwz_init_hw_params(struct qwz_softc *sc)
19338 {
19339 	const struct ath12k_hw_params *hw_params = NULL;
19340 	int i;
19341 
19342 	for (i = 0; i < nitems(ath12k_hw_params); i++) {
19343 		hw_params = &ath12k_hw_params[i];
19344 
19345 		if (hw_params->hw_rev == sc->sc_hw_rev)
19346 			break;
19347 	}
19348 
19349 	if (i == nitems(ath12k_hw_params)) {
19350 		printf("%s: unsupported hardware version: 0x%x\n",
19351 		    sc->sc_dev.dv_xname, sc->sc_hw_rev);
19352 		return EINVAL;
19353 	}
19354 
19355 	sc->hw_params = *hw_params;
19356 
19357 	DPRINTF("%s: %s\n", sc->sc_dev.dv_xname, sc->hw_params.name);
19358 
19359 	return 0;
19360 }
19361 
/*
 * Template describing every SRNG (shared ring) type the HAL supports.
 * Copied and then patched with chip-specific register offsets by the
 * per-chip create_srng_config hook (see
 * qwz_hal_srng_create_config_wcn7850()).  entry_size and max_size are
 * expressed in 32-bit words, hence the ">> 2" on byte sizes.
 */
static const struct hal_srng_config hw_srng_config_templ[] = {
	/* TODO: max_rings can populated by querying HW capabilities */
	[HAL_REO_DST] = {
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
		.max_rings = 8,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_REO_EXCEPTION] = {
		/* Designating REO2SW0 ring as exception ring.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_REO_REINJECT] = {
		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_REO_CMD] = {
		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_64_hdr) +
		    sizeof(struct hal_reo_get_queue_stats)) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_REO_STATUS] = {
		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_64_hdr) +
		    sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_TCL_DATA] = {
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
		.max_rings = 6,
		.entry_size = sizeof(struct hal_tcl_data_cmd) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_TCL_CMD] = {
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_TCL_STATUS] = {
		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
		    sizeof(struct hal_tcl_status_ring)) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_CE_SRC] = {
		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
		.max_rings = 16,
		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_CE_DST] = {
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
		.max_rings = 16,
		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		/* NOTE(review): DIR_SRC for a "DST" ring matches Linux
		 * ath12k; the host produces free buffers on this ring. */
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_CE_DST_STATUS] = {
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
		.max_rings = 16,
		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_WBM_IDLE_LINK] = {
		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_SW2WBM_RELEASE] = {
		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE,
		.max_rings = 2,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_WBM2SW_RELEASE] = {
		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
		.max_rings = 8,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_UMAC,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_RXDMA_BUF] = {
		.start_ring_id = HAL_SRNG_SW2RXDMA_BUF0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_DMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
	},
	[HAL_RXDMA_DST] = {
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
		/* Zeroed here; chips that use this ring patch it in
		 * their create_srng_config hook. */
		.max_rings = 0,
		.entry_size = 0,
		.mac_type = ATH12K_HAL_SRNG_PMAC,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
	},
	[HAL_RXDMA_MONITOR_BUF] = {
		.start_ring_id = HAL_SRNG_SW2RXMON_BUF0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_PMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
	},
	/* Unused ring types; left zero-initialized. */
	[HAL_RXDMA_MONITOR_STATUS] = { 0, },
	[HAL_RXDMA_MONITOR_DESC] = { 0, },
	[HAL_RXDMA_DIR_BUF] = {
		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
		.max_rings = 2,
		.entry_size = 8 >> 2, /* TODO: Define the struct */
		.mac_type = ATH12K_HAL_SRNG_PMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
	},
	[HAL_PPE2TCL] = {
		.start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_PMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_PPE_RELEASE] = {
		.start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_PMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	[HAL_TX_MONITOR_BUF] = {
		.start_ring_id = HAL_SRNG_SW2TXMON_BUF0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
		.mac_type = ATH12K_HAL_SRNG_PMAC,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
	},
	[HAL_RXDMA_MONITOR_DST] = {
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
		.mac_type = ATH12K_HAL_SRNG_PMAC,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
	},
	[HAL_TX_MONITOR_DST] = {
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
		.mac_type = ATH12K_HAL_SRNG_PMAC,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
	}
};
19557 
19558 int
19559 qwz_hal_srng_create_config_wcn7850(struct qwz_softc *sc)
19560 {
19561 	struct ath12k_hal *hal = &sc->hal;
19562 	struct hal_srng_config *s;
19563 
19564 	hal->srng_config = malloc(sizeof(hw_srng_config_templ),
19565 	    M_DEVBUF, M_NOWAIT | M_ZERO);
19566 	if (!hal->srng_config)
19567 		return ENOMEM;
19568 
19569 	memcpy(hal->srng_config, hw_srng_config_templ,
19570 	    sizeof(hw_srng_config_templ));
19571 
19572 	s = &hal->srng_config[HAL_REO_DST];
19573 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(sc);
19574 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
19575 	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(sc) - HAL_REO1_RING_BASE_LSB(sc);
19576 	s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
19577 
19578 	s = &hal->srng_config[HAL_REO_EXCEPTION];
19579 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(sc);
19580 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
19581 
19582 	s = &hal->srng_config[HAL_REO_REINJECT];
19583 	s->max_rings = 1;
19584 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(sc);
19585 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
19586 
19587 	s = &hal->srng_config[HAL_REO_CMD];
19588 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(sc);
19589 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
19590 
19591 	s = &hal->srng_config[HAL_REO_STATUS];
19592 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(sc);
19593 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
19594 
19595 	s = &hal->srng_config[HAL_TCL_DATA];
19596 	s->max_rings = 5;
19597 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
19598 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
19599 	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
19600 	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
19601 
19602 	s = &hal->srng_config[HAL_TCL_CMD];
19603 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(sc);
19604 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
19605 
19606 	s = &hal->srng_config[HAL_TCL_STATUS];
19607 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(sc);
19608 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
19609 
19610 	s = &hal->srng_config[HAL_CE_SRC];
19611 	s->max_rings = 12;
19612 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
19613 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
19614 	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
19615 		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
19616 	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
19617 		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
19618 
19619 	s = &hal->srng_config[HAL_CE_DST];
19620 	s->max_rings = 12;
19621 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
19622 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
19623 	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
19624 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
19625 	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
19626 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
19627 
19628 	s = &hal->srng_config[HAL_CE_DST_STATUS];
19629 	s->max_rings = 12;
19630 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
19631 		HAL_CE_DST_STATUS_RING_BASE_LSB;
19632 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
19633 	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
19634 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
19635 	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
19636 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
19637 
19638 	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
19639 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(sc);
19640 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
19641 
19642 	s = &hal->srng_config[HAL_SW2WBM_RELEASE];
19643 	s->max_rings = 1;
19644 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
19645 		HAL_WBM_SW_RELEASE_RING_BASE_LSB(sc);
19646 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
19647 
19648 	s = &hal->srng_config[HAL_WBM2SW_RELEASE];
19649 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
19650 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
19651 	s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(sc) -
19652 		HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
19653 	s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
19654 
19655 	s = &hal->srng_config[HAL_RXDMA_BUF];
19656 	s->max_rings = 2;
19657 	s->mac_type = ATH12K_HAL_SRNG_PMAC;
19658 
19659 	s = &hal->srng_config[HAL_RXDMA_DST];
19660 	s->max_rings = 1;
19661 	s->entry_size = sizeof(struct hal_reo_entrance_ring) >> 2;
19662 
19663 	/* below rings are not used */
19664 	s = &hal->srng_config[HAL_RXDMA_DIR_BUF];
19665 	s->max_rings = 0;
19666 
19667 	s = &hal->srng_config[HAL_PPE2TCL];
19668 	s->max_rings = 0;
19669 
19670 	s = &hal->srng_config[HAL_PPE_RELEASE];
19671 	s->max_rings = 0;
19672 
19673 	s = &hal->srng_config[HAL_TX_MONITOR_BUF];
19674 	s->max_rings = 0;
19675 
19676 	s = &hal->srng_config[HAL_TX_MONITOR_DST];
19677 	s->max_rings = 0;
19678 
19679 	s = &hal->srng_config[HAL_PPE2TCL];
19680 	s->max_rings = 0;
19681 
19682 	return 0;
19683 }
19684 
19685 int
19686 qwz_hal_srng_get_ring_id(struct qwz_softc *sc,
19687     enum hal_ring_type type, int ring_num, int mac_id)
19688 {
19689 	struct hal_srng_config *srng_config = &sc->hal.srng_config[type];
19690 	int ring_id;
19691 
19692 	if (ring_num >= srng_config->max_rings) {
19693 		printf("%s: invalid ring number :%d\n", __func__, ring_num);
19694 		return -1;
19695 	}
19696 
19697 	ring_id = srng_config->start_ring_id + ring_num;
19698 	if (srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
19699 		ring_id += mac_id * HAL_SRNG_RINGS_PER_PMAC;
19700 
19701 	if (ring_id >= HAL_SRNG_RING_ID_MAX) {
19702 		printf("%s: invalid ring ID :%d\n", __func__, ring_id);
19703 		return -1;
19704 	}
19705 
19706 	return ring_id;
19707 }
19708 
19709 void
19710 qwz_hal_srng_update_hp_tp_addr(struct qwz_softc *sc, int shadow_cfg_idx,
19711     enum hal_ring_type ring_type, int ring_num)
19712 {
19713 	struct hal_srng *srng;
19714 	struct ath12k_hal *hal = &sc->hal;
19715 	int ring_id;
19716 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
19717 
19718 	ring_id = qwz_hal_srng_get_ring_id(sc, ring_type, ring_num, 0);
19719 	if (ring_id < 0)
19720 		return;
19721 
19722 	srng = &hal->srng_list[ring_id];
19723 
19724 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
19725 		srng->u.dst_ring.tp_addr = (uint32_t *)(
19726 		    HAL_SHADOW_REG(shadow_cfg_idx) +
19727 		    (unsigned long)sc->mem);
19728 	else
19729 		srng->u.src_ring.hp_addr = (uint32_t *)(
19730 		    HAL_SHADOW_REG(shadow_cfg_idx) +
19731 		    (unsigned long)sc->mem);
19732 }
19733 
/*
 * Re-sync a source ring's shadow head pointer with the device.
 * qwz_hal_srng_access_end() performs the actual pointer write; it is
 * only invoked when the ring has pending entries (cached HP differs
 * from the hardware tail pointer).
 */
void
qwz_hal_srng_shadow_update_hp_tp(struct qwz_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	/* Update the shadow HP if the ring isn't empty. */
	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
		qwz_hal_srng_access_end(sc, srng);
}
19745 
19746 int
19747 qwz_hal_srng_update_shadow_config(struct qwz_softc *sc,
19748     enum hal_ring_type ring_type, int ring_num)
19749 {
19750 	struct ath12k_hal *hal = &sc->hal;
19751 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
19752 	int shadow_cfg_idx = hal->num_shadow_reg_configured;
19753 	uint32_t target_reg;
19754 
19755 	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
19756 		return EINVAL;
19757 
19758 	hal->num_shadow_reg_configured++;
19759 
19760 	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
19761 	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
19762 		ring_num;
19763 
19764 	/* For destination ring, shadow the TP */
19765 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
19766 		target_reg += HAL_OFFSET_FROM_HP_TO_TP;
19767 
19768 	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
19769 
19770 	/* update hp/tp addr to hal structure*/
19771 	qwz_hal_srng_update_hp_tp_addr(sc, shadow_cfg_idx, ring_type, ring_num);
19772 
19773 	DPRINTF("%s: target_reg %x, shadow reg 0x%x shadow_idx 0x%x, "
19774 	    "ring_type %d, ring num %d\n", __func__, target_reg,
19775 	     HAL_SHADOW_REG(shadow_cfg_idx), shadow_cfg_idx,
19776 	     ring_type, ring_num);
19777 
19778 	return 0;
19779 }
19780 
19781 void
19782 qwz_hal_srng_shadow_config(struct qwz_softc *sc)
19783 {
19784 	struct ath12k_hal *hal = &sc->hal;
19785 	int ring_type, ring_num;
19786 	struct hal_srng_config *cfg;
19787 
19788 	/* update all the non-CE srngs. */
19789 	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
19790 		cfg = &hal->srng_config[ring_type];
19791 
19792 		if (ring_type == HAL_CE_SRC ||
19793 		    ring_type == HAL_CE_DST ||
19794 			ring_type == HAL_CE_DST_STATUS)
19795 			continue;
19796 
19797 		if (cfg->mac_type == ATH12K_HAL_SRNG_DMAC ||
19798 		    cfg->mac_type == ATH12K_HAL_SRNG_PMAC)
19799 			continue;
19800 
19801 		for (ring_num = 0; ring_num < cfg->max_rings; ring_num++) {
19802 			qwz_hal_srng_update_shadow_config(sc, ring_type,
19803 			    ring_num);
19804 		}
19805 	}
19806 }
19807 
19808 void
19809 qwz_hal_srng_get_shadow_config(struct qwz_softc *sc, uint32_t **cfg,
19810     uint32_t *len)
19811 {
19812 	struct ath12k_hal *hal = &sc->hal;
19813 
19814 	*len = hal->num_shadow_reg_configured;
19815 	*cfg = hal->shadow_reg_addr;
19816 }
19817 
19818 int
19819 qwz_hal_alloc_cont_rdp(struct qwz_softc *sc)
19820 {
19821 	struct ath12k_hal *hal = &sc->hal;
19822 	size_t size = sizeof(uint32_t) * HAL_SRNG_RING_ID_MAX;
19823 
19824 	if (hal->rdpmem == NULL) {
19825 		hal->rdpmem = qwz_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
19826 		if (hal->rdpmem == NULL) {
19827 			printf("%s: could not allocate RDP DMA memory\n",
19828 			    sc->sc_dev.dv_xname);
19829 			return ENOMEM;
19830 
19831 		}
19832 	}
19833 
19834 	hal->rdp.vaddr = QWZ_DMA_KVA(hal->rdpmem);
19835 	hal->rdp.paddr = QWZ_DMA_DVA(hal->rdpmem);
19836 	return 0;
19837 }
19838 
19839 void
19840 qwz_hal_free_cont_rdp(struct qwz_softc *sc)
19841 {
19842 	struct ath12k_hal *hal = &sc->hal;
19843 
19844 	if (hal->rdpmem == NULL)
19845 		return;
19846 
19847 	hal->rdp.vaddr = NULL;
19848 	hal->rdp.paddr = 0L;
19849 	qwz_dmamem_free(sc->sc_dmat, hal->rdpmem);
19850 	hal->rdpmem = NULL;
19851 }
19852 
19853 int
19854 qwz_hal_alloc_cont_wrp(struct qwz_softc *sc)
19855 {
19856 	struct ath12k_hal *hal = &sc->hal;
19857 	size_t size = sizeof(uint32_t) *
19858 	    (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
19859 
19860 	if (hal->wrpmem == NULL) {
19861 		hal->wrpmem = qwz_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
19862 		if (hal->wrpmem == NULL) {
19863 			printf("%s: could not allocate WDP DMA memory\n",
19864 			    sc->sc_dev.dv_xname);
19865 			return ENOMEM;
19866 
19867 		}
19868 	}
19869 
19870 	hal->wrp.vaddr = QWZ_DMA_KVA(hal->wrpmem);
19871 	hal->wrp.paddr = QWZ_DMA_DVA(hal->wrpmem);
19872 	return 0;
19873 }
19874 
19875 void
19876 qwz_hal_free_cont_wrp(struct qwz_softc *sc)
19877 {
19878 	struct ath12k_hal *hal = &sc->hal;
19879 
19880 	if (hal->wrpmem == NULL)
19881 		return;
19882 
19883 	hal->wrp.vaddr = NULL;
19884 	hal->wrp.paddr = 0L;
19885 	qwz_dmamem_free(sc->sc_dmat, hal->wrpmem);
19886 	hal->wrpmem = NULL;
19887 }
19888 
19889 int
19890 qwz_hal_srng_init(struct qwz_softc *sc)
19891 {
19892 	struct ath12k_hal *hal = &sc->hal;
19893 	int ret;
19894 
19895 	memset(hal, 0, sizeof(*hal));
19896 
19897 	ret = sc->hw_params.hal_ops->create_srng_config(sc);
19898 	if (ret)
19899 		goto err_hal;
19900 
19901 	ret = qwz_hal_alloc_cont_rdp(sc);
19902 	if (ret)
19903 		goto err_hal;
19904 
19905 	ret = qwz_hal_alloc_cont_wrp(sc);
19906 	if (ret)
19907 		goto err_free_cont_rdp;
19908 
19909 #ifdef notyet
19910 	qwz_hal_register_srng_key(sc);
19911 #endif
19912 
19913 	return 0;
19914 err_free_cont_rdp:
19915 	qwz_hal_free_cont_rdp(sc);
19916 
19917 err_hal:
19918 	if (hal->srng_config)
19919 		free(hal->srng_config, M_DEVBUF, 0);
19920 	return ret;
19921 }
19922 
/*
 * Program a destination ring's hardware registers: optional MSI
 * target, ring base address and size, ring ID, interrupt moderation
 * thresholds and the host head-pointer shadow location, then reset
 * head/tail to empty and finally enable the ring.  The register write
 * order follows the hardware programming sequence and must not be
 * changed.
 */
void
qwz_hal_srng_dst_hw_init(struct qwz_softc *sc, struct hal_srng *srng)
{
	struct ath12k_hal *hal = &sc->hal;
	uint32_t val;
	uint64_t hp_addr;
	uint32_t reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	/* MSI target address/data, if this ring interrupts via MSI. */
	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(sc),
		    srng->msi_addr);

		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
		    ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
		    HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(sc), val);

		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(sc),
		    srng->msi_data);
	}

	/* Ring base address (LSB at reg_base, MSB plus size below). */
	sc->ops.write32(sc, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
	    ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
	    (srng->entry_size * srng->num_entries));
	sc->ops.write32(sc,
	    reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(sc), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
	    FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_ID_OFFSET(sc), val);

	/* interrupt setup */
	/* Timer threshold is programmed in units of 8 usecs (>> 3). */
	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
	    (srng->intr_timer_thres_us >> 3));

	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
	    (srng->intr_batch_cntr_thres_entries * srng->entry_size));

	sc->ops.write32(sc,
	    reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(sc), val);

	/* Tell hardware where the host's head-pointer shadow lives. */
	hp_addr = hal->rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr -
	    (unsigned long)hal->rdp.vaddr);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(sc),
	    hp_addr & HAL_ADDR_LSB_REG_MASK);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(sc),
	    hp_addr >> HAL_ADDR_MSB_REG_SHIFT);

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	sc->ops.write32(sc, reg_base, 0);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
	*srng->u.dst_ring.hp_addr = 0;

	/* Misc flags (byte swapping) and ring enable — last step. */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_REO1_RING_MISC_MSI_SWAP;
	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;

	sc->ops.write32(sc, reg_base + HAL_REO1_RING_MISC_OFFSET(sc), val);
}
19997 
/*
 * Program the hardware registers of a source (SW producer) SRNG ring:
 * optional MSI routing, ring base address and size, interrupt mitigation
 * thresholds, the tail-pointer update address, and finally the MISC
 * register including the ring-enable bit.
 */
void
qwz_hal_srng_src_hw_init(struct qwz_softc *sc, struct hal_srng *srng)
{
	struct ath12k_hal *hal = &sc->hal;
	uint32_t val;
	uint64_t tp_addr;
	uint32_t reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	/* MSI address/data for per-ring interrupt delivery, if enabled. */
	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(sc),
		    srng->msi_addr);

		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
		    ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(sc),
		    val);

		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_DATA_OFFSET(sc),
		    srng->msi_data);
	}

	/* Ring base: LSB in the first R0 register, MSB plus size below. */
	sc->ops.write32(sc, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
	    ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
	    (srng->entry_size * srng->num_entries));
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);

	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_ID_OFFSET(sc), val);

	/* interrupt setup */
	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
	 * unit of 8 usecs instead of 1 usec (as required by v1).
	 */
	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
	    srng->intr_timer_thres_us);

	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
	    (srng->intr_batch_cntr_thres_entries * srng->entry_size));

	sc->ops.write32(sc,
	    reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(sc), val);

	/* Low-threshold interrupt (fires when few free entries remain). */
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
		    srng->u.src_ring.low_threshold);
	}
	sc->ops.write32(sc,
	    reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(sc), val);

	/*
	 * Tell the hardware where to mirror the ring's tail pointer:
	 * the offset of tp_addr within the host RDP area, relative to
	 * the RDP DMA base address.
	 */
	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		tp_addr = hal->rdp.paddr +
		    ((unsigned long)srng->u.src_ring.tp_addr -
		    (unsigned long)hal->rdp.vaddr);
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(sc),
		    tp_addr & HAL_ADDR_LSB_REG_MASK);
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(sc),
		    tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	sc->ops.write32(sc, reg_base, 0);
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
	*srng->u.src_ring.tp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_TCL1_RING_MISC_MSI_SWAP;

	/* Loop count is not used for SRC rings */
	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;

	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;

	if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK)
		val |= HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE;

	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_MISC_OFFSET(sc), val);
}
20094 
20095 void
20096 qwz_hal_srng_hw_init(struct qwz_softc *sc, struct hal_srng *srng)
20097 {
20098 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
20099 		qwz_hal_srng_src_hw_init(sc, srng);
20100 	else
20101 		qwz_hal_srng_dst_hw_init(sc, srng);
20102 }
20103 
20104 void
20105 qwz_hal_ce_dst_setup(struct qwz_softc *sc, struct hal_srng *srng, int ring_num)
20106 {
20107 	struct hal_srng_config *srng_config = &sc->hal.srng_config[HAL_CE_DST];
20108 	uint32_t addr;
20109 	uint32_t val;
20110 
20111 	addr = HAL_CE_DST_RING_CTRL +
20112 	    srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
20113 	    ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
20114 
20115 	val = sc->ops.read32(sc, addr);
20116 	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
20117 	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
20118 	    srng->u.dst_ring.max_buffer_length);
20119 	sc->ops.write32(sc, addr, val);
20120 }
20121 
/*
 * Fill in a CE source ring descriptor: buffer DMA address (split into
 * LSB word and MSB bits), length, byte-swap flag, and the caller's id
 * in the meta-info word.
 *
 * NOTE(review): unlike qwz_hal_ce_dst_set_desc() below, the fields here
 * are stored without htole32() conversion — harmless on little-endian
 * hosts, but verify descriptor endianness expectations before relying
 * on this on a big-endian machine.
 */
void
qwz_hal_ce_src_set_desc(void *buf, uint64_t paddr, uint32_t len, uint32_t id,
    uint8_t byte_swap_data)
{
	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info = FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
	    (paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
	    byte_swap_data) |
	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
}
20137 
20138 void
20139 qwz_hal_ce_dst_set_desc(void *buf, uint64_t paddr)
20140 {
20141 	struct hal_ce_srng_dest_desc *desc =
20142 	    (struct hal_ce_srng_dest_desc *)buf;
20143 
20144 	desc->buffer_addr_low = htole32(paddr & HAL_ADDR_LSB_REG_MASK);
20145 	desc->buffer_addr_info = htole32(FIELD_PREP(
20146 	    HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
20147 	    (paddr >> HAL_ADDR_MSB_REG_SHIFT)));
20148 }
20149 
/*
 * Extract the received-frame length from a CE destination status
 * descriptor and clear the length field so a stale value cannot be
 * read again for this slot.
 *
 * NOTE(review): the FIELD_GET reads desc->flags without le32toh()
 * while the clearing mask is byte-swapped with htole32() — equivalent
 * on little-endian hosts only; confirm before use on big-endian.
 */
uint32_t
qwz_hal_ce_dst_status_get_length(void *buf)
{
	struct hal_ce_srng_dst_status_desc *desc =
		(struct hal_ce_srng_dst_status_desc *)buf;
	uint32_t len;

	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
	desc->flags &= ~htole32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);

	return len;
}
20162 
20163 
/*
 * Set up one SRNG ring of the given type: populate the software ring
 * state from 'params', compute per-ring register addresses, zero the
 * ring memory, and wire up the head/tail pointer mirror locations.
 * UMAC rings are also programmed into hardware here; LMAC rings are
 * left for firmware to manage via shared memory.
 *
 * Returns the allocated ring id (>= 0) on success, or a negative
 * value from qwz_hal_srng_get_ring_id() on failure.
 */
int
qwz_hal_srng_setup(struct qwz_softc *sc, enum hal_ring_type type,
    int ring_num, int mac_id, struct hal_srng_params *params)
{
	struct ath12k_hal *hal = &sc->hal;
	struct hal_srng_config *srng_config = &sc->hal.srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	uint32_t lmac_idx;
	int i;
	uint32_t reg_base;

	ring_id = qwz_hal_srng_get_ring_id(sc, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	/* Copy caller-provided parameters into the ring state. */
	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
	    params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->msi_addr = params->msi_addr;
	srng->msi_data = params->msi_data;
	srng->initialized = 1;
#if 0
	spin_lock_init(&srng->lock);
	lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
#endif

	/* Per-ring register addresses for each register group. */
	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
		    (ring_num * srng_config->reg_size[i]);
	}

	/* Clear the ring memory; entry_size is in 32-bit words. */
	memset(srng->ring_base_vaddr, 0,
	    (srng->entry_size * srng->num_entries) << 2);

#if 0 /* Not needed on OpenBSD? We do swapping in sofware... */
	/* TODO: Add comments on these swap configurations */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
			       HAL_SRNG_FLAGS_RING_PTR_SWAP;
#endif
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		/* Tail pointer is mirrored by HW into the RDP area. */
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
		    srng->entry_size;
		if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
			if (!sc->hw_params.supports_shadow_regs)
				srng->u.src_ring.hp_addr =
				    (uint32_t *)((unsigned long)sc->mem +
				    reg_base);
			else
				DPRINTF("%s: type %d ring_num %d reg_base "
				    "0x%x shadow 0x%lx\n",
				    sc->sc_dev.dv_xname, type, ring_num, reg_base,
				   (unsigned long)srng->u.src_ring.hp_addr -
				   (unsigned long)sc->mem);
		} else {
			/* LMAC ring: HP updates go through shared memory. */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
			    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
			if (!sc->hw_params.supports_shadow_regs)
				srng->u.dst_ring.tp_addr =
				    (uint32_t *)((unsigned long)sc->mem +
				    reg_base + (HAL_REO1_RING_TP -
				    HAL_REO1_RING_HP));
			else
				DPRINTF("%s: type %d ring_num %d target_reg "
				    "0x%x shadow 0x%lx\n", sc->sc_dev.dv_xname,
				    type, ring_num,
				    reg_base + (HAL_REO1_RING_TP -
				    HAL_REO1_RING_HP),
				    (unsigned long)srng->u.dst_ring.tp_addr -
				    (unsigned long)sc->mem);
		} else {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
			    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		}
	}

	/* Only UMAC rings are programmed into hardware by the host. */
	if (srng_config->mac_type != ATH12K_HAL_SRNG_UMAC)
		return ring_id;

	qwz_hal_srng_hw_init(sc, srng);

	if (type == HAL_CE_DST) {
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		qwz_hal_ce_dst_setup(sc, srng, ring_num);
	}

	return ring_id;
}
20291 
20292 size_t
20293 qwz_hal_ce_get_desc_size(enum hal_ce_desc type)
20294 {
20295 	switch (type) {
20296 	case HAL_CE_DESC_SRC:
20297 		return sizeof(struct hal_ce_srng_src_desc);
20298 	case HAL_CE_DESC_DST:
20299 		return sizeof(struct hal_ce_srng_dest_desc);
20300 	case HAL_CE_DESC_DST_STATUS:
20301 		return sizeof(struct hal_ce_srng_dst_status_desc);
20302 	}
20303 
20304 	return 0;
20305 }
20306 
/*
 * Reap the next completed transmit descriptor from a CE source ring.
 * Returns the qwz_tx_data context of the completed send, with the
 * pipe's software index advanced past it, or NULL if nothing has
 * completed.
 */
struct qwz_tx_data *
qwz_ce_completed_send_next(struct qwz_ce_pipe *pipe)
{
	struct qwz_softc *sc = pipe->sc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	void *ctx;
	struct qwz_tx_data *tx_data = NULL;
	uint32_t *desc;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);

	/* No descriptor to reap means nothing completed yet. */
	desc = qwz_hal_srng_src_reap_next(sc, srng);
	if (!desc)
		goto err_unlock;

	ctx = pipe->src_ring->per_transfer_context[sw_index];
	tx_data = (struct qwz_tx_data *)ctx;

	/* Advance past the reaped entry (mask handles ring wrap). */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
#ifdef notyet
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return tx_data;
}
20347 
20348 int
20349 qwz_ce_send_done_cb(struct qwz_ce_pipe *pipe)
20350 {
20351 	struct qwz_softc *sc = pipe->sc;
20352 	struct qwz_tx_data *tx_data;
20353 	int ret = 0;
20354 
20355 	while ((tx_data = qwz_ce_completed_send_next(pipe)) != NULL) {
20356 		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
20357 		m_freem(tx_data->m);
20358 		tx_data->m = NULL;
20359 		ret = 1;
20360 	}
20361 
20362 	return ret;
20363 }
20364 
20365 void
20366 qwz_ce_poll_send_completed(struct qwz_softc *sc, uint8_t pipe_id)
20367 {
20368 	struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
20369 
20370 	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
20371 		pipe->send_cb(pipe);
20372 }
20373 
20374 void
20375 qwz_htc_process_credit_report(struct qwz_htc *htc,
20376     const struct ath12k_htc_credit_report *report, int len,
20377     enum ath12k_htc_ep_id eid)
20378 {
20379 	struct qwz_softc *sc = htc->sc;
20380 	struct qwz_htc_ep *ep;
20381 	int i, n_reports;
20382 
20383 	if (len % sizeof(*report))
20384 		printf("%s: Uneven credit report len %d", __func__, len);
20385 
20386 	n_reports = len / sizeof(*report);
20387 #ifdef notyet
20388 	spin_lock_bh(&htc->tx_lock);
20389 #endif
20390 	for (i = 0; i < n_reports; i++, report++) {
20391 		if (report->eid >= ATH12K_HTC_EP_COUNT)
20392 			break;
20393 
20394 		ep = &htc->endpoint[report->eid];
20395 		ep->tx_credits += report->credits;
20396 
20397 		DNPRINTF(QWZ_D_HTC, "%s: ep %d credits got %d total %d\n",
20398 		    __func__, report->eid, report->credits, ep->tx_credits);
20399 
20400 		if (ep->ep_ops.ep_tx_credits) {
20401 #ifdef notyet
20402 			spin_unlock_bh(&htc->tx_lock);
20403 #endif
20404 			ep->ep_ops.ep_tx_credits(sc);
20405 #ifdef notyet
20406 			spin_lock_bh(&htc->tx_lock);
20407 #endif
20408 		}
20409 	}
20410 #ifdef notyet
20411 	spin_unlock_bh(&htc->tx_lock);
20412 #endif
20413 }
20414 
/*
 * Walk the record list in an HTC frame trailer.  Each record is a
 * header (id, length) followed by payload; credit reports are handed
 * to qwz_htc_process_credit_report(), unknown records are logged and
 * skipped.  Returns 0 on success or EINVAL on a malformed trailer.
 */
int
qwz_htc_process_trailer(struct qwz_htc *htc, uint8_t *buffer, int length,
    enum ath12k_htc_ep_id src_eid)
{
	struct qwz_softc *sc = htc->sc;
	int status = 0;
	struct ath12k_htc_record *record;
	size_t len;

	while (length > 0) {
		record = (struct ath12k_htc_record *)buffer;

		/* Need at least a full record header. */
		if (length < sizeof(record->hdr)) {
			status = EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			printf("%s: Invalid record length: %d\n",
			    __func__, record->hdr.len);
			status = EINVAL;
			break;
		}

		if (sc->hw_params.credit_flow) {
			switch (record->hdr.id) {
			case ATH12K_HTC_RECORD_CREDITS:
				len = sizeof(struct ath12k_htc_credit_report);
				if (record->hdr.len < len) {
					printf("%s: Credit report too long\n",
					    __func__);
					status = EINVAL;
					break;
				}
				qwz_htc_process_credit_report(htc,
				    record->credit_report,
				    record->hdr.len, src_eid);
				break;
			default:
				printf("%s: unhandled record: id:%d length:%d\n",
				    __func__, record->hdr.id, record->hdr.len);
				break;
			}
		}

		/* A switch-case above may have flagged a parse error. */
		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	return status;
}
20471 
/*
 * Handle an HTC suspend-complete (ack != 0) or nack-suspend (ack == 0)
 * message from the target.  Suspend support is not implemented yet;
 * the event is only reported.
 */
void
qwz_htc_suspend_complete(struct qwz_softc *sc, int ack)
{
	printf("%s: not implemented\n", __func__);
}
20477 
/*
 * Handle a wakeup-from-suspend notification from the target.
 * No action is required; the event is merely logged.
 */
void
qwz_htc_wakeup_from_suspend(struct qwz_softc *sc)
{
	/* TODO This is really all the Linux driver does here... silence it? */
	printf("%s: wakeup from suspend received\n", __func__);
}
20484 
/*
 * HTC receive path.  Validate the HTC header of an incoming frame,
 * process an optional trailer (credit reports etc.), then either
 * handle endpoint-0 control messages inline or hand the payload to
 * the endpoint's rx completion callback.
 *
 * Ownership: this function consumes 'm' on every path.  It is freed
 * here unless it was successfully passed to ep_rx_complete(), in
 * which case 'm' is set to NULL before the final m_freem().
 */
void
qwz_htc_rx_completion_handler(struct qwz_softc *sc, struct mbuf *m)
{
	struct qwz_htc *htc = &sc->htc;
	struct ath12k_htc_hdr *hdr;
	struct qwz_htc_ep *ep;
	uint16_t payload_len;
	uint32_t message_id, trailer_len = 0;
	uint8_t eid;
	int trailer_present;

	/* Make the HTC header contiguous for mtod(). */
	m = m_pullup(m, sizeof(struct ath12k_htc_hdr));
	if (m == NULL) {
		printf("%s: m_pullup failed\n", __func__);
		m = NULL; /* already freed */
		goto out;
	}

	hdr = mtod(m, struct ath12k_htc_hdr *);

	eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);

	if (eid >= ATH12K_HTC_EP_COUNT) {
		printf("%s: HTC Rx: invalid eid %d\n", __func__, eid);
		printf("%s: HTC info: 0x%x\n", __func__, hdr->htc_info);
		printf("%s: CTRL info: 0x%x\n", __func__, hdr->ctrl_info);
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);

	if (payload_len + sizeof(*hdr) > ATH12K_HTC_MAX_LEN) {
		printf("%s: HTC rx frame too long, len: %zu\n", __func__,
		    payload_len + sizeof(*hdr));
		goto out;
	}

	if (m->m_pkthdr.len < payload_len) {
		printf("%s: HTC Rx: insufficient length, got %d, "
		    "expected %d\n", __func__, m->m_pkthdr.len, payload_len);
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
	    ATH12K_HTC_FLAG_TRAILER_PRESENT;

	DNPRINTF(QWZ_D_HTC, "%s: rx ep %d mbuf %p trailer_present %d\n",
	    __func__, eid, m, trailer_present);

	if (trailer_present) {
		int status = 0;
		uint8_t *trailer;
		int trim;
		size_t min_len;

		trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
		min_len = sizeof(struct ath12k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			printf("%s: Invalid trailer length: %d\n", __func__,
			    trailer_len);
			goto out;
		}

		/* The trailer sits at the end of the payload. */
		trailer = (uint8_t *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = qwz_htc_process_trailer(htc, trailer,
		    trailer_len, eid);
		if (status)
			goto out;

		/* Strip the trailer from the tail of the mbuf chain. */
		trim = trailer_len;
		m_adj(m, -trim);
	}

	if (trailer_len >= payload_len)
		/* zero length packet with trailer data, just drop these */
		goto out;

	/* Strip the HTC header; only the payload remains. */
	m_adj(m, sizeof(*hdr));

	if (eid == ATH12K_HTC_EP_0) {
		struct ath12k_htc_msg *msg;

		msg = mtod(m, struct ath12k_htc_msg *);
		message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id);

		DNPRINTF(QWZ_D_HTC, "%s: rx ep %d mbuf %p message_id %d\n",
		    __func__, eid, m, message_id);

		switch (message_id) {
		case ATH12K_HTC_MSG_READY_ID:
		case ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
			/* handle HTC control message */
			if (sc->ctl_resp) {
				/* this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				printf("%s: HTC rx ctrl still processing\n",
				    __func__);
				goto out;
			}

			htc->control_resp_len =
			    MIN(m->m_pkthdr.len, ATH12K_HTC_MAX_CTRL_MSG_LEN);

			m_copydata(m, 0, htc->control_resp_len,
			    htc->control_resp_buffer);

			/* Wake the thread sleeping on this response. */
			sc->ctl_resp = 1;
			wakeup(&sc->ctl_resp);
			break;
		case ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			qwz_htc_suspend_complete(sc, 1);
			break;
		case ATH12K_HTC_MSG_NACK_SUSPEND:
			qwz_htc_suspend_complete(sc, 0);
			break;
		case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
			qwz_htc_wakeup_from_suspend(sc);
			break;
		default:
			printf("%s: ignoring unsolicited htc ep0 event %ld\n",
			    __func__,
			    FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id));
			break;
		}
		goto out;
	}

	DNPRINTF(QWZ_D_HTC, "%s: rx ep %d mbuf %p\n", __func__, eid, m);

	ep->ep_ops.ep_rx_complete(sc, m);

	/* poll tx completion for interrupt disabled CE's */
	qwz_ce_poll_send_completed(sc, ep->ul_pipe_id);

	/* mbuf is now owned by the rx completion handler */
	m = NULL;
out:
	m_freem(m);
}
20633 
20634 void
20635 qwz_ce_free_ring(struct qwz_softc *sc, struct qwz_ce_ring *ring)
20636 {
20637 	bus_size_t dsize;
20638 	size_t size;
20639 
20640 	if (ring == NULL)
20641 		return;
20642 
20643 	if (ring->base_addr) {
20644 		dsize = ring->nentries * ring->desc_sz;
20645 		bus_dmamem_unmap(sc->sc_dmat, ring->base_addr, dsize);
20646 	}
20647 	if (ring->nsegs)
20648 		bus_dmamem_free(sc->sc_dmat, &ring->dsegs, ring->nsegs);
20649 	if (ring->dmap)
20650 		bus_dmamap_destroy(sc->sc_dmat, ring->dmap);
20651 
20652 	size = sizeof(*ring) + (ring->nentries *
20653 	    sizeof(ring->per_transfer_context[0]));
20654 	free(ring, M_DEVBUF, size);
20655 }
20656 
/*
 * Return non-zero if the given CE pipe requires the shadow register
 * timer workaround.  Only CE pipe 4 is affected.
 */
static inline int
qwz_ce_need_shadow_fix(int ce_id)
{
	return (ce_id == 4 ? 1 : 0);
}
20663 
20664 void
20665 qwz_ce_stop_shadow_timers(struct qwz_softc *sc)
20666 {
20667 	int i;
20668 
20669 	if (!sc->hw_params.supports_shadow_regs)
20670 		return;
20671 
20672 	for (i = 0; i < sc->hw_params.ce_count; i++)
20673 		if (qwz_ce_need_shadow_fix(i))
20674 			qwz_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
20675 }
20676 
20677 void
20678 qwz_ce_free_pipes(struct qwz_softc *sc)
20679 {
20680 	struct qwz_ce_pipe *pipe;
20681 	int i;
20682 
20683 	for (i = 0; i < sc->hw_params.ce_count; i++) {
20684 		pipe = &sc->ce.ce_pipe[i];
20685 		if (pipe->src_ring) {
20686 			qwz_ce_free_ring(sc, pipe->src_ring);
20687 			pipe->src_ring = NULL;
20688 		}
20689 
20690 		if (pipe->dest_ring) {
20691 			qwz_ce_free_ring(sc, pipe->dest_ring);
20692 			pipe->dest_ring = NULL;
20693 		}
20694 
20695 		if (pipe->status_ring) {
20696 			qwz_ce_free_ring(sc, pipe->status_ring);
20697 			pipe->status_ring = NULL;
20698 		}
20699 	}
20700 }
20701 
20702 int
20703 qwz_ce_alloc_src_ring_transfer_contexts(struct qwz_ce_pipe *pipe,
20704     const struct ce_attr *attr)
20705 {
20706 	struct qwz_softc *sc = pipe->sc;
20707 	struct qwz_tx_data *txdata;
20708 	size_t size;
20709 	int ret, i;
20710 
20711 	/* Allocate an array of qwz_tx_data structures. */
20712 	txdata = mallocarray(pipe->src_ring->nentries, sizeof(*txdata),
20713 	    M_DEVBUF, M_NOWAIT | M_ZERO);
20714 	if (txdata == NULL)
20715 		return ENOMEM;
20716 
20717 	size = sizeof(*txdata) * pipe->src_ring->nentries;
20718 
20719 	/* Create per-transfer DMA maps. */
20720 	for (i = 0; i < pipe->src_ring->nentries; i++) {
20721 		struct qwz_tx_data *ctx = &txdata[i];
20722 		ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
20723 		    attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
20724 		if (ret) {
20725 			int j;
20726 			for (j = 0; j < i; j++) {
20727 				struct qwz_tx_data *ctx = &txdata[j];
20728 				bus_dmamap_destroy(sc->sc_dmat, ctx->map);
20729 			}
20730 			free(txdata, M_DEVBUF, size);
20731 			return ret;
20732 		}
20733 		pipe->src_ring->per_transfer_context[i] = ctx;
20734 	}
20735 
20736 	return 0;
20737 }
20738 
20739 int
20740 qwz_ce_alloc_dest_ring_transfer_contexts(struct qwz_ce_pipe *pipe,
20741     const struct ce_attr *attr)
20742 {
20743 	struct qwz_softc *sc = pipe->sc;
20744 	struct qwz_rx_data *rxdata;
20745 	size_t size;
20746 	int ret, i;
20747 
20748 	/* Allocate an array of qwz_rx_data structures. */
20749 	rxdata = mallocarray(pipe->dest_ring->nentries, sizeof(*rxdata),
20750 	    M_DEVBUF, M_NOWAIT | M_ZERO);
20751 	if (rxdata == NULL)
20752 		return ENOMEM;
20753 
20754 	size = sizeof(*rxdata) * pipe->dest_ring->nentries;
20755 
20756 	/* Create per-transfer DMA maps. */
20757 	for (i = 0; i < pipe->dest_ring->nentries; i++) {
20758 		struct qwz_rx_data *ctx = &rxdata[i];
20759 		ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
20760 		    attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
20761 		if (ret) {
20762 			int j;
20763 			for (j = 0; j < i; j++) {
20764 				struct qwz_rx_data *ctx = &rxdata[j];
20765 				bus_dmamap_destroy(sc->sc_dmat, ctx->map);
20766 			}
20767 			free(rxdata, M_DEVBUF, size);
20768 			return ret;
20769 		}
20770 		pipe->dest_ring->per_transfer_context[i] = ctx;
20771 	}
20772 
20773 	return 0;
20774 }
20775 
/*
 * Allocate a CE ring structure together with its descriptor DMA
 * memory: one contiguous, CE_DESC_RING_ALIGN-aligned, zeroed segment
 * of nentries * desc_sz bytes, mapped coherently and loaded into a
 * DMA map.  On any intermediate failure, qwz_ce_free_ring() tears
 * down whatever was set up so far.  Returns the ring or NULL.
 */
struct qwz_ce_ring *
qwz_ce_alloc_ring(struct qwz_softc *sc, int nentries, size_t desc_sz)
{
	struct qwz_ce_ring *ce_ring;
	size_t size = sizeof(*ce_ring) +
	    (nentries * sizeof(ce_ring->per_transfer_context[0]));
	bus_size_t dsize;

	/* The per-transfer context array is allocated inline. */
	ce_ring = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ce_ring == NULL)
		return NULL;

	/* nentries is a power of two, so a simple mask handles wrap. */
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;
	ce_ring->desc_sz = desc_sz;

	dsize = nentries * desc_sz;
	if (bus_dmamap_create(sc->sc_dmat, dsize, 1, dsize, 0, BUS_DMA_NOWAIT,
	    &ce_ring->dmap)) {
		free(ce_ring, M_DEVBUF, size);
		return NULL;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, dsize, CE_DESC_RING_ALIGN, 0,
	    &ce_ring->dsegs, 1, &ce_ring->nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		qwz_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamem_map(sc->sc_dmat, &ce_ring->dsegs, 1, dsize,
	    &ce_ring->base_addr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		qwz_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamap_load(sc->sc_dmat, ce_ring->dmap, ce_ring->base_addr,
	    dsize, NULL, BUS_DMA_NOWAIT)) {
		qwz_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	return ce_ring;
}
20820 
/*
 * Allocate the rings of one CE pipe according to its host CE
 * configuration: a source ring (with tx contexts) if src_nentries is
 * set, and a destination ring (with rx contexts) plus a destination
 * status ring if dest_nentries is set.  Entry counts are rounded up
 * to powers of two.  On failure the caller is expected to free any
 * partially allocated rings (see qwz_ce_alloc_pipes()).
 * Returns 0 on success or ENOMEM.
 */
int
qwz_ce_alloc_pipe(struct qwz_softc *sc, int ce_id)
{
	struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &sc->hw_params.host_ce_config[ce_id];
	struct qwz_ce_ring *ring;
	int nentries;
	size_t desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = qwz_ce_send_done_cb;
		nentries = qwz_roundup_pow_of_two(attr->src_nentries);
		desc_sz = qwz_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = qwz_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->src_ring = ring;
		if (qwz_ce_alloc_src_ring_transfer_contexts(pipe, attr))
			return ENOMEM;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = qwz_roundup_pow_of_two(attr->dest_nentries);
		desc_sz = qwz_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = qwz_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->dest_ring = ring;
		if (qwz_ce_alloc_dest_ring_transfer_contexts(pipe, attr))
			return ENOMEM;

		/* Status ring has the same entry count as the dest ring. */
		desc_sz = qwz_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = qwz_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->status_ring = ring;
	}

	return 0;
}
20864 
20865 void
20866 qwz_ce_rx_pipe_cleanup(struct qwz_ce_pipe *pipe)
20867 {
20868 	struct qwz_softc *sc = pipe->sc;
20869 	struct qwz_ce_ring *ring = pipe->dest_ring;
20870 	void *ctx;
20871 	struct qwz_rx_data *rx_data;
20872 	int i;
20873 
20874 	if (!(ring && pipe->buf_sz))
20875 		return;
20876 
20877 	for (i = 0; i < ring->nentries; i++) {
20878 		ctx = ring->per_transfer_context[i];
20879 		if (!ctx)
20880 			continue;
20881 
20882 		rx_data = (struct qwz_rx_data *)ctx;
20883 		if (rx_data->m) {
20884 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
20885 			m_freem(rx_data->m);
20886 			rx_data->m = NULL;
20887 		}
20888 	}
20889 }
20890 
20891 void
20892 qwz_ce_shadow_config(struct qwz_softc *sc)
20893 {
20894 	int i;
20895 
20896 	for (i = 0; i < sc->hw_params.ce_count; i++) {
20897 		if (sc->hw_params.host_ce_config[i].src_nentries)
20898 			qwz_hal_srng_update_shadow_config(sc, HAL_CE_SRC, i);
20899 
20900 		if (sc->hw_params.host_ce_config[i].dest_nentries) {
20901 			qwz_hal_srng_update_shadow_config(sc, HAL_CE_DST, i);
20902 
20903 			qwz_hal_srng_update_shadow_config(sc,
20904 			    HAL_CE_DST_STATUS, i);
20905 		}
20906 	}
20907 }
20908 
20909 void
20910 qwz_ce_get_shadow_config(struct qwz_softc *sc, uint32_t **shadow_cfg,
20911     uint32_t *shadow_cfg_len)
20912 {
20913 	if (!sc->hw_params.supports_shadow_regs)
20914 		return;
20915 
20916 	qwz_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);
20917 
20918 	/* shadow is already configured */
20919 	if (*shadow_cfg_len)
20920 		return;
20921 
20922 	/* shadow isn't configured yet, configure now.
20923 	 * non-CE srngs are configured firstly, then
20924 	 * all CE srngs.
20925 	 */
20926 	qwz_hal_srng_shadow_config(sc);
20927 	qwz_ce_shadow_config(sc);
20928 
20929 	/* get the shadow configuration */
20930 	qwz_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);
20931 }
20932 
20933 void
20934 qwz_ce_cleanup_pipes(struct qwz_softc *sc)
20935 {
20936 	struct qwz_ce_pipe *pipe;
20937 	int pipe_num;
20938 
20939 	for (pipe_num = 0; pipe_num < sc->hw_params.ce_count; pipe_num++) {
20940 		pipe = &sc->ce.ce_pipe[pipe_num];
20941 		qwz_ce_rx_pipe_cleanup(pipe);
20942 
20943 		/* Cleanup any src CE's which have interrupts disabled */
20944 		qwz_ce_poll_send_completed(sc, pipe_num);
20945 	}
20946 }
20947 
20948 int
20949 qwz_ce_alloc_pipes(struct qwz_softc *sc)
20950 {
20951 	struct qwz_ce_pipe *pipe;
20952 	int i;
20953 	int ret;
20954 	const struct ce_attr *attr;
20955 
20956 	for (i = 0; i < sc->hw_params.ce_count; i++) {
20957 		attr = &sc->hw_params.host_ce_config[i];
20958 		pipe = &sc->ce.ce_pipe[i];
20959 		pipe->pipe_num = i;
20960 		pipe->sc = sc;
20961 		pipe->buf_sz = attr->src_sz_max;
20962 
20963 		ret = qwz_ce_alloc_pipe(sc, i);
20964 		if (ret) {
20965 			/* Free any partial successful allocation */
20966 			qwz_ce_free_pipes(sc);
20967 			return ret;
20968 		}
20969 	}
20970 
20971 	return 0;
20972 }
20973 
/*
 * Map a CE id to its MSI vector index.  The mapping is the identity
 * on this hardware.
 */
void
qwz_get_ce_msi_idx(struct qwz_softc *sc, uint32_t ce_id,
    uint32_t *msi_data_idx)
{
	*msi_data_idx = ce_id;
}
20980 
/*
 * Fill the MSI fields of an SRNG parameter block for a CE ring:
 * 64-bit MSI address, per-ring MSI data value (the CE's index within
 * the "CE" MSI vector range), and the MSI-interrupt flag.  Leaves
 * the parameters untouched if no MSI vectors are assigned to CEs.
 */
void
qwz_ce_srng_msi_ring_params_setup(struct qwz_softc *sc, uint32_t ce_id,
    struct hal_srng_params *ring_params)
{
	uint32_t msi_data_start = 0;
	uint32_t msi_data_count = 1, msi_data_idx;
	uint32_t msi_irq_start = 0;
	uint32_t addr_lo;
	uint32_t addr_hi;
	int ret;

	ret = sc->ops.get_user_msi_vector(sc, "CE",
	    &msi_data_count, &msi_data_start, &msi_irq_start);
	if (ret)
		return;

	qwz_get_msi_address(sc, &addr_lo, &addr_hi);
	qwz_get_ce_msi_idx(sc, ce_id, &msi_data_idx);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
	/* Wrap into the vector range in case CEs outnumber vectors. */
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
21005 
21006 int
21007 qwz_ce_init_ring(struct qwz_softc *sc, struct qwz_ce_ring *ce_ring,
21008     int ce_id, enum hal_ring_type type)
21009 {
21010 	struct hal_srng_params params = { 0 };
21011 	int ret;
21012 
21013 	params.ring_base_paddr = ce_ring->dmap->dm_segs[0].ds_addr;
21014 	params.ring_base_vaddr = (uint32_t *)ce_ring->base_addr;
21015 	params.num_entries = ce_ring->nentries;
21016 
21017 	if (!(CE_ATTR_DIS_INTR & sc->hw_params.host_ce_config[ce_id].flags))
21018 		qwz_ce_srng_msi_ring_params_setup(sc, ce_id, &params);
21019 
21020 	switch (type) {
21021 	case HAL_CE_SRC:
21022 		if (!(CE_ATTR_DIS_INTR &
21023 		    sc->hw_params.host_ce_config[ce_id].flags))
21024 			params.intr_batch_cntr_thres_entries = 1;
21025 		break;
21026 	case HAL_CE_DST:
21027 		params.max_buffer_len =
21028 		    sc->hw_params.host_ce_config[ce_id].src_sz_max;
21029 		if (!(sc->hw_params.host_ce_config[ce_id].flags &
21030 		    CE_ATTR_DIS_INTR)) {
21031 			params.intr_timer_thres_us = 1024;
21032 			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
21033 			params.low_threshold = ce_ring->nentries - 3;
21034 		}
21035 		break;
21036 	case HAL_CE_DST_STATUS:
21037 		if (!(sc->hw_params.host_ce_config[ce_id].flags &
21038 		    CE_ATTR_DIS_INTR)) {
21039 			params.intr_batch_cntr_thres_entries = 1;
21040 			params.intr_timer_thres_us = 0x1000;
21041 		}
21042 		break;
21043 	default:
21044 		printf("%s: Invalid CE ring type %d\n",
21045 		    sc->sc_dev.dv_xname, type);
21046 		return EINVAL;
21047 	}
21048 
21049 	/* TODO: Init other params needed by HAL to init the ring */
21050 
21051 	ret = qwz_hal_srng_setup(sc, type, ce_id, 0, &params);
21052 	if (ret < 0) {
21053 		printf("%s: failed to setup srng: ring_id %d ce_id %d\n",
21054 		    sc->sc_dev.dv_xname, ret, ce_id);
21055 		return ret;
21056 	}
21057 
21058 	ce_ring->hal_ring_id = ret;
21059 
21060 	if (sc->hw_params.supports_shadow_regs &&
21061 	    qwz_ce_need_shadow_fix(ce_id))
21062 		qwz_dp_shadow_init_timer(sc, &sc->ce.hp_timer[ce_id],
21063 		    ATH12K_SHADOW_CTRL_TIMER_INTERVAL, ce_ring->hal_ring_id);
21064 
21065 	return 0;
21066 }
21067 
21068 int
21069 qwz_ce_init_pipes(struct qwz_softc *sc)
21070 {
21071 	struct qwz_ce_pipe *pipe;
21072 	int i;
21073 	int ret;
21074 
21075 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21076 		pipe = &sc->ce.ce_pipe[i];
21077 
21078 		if (pipe->src_ring) {
21079 			ret = qwz_ce_init_ring(sc, pipe->src_ring, i,
21080 			    HAL_CE_SRC);
21081 			if (ret) {
21082 				printf("%s: failed to init src ring: %d\n",
21083 				    sc->sc_dev.dv_xname, ret);
21084 				/* Should we clear any partial init */
21085 				return ret;
21086 			}
21087 
21088 			pipe->src_ring->write_index = 0;
21089 			pipe->src_ring->sw_index = 0;
21090 		}
21091 
21092 		if (pipe->dest_ring) {
21093 			ret = qwz_ce_init_ring(sc, pipe->dest_ring, i,
21094 			    HAL_CE_DST);
21095 			if (ret) {
21096 				printf("%s: failed to init dest ring: %d\n",
21097 				    sc->sc_dev.dv_xname, ret);
21098 				/* Should we clear any partial init */
21099 				return ret;
21100 			}
21101 
21102 			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
21103 			    pipe->dest_ring->nentries - 2 : 0;
21104 
21105 			pipe->dest_ring->write_index = 0;
21106 			pipe->dest_ring->sw_index = 0;
21107 		}
21108 
21109 		if (pipe->status_ring) {
21110 			ret = qwz_ce_init_ring(sc, pipe->status_ring, i,
21111 			    HAL_CE_DST_STATUS);
21112 			if (ret) {
21113 				printf("%s: failed to init status ring: %d\n",
21114 				    sc->sc_dev.dv_xname, ret);
21115 				/* Should we clear any partial init */
21116 				return ret;
21117 			}
21118 
21119 			pipe->status_ring->write_index = 0;
21120 			pipe->status_ring->sw_index = 0;
21121 		}
21122 	}
21123 
21124 	return 0;
21125 }
21126 
21127 int
21128 qwz_hal_srng_src_num_free(struct qwz_softc *sc, struct hal_srng *srng,
21129     int sync_hw_ptr)
21130 {
21131 	uint32_t tp, hp;
21132 #ifdef notyet
21133 	lockdep_assert_held(&srng->lock);
21134 #endif
21135 	hp = srng->u.src_ring.hp;
21136 
21137 	if (sync_hw_ptr) {
21138 		tp = *srng->u.src_ring.tp_addr;
21139 		srng->u.src_ring.cached_tp = tp;
21140 	} else {
21141 		tp = srng->u.src_ring.cached_tp;
21142 	}
21143 
21144 	if (tp > hp)
21145 		return ((tp - hp) / srng->entry_size) - 1;
21146 	else
21147 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
21148 }
21149 
/*
 * Hand one DMA-loaded rx buffer to a copy engine's destination ring.
 *
 * 'map' must already be loaded with the buffer to post. Returns 0 on
 * success, or ENOSPC when the ring has no free descriptor slot.
 * On success the ring write index advances and rx_buf_needed drops.
 */
int
qwz_ce_rx_buf_enqueue_pipe(struct qwz_ce_pipe *pipe, bus_dmamap_t map)
{
	struct qwz_softc *sc = pipe->sc;
	struct qwz_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	uint32_t *desc;
	uint64_t paddr;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ab->ce.ce_lock);
#endif
	write_index = ring->write_index;

	srng = &sc->hal.srng_list[ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_POSTREAD);

	/* Need at least one free descriptor slot in the ring. */
	if (qwz_hal_srng_src_num_free(sc, srng, 0) < 1) {
		ret = ENOSPC;
		goto exit;
	}

	desc = qwz_hal_srng_src_get_next_entry(sc, srng);
	if (!desc) {
		ret = ENOSPC;
		goto exit;
	}

	/* Point the descriptor at the buffer's DMA address. */
	paddr = map->dm_segs[0].ds_addr;
	qwz_hal_ce_dst_set_desc(desc, paddr);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	/* Commit the updated ring state to hardware, even on failure. */
	qwz_hal_srng_access_end(sc, srng);
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
21203 
21204 int
21205 qwz_ce_rx_post_pipe(struct qwz_ce_pipe *pipe)
21206 {
21207 	struct qwz_softc *sc = pipe->sc;
21208 	int ret = 0;
21209 	unsigned int idx;
21210 	void *ctx;
21211 	struct qwz_rx_data *rx_data;
21212 	struct mbuf *m;
21213 
21214 	if (!pipe->dest_ring)
21215 		return 0;
21216 
21217 #ifdef notyet
21218 	spin_lock_bh(&ab->ce.ce_lock);
21219 #endif
21220 	while (pipe->rx_buf_needed) {
21221 		m = m_gethdr(M_DONTWAIT, MT_DATA);
21222 		if (m == NULL) {
21223 			ret = ENOBUFS;
21224 			goto done;
21225 		}
21226 
21227 		if (pipe->buf_sz <= MCLBYTES)
21228 			MCLGET(m, M_DONTWAIT);
21229 		else
21230 			MCLGETL(m, M_DONTWAIT, pipe->buf_sz);
21231 		if ((m->m_flags & M_EXT) == 0) {
21232 			ret = ENOBUFS;
21233 			goto done;
21234 		}
21235 
21236 		idx = pipe->dest_ring->write_index;
21237 		ctx = pipe->dest_ring->per_transfer_context[idx];
21238 		rx_data = (struct qwz_rx_data *)ctx;
21239 
21240 		m->m_len = m->m_pkthdr.len = pipe->buf_sz;
21241 		ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
21242 		    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
21243 		if (ret) {
21244 			printf("%s: can't map mbuf (error %d)\n",
21245 			    sc->sc_dev.dv_xname, ret);
21246 			m_freem(m);
21247 			goto done;
21248 		}
21249 
21250 		ret = qwz_ce_rx_buf_enqueue_pipe(pipe, rx_data->map);
21251 		if (ret) {
21252 			printf("%s: failed to enqueue rx buf: %d\n",
21253 			    sc->sc_dev.dv_xname, ret);
21254 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
21255 			m_freem(m);
21256 			break;
21257 		} else
21258 			rx_data->m = m;
21259 	}
21260 
21261 done:
21262 #ifdef notyet
21263 	spin_unlock_bh(&ab->ce.ce_lock);
21264 #endif
21265 	return ret;
21266 }
21267 
21268 void
21269 qwz_ce_rx_post_buf(struct qwz_softc *sc)
21270 {
21271 	struct qwz_ce_pipe *pipe;
21272 	int i;
21273 	int ret;
21274 
21275 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21276 		pipe = &sc->ce.ce_pipe[i];
21277 		ret = qwz_ce_rx_post_pipe(pipe);
21278 		if (ret) {
21279 			if (ret == ENOSPC)
21280 				continue;
21281 
21282 			printf("%s: failed to post rx buf to pipe: %d err: %d\n",
21283 			    sc->sc_dev.dv_xname, i, ret);
21284 #ifdef notyet
21285 			mod_timer(&ab->rx_replenish_retry,
21286 				  jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
21287 #endif
21288 
21289 			return;
21290 		}
21291 	}
21292 }
21293 
/*
 * Pop the next completed receive descriptor from a pipe's status ring.
 *
 * On success returns 0, stores the transfer context (the qwz_rx_data
 * slot for the completed buffer) in *per_transfer_contextp if non-NULL,
 * and the received byte count in *nbytes. Returns EIO when no completed
 * descriptor is available or the descriptor reports zero length.
 */
int
qwz_ce_completed_recv_next(struct qwz_ce_pipe *pipe,
    void **per_transfer_contextp, int *nbytes)
{
	struct qwz_softc *sc = pipe->sc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	uint32_t *desc;
	int ret = 0;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	/* Completions are reported on the status ring, not dest ring. */
	srng = &sc->hal.srng_list[pipe->status_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);

	desc = qwz_hal_srng_dst_get_next_entry(sc, srng);
	if (!desc) {
		ret = EIO;
		goto err;
	}

	*nbytes = qwz_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = EIO;
		goto err;
	}

	if (per_transfer_contextp) {
		*per_transfer_contextp =
		    pipe->dest_ring->per_transfer_context[sw_index];
	}

	/* Consume the dest ring entry matching this completion. */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	/* The freed slot must be refilled with a fresh rx buffer. */
	pipe->rx_buf_needed++;
err:
	qwz_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return ret;
}
21345 
21346 int
21347 qwz_ce_recv_process_cb(struct qwz_ce_pipe *pipe)
21348 {
21349 	struct qwz_softc *sc = pipe->sc;
21350 	struct mbuf *m;
21351 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
21352 	void *transfer_context;
21353 	unsigned int nbytes, max_nbytes;
21354 	int ret = 0, err;
21355 
21356 	while (qwz_ce_completed_recv_next(pipe, &transfer_context,
21357 	    &nbytes) == 0) {
21358 		struct qwz_rx_data *rx_data = transfer_context;
21359 
21360 		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
21361 		m = rx_data->m;
21362 		rx_data->m = NULL;
21363 
21364 		max_nbytes = m->m_pkthdr.len;
21365 		if (max_nbytes < nbytes) {
21366 			printf("%s: received more than expected (nbytes %d, "
21367 			    "max %d)", __func__, nbytes, max_nbytes);
21368 			m_freem(m);
21369 			continue;
21370 		}
21371 		m->m_len = m->m_pkthdr.len = nbytes;
21372 		ml_enqueue(&ml, m);
21373 		ret = 1;
21374 	}
21375 
21376 	while ((m = ml_dequeue(&ml))) {
21377 		DNPRINTF(QWZ_D_CE, "%s: rx ce pipe %d len %d\n", __func__,
21378 		    pipe->pipe_num, m->m_len);
21379 		pipe->recv_cb(sc, m);
21380 	}
21381 
21382 	err = qwz_ce_rx_post_pipe(pipe);
21383 	if (err && err != ENOSPC) {
21384 		printf("%s: failed to post rx buf to pipe: %d err: %d\n",
21385 		    __func__, pipe->pipe_num, err);
21386 #ifdef notyet
21387 		mod_timer(&ab->rx_replenish_retry,
21388 			  jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
21389 #endif
21390 	}
21391 
21392 	return ret;
21393 }
21394 
21395 int
21396 qwz_ce_per_engine_service(struct qwz_softc *sc, uint16_t ce_id)
21397 {
21398 	struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
21399 	int ret = 0;
21400 
21401 	if (pipe->send_cb) {
21402 		if (pipe->send_cb(pipe))
21403 			ret = 1;
21404 	}
21405 
21406 	if (pipe->recv_cb) {
21407 		if (qwz_ce_recv_process_cb(pipe))
21408 			ret = 1;
21409 	}
21410 
21411 	return ret;
21412 }
21413 
/*
 * Post an mbuf for transmission on a copy engine pipe.
 *
 * The caller is expected to have loaded the DMA map stored in the
 * pipe's per_transfer_context slot at the current write index.
 * Returns 0 on success, ESHUTDOWN during crash flush, or ENOBUFS
 * when the source ring is full.
 */
int
qwz_ce_send(struct qwz_softc *sc, struct mbuf *m, uint8_t pipe_id,
    uint16_t transfer_id)
{
	struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	uint32_t *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	uint8_t byte_swap_data = 0;
	int num_used;
	uint64_t paddr;
	void *ctx;
	struct qwz_tx_data *tx_data;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
#ifdef notyet
		spin_lock_bh(&ab->ce.ce_lock);
#endif
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		/* Used entries, accounting for ring wrap-around. */
		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
			    write_index;
#ifdef notyet
		spin_unlock_bh(&ab->ce.ce_lock);
#endif
		if (num_used > ATH12K_CE_USAGE_THRESHOLD)
			qwz_ce_poll_send_completed(sc, pipe->pipe_num);
	}

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);

	if (qwz_hal_srng_src_num_free(sc, srng, 0) < 1) {
		qwz_hal_srng_access_end(sc, srng);
		ret = ENOBUFS;
		goto err_unlock;
	}

	desc = qwz_hal_srng_src_get_next_reaped(sc, srng);
	if (!desc) {
		qwz_hal_srng_access_end(sc, srng);
		ret = ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	/* The tx_data slot at write_index holds the loaded DMA map. */
	ctx = pipe->src_ring->per_transfer_context[write_index];
	tx_data = (struct qwz_tx_data *)ctx;

	paddr = tx_data->map->dm_segs[0].ds_addr;
	qwz_hal_ce_src_set_desc(desc, paddr, m->m_pkthdr.len,
	    transfer_id, byte_swap_data);

	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
	    write_index);

	/* Commit the new head pointer to hardware. */
	qwz_hal_srng_access_end(sc, srng);

err_unlock:
#ifdef notyet
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return ret;
}
21504 
/*
 * Count the number of antenna chains enabled in a chain mask, i.e.
 * the number of set bits.
 */
int
qwz_get_num_chains(uint32_t mask)
{
	int n;

	/* Kernighan's method: each step clears the lowest set bit. */
	for (n = 0; mask != 0; n++)
		mask &= mask - 1;

	return n;
}
21518 
21519 int
21520 qwz_set_antenna(struct qwz_pdev *pdev, uint32_t tx_ant, uint32_t rx_ant)
21521 {
21522 	struct qwz_softc *sc = pdev->sc;
21523 	int ret;
21524 #ifdef notyet
21525 	lockdep_assert_held(&ar->conf_mutex);
21526 #endif
21527 	sc->cfg_tx_chainmask = tx_ant;
21528 	sc->cfg_rx_chainmask = rx_ant;
21529 #if 0
21530 	if (ar->state != ATH12K_STATE_ON &&
21531 	    ar->state != ATH12K_STATE_RESTARTED)
21532 		return 0;
21533 #endif
21534 	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_TX_CHAIN_MASK,
21535 	    tx_ant, pdev->pdev_id);
21536 	if (ret) {
21537 		printf("%s: failed to set tx-chainmask: %d, req 0x%x\n",
21538 		    sc->sc_dev.dv_xname, ret, tx_ant);
21539 		return ret;
21540 	}
21541 
21542 	sc->num_tx_chains = qwz_get_num_chains(tx_ant);
21543 
21544 	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_RX_CHAIN_MASK,
21545 	    rx_ant, pdev->pdev_id);
21546 	if (ret) {
21547 		printf("%s: failed to set rx-chainmask: %d, req 0x%x\n",
21548 		    sc->sc_dev.dv_xname, ret, rx_ant);
21549 		return ret;
21550 	}
21551 
21552 	sc->num_rx_chains = qwz_get_num_chains(rx_ant);
21553 #if 0
21554 	/* Reload HT/VHT/HE capability */
21555 	ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
21556 	ath12k_mac_setup_he_cap(ar, &ar->pdev->cap);
21557 #endif
21558 	return 0;
21559 }
21560 
/*
 * Send the WMI scan channel list for a pdev, built from the channels
 * configured in net80211 (ic_channels). Returns EINVAL if no channels
 * are configured, ENOMEM on allocation failure, or the WMI result.
 */
int
qwz_reg_update_chan_list(struct qwz_softc *sc, uint8_t pdev_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct scan_chan_list_params *params;
	struct ieee80211_channel *channel, *lastc;
	struct channel_param *ch;
	int num_channels = 0;
	size_t params_size;
	int ret;
#if 0
	if (ar->state == ATH12K_STATE_RESTARTING)
		return 0;
#endif
	/* First pass: count configured channels to size the allocation. */
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
		if (channel->ic_flags == 0)
			continue;
		num_channels++;
	}

	if (!num_channels)
		return EINVAL;

	params_size = sizeof(*params) +
	    num_channels * sizeof(*params->ch_param);

	/*
	 * TODO: This is a temporary list for qwz_wmi_send_scan_chan_list_cmd
	 * to loop over. Could that function loop over ic_channels directly?
	 */
	params = malloc(params_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!params)
		return ENOMEM;

	params->pdev_id = pdev_id;
	params->nallchans = num_channels;

	/* Second pass: translate each configured channel to WMI format. */
	ch = params->ch_param;
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
		if (channel->ic_flags == 0)
			continue;
#ifdef notyet
		/* TODO: Set to true/false based on some condition? */
		ch->allow_ht = true;
		ch->allow_vht = true;
		ch->allow_he = true;
#endif
		/* DFS channels: 5 GHz and marked passive. */
		ch->dfs_set = !!(IEEE80211_IS_CHAN_5GHZ(channel) &&
		    (channel->ic_flags & IEEE80211_CHAN_PASSIVE));
		ch->is_chan_passive = !!(channel->ic_flags &
		    IEEE80211_CHAN_PASSIVE);
		ch->is_chan_passive |= ch->dfs_set;
		ch->mhz = ieee80211_ieee2mhz(ieee80211_chan2ieee(ic, channel),
		    channel->ic_flags);
		ch->cfreq1 = ch->mhz;
		ch->minpower = 0;
		ch->maxpower = 40; /* XXX from Linux debug trace */
		ch->maxregpower = ch->maxpower;
		ch->antennamax = 0;

		/* TODO: Use appropriate phymodes */
		if (IEEE80211_IS_CHAN_A(channel))
			ch->phy_mode = MODE_11A;
		else if (IEEE80211_IS_CHAN_G(channel))
			ch->phy_mode = MODE_11G;
		else
			ch->phy_mode = MODE_11B;
#ifdef notyet
		if (channel->band == NL80211_BAND_6GHZ &&
		    cfg80211_channel_is_psc(channel))
			ch->psc_channel = true;
#endif
		DNPRINTF(QWZ_D_WMI, "%s: mac channel freq %d maxpower %d "
		    "regpower %d antenna %d mode %d\n", __func__,
		    ch->mhz, ch->maxpower, ch->maxregpower,
		    ch->antennamax, ch->phy_mode);

		ch++;
		/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
		 * set_agile, reg_class_idx
		 */
	}

	ret = qwz_wmi_send_scan_chan_list_cmd(sc, pdev_id, params);
	free(params, M_DEVBUF, params_size);

	return ret;
}
21651 
/*
 * Default HTT rx TLV filter used when enabling the monitor status
 * path (see qwz_mac_config_mon_status_default()): requests MPDU
 * start and PPDU end TLVs plus management/control/data packets for
 * the filter pass (FP) queue.
 */
static const struct htt_rx_ring_tlv_filter qwz_mac_mon_status_filter_default = {
	.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
	.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
	.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
	.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
	.pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
	    HTT_RX_FP_CTRL_FILTER_FLASG3
};
21662 
21663 int
21664 qwz_mac_register(struct qwz_softc *sc)
21665 {
21666 	/* Initialize channel counters frequency value in hertz */
21667 	sc->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
21668 
21669 	sc->free_vdev_map = (1U << (sc->num_radios * TARGET_NUM_VDEVS(sc))) - 1;
21670 
21671 	if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
21672 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr, sc->mac_addr);
21673 
21674 	return 0;
21675 }
21676 
/*
 * Configure (or clear, when 'enable' is zero) the default monitor
 * status rx filter. The actual filter programming is currently
 * compiled out; see the comment on the #if 0 block below.
 */
int
qwz_mac_config_mon_status_default(struct qwz_softc *sc, int enable)
{
	struct htt_rx_ring_tlv_filter tlv_filter = { 0 };
	int ret = 0;
#if 0
	int i;
	struct dp_rxdma_ring *ring;
#endif

	/* With enable == 0, an all-zero filter is (notionally) applied. */
	if (enable)
		tlv_filter = qwz_mac_mon_status_filter_default;
#if 0 /* mon status info is not useful and the code triggers mbuf corruption */
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ring = &sc->pdev_dp.rx_mon_status_refill_ring[i];
		ret = qwz_dp_tx_htt_rx_filter_setup(sc,
		    ring->refill_buf_ring.ring_id, sc->pdev_dp.mac_id + i,
		    HAL_RXDMA_MONITOR_STATUS, DP_RX_BUFFER_SIZE, &tlv_filter);
		if (ret)
			return ret;
	}

	if (enable && !sc->hw_params.rxdma1_enable) {
		timeout_add_msec(&sc->mon_reap_timer,
		    ATH12K_MON_TIMER_INTERVAL);
	}
#endif
	return ret;
}
21706 
21707 int
21708 qwz_mac_txpower_recalc(struct qwz_softc *sc, struct qwz_pdev *pdev)
21709 {
21710 	struct qwz_vif *arvif;
21711 	int ret, txpower = -1;
21712 	uint32_t param;
21713 	uint32_t min_tx_power = sc->target_caps.hw_min_tx_power;
21714 	uint32_t max_tx_power = sc->target_caps.hw_max_tx_power;
21715 #ifdef notyet
21716 	lockdep_assert_held(&ar->conf_mutex);
21717 #endif
21718 	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
21719 		if (arvif->txpower <= 0)
21720 			continue;
21721 
21722 		if (txpower == -1)
21723 			txpower = arvif->txpower;
21724 		else
21725 			txpower = MIN(txpower, arvif->txpower);
21726 	}
21727 
21728 	if (txpower == -1)
21729 		return 0;
21730 
21731 	/* txpwr is set as 2 units per dBm in FW*/
21732 	txpower = MIN(MAX(min_tx_power, txpower), max_tx_power) * 2;
21733 	DNPRINTF(QWZ_D_MAC, "txpower to set in hw %d\n", txpower / 2);
21734 
21735 	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
21736 		param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
21737 		ret = qwz_wmi_pdev_set_param(sc, param, txpower,
21738 		    pdev->pdev_id);
21739 		if (ret)
21740 			goto fail;
21741 	}
21742 
21743 	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
21744 		param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
21745 		ret = qwz_wmi_pdev_set_param(sc, param, txpower,
21746 		    pdev->pdev_id);
21747 		if (ret)
21748 			goto fail;
21749 	}
21750 
21751 	return 0;
21752 
21753 fail:
21754 	DNPRINTF(QWZ_D_MAC, "%s: failed to recalc txpower limit %d "
21755 	    "using pdev param %d: %d\n", sc->sc_dev.dv_xname, txpower / 2,
21756 	    param, ret);
21757 
21758 	return ret;
21759 }
21760 
/*
 * Bring up a physical device (pdev): program WMI pdev parameters,
 * offload radar detection, request ppdu stats, set antenna masks,
 * push the channel list, and configure the monitor status filter.
 * Marks the pdev active in sc->pdevs_active on success.
 */
int
qwz_mac_op_start(struct qwz_pdev *pdev)
{
	struct qwz_softc *sc = pdev->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	int ret;

	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_PMF_QOS, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable PMF QOS for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable dynamic bw for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/* Only set the probe request OUI when the firmware supports it. */
	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT)) {
		ret = qwz_wmi_scan_prob_req_oui(sc, ic->ic_myaddr,
		    pdev->pdev_id);
		if (ret) {
			printf("%s: failed to set prob req oui for "
			    "pdev %d: %i\n", sc->sc_dev.dv_xname,
			    pdev->pdev_id, ret);
			goto err;
		}
	}

	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_ARP_AC_OVERRIDE, 0,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set ac override for ARP for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwz_wmi_send_dfs_phyerr_offload_enable_cmd(sc, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to offload radar detection for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwz_dp_tx_htt_h2t_ppdu_stats_req(sc, HTT_PPDU_STATS_TAG_DEFAULT,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to req ppdu stats for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_MESH_MCAST_ENABLE, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable MESH MCAST ENABLE for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	qwz_set_antenna(pdev, pdev->cap.tx_chain_mask, pdev->cap.rx_chain_mask);

	/* TODO: Do we need to enable ANI? */

	ret = qwz_reg_update_chan_list(sc, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to update channel list for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/* Reset per-device vdev/peer bookkeeping for a fresh start. */
	sc->num_started_vdevs = 0;
	sc->num_created_vdevs = 0;
	sc->num_peers = 0;
	sc->allocated_vdev_map = 0;

	/* Configure monitor status ring with default rx_filter to get rx status
	 * such as rssi, rx_duration.
	 */
	ret = qwz_mac_config_mon_status_default(sc, 1);
	if (ret) {
		printf("%s: failed to configure monitor status ring "
		    "with default rx_filter: (%d)\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* Configure the hash seed for hash based reo dest ring selection */
	qwz_wmi_pdev_lro_cfg(sc, pdev->pdev_id);

	/* allow device to enter IMPS */
	if (sc->hw_params.idle_ps) {
		ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
		    1, pdev->pdev_id);
		if (ret) {
			printf("%s: failed to enable idle ps: %d\n",
			    sc->sc_dev.dv_xname, ret);
			goto err;
		}
	}
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	sc->pdevs_active |= (1 << pdev->pdev_id);
	return 0;
err:
#ifdef notyet
	ar->state = ATH12K_STATE_OFF;
	mutex_unlock(&ar->conf_mutex);
#endif
	return ret;
}
21878 
21879 int
21880 qwz_mac_setup_vdev_params_mbssid(struct qwz_vif *arvif,
21881     uint32_t *flags, uint32_t *tx_vdev_id)
21882 {
21883 	*tx_vdev_id = 0;
21884 	*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
21885 	return 0;
21886 }
21887 
/*
 * Build the WMI vdev-create parameter block for a new vif: identity
 * (vdev id/type/subtype, pdev), MBSSID settings, and per-band chain
 * counts for each band the pdev supports.
 */
int
qwz_mac_setup_vdev_create_params(struct qwz_vif *arvif, struct qwz_pdev *pdev,
    struct vdev_create_params *params)
{
	struct qwz_softc *sc = arvif->sc;
	int ret;

	params->if_id = arvif->vdev_id;
	params->type = arvif->vdev_type;
	params->subtype = arvif->vdev_subtype;
	params->pdev_id = pdev->pdev_id;
	params->mbssid_flags = 0;
	params->mbssid_tx_vdev_id = 0;

	/*
	 * NOTE(review): this sets up MBSSID params when the firmware does
	 * NOT advertise MBSS_PARAM_IN_VDEV_START_SUPPORT, while
	 * qwz_mac_vdev_start_restart() does so when the service IS
	 * advertised. One of the two polarities looks inverted — confirm
	 * against the upstream Linux ath12k driver.
	 */
	if (!isset(sc->wmi.svc_map,
	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
		ret = qwz_mac_setup_vdev_params_mbssid(arvif,
		    &params->mbssid_flags, &params->mbssid_tx_vdev_id);
		if (ret)
			return ret;
	}

	/* chains[0] is the 2 GHz band, chains[1] the 5 GHz band. */
	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
		params->chains[0].tx = sc->num_tx_chains;
		params->chains[0].rx = sc->num_rx_chains;
	}
	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
		params->chains[1].tx = sc->num_tx_chains;
		params->chains[1].rx = sc->num_rx_chains;
	}
#if 0
	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
	    ar->supports_6ghz) {
		params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
		params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
	}
#endif
	return 0;
}
21927 
21928 int
21929 qwz_mac_op_update_vif_offload(struct qwz_softc *sc, struct qwz_pdev *pdev,
21930     struct qwz_vif *arvif)
21931 {
21932 	uint32_t param_id, param_value;
21933 	int ret;
21934 
21935 	param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
21936 	if (test_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags))
21937 		param_value = ATH12K_HW_TXRX_RAW;
21938 	else
21939 		param_value = ATH12K_HW_TXRX_NATIVE_WIFI;
21940 
21941 	ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
21942 	    param_id, param_value);
21943 	if (ret) {
21944 		printf("%s: failed to set vdev %d tx encap mode: %d\n",
21945 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
21946 		return ret;
21947 	}
21948 
21949 	param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
21950 	if (test_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags))
21951 		param_value = ATH12K_HW_TXRX_RAW;
21952 	else
21953 		param_value = ATH12K_HW_TXRX_NATIVE_WIFI;
21954 
21955 	ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
21956 	    param_id, param_value);
21957 	if (ret) {
21958 		printf("%s: failed to set vdev %d rx decap mode: %d\n",
21959 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
21960 		return ret;
21961 	}
21962 
21963 	return 0;
21964 }
21965 
/*
 * Tear down a firmware vdev. Not implemented yet; logs a message so
 * the missing functionality is visible at runtime.
 */
void
qwz_mac_vdev_delete(struct qwz_softc *sc, struct qwz_vif *arvif)
{
	printf("%s: not implemented\n", __func__);
}
21971 
21972 int
21973 qwz_mac_vdev_setup_sync(struct qwz_softc *sc)
21974 {
21975 	int ret;
21976 
21977 #ifdef notyet
21978 	lockdep_assert_held(&ar->conf_mutex);
21979 #endif
21980 	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
21981 		return ESHUTDOWN;
21982 
21983 	while (!sc->vdev_setup_done) {
21984 		ret = tsleep_nsec(&sc->vdev_setup_done, 0, "qwzvdev",
21985 		    SEC_TO_NSEC(1));
21986 		if (ret) {
21987 			printf("%s: vdev start timeout\n",
21988 			    sc->sc_dev.dv_xname);
21989 			return ret;
21990 		}
21991 	}
21992 
21993 	return 0;
21994 }
21995 
/*
 * Configure TX beamforming for a vif. Beamforming is not yet
 * supported, so this is a no-op that reports success.
 */
int
qwz_mac_set_txbf_conf(struct qwz_vif *arvif)
{
	return 0;
}
22002 
22003 int
22004 qwz_mac_vdev_stop(struct qwz_softc *sc, struct qwz_vif *arvif, int pdev_id)
22005 {
22006 	int ret;
22007 #ifdef notyet
22008 	lockdep_assert_held(&ar->conf_mutex);
22009 #endif
22010 #if 0
22011 	reinit_completion(&ar->vdev_setup_done);
22012 #endif
22013 	sc->vdev_setup_done = 0;
22014 	ret = qwz_wmi_vdev_stop(sc, arvif->vdev_id, pdev_id);
22015 	if (ret) {
22016 		printf("%s: failed to stop WMI vdev %i: %d\n",
22017 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22018 		return ret;
22019 	}
22020 
22021 	ret = qwz_mac_vdev_setup_sync(sc);
22022 	if (ret) {
22023 		printf("%s: failed to synchronize setup for vdev %i: %d\n",
22024 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22025 		return ret;
22026 	}
22027 
22028 	if (sc->num_started_vdevs > 0)
22029 		sc->num_started_vdevs--;
22030 
22031 	DNPRINTF(QWZ_D_MAC, "%s: vdev vdev_id %d stopped\n", __func__,
22032 	    arvif->vdev_id);
22033 
22034 	if (test_bit(ATH12K_CAC_RUNNING, sc->sc_flags)) {
22035 		clear_bit(ATH12K_CAC_RUNNING, sc->sc_flags);
22036 		DNPRINTF(QWZ_D_MAC, "%s: CAC Stopped for vdev %d\n", __func__,
22037 		    arvif->vdev_id);
22038 	}
22039 
22040 	return 0;
22041 }
22042 
/*
 * Start (or, if 'restart' is non-zero, restart) the firmware vdev backing
 * 'arvif' on the channel of the current BSS. Issues a WMI vdev start/restart
 * command and waits for the firmware's setup-done event.
 * Returns 0 on success or an errno value.
 */
int
qwz_mac_vdev_start_restart(struct qwz_softc *sc, struct qwz_vif *arvif,
    int pdev_id, int restart)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *chan = ic->ic_bss->ni_chan;
	struct wmi_vdev_start_req_arg arg = {};
	int ret = 0;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
#if 0
	reinit_completion(&ar->vdev_setup_done);
#endif
	arg.vdev_id = arvif->vdev_id;
	arg.dtim_period = ic->ic_dtim_period;
	arg.bcn_intval = ic->ic_lintval;

	/* 20 MHz operation only: both segment center frequencies equal
	 * the primary channel frequency. */
	arg.channel.freq = chan->ic_freq;
	arg.channel.band_center_freq1 = chan->ic_freq;
	arg.channel.band_center_freq2 = chan->ic_freq;

	/* Only legacy 11a/b/g phy modes are supported so far. */
	switch (ic->ic_curmode) {
	case IEEE80211_MODE_11A:
		arg.channel.mode = MODE_11A;
		break;
	case IEEE80211_MODE_11B:
		arg.channel.mode = MODE_11B;
		break;
	case IEEE80211_MODE_11G:
		arg.channel.mode = MODE_11G;
		break;
	default:
		printf("%s: unsupported phy mode %d\n",
		    sc->sc_dev.dv_xname, ic->ic_curmode);
		return ENOTSUP;
	}

	arg.channel.min_power = 0;
	arg.channel.max_power = 20; /* XXX */
	arg.channel.max_reg_power = 20; /* XXX */
	arg.channel.max_antenna_gain = 0; /* XXX */

	/* Single spatial stream for now. */
	arg.pref_tx_streams = 1;
	arg.pref_rx_streams = 1;

	arg.mbssid_flags = 0;
	arg.mbssid_tx_vdev_id = 0;
	if (isset(sc->wmi.svc_map,
	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
		ret = qwz_mac_setup_vdev_params_mbssid(arvif,
		    &arg.mbssid_flags, &arg.mbssid_tx_vdev_id);
		if (ret)
			return ret;
	}
#if 0
	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		arg.ssid = arvif->u.ap.ssid;
		arg.ssid_len = arvif->u.ap.ssid_len;
		arg.hidden_ssid = arvif->u.ap.hidden_ssid;

		/* For now allow DFS for AP mode */
		arg.channel.chan_radar =
			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);

		arg.channel.freq2_radar = ctx->radar_enabled;

		arg.channel.passive = arg.channel.chan_radar;

		spin_lock_bh(&ab->base_lock);
		arg.regdomain = ar->ab->dfs_region;
		spin_unlock_bh(&ab->base_lock);
	}
#endif
	/* XXX */
	/* Treat 5 GHz channels >= 52 (potential DFS range) as passive. */
	arg.channel.passive |= !!(ieee80211_chan2ieee(ic, chan) >= 52);

	DNPRINTF(QWZ_D_MAC, "%s: vdev %d start center_freq %d phymode %s\n",
	    __func__, arg.vdev_id, arg.channel.freq,
	    qwz_wmi_phymode_str(arg.channel.mode));

	/* Cleared here; qwz_mac_vdev_setup_sync() sleeps until the
	 * firmware's completion event sets it. */
	sc->vdev_setup_done = 0;
	ret = qwz_wmi_vdev_start(sc, &arg, pdev_id, restart);
	if (ret) {
		printf("%s: failed to %s WMI vdev %i\n", sc->sc_dev.dv_xname,
		    restart ? "restart" : "start", arg.vdev_id);
		return ret;
	}

	ret = qwz_mac_vdev_setup_sync(sc);
	if (ret) {
		printf("%s: failed to synchronize setup for vdev %i %s: %d\n",
		    sc->sc_dev.dv_xname, arg.vdev_id,
		    restart ? "restart" : "start", ret);
		return ret;
	}

	/* A restart does not change the number of running vdevs. */
	if (!restart)
		sc->num_started_vdevs++;

	DNPRINTF(QWZ_D_MAC, "%s: vdev %d started\n", __func__, arvif->vdev_id);

	/* Enable CAC Flag in the driver by checking the channel DFS cac time,
	 * i.e dfs_cac_ms value which will be valid only for radar channels
	 * and state as NL80211_DFS_USABLE which indicates CAC needs to be
	 * done before channel usage. This flags is used to drop rx packets.
	 * during CAC.
	 */
	/* TODO Set the flag for other interface types as required */
#if 0
	if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
	    chandef->chan->dfs_cac_ms &&
	    chandef->chan->dfs_state == NL80211_DFS_USABLE) {
		set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
		ath12k_dbg(ab, ATH12K_DBG_MAC,
			   "CAC Started in chan_freq %d for vdev %d\n",
			   arg.channel.freq, arg.vdev_id);
	}
#endif
	/* A txbf configuration failure is not fatal to vdev start. */
	ret = qwz_mac_set_txbf_conf(arvif);
	if (ret)
		printf("%s: failed to set txbf conf for vdev %d: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);

	return 0;
}
22169 
/* Restart an already-created vdev; the channel setup is redone in firmware. */
int
qwz_mac_vdev_restart(struct qwz_softc *sc, struct qwz_vif *arvif, int pdev_id)
{
	const int restart = 1;

	return qwz_mac_vdev_start_restart(sc, arvif, pdev_id, restart);
}
22175 
/* Start a freshly created vdev for the first time. */
int
qwz_mac_vdev_start(struct qwz_softc *sc, struct qwz_vif *arvif, int pdev_id)
{
	const int restart = 0;

	return qwz_mac_vdev_start_restart(sc, arvif, pdev_id, restart);
}
22181 
22182 void
22183 qwz_vif_free(struct qwz_softc *sc, struct qwz_vif *arvif)
22184 {
22185 	struct qwz_txmgmt_queue *txmgmt;
22186 	int i;
22187 
22188 	if (arvif == NULL)
22189 		return;
22190 
22191 	txmgmt = &arvif->txmgmt;
22192 	for (i = 0; i < nitems(txmgmt->data); i++) {
22193 		struct qwz_tx_data *tx_data = &txmgmt->data[i];
22194 
22195 		if (tx_data->m) {
22196 			m_freem(tx_data->m);
22197 			tx_data->m = NULL;
22198 		}
22199 		if (tx_data->map) {
22200 			bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
22201 			tx_data->map = NULL;
22202 		}
22203 	}
22204 
22205 	free(arvif, M_DEVBUF, sizeof(*arvif));
22206 }
22207 
22208 struct qwz_vif *
22209 qwz_vif_alloc(struct qwz_softc *sc)
22210 {
22211 	struct qwz_vif *arvif;
22212 	struct qwz_txmgmt_queue *txmgmt;
22213 	int i, ret = 0;
22214 	const bus_size_t size = IEEE80211_MAX_LEN;
22215 
22216 	arvif = malloc(sizeof(*arvif), M_DEVBUF, M_NOWAIT | M_ZERO);
22217 	if (arvif == NULL)
22218 		return NULL;
22219 
22220 	txmgmt = &arvif->txmgmt;
22221 	for (i = 0; i < nitems(txmgmt->data); i++) {
22222 		struct qwz_tx_data *tx_data = &txmgmt->data[i];
22223 
22224 		ret = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
22225 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &tx_data->map);
22226 		if (ret) {
22227 			qwz_vif_free(sc, arvif);
22228 			return NULL;
22229 		}
22230 	}
22231 
22232 	arvif->sc = sc;
22233 
22234 	return arvif;
22235 }
22236 
22237 int
22238 qwz_mac_op_add_interface(struct qwz_pdev *pdev)
22239 {
22240 	struct qwz_softc *sc = pdev->sc;
22241 	struct ieee80211com *ic = &sc->sc_ic;
22242 	struct qwz_vif *arvif = NULL;
22243 	struct vdev_create_params vdev_param = { 0 };
22244 #if 0
22245 	struct peer_create_params peer_param;
22246 #endif
22247 	uint32_t param_id, param_value;
22248 	uint16_t nss;
22249 #if 0
22250 	int i;
22251 	int fbret;
22252 #endif
22253 	int ret, bit;
22254 #ifdef notyet
22255 	mutex_lock(&ar->conf_mutex);
22256 #endif
22257 #if 0
22258 	if (vif->type == NL80211_IFTYPE_AP &&
22259 	    ar->num_peers > (ar->max_num_peers - 1)) {
22260 		ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
22261 		ret = -ENOBUFS;
22262 		goto err;
22263 	}
22264 #endif
22265 	if (sc->num_created_vdevs > (TARGET_NUM_VDEVS(sc) - 1)) {
22266 		printf("%s: failed to create vdev %u, reached vdev limit %d\n",
22267 		    sc->sc_dev.dv_xname, sc->num_created_vdevs,
22268 		    TARGET_NUM_VDEVS(sc));
22269 		ret = EBUSY;
22270 		goto err;
22271 	}
22272 
22273 	arvif = qwz_vif_alloc(sc);
22274 	if (arvif == NULL) {
22275 		ret = ENOMEM;
22276 		goto err;
22277 	}
22278 #if 0
22279 	INIT_DELAYED_WORK(&arvif->connection_loss_work,
22280 			  ath12k_mac_vif_sta_connection_loss_work);
22281 	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
22282 		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
22283 		arvif->bitrate_mask.control[i].gi = 0;
22284 		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
22285 		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
22286 		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
22287 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
22288 		memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
22289 		       sizeof(arvif->bitrate_mask.control[i].he_mcs));
22290 	}
22291 #endif
22292 
22293 	if (sc->free_vdev_map == 0) {
22294 		printf("%s: cannot add interface; all vdevs are busy\n",
22295 		    sc->sc_dev.dv_xname);
22296 		ret = EBUSY;
22297 		goto err;
22298 	}
22299 	bit = ffs(sc->free_vdev_map) - 1;
22300 
22301 	arvif->vdev_id = bit;
22302 	arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
22303 
22304 	switch (ic->ic_opmode) {
22305 	case IEEE80211_M_STA:
22306 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
22307 		break;
22308 #if 0
22309 	case NL80211_IFTYPE_MESH_POINT:
22310 		arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
22311 		fallthrough;
22312 	case NL80211_IFTYPE_AP:
22313 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
22314 		break;
22315 	case NL80211_IFTYPE_MONITOR:
22316 		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
22317 		ar->monitor_vdev_id = bit;
22318 		break;
22319 #endif
22320 	default:
22321 		printf("%s: invalid operating mode %d\n",
22322 		    sc->sc_dev.dv_xname, ic->ic_opmode);
22323 		ret = EINVAL;
22324 		goto err;
22325 	}
22326 
22327 	DNPRINTF(QWZ_D_MAC,
22328 	    "%s: add interface id %d type %d subtype %d map 0x%x\n",
22329 	    __func__, arvif->vdev_id, arvif->vdev_type,
22330 	    arvif->vdev_subtype, sc->free_vdev_map);
22331 
22332 	ret = qwz_mac_setup_vdev_create_params(arvif, pdev, &vdev_param);
22333 	if (ret) {
22334 		printf("%s: failed to create vdev parameters %d: %d\n",
22335 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22336 		goto err;
22337 	}
22338 
22339 	ret = qwz_wmi_vdev_create(sc, ic->ic_myaddr, &vdev_param);
22340 	if (ret) {
22341 		printf("%s: failed to create WMI vdev %d %s: %d\n",
22342 		    sc->sc_dev.dv_xname, arvif->vdev_id,
22343 		    ether_sprintf(ic->ic_myaddr), ret);
22344 		goto err;
22345 	}
22346 
22347 	sc->num_created_vdevs++;
22348 	DNPRINTF(QWZ_D_MAC, "%s: vdev %s created, vdev_id %d\n", __func__,
22349 	    ether_sprintf(ic->ic_myaddr), arvif->vdev_id);
22350 	sc->allocated_vdev_map |= 1U << arvif->vdev_id;
22351 	sc->free_vdev_map &= ~(1U << arvif->vdev_id);
22352 #ifdef notyet
22353 	spin_lock_bh(&ar->data_lock);
22354 #endif
22355 	TAILQ_INSERT_TAIL(&sc->vif_list, arvif, entry);
22356 #ifdef notyet
22357 	spin_unlock_bh(&ar->data_lock);
22358 #endif
22359 	ret = qwz_mac_op_update_vif_offload(sc, pdev, arvif);
22360 	if (ret)
22361 		goto err_vdev_del;
22362 
22363 	nss = qwz_get_num_chains(sc->cfg_tx_chainmask) ? : 1;
22364 	ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
22365 	    WMI_VDEV_PARAM_NSS, nss);
22366 	if (ret) {
22367 		printf("%s: failed to set vdev %d chainmask 0x%x, nss %d: %d\n",
22368 		    sc->sc_dev.dv_xname, arvif->vdev_id, sc->cfg_tx_chainmask,
22369 		    nss, ret);
22370 		goto err_vdev_del;
22371 	}
22372 
22373 	switch (arvif->vdev_type) {
22374 #if 0
22375 	case WMI_VDEV_TYPE_AP:
22376 		peer_param.vdev_id = arvif->vdev_id;
22377 		peer_param.peer_addr = vif->addr;
22378 		peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
22379 		ret = ath12k_peer_create(ar, arvif, NULL, &peer_param);
22380 		if (ret) {
22381 			ath12k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
22382 				    arvif->vdev_id, ret);
22383 			goto err_vdev_del;
22384 		}
22385 
22386 		ret = ath12k_mac_set_kickout(arvif);
22387 		if (ret) {
22388 			ath12k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
22389 				    arvif->vdev_id, ret);
22390 			goto err_peer_del;
22391 		}
22392 
22393 		ath12k_mac_11d_scan_stop_all(ar->ab);
22394 		break;
22395 #endif
22396 	case WMI_VDEV_TYPE_STA:
22397 		param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
22398 		param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
22399 		ret = qwz_wmi_set_sta_ps_param(sc, arvif->vdev_id,
22400 		    pdev->pdev_id, param_id, param_value);
22401 		if (ret) {
22402 			printf("%s: failed to set vdev %d RX wake policy: %d\n",
22403 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22404 			goto err_peer_del;
22405 		}
22406 
22407 		param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
22408 		param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
22409 		ret = qwz_wmi_set_sta_ps_param(sc, arvif->vdev_id,
22410 		    pdev->pdev_id, param_id, param_value);
22411 		if (ret) {
22412 			printf("%s: failed to set vdev %d "
22413 			    "TX wake threshold: %d\n",
22414 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22415 			goto err_peer_del;
22416 		}
22417 
22418 		param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
22419 		param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
22420 		ret = qwz_wmi_set_sta_ps_param(sc, arvif->vdev_id,
22421 		    pdev->pdev_id, param_id, param_value);
22422 		if (ret) {
22423 			printf("%s: failed to set vdev %d pspoll count: %d\n",
22424 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22425 			goto err_peer_del;
22426 		}
22427 
22428 		ret = qwz_wmi_pdev_set_ps_mode(sc, arvif->vdev_id,
22429 		    pdev->pdev_id, WMI_STA_PS_MODE_DISABLED);
22430 		if (ret) {
22431 			printf("%s: failed to disable vdev %d ps mode: %d\n",
22432 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22433 			goto err_peer_del;
22434 		}
22435 
22436 		if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
22437 			sc->completed_11d_scan = 0;
22438 			sc->state_11d = ATH12K_11D_PREPARING;
22439 		}
22440 		break;
22441 #if 0
22442 	case WMI_VDEV_TYPE_MONITOR:
22443 		set_bit(ATH12K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
22444 		break;
22445 #endif
22446 	default:
22447 		printf("%s: invalid vdev type %d\n",
22448 		    sc->sc_dev.dv_xname, arvif->vdev_type);
22449 		ret = EINVAL;
22450 		goto err;
22451 	}
22452 
22453 	arvif->txpower = 40;
22454 	ret = qwz_mac_txpower_recalc(sc, pdev);
22455 	if (ret)
22456 		goto err_peer_del;
22457 
22458 	param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
22459 	param_value = ic->ic_rtsthreshold;
22460 	ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
22461 	    param_id, param_value);
22462 	if (ret) {
22463 		printf("%s: failed to set rts threshold for vdev %d: %d\n",
22464 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22465 		goto err_peer_del;
22466 	}
22467 
22468 	qwz_dp_vdev_tx_attach(sc, pdev, arvif);
22469 #if 0
22470 	if (vif->type != NL80211_IFTYPE_MONITOR &&
22471 	    test_bit(ATH12K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
22472 		ret = ath12k_mac_monitor_vdev_create(ar);
22473 		if (ret)
22474 			ath12k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
22475 				    ret);
22476 	}
22477 
22478 	mutex_unlock(&ar->conf_mutex);
22479 #endif
22480 	return 0;
22481 
22482 err_peer_del:
22483 #if 0
22484 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
22485 		fbret = qwz_peer_delete(sc, arvif->vdev_id, vif->addr);
22486 		if (fbret) {
22487 			printf("%s: fallback fail to delete peer addr %pM "
22488 			    "vdev_id %d ret %d\n", sc->sc_dev.dv_xname,
22489 			    vif->addr, arvif->vdev_id, fbret);
22490 			goto err;
22491 		}
22492 	}
22493 #endif
22494 err_vdev_del:
22495 	qwz_mac_vdev_delete(sc, arvif);
22496 #ifdef notyet
22497 	spin_lock_bh(&ar->data_lock);
22498 #endif
22499 	TAILQ_REMOVE(&sc->vif_list, arvif, entry);
22500 #ifdef notyet
22501 	spin_unlock_bh(&ar->data_lock);
22502 #endif
22503 
22504 err:
22505 #ifdef notyet
22506 	mutex_unlock(&ar->conf_mutex);
22507 #endif
22508 	qwz_vif_free(sc, arvif);
22509 	return ret;
22510 }
22511 
22512 int
22513 qwz_mac_start(struct qwz_softc *sc)
22514 {
22515 	struct qwz_pdev *pdev;
22516 	int i, error;
22517 
22518 	for (i = 0; i < sc->num_radios; i++) {
22519 		pdev = &sc->pdevs[i];
22520 		error = qwz_mac_op_start(pdev);
22521 		if (error)
22522 			return error;
22523 
22524 		error = qwz_mac_op_add_interface(pdev);
22525 		if (error)
22526 			return error;
22527 	}
22528 
22529 	return 0;
22530 }
22531 
22532 void
22533 qwz_init_task(void *arg)
22534 {
22535 	struct qwz_softc *sc = arg;
22536 	struct ifnet *ifp = &sc->sc_ic.ic_if;
22537 	int s = splnet();
22538 	rw_enter_write(&sc->ioctl_rwl);
22539 
22540 	if (ifp->if_flags & IFF_RUNNING)
22541 		qwz_stop(ifp);
22542 
22543 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
22544 		qwz_init(ifp);
22545 
22546 	rw_exit(&sc->ioctl_rwl);
22547 	splx(s);
22548 }
22549 
/*
 * Kick off a firmware-offloaded 11d (country-detection) scan on 'arvif',
 * if the firmware supports it, no 11d scan is already running, and we are
 * operating in station mode. On success the vdev is recorded as the 11d
 * scan owner and the 11d state machine advances from PREPARING to RUNNING;
 * otherwise the state machine falls back to IDLE.
 */
void
qwz_mac_11d_scan_start(struct qwz_softc *sc, struct qwz_vif *arvif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct wmi_11d_scan_start_params param;
	int ret;
#ifdef notyet
	mutex_lock(&ar->ab->vdev_id_11d_lock);
#endif
	DNPRINTF(QWZ_D_MAC, "%s: vdev id for 11d scan %d\n", __func__,
	    sc->vdev_id_11d_scan);
#if 0
	if (ar->regdom_set_by_user)
		goto fin;
#endif
	/* Another vdev already owns the 11d scan. */
	if (sc->vdev_id_11d_scan != QWZ_11D_INVALID_VDEV_ID)
		goto fin;

	/* Firmware must support 11d offload. */
	if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD))
		goto fin;

	if (ic->ic_opmode != IEEE80211_M_STA)
		goto fin;

	param.vdev_id = arvif->vdev_id;
	param.start_interval_msec = 0;
	param.scan_period_msec = QWZ_SCAN_11D_INTERVAL;

	DNPRINTF(QWZ_D_MAC, "%s: start 11d scan\n", __func__);

	ret = qwz_wmi_send_11d_scan_start_cmd(sc, &param,
	   0 /* TODO: derive pdev ID from arvif somehow? */);
	if (ret) {
		/* ESHUTDOWN means the device is going away; stay quiet. */
		if (ret != ESHUTDOWN) {
			printf("%s: failed to start 11d scan; vdev: %d "
			    "ret: %d\n", sc->sc_dev.dv_xname,
			    arvif->vdev_id, ret);
		}
	} else {
		sc->vdev_id_11d_scan = arvif->vdev_id;
		if (sc->state_11d == ATH12K_11D_PREPARING)
			sc->state_11d = ATH12K_11D_RUNNING;
	}
fin:
	/* If the scan did not start, leave the PREPARING state. */
	if (sc->state_11d == ATH12K_11D_PREPARING) {
		sc->state_11d = ATH12K_11D_IDLE;
		sc->completed_11d_scan = 0;
	}
#ifdef notyet
	mutex_unlock(&ar->ab->vdev_id_11d_lock);
#endif
}
22602 
/*
 * Complete or abort the current hardware scan: reset the scan state
 * machine to IDLE, cancel the scan timeout, and notify net80211 that
 * the scan has ended (unless this was a remain-on-channel operation).
 * Note: the RUNNING and ABORTING cases deliberately fall through to the
 * STARTING case below.
 */
void
qwz_mac_scan_finish(struct qwz_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	enum ath12k_scan_state ostate;

#ifdef notyet
	lockdep_assert_held(&ar->data_lock);
#endif
	ostate = sc->scan.state;
	switch (ostate) {
	case ATH12K_SCAN_IDLE:
		/* Nothing to do. */
		break;
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
#if 0
		if (ar->scan.is_roc && ar->scan.roc_notify)
			ieee80211_remain_on_channel_expired(ar->hw);
		fallthrough;
#endif
	case ATH12K_SCAN_STARTING:
		sc->scan.state = ATH12K_SCAN_IDLE;
		sc->scan_channel = 0;
		sc->scan.roc_freq = 0;

		timeout_del(&sc->scan.timeout);
		if (!sc->scan.is_roc)
			ieee80211_end_scan(ifp);
#if 0
		complete_all(&ar->scan.completed);
#endif
		break;
	}
}
22638 
22639 int
22640 qwz_mac_get_rate_hw_value(struct ieee80211com *ic,
22641     struct ieee80211_node *ni, int bitrate)
22642 {
22643 	uint32_t preamble;
22644 	uint16_t hw_value;
22645 	int shortpre = 0;
22646 
22647 	if (IEEE80211_IS_CHAN_CCK(ni->ni_chan))
22648 		preamble = WMI_RATE_PREAMBLE_CCK;
22649 	else
22650 		preamble = WMI_RATE_PREAMBLE_OFDM;
22651 
22652 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
22653 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
22654 		shortpre = 1;
22655 
22656 	switch (bitrate) {
22657 	case 2:
22658 		hw_value = ATH12K_HW_RATE_CCK_LP_1M;
22659 		break;
22660 	case 4:
22661 		if (shortpre)
22662 			hw_value = ATH12K_HW_RATE_CCK_SP_2M;
22663 		else
22664 			hw_value = ATH12K_HW_RATE_CCK_LP_2M;
22665 		break;
22666 	case 11:
22667 		if (shortpre)
22668 			hw_value = ATH12K_HW_RATE_CCK_SP_5_5M;
22669 		else
22670 			hw_value = ATH12K_HW_RATE_CCK_LP_5_5M;
22671 		break;
22672 	case 22:
22673 		if (shortpre)
22674 			hw_value = ATH12K_HW_RATE_CCK_SP_11M;
22675 		else
22676 			hw_value = ATH12K_HW_RATE_CCK_LP_11M;
22677 		break;
22678 	case 12:
22679 		hw_value = ATH12K_HW_RATE_OFDM_6M;
22680 		break;
22681 	case 18:
22682 		hw_value = ATH12K_HW_RATE_OFDM_9M;
22683 		break;
22684 	case 24:
22685 		hw_value = ATH12K_HW_RATE_OFDM_12M;
22686 		break;
22687 	case 36:
22688 		hw_value = ATH12K_HW_RATE_OFDM_18M;
22689 		break;
22690 	case 48:
22691 		hw_value = ATH12K_HW_RATE_OFDM_24M;
22692 		break;
22693 	case 72:
22694 		hw_value = ATH12K_HW_RATE_OFDM_36M;
22695 		break;
22696 	case 96:
22697 		hw_value = ATH12K_HW_RATE_OFDM_48M;
22698 		break;
22699 	case 108:
22700 		hw_value = ATH12K_HW_RATE_OFDM_54M;
22701 		break;
22702 	default:
22703 		return -1;
22704 	}
22705 
22706 	return ATH12K_HW_RATE_CODE(hw_value, 0, preamble);
22707 }
22708 
22709 int
22710 qwz_peer_delete(struct qwz_softc *sc, uint32_t vdev_id, uint8_t pdev_id,
22711     uint8_t *addr)
22712 {
22713 	int ret;
22714 
22715 	sc->peer_mapped = 0;
22716 	sc->peer_delete_done = 0;
22717 
22718 	ret = qwz_wmi_send_peer_delete_cmd(sc, addr, vdev_id, pdev_id);
22719 	if (ret) {
22720 		printf("%s: failed to delete peer vdev_id %d addr %s ret %d\n",
22721 		    sc->sc_dev.dv_xname, vdev_id, ether_sprintf(addr), ret);
22722 		return ret;
22723 	}
22724 
22725 	while (!sc->peer_mapped) {
22726 		ret = tsleep_nsec(&sc->peer_mapped, 0, "qwzpeer",
22727 		    SEC_TO_NSEC(3));
22728 		if (ret) {
22729 			printf("%s: peer delete unmap timeout\n",
22730 			    sc->sc_dev.dv_xname);
22731 			return ret;
22732 		}
22733 	}
22734 
22735 	while (!sc->peer_delete_done) {
22736 		ret = tsleep_nsec(&sc->peer_delete_done, 0, "qwzpeerd",
22737 		    SEC_TO_NSEC(3));
22738 		if (ret) {
22739 			printf("%s: peer delete command timeout\n",
22740 			    sc->sc_dev.dv_xname);
22741 			return ret;
22742 		}
22743 	}
22744 
22745 	sc->num_peers--;
22746 	return 0;
22747 }
22748 
22749 int
22750 qwz_peer_create(struct qwz_softc *sc, struct qwz_vif *arvif, uint8_t pdev_id,
22751     struct ieee80211_node *ni, struct peer_create_params *param)
22752 {
22753 	struct ieee80211com *ic = &sc->sc_ic;
22754 	struct qwz_node *nq = (struct qwz_node *)ni;
22755 	struct ath12k_peer *peer;
22756 	int ret;
22757 #ifdef notyet
22758 	lockdep_assert_held(&ar->conf_mutex);
22759 #endif
22760 	if (sc->num_peers > (TARGET_NUM_PEERS_PDEV(sc) - 1)) {
22761 		DPRINTF("%s: failed to create peer due to insufficient "
22762 		    "peer entry resource in firmware\n", __func__);
22763 		return ENOBUFS;
22764 	}
22765 #ifdef notyet
22766 	mutex_lock(&ar->ab->tbl_mtx_lock);
22767 	spin_lock_bh(&ar->ab->base_lock);
22768 #endif
22769 	peer = &nq->peer;
22770 	if (peer) {
22771 		if (peer->peer_id != HAL_INVALID_PEERID &&
22772 		    peer->vdev_id == param->vdev_id) {
22773 #ifdef notyet
22774 			spin_unlock_bh(&ar->ab->base_lock);
22775 			mutex_unlock(&ar->ab->tbl_mtx_lock);
22776 #endif
22777 			return EINVAL;
22778 		}
22779 #if 0
22780 		/* Assume sta is transitioning to another band.
22781 		 * Remove here the peer from rhash.
22782 		 */
22783 		ath12k_peer_rhash_delete(ar->ab, peer);
22784 #endif
22785 	}
22786 #ifdef notyet
22787 	spin_unlock_bh(&ar->ab->base_lock);
22788 	mutex_unlock(&ar->ab->tbl_mtx_lock);
22789 #endif
22790 	sc->peer_mapped = 0;
22791 
22792 	ret = qwz_wmi_send_peer_create_cmd(sc, pdev_id, param);
22793 	if (ret) {
22794 		printf("%s: failed to send peer create vdev_id %d ret %d\n",
22795 		    sc->sc_dev.dv_xname, param->vdev_id, ret);
22796 		return ret;
22797 	}
22798 
22799 	while (!sc->peer_mapped) {
22800 		ret = tsleep_nsec(&sc->peer_mapped, 0, "qwzpeer",
22801 		    SEC_TO_NSEC(3));
22802 		if (ret) {
22803 			printf("%s: peer create command timeout\n",
22804 			    sc->sc_dev.dv_xname);
22805 			return ret;
22806 		}
22807 	}
22808 
22809 #ifdef notyet
22810 	mutex_lock(&ar->ab->tbl_mtx_lock);
22811 	spin_lock_bh(&ar->ab->base_lock);
22812 #endif
22813 #if 0
22814 	peer = ath12k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
22815 	if (!peer) {
22816 		spin_unlock_bh(&ar->ab->base_lock);
22817 		mutex_unlock(&ar->ab->tbl_mtx_lock);
22818 		ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
22819 			    param->peer_addr, param->vdev_id);
22820 
22821 		ret = -ENOENT;
22822 		goto cleanup;
22823 	}
22824 
22825 	ret = ath12k_peer_rhash_add(ar->ab, peer);
22826 	if (ret) {
22827 		spin_unlock_bh(&ar->ab->base_lock);
22828 		mutex_unlock(&ar->ab->tbl_mtx_lock);
22829 		goto cleanup;
22830 	}
22831 #endif
22832 	peer->pdev_id = pdev_id;
22833 #if 0
22834 	peer->sta = sta;
22835 #endif
22836 	if (ic->ic_opmode == IEEE80211_M_STA) {
22837 		arvif->ast_hash = peer->ast_hash;
22838 		arvif->ast_idx = peer->hw_peer_id;
22839 	}
22840 #if 0
22841 	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
22842 	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
22843 
22844 	if (sta) {
22845 		struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
22846 		arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
22847 				       FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
22848 						  peer->peer_id);
22849 
22850 		/* set HTT extension valid bit to 0 by default */
22851 		arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
22852 	}
22853 #endif
22854 	sc->num_peers++;
22855 #ifdef notyet
22856 	spin_unlock_bh(&ar->ab->base_lock);
22857 	mutex_unlock(&ar->ab->tbl_mtx_lock);
22858 #endif
22859 	return 0;
22860 #if 0
22861 cleanup:
22862 	int fbret = qwz_peer_delete(sc, param->vdev_id, param->peer_addr);
22863 	if (fbret) {
22864 		printf("%s: failed peer %s delete vdev_id %d fallback ret %d\n",
22865 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
22866 		    param->vdev_id, fbret);
22867 	}
22868 
22869 	return ret;
22870 #endif
22871 }
22872 
/*
 * Queue a REO command on the REO command ring. If a completion callback
 * 'cb' is given, a copy of the rx_tid state is kept on dp->reo_cmd_list so
 * the callback can be invoked when the command status arrives.
 * Returns 0 on success, an errno value, or the negative value returned by
 * qwz_hal_reo_cmd_send() on ring failure (callers only test for non-zero).
 */
int
qwz_dp_tx_send_reo_cmd(struct qwz_softc *sc, struct dp_rx_tid *rx_tid,
    enum hal_reo_cmd_type type, struct ath12k_hal_reo_cmd *cmd,
    void (*cb)(struct qwz_dp *, void *, enum hal_reo_cmd_status))
{
	struct qwz_dp *dp = &sc->dp;
	struct dp_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	/* Refuse new commands while the device is being torn down. */
	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;

	cmd_ring = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = qwz_hal_reo_cmd_send(sc, cmd_ring, type, cmd);
	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors has cmd_num starting from 1 */
	if (cmd_num == 0)
		return EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = malloc(sizeof(*dp_cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dp_cmd)
		return ENOMEM;

	/* Snapshot the tid state; the callback receives this copy. */
	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;
#ifdef notyet
	spin_lock_bh(&dp->reo_cmd_lock);
#endif
	TAILQ_INSERT_TAIL(&dp->reo_cmd_list, dp_cmd, entry);
#ifdef notyet
	spin_unlock_bh(&dp->reo_cmd_lock);
#endif
	return 0;
}
22919 
22920 uint32_t
22921 qwz_hal_reo_qdesc_size(uint32_t ba_window_size, uint8_t tid)
22922 {
22923 	uint32_t num_ext_desc;
22924 
22925 	if (ba_window_size <= 1) {
22926 		if (tid != HAL_DESC_REO_NON_QOS_TID)
22927 			num_ext_desc = 1;
22928 		else
22929 			num_ext_desc = 0;
22930 	} else if (ba_window_size <= 105) {
22931 		num_ext_desc = 1;
22932 	} else if (ba_window_size <= 210) {
22933 		num_ext_desc = 2;
22934 	} else {
22935 		num_ext_desc = 3;
22936 	}
22937 
22938 	return sizeof(struct hal_rx_reo_queue) +
22939 		(num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
22940 }
22941 
22942 void
22943 qwz_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, uint8_t owner, uint8_t buffer_type, uint32_t magic)
22944 {
22945 	hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
22946 		     FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
22947 
22948 	/* Magic pattern in reserved bits for debugging */
22949 	hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
22950 }
22951 
/*
 * Initialize an RX REO queue descriptor for the given TID: set ownership,
 * queue number, AC, BA window size, PN checking mode and the sequence
 * number, then initialize the trailing extension descriptors (QoS TIDs
 * only). 'vaddr' must point to a buffer of at least
 * qwz_hal_reo_qdesc_size() bytes.
 */
void
qwz_hal_reo_qdesc_setup(void *vaddr, int tid, uint32_t ba_window_size,
    uint32_t start_seq, enum hal_pn_type type)
{
	struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
	struct hal_rx_reo_queue_ext *ext_desc;

	memset(qdesc, 0, sizeof(*qdesc));

	qwz_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);

	qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);

	qdesc->info0 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
	    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
	    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, qwz_tid_to_ac(tid));

	/* Clamp the window to at least 1; QoS TIDs get one extra slot. */
	if (ba_window_size < 1)
		ba_window_size = 1;

	if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
		ba_window_size++;

	if (ba_window_size == 1)
		qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);

	/* Hardware encodes the window size minus one. */
	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
				   ba_window_size - 1);
	switch (type) {
	case HAL_PN_TYPE_NONE:
	case HAL_PN_TYPE_WAPI_EVEN:
	case HAL_PN_TYPE_WAPI_UNEVEN:
		break;
	case HAL_PN_TYPE_WPA:
		/* Enable 48-bit PN replay checking for WPA ciphers. */
		qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
		    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
		    HAL_RX_REO_QUEUE_PN_SIZE_48);
		break;
	}

	/* TODO: Set Ignore ampdu flags based on BA window size and/or
	 * AMPDU capabilities
	 */
	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);

	/* NOTE(review): OR-ing a zero SVLD field is a no-op, and the plain
	 * '=' below overwrites info1 anyway (matches the Linux ath11k/ath12k
	 * code this was ported from) — confirm against upstream if changed. */
	qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);

	if (start_seq <= 0xfff)
		qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
		    start_seq);

	/* Non-QoS TIDs carry no extension descriptors. */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		return;

	ext_desc = qdesc->ext_desc;

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwz_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwz_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwz_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
}
23029 
23030 void
23031 qwz_dp_reo_cmd_free(struct qwz_dp *dp, void *ctx,
23032     enum hal_reo_cmd_status status)
23033 {
23034 	struct qwz_softc *sc = dp->sc;
23035 	struct dp_rx_tid *rx_tid = ctx;
23036 
23037 	if (status != HAL_REO_CMD_SUCCESS)
23038 		printf("%s: failed to flush rx tid hw desc, tid %d status %d\n",
23039 		    sc->sc_dev.dv_xname, rx_tid->tid, status);
23040 
23041 	if (rx_tid->mem) {
23042 		qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
23043 		rx_tid->mem = NULL;
23044 		rx_tid->vaddr = NULL;
23045 		rx_tid->paddr = 0ULL;
23046 		rx_tid->size = 0;
23047 	}
23048 }
23049 
/*
 * Flush a tid's REO queue descriptor out of the hardware cache in
 * base-descriptor-sized chunks, from the end of the descriptor towards its
 * start. The final flush of the first chunk requests a status callback
 * (qwz_dp_reo_cmd_free) which releases the descriptor memory; if that
 * command cannot be queued, the memory is freed here instead.
 */
void
qwz_dp_reo_cache_flush(struct qwz_softc *sc, struct dp_rx_tid *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	/* Size of a base queue descriptor with no extensions. */
	desc_sz = qwz_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	/* Flush all chunks beyond the first, highest address first. */
	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = (rx_tid->paddr + tot_desc_sz) & 0xffffffff;
		cmd.addr_hi = rx_tid->paddr >> 32;
		ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid,
		    HAL_REO_CMD_FLUSH_CACHE, &cmd, NULL);
		if (ret) {
			printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE, "
			    "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid,
			    ret);
		}
	}

	/* Flush the first chunk and ask for a completion status. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
	cmd.addr_hi = rx_tid->paddr >> 32;
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_FLUSH_CACHE,
	    &cmd, qwz_dp_reo_cmd_free);
	if (ret) {
		printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE cmd, "
		    "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid, ret);
		/* No callback will run; free the descriptor memory now. */
		if (rx_tid->mem) {
			qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
			rx_tid->mem = NULL;
			rx_tid->vaddr = NULL;
			rx_tid->paddr = 0ULL;
			rx_tid->size = 0;
		}
	}
}
23091 
/*
 * Completion handler for the HAL_REO_CMD_UPDATE_RX_QUEUE command sent
 * by qwz_peer_rx_tid_delete(). On success, ownership of the deleted
 * tid's DMA memory is moved onto the REO cache flush list; entries on
 * that list which are old enough (or once the list grows too large)
 * are flushed from the hw cache and then freed.
 */
void
qwz_dp_rx_tid_del_func(struct qwz_dp *dp, void *ctx,
    enum hal_reo_cmd_status status)
{
	struct qwz_softc *sc = dp->sc;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;
	uint64_t now;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		printf("%s: failed to delete rx tid %d hw descriptor %d\n",
		    sc->sc_dev.dv_xname, rx_tid->tid, status);
		return;
	}

	elem = malloc(sizeof(*elem), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!elem)
		goto free_desc;

	now = getnsecuptime();
	elem->ts = now;
	/* The list element takes over ownership of rx_tid's DMA memory;
	 * clear rx_tid so nobody frees it twice. */
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	rx_tid->mem = NULL;
	rx_tid->vaddr = NULL;
	rx_tid->paddr = 0ULL;
	rx_tid->size = 0;

#ifdef notyet
	spin_lock_bh(&dp->reo_cmd_lock);
#endif
	TAILQ_INSERT_TAIL(&dp->reo_cmd_cache_flush_list, elem, entry);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	TAILQ_FOREACH_SAFE(elem, &dp->reo_cmd_cache_flush_list, entry, tmp) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    now >= elem->ts + MSEC_TO_NSEC(DP_REO_DESC_FREE_TIMEOUT_MS)) {
			TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, elem, entry);
			dp->reo_cmd_cache_flush_count--;
#ifdef notyet
			spin_unlock_bh(&dp->reo_cmd_lock);
#endif
			qwz_dp_reo_cache_flush(sc, &elem->data);
			free(elem, M_DEVBUF, sizeof(*elem));
#ifdef notyet
			spin_lock_bh(&dp->reo_cmd_lock);
#endif
		}
	}
#ifdef notyet
	spin_unlock_bh(&dp->reo_cmd_lock);
#endif
	return;
free_desc:
	/* Firmware is draining, or allocation failed: free immediately. */
	if (rx_tid->mem) {
		qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
		rx_tid->mem = NULL;
		rx_tid->vaddr = NULL;
		rx_tid->paddr = 0ULL;
		rx_tid->size = 0;
	}
}
23158 
23159 void
23160 qwz_peer_rx_tid_delete(struct qwz_softc *sc, struct ath12k_peer *peer,
23161     uint8_t tid)
23162 {
23163 	struct ath12k_hal_reo_cmd cmd = {0};
23164 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
23165 	int ret;
23166 
23167 	if (!rx_tid->active)
23168 		return;
23169 
23170 	rx_tid->active = 0;
23171 
23172 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
23173 	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
23174 	cmd.addr_hi = rx_tid->paddr >> 32;
23175 	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
23176 	ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
23177 	    &cmd, qwz_dp_rx_tid_del_func);
23178 	if (ret) {
23179 		if (ret != ESHUTDOWN) {
23180 			printf("%s: failed to send "
23181 			    "HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
23182 			    sc->sc_dev.dv_xname, tid, ret);
23183 		}
23184 
23185 		if (rx_tid->mem) {
23186 			qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
23187 			rx_tid->mem = NULL;
23188 			rx_tid->vaddr = NULL;
23189 			rx_tid->paddr = 0ULL;
23190 			rx_tid->size = 0;
23191 		}
23192 	}
23193 }
23194 
23195 void
23196 qwz_dp_rx_frags_cleanup(struct qwz_softc *sc, struct dp_rx_tid *rx_tid,
23197     int rel_link_desc)
23198 {
23199 #ifdef notyet
23200 	lockdep_assert_held(&ab->base_lock);
23201 #endif
23202 #if 0
23203 	if (rx_tid->dst_ring_desc) {
23204 		if (rel_link_desc)
23205 			ath12k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
23206 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
23207 		kfree(rx_tid->dst_ring_desc);
23208 		rx_tid->dst_ring_desc = NULL;
23209 	}
23210 #endif
23211 	rx_tid->cur_sn = 0;
23212 	rx_tid->last_frag_no = 0;
23213 	rx_tid->rx_frag_bitmap = 0;
23214 #if 0
23215 	__skb_queue_purge(&rx_tid->rx_frags);
23216 #endif
23217 }
23218 
23219 void
23220 qwz_peer_frags_flush(struct qwz_softc *sc, struct ath12k_peer *peer)
23221 {
23222 	struct dp_rx_tid *rx_tid;
23223 	int i;
23224 #ifdef notyet
23225 	lockdep_assert_held(&ar->ab->base_lock);
23226 #endif
23227 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
23228 		rx_tid = &peer->rx_tid[i];
23229 
23230 		qwz_dp_rx_frags_cleanup(sc, rx_tid, 1);
23231 #if 0
23232 		spin_unlock_bh(&ar->ab->base_lock);
23233 		del_timer_sync(&rx_tid->frag_timer);
23234 		spin_lock_bh(&ar->ab->base_lock);
23235 #endif
23236 	}
23237 }
23238 
23239 void
23240 qwz_peer_rx_tid_cleanup(struct qwz_softc *sc, struct ath12k_peer *peer)
23241 {
23242 	struct dp_rx_tid *rx_tid;
23243 	int i;
23244 #ifdef notyet
23245 	lockdep_assert_held(&ar->ab->base_lock);
23246 #endif
23247 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
23248 		rx_tid = &peer->rx_tid[i];
23249 
23250 		qwz_peer_rx_tid_delete(sc, peer, i);
23251 		qwz_dp_rx_frags_cleanup(sc, rx_tid, 1);
23252 #if 0
23253 		spin_unlock_bh(&ar->ab->base_lock);
23254 		del_timer_sync(&rx_tid->frag_timer);
23255 		spin_lock_bh(&ar->ab->base_lock);
23256 #endif
23257 	}
23258 }
23259 
23260 int
23261 qwz_peer_rx_tid_reo_update(struct qwz_softc *sc, struct ath12k_peer *peer,
23262     struct dp_rx_tid *rx_tid, uint32_t ba_win_sz, uint16_t ssn,
23263     int update_ssn)
23264 {
23265 	struct ath12k_hal_reo_cmd cmd = {0};
23266 	int ret;
23267 
23268 	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
23269 	cmd.addr_hi = rx_tid->paddr >> 32;
23270 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
23271 	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
23272 	cmd.ba_window_size = ba_win_sz;
23273 
23274 	if (update_ssn) {
23275 		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
23276 		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
23277 	}
23278 
23279 	ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
23280 	    &cmd, NULL);
23281 	if (ret) {
23282 		printf("%s: failed to update rx tid queue, tid %d (%d)\n",
23283 		    sc->sc_dev.dv_xname, rx_tid->tid, ret);
23284 		return ret;
23285 	}
23286 
23287 	rx_tid->ba_win_sz = ba_win_sz;
23288 
23289 	return 0;
23290 }
23291 
23292 void
23293 qwz_dp_rx_tid_mem_free(struct qwz_softc *sc, struct ieee80211_node *ni,
23294     int vdev_id, uint8_t tid)
23295 {
23296 	struct qwz_node *nq = (struct qwz_node *)ni;
23297 	struct ath12k_peer *peer = &nq->peer;
23298 	struct dp_rx_tid *rx_tid;
23299 #ifdef notyet
23300 	spin_lock_bh(&ab->base_lock);
23301 #endif
23302 	rx_tid = &peer->rx_tid[tid];
23303 
23304 	if (rx_tid->mem) {
23305 		qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
23306 		rx_tid->mem = NULL;
23307 		rx_tid->vaddr = NULL;
23308 		rx_tid->paddr = 0ULL;
23309 		rx_tid->size = 0;
23310 	}
23311 
23312 	rx_tid->active = 0;
23313 #ifdef notyet
23314 	spin_unlock_bh(&ab->base_lock);
23315 #endif
23316 }
23317 
/*
 * Set up (or update) the rx reorder queue of one tid for a peer:
 * allocate and initialize the hw queue descriptor in DMA memory and
 * point firmware at it via WMI. If the tid is already active, only
 * the BA window size and SSN are updated.
 * Returns 0 on success or an errno-style error.
 */
int
qwz_peer_rx_tid_setup(struct qwz_softc *sc, struct ieee80211_node *ni,
    int vdev_id, int pdev_id, uint8_t tid, uint32_t ba_win_sz, uint16_t ssn,
    enum hal_pn_type pn_type)
{
	struct qwz_node *nq = (struct qwz_node *)ni;
	struct ath12k_peer *peer = &nq->peer;
	struct dp_rx_tid *rx_tid;
	uint32_t hw_desc_sz;
	void *vaddr;
	uint64_t paddr;
	int ret;
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = qwz_peer_rx_tid_reo_update(sc, peer, rx_tid,
		    ba_win_sz, ssn, 1);
#ifdef notyet
		spin_unlock_bh(&ab->base_lock);
#endif
		if (ret) {
			printf("%s: failed to update reo for peer %s "
			    "rx tid %d\n: %d", sc->sc_dev.dv_xname,
			    ether_sprintf(ni->ni_macaddr), tid, ret);
			return ret;
		}

		/* Tell firmware about the (possibly new) BA window size. */
		ret = qwz_wmi_peer_rx_reorder_queue_setup(sc, vdev_id,
		    pdev_id, ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
		if (ret)
			printf("%s: failed to send wmi rx reorder queue "
			    "for peer %s tid %d: %d\n", sc->sc_dev.dv_xname,
			    ether_sprintf(ni->ni_macaddr), tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = qwz_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = qwz_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	rx_tid->mem = qwz_dmamem_alloc(sc->sc_dmat, hw_desc_sz,
	    HAL_LINK_DESC_ALIGN);
	if (rx_tid->mem == NULL) {
#ifdef notyet
		spin_unlock_bh(&ab->base_lock);
#endif
		return ENOMEM;
	}

	vaddr = QWZ_DMA_KVA(rx_tid->mem);

	/* Fill in the hw REO queue descriptor for this tid. */
	qwz_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);

	paddr = QWZ_DMA_DVA(rx_tid->mem);

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = 1;
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
	ret = qwz_wmi_peer_rx_reorder_queue_setup(sc, vdev_id, pdev_id,
	    ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
	if (ret) {
		printf("%s: failed to setup rx reorder queue for peer %s "
		    "tid %d: %d\n", sc->sc_dev.dv_xname,
		    ether_sprintf(ni->ni_macaddr), tid, ret);
		/* Firmware never learned about the queue; undo the setup. */
		qwz_dp_rx_tid_mem_free(sc, ni, vdev_id, tid);
	}

	return ret;
}
23403 
23404 int
23405 qwz_peer_rx_frag_setup(struct qwz_softc *sc, struct ieee80211_node *ni,
23406     int vdev_id)
23407 {
23408 	struct qwz_node *nq = (struct qwz_node *)ni;
23409 	struct ath12k_peer *peer = &nq->peer;
23410 	struct dp_rx_tid *rx_tid;
23411 	int i;
23412 #ifdef notyet
23413 	spin_lock_bh(&ab->base_lock);
23414 #endif
23415 	for (i = 0; i <= nitems(peer->rx_tid); i++) {
23416 		rx_tid = &peer->rx_tid[i];
23417 #if 0
23418 		rx_tid->ab = ab;
23419 		timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
23420 #endif
23421 	}
23422 #if 0
23423 	peer->dp_setup_done = true;
23424 #endif
23425 #ifdef notyet
23426 	spin_unlock_bh(&ab->base_lock);
23427 #endif
23428 	return 0;
23429 }
23430 
/*
 * Set up datapath state for a newly created peer: program the default
 * REO routing, create an rx reorder queue per tid and initialize
 * fragment reassembly. On partial failure, tid queues set up so far
 * are torn down again.
 * Returns 0 on success or an errno-style error.
 */
int
qwz_dp_peer_setup(struct qwz_softc *sc, int vdev_id, int pdev_id,
    struct ieee80211_node *ni)
{
	struct qwz_node *nq = (struct qwz_node *)ni;
	struct ath12k_peer *peer = &nq->peer;
	uint32_t reo_dest;
	int ret = 0, tid;

	/* reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = sc->pdev_dp.mac_id + 1;
	ret = qwz_wmi_set_peer_param(sc, ni->ni_macaddr, vdev_id, pdev_id,
	    WMI_PEER_SET_DEFAULT_ROUTING, DP_RX_HASH_ENABLE | (reo_dest << 1));
	if (ret) {
		printf("%s: failed to set default routing %d peer %s "
		    "vdev_id %d\n", sc->sc_dev.dv_xname, ret,
		    ether_sprintf(ni->ni_macaddr), vdev_id);
		return ret;
	}

	for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
		ret = qwz_peer_rx_tid_setup(sc, ni, vdev_id, pdev_id,
		    tid, 1, 0, HAL_PN_TYPE_NONE);
		if (ret) {
			printf("%s: failed to setup rxd tid queue for tid %d: %d\n",
			    sc->sc_dev.dv_xname, tid, ret);
			/* tid is the first queue that failed; the cleanup
			 * loop below deletes it and everything before it. */
			goto peer_clean;
		}
	}

	ret = qwz_peer_rx_frag_setup(sc, ni, vdev_id);
	if (ret) {
		printf("%s: failed to setup rx defrag context\n",
		    sc->sc_dev.dv_xname);
		/* The tid loop completed, so tid == IEEE80211_NUM_TID;
		 * step back to the last valid tid before cleanup. */
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
#if 0
	peer = ath12k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath12k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}
#endif
	for (; tid >= 0; tid--)
		qwz_peer_rx_tid_delete(sc, peer, tid);
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
	return ret;
}
23492 
/*
 * Enable or disable hardware PN/TSC replay detection on all active rx
 * tid queues of a peer, according to the cipher of key 'k'. With
 * delete_key set, the PN check is turned off again.
 * Returns 0 on success or an errno-style error.
 */
int
qwz_dp_peer_rx_pn_replay_config(struct qwz_softc *sc, struct qwz_vif *arvif,
    struct ieee80211_node *ni, struct ieee80211_key *k, int delete_key)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	struct qwz_node *nq = (struct qwz_node *)ni;
	struct ath12k_peer *peer = &nq->peer;
	struct dp_rx_tid *rx_tid;
	uint8_t tid;
	int ret = 0;

	/*
	 * NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use net80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (k->k_flags & IEEE80211_KEY_GROUP)
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (k->k_cipher) {
	case IEEE80211_CIPHER_TKIP:
	case IEEE80211_CIPHER_CCMP:
#if 0
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
#endif
		/* Only set the PN-check bits when installing a key;
		 * on delete, upd0 alone clears the configuration. */
		if (!delete_key) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		printf("%s: cipher %u is not supported\n",
		    sc->sc_dev.dv_xname, k->k_cipher);
		return EOPNOTSUPP;
	}

	/* Apply the update to every active rx tid queue of this peer. */
	for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = rx_tid->paddr & 0xffffffff;
		cmd.addr_hi = (rx_tid->paddr >> 32);
		ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid,
		    HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, NULL);
		if (ret) {
			printf("%s: failed to configure rx tid %d queue "
			    "for pn replay detection %d\n",
			    sc->sc_dev.dv_xname, tid, ret);
			break;
		}
	}

	return ret;
}
23556 
23557 enum hal_tcl_encap_type
23558 qwz_dp_tx_get_encap_type(struct qwz_softc *sc)
23559 {
23560 	if (test_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags))
23561 		return HAL_TCL_ENCAP_TYPE_RAW;
23562 #if 0
23563 	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
23564 		return HAL_TCL_ENCAP_TYPE_ETHERNET;
23565 #endif
23566 	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
23567 }
23568 
23569 uint8_t
23570 qwz_dp_tx_get_tid(struct mbuf *m)
23571 {
23572 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
23573 	uint16_t qos = ieee80211_get_qos(wh);
23574 	uint8_t tid = qos & IEEE80211_QOS_TID;
23575 
23576 	return tid;
23577 }
23578 
/*
 * Fill in a TCL data command descriptor from the tx parameters
 * collected in 'ti'. The caller provides 'cmd' pointing at the
 * descriptor body (past the TLV header) inside the TCL ring.
 */
void
qwz_hal_tx_cmd_desc_setup(struct qwz_softc *sc, void *cmd,
    struct hal_tx_info *ti)
{
	struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;

	/* Buffer address split into low 32 bits and high bits plus
	 * return-buffer-manager id and sw cookie. */
	tcl_cmd->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
	    ti->paddr);
	tcl_cmd->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
	    ((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
	tcl_cmd->buf_addr_info.info1 |= FIELD_PREP(
	    BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);

	tcl_cmd->info0 =
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE, ti->encrypt_type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE, ti->search_type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN, ti->addr_search_flags) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM, ti->meta_data_flags);

	tcl_cmd->info1 = ti->flags0 |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);

	tcl_cmd->info2 = ti->flags1 |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);

	tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
	    ti->dscp_tid_tbl_idx) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX, ti->bss_ast_idx) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM, ti->bss_ast_hash);
	tcl_cmd->info4 = 0;
#ifdef notyet
	if (ti->enable_mesh)
		ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd);
#endif
}
23619 
/*
 * Transmit a data frame: pick a TCL ring and tx slot, handle crypto
 * (hardware offload where possible, net80211 software crypto
 * otherwise), DMA-map the mbuf and enqueue a TCL data command on the
 * hardware ring.
 * Consumes 'm' on failure. Returns 0 on success or an errno-style
 * error.
 */
int
qwz_dp_tx(struct qwz_softc *sc, struct qwz_vif *arvif, uint8_t pdev_id,
    struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwz_dp *dp = &sc->dp;
	struct hal_tx_info ti = {0};
	struct qwz_tx_data *tx_data;
	struct hal_srng *tcl_ring;
	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
	struct ieee80211_key *k = NULL;
	struct dp_tx_ring *tx_ring;
	void *hal_tcl_desc;
	uint8_t pool_id;
	uint8_t hal_ring_id;
	int ret, msdu_id, off;
	uint32_t ring_selector = 0;
	uint8_t ring_map = 0;

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		m_freem(m);
		return ESHUTDOWN;
	}
#if 0
	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		     !ieee80211_is_data(hdr->frame_control)))
		return -ENOTSUPP;
#endif
	/* Only ring 0 is used so far; ring_selector stays 0. */
	pool_id = 0;
	ring_selector = 0;

	ti.ring_id = ring_selector % sc->hw_params.max_tx_ring;
	ti.rbm_id = sc->hw_params.hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;

	ring_map |= (1 << ti.ring_id);

	tx_ring = &dp->tx_ring[ti.ring_id];

	if (tx_ring->queued >= sc->hw_params.tx_ring_size) {
		m_freem(m);
		return ENOSPC;
	}

	/* Claim the next free tx slot; its msdu_id becomes part of the
	 * descriptor cookie used to find this frame at completion time. */
	msdu_id = tx_ring->cur;
	tx_data = &tx_ring->data[msdu_id];
	if (tx_data->m != NULL) {
		m_freem(m);
		return ENOSPC;
	}

	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, pdev_id) |
	    FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, msdu_id) |
	    FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
	ti.encap_type = qwz_dp_tx_get_encap_type(sc);

	ti.meta_data_flags = arvif->tcl_metadata;

	if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
	    ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
		k = ieee80211_get_txkey(ic, wh, ni);
		if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags)) {
			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
		} else {
			/* For hw crypto, reserve room for the MIC the
			 * hardware will append. */
			switch (k->k_cipher) {
			case IEEE80211_CIPHER_CCMP:
				ti.encrypt_type = HAL_ENCRYPT_TYPE_CCMP_128;
				if (m_makespace(m, m->m_pkthdr.len,
				    IEEE80211_CCMP_MICLEN, &off) == NULL) {
					m_freem(m);
					return ENOSPC;
				}
				break;
			case IEEE80211_CIPHER_TKIP:
				ti.encrypt_type = HAL_ENCRYPT_TYPE_TKIP_MIC;
				if (m_makespace(m, m->m_pkthdr.len,
				    IEEE80211_TKIP_MICLEN, &off) == NULL) {
					m_freem(m);
					return ENOSPC;
				}
				break;
			default:
				ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
				break;
			}
		}

		if (ti.encrypt_type == HAL_ENCRYPT_TYPE_OPEN) {
			/* Using software crypto. */
			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
				return ENOBUFS;
			/* 802.11 header may have moved. */
			wh = mtod(m, struct ieee80211_frame *);
		}
	}

	ti.addr_search_flags = arvif->hal_addr_search_flags;
	ti.search_type = arvif->search_type;
	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
	ti.pkt_offset = 0;
	ti.lmac_id = qwz_hw_get_mac_from_pdev_id(sc, pdev_id);
	ti.bss_ast_hash = arvif->ast_hash;
	ti.bss_ast_idx = arvif->ast_idx;
	ti.dscp_tid_tbl_idx = 0;
#if 0
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
	}

	if (ieee80211_vif_is_mesh(arvif->vif))
		ti.enable_mesh = true;
#endif
	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);

	ti.tid = qwz_dp_tx_get_tid(m);
#if 0
	switch (ti.encap_type) {
	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
		ath12k_dp_tx_encap_nwifi(skb);
		break;
	case HAL_TCL_ENCAP_TYPE_RAW:
		if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
			ret = -EINVAL;
			goto fail_remove_idr;
		}
		break;
	case HAL_TCL_ENCAP_TYPE_ETHERNET:
		/* no need to encap */
		break;
	case HAL_TCL_ENCAP_TYPE_802_3:
	default:
		/* TODO: Take care of other encap modes as well */
		ret = -EINVAL;
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		goto fail_remove_idr;
	}
#endif
	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (ret && ret != EFBIG) {
		printf("%s: failed to map Tx buffer: %d\n",
		    sc->sc_dev.dv_xname, ret);
		m_freem(m);
		return ret;
	}
	if (ret) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}
		ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (ret) {
			printf("%s: failed to map Tx buffer: %d\n",
			    sc->sc_dev.dv_xname, ret);
			m_freem(m);
			return ret;
		}
	}
	ti.paddr = tx_data->map->dm_segs[0].ds_addr;

	ti.data_len = m->m_pkthdr.len;

	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
	tcl_ring = &sc->hal.srng_list[hal_ring_id];
#ifdef notyet
	spin_lock_bh(&tcl_ring->lock);
#endif
	qwz_hal_srng_access_begin(sc, tcl_ring);

	hal_tcl_desc = (void *)qwz_hal_srng_src_get_next_entry(sc, tcl_ring);
	if (!hal_tcl_desc) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
		qwz_hal_srng_access_end(sc, tcl_ring);
#if 0
		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
#endif
#ifdef notyet
		spin_unlock_bh(&tcl_ring->lock);
#endif
		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
		m_freem(m);
		return ENOMEM;
	}

	/* Frame is committed to the hardware from here on. */
	tx_data->m = m;
	tx_data->ni = ni;

	qwz_hal_tx_cmd_desc_setup(sc,
	    hal_tcl_desc + sizeof(struct hal_tlv_hdr), &ti);

	qwz_hal_srng_access_end(sc, tcl_ring);

#ifdef notyet
	spin_unlock_bh(&tcl_ring->lock);
#endif
	tx_ring->queued++;
	tx_ring->cur = (tx_ring->cur + 1) % sc->hw_params.tx_ring_size;

	/* Ring is almost full; tell the stack to stop sending. */
	if (tx_ring->queued >= sc->hw_params.tx_ring_size - 1)
		sc->qfullmsk |= (1 << ti.ring_id);

	return 0;
}
23831 
23832 int
23833 qwz_mac_station_remove(struct qwz_softc *sc, struct qwz_vif *arvif,
23834     uint8_t pdev_id, struct ieee80211_node *ni)
23835 {
23836 	struct qwz_node *nq = (struct qwz_node *)ni;
23837 	struct ath12k_peer *peer = &nq->peer;
23838 	int ret;
23839 
23840 	qwz_peer_rx_tid_cleanup(sc, peer);
23841 
23842 	ret = qwz_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
23843 	if (ret) {
23844 		printf("%s: unable to delete BSS peer: %d\n",
23845 		   sc->sc_dev.dv_xname, ret);
23846 		return ret;
23847 	}
23848 
23849 	return 0;
23850 }
23851 
23852 int
23853 qwz_mac_station_add(struct qwz_softc *sc, struct qwz_vif *arvif,
23854     uint8_t pdev_id, struct ieee80211_node *ni)
23855 {
23856 	struct peer_create_params peer_param;
23857 	int ret;
23858 #ifdef notyet
23859 	lockdep_assert_held(&ar->conf_mutex);
23860 #endif
23861 	peer_param.vdev_id = arvif->vdev_id;
23862 	peer_param.peer_addr = ni->ni_macaddr;
23863 	peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
23864 
23865 	ret = qwz_peer_create(sc, arvif, pdev_id, ni, &peer_param);
23866 	if (ret) {
23867 		printf("%s: Failed to add peer: %s for VDEV: %d\n",
23868 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
23869 		    arvif->vdev_id);
23870 		return ret;
23871 	}
23872 
23873 	DNPRINTF(QWZ_D_MAC, "%s: Added peer: %s for VDEV: %d\n", __func__,
23874 	    ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
23875 
23876 	ret = qwz_dp_peer_setup(sc, arvif->vdev_id, pdev_id, ni);
23877 	if (ret) {
23878 		printf("%s: failed to setup dp for peer %s on vdev %d (%d)\n",
23879 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
23880 		    arvif->vdev_id, ret);
23881 		goto free_peer;
23882 	}
23883 
23884 	return 0;
23885 
23886 free_peer:
23887 	qwz_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
23888 	return ret;
23889 }
23890 
/*
 * Transmit a management frame via WMI: claim a slot in the per-vif
 * management tx queue, DMA-map the mbuf (linearizing it if it has too
 * many segments) and hand it to firmware with qwz_wmi_mgmt_send().
 * Returns 0 on success or an errno-style error; 'm' is freed on
 * mapping failure after defrag but not on ENOSPC or the first EFBIG.
 */
int
qwz_mac_mgmt_tx_wmi(struct qwz_softc *sc, struct qwz_vif *arvif,
    uint8_t pdev_id, struct ieee80211_node *ni, struct mbuf *m)
{
	struct qwz_txmgmt_queue *txmgmt = &arvif->txmgmt;
	struct qwz_tx_data *tx_data;
	int buf_id;
	int ret;

	buf_id = txmgmt->cur;

	DNPRINTF(QWZ_D_MAC, "%s: tx mgmt frame, buf id %d\n", __func__, buf_id);

	if (txmgmt->queued >= nitems(txmgmt->data))
		return ENOSPC;

	tx_data = &txmgmt->data[buf_id];
#if 0
	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		     ieee80211_has_protected(hdr->frame_control)) {
			skb_put(skb, IEEE80211_CCMP_MIC_LEN);
		}
	}
#endif
	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (ret && ret != EFBIG) {
		printf("%s: failed to map mgmt Tx buffer: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}
	if (ret) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}
		ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (ret) {
			printf("%s: failed to map mgmt Tx buffer: %d\n",
			    sc->sc_dev.dv_xname, ret);
			m_freem(m);
			return ret;
		}
	}

	ret = qwz_wmi_mgmt_send(sc, arvif, pdev_id, buf_id, m, tx_data);
	if (ret) {
		printf("%s: failed to send mgmt frame: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_unmap_buf;
	}
	tx_data->ni = ni;

	txmgmt->cur = (txmgmt->cur + 1) % nitems(txmgmt->data);
	txmgmt->queued++;

	/* Queue is almost full; tell the stack to stop sending. */
	if (txmgmt->queued >= nitems(txmgmt->data) - 1)
		sc->qfullmsk |= (1U << QWZ_MGMT_QUEUE_ID);

	return 0;

err_unmap_buf:
	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
	return ret;
}
23961 
/*
 * Initialize a scan request with the driver's default parameters
 * (dwell/rest times in milliseconds, event mask, wildcard BSSID).
 * Callers override individual fields afterwards as needed.
 */
void
qwz_wmi_start_scan_init(struct qwz_softc *sc, struct scan_req_params *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	/* 11d country-code detection scans get higher priority. */
	if (sc->state_11d == ATH12K_11D_PREPARING)
		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
	else
		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_active_2g = 0;
	arg->dwell_time_passive = 150;
	arg->dwell_time_active_6g = 40;
	arg->dwell_time_passive_6g = 30;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 20000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
	    WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL |
	    WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;

	if (isset(sc->wmi.svc_map,
	    WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE))
		arg->scan_ctrl_flags_ext |=
		    WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;

	arg->num_bssid = 1;

	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
	 * ZEROs in probe request
	 */
	IEEE80211_ADDR_COPY(arg->bssid_list[0].addr, etheranyaddr);
}
24000 
24001 int
24002 qwz_wmi_set_peer_param(struct qwz_softc *sc, uint8_t *peer_addr,
24003     uint32_t vdev_id, uint32_t pdev_id, uint32_t param_id, uint32_t param_val)
24004 {
24005 	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
24006 	struct wmi_peer_set_param_cmd *cmd;
24007 	struct mbuf *m;
24008 	int ret;
24009 
24010 	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
24011 	if (!m)
24012 		return ENOMEM;
24013 
24014 	cmd = (struct wmi_peer_set_param_cmd *)(mtod(m, uint8_t *) +
24015 	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
24016 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
24017 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
24018 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
24019 	cmd->vdev_id = vdev_id;
24020 	cmd->param_id = param_id;
24021 	cmd->param_value = param_val;
24022 
24023 	ret = qwz_wmi_cmd_send(wmi, m, WMI_PEER_SET_PARAM_CMDID);
24024 	if (ret) {
24025 		if (ret != ESHUTDOWN) {
24026 			printf("%s: failed to send WMI_PEER_SET_PARAM cmd\n",
24027 			    sc->sc_dev.dv_xname);
24028 		}
24029 		m_freem(m);
24030 		return ret;
24031 	}
24032 
24033 	DNPRINTF(QWZ_D_WMI, "%s: cmd peer set param vdev %d peer %s "
24034 	    "set param %d value %d\n", __func__, vdev_id,
24035 	    ether_sprintf(peer_addr), param_id, param_val);
24036 
24037 	return 0;
24038 }
24039 
/*
 * Configure the firmware's RX block-ack reorder queue for the given
 * peer and TID.  'paddr' is the DMA address of the reorder queue memory
 * previously allocated by the driver.  The command mbuf is freed here
 * if the send fails.  Returns 0 on success or an errno.
 */
int
qwz_wmi_peer_rx_reorder_queue_setup(struct qwz_softc *sc, int vdev_id,
    int pdev_id, uint8_t *addr, uint64_t paddr, uint8_t tid,
    uint8_t ba_window_size_valid, uint32_t ba_window_size)
{
	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	/* Command payload sits after the HTC and WMI headers. */
	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = vdev_id;
	cmd->tid = tid;
	/* Split the 64-bit queue DMA address into two 32-bit words. */
	cmd->queue_ptr_lo = paddr & 0xffffffff;
	cmd->queue_ptr_hi = paddr >> 32;
	cmd->queue_no = tid;
	cmd->ba_window_size_valid = ba_window_size_valid;
	cmd->ba_window_size = ba_window_size;

	ret = qwz_wmi_cmd_send(wmi, m, WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send "
			    "WMI_PEER_REORDER_QUEUE_SETUP\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		/*
		 * Return early, matching qwz_wmi_set_peer_param(), so the
		 * success debug message below is not printed on failure.
		 */
		return ret;
	}

	DNPRINTF(QWZ_D_WMI, "%s: cmd peer reorder queue setup addr %s "
	    "vdev_id %d tid %d\n", __func__, ether_sprintf(addr), vdev_id, tid);

	return 0;
}
24084 
/*
 * Return the current spectral-scan mode.  Spectral scan support is not
 * implemented; this currently always reports ATH12K_SPECTRAL_DISABLED.
 */
enum ath12k_spectral_mode
qwz_spectral_get_mode(struct qwz_softc *sc)
{
#if 0
	if (sc->spectral.enabled)
		return ar->spectral.mode;
	else
#endif
		return ATH12K_SPECTRAL_DISABLED;
}
24095 
/*
 * Reset the spectral-scan capture buffer.  Placeholder: spectral scan
 * is not implemented yet, so this only logs a message.
 */
void
qwz_spectral_reset_buffer(struct qwz_softc *sc)
{
	printf("%s: not implemented\n", __func__);
}
24101 
/*
 * Ask firmware to cancel the current scan and wait for the scan state
 * machine to return to idle.  Even if firmware never delivers the
 * completion event, scan state is forcibly cleaned up before returning.
 * Returns 0 on success or an errno (e.g. EWOULDBLOCK on timeout).
 */
int
qwz_scan_stop(struct qwz_softc *sc)
{
	struct scan_cancel_param arg = {
		.req_type = WLAN_SCAN_CANCEL_SINGLE,
		.scan_id = ATH12K_SCAN_ID,
	};
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* TODO: Fill other STOP Params */
	arg.pdev_id = 0; /* TODO: derive pdev ID somehow? */
	arg.vdev_id = sc->scan.vdev_id;

	ret = qwz_wmi_send_scan_stop_cmd(sc, &arg);
	if (ret) {
		printf("%s: failed to stop wmi scan: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/*
	 * Wait up to 3 seconds per iteration for the scan-completion
	 * event handler to move the state machine back to idle and
	 * wake us up on &sc->scan.state.
	 */
	while (sc->scan.state != ATH12K_SCAN_IDLE) {
		ret = tsleep_nsec(&sc->scan.state, 0, "qwzscstop",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: scan stop timeout\n", sc->sc_dev.dv_xname);
			break;
		}
	}
out:
	/* Scan state should be updated upon scan completion but in case
	 * firmware fails to deliver the event (for whatever reason) it is
	 * desired to clean up scan state anyway. Firmware may have just
	 * dropped the scan completion event delivery due to transport pipe
	 * being overflown with data and/or it can recover on its own before
	 * next scan request is submitted.
	 */
#ifdef notyet
	spin_lock_bh(&ar->data_lock);
#endif
	if (sc->scan.state != ATH12K_SCAN_IDLE)
		qwz_mac_scan_finish(sc);
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
	return ret;
}
24150 
/*
 * Timeout handler for scans which never complete: abort the scan.
 * Installed via timeout_set_proc() in qwz_attach(), so it runs in
 * process context.
 */
void
qwz_scan_timeout(void *arg)
{
	struct qwz_softc *sc = arg;
	int s = splnet();

#ifdef notyet
	mutex_lock(&ar->conf_mutex);
#endif
	/*
	 * NOTE(review): this unconditional printf looks like leftover
	 * debug output; consider switching it to DNPRINTF.
	 */
	printf("%s\n", __func__);
	qwz_scan_abort(sc);
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	splx(s);
}
24167 
/*
 * Send a WMI scan-start command and wait for firmware to move the scan
 * state machine out of ATH12K_SCAN_STARTING.  If the wait times out,
 * the scan is stopped again.  Returns 0 on success or an errno.
 */
int
qwz_start_scan(struct qwz_softc *sc, struct scan_req_params *arg)
{
	int ret;
	unsigned long timeout = 1; /* seconds; see SEC_TO_NSEC() below */
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	if (qwz_spectral_get_mode(sc) == ATH12K_SPECTRAL_BACKGROUND)
		qwz_spectral_reset_buffer(sc);

	ret = qwz_wmi_send_scan_start_cmd(sc, arg);
	if (ret)
		return ret;

	/* Allow more time when firmware performs the 11D scan offload. */
	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
		timeout = 5;
#if 0
		if (ar->supports_6ghz)
			timeout += 5 * HZ;
#endif
	}

	/* Event handlers wake us on &sc->scan.state once the scan starts. */
	while (sc->scan.state == ATH12K_SCAN_STARTING) {
		ret = tsleep_nsec(&sc->scan.state, 0, "qwzscan",
		    SEC_TO_NSEC(timeout));
		if (ret) {
			printf("%s: scan start timeout\n", sc->sc_dev.dv_xname);
			qwz_scan_stop(sc);
			break;
		}
	}

#ifdef notyet
	spin_lock_bh(&ar->data_lock);
	spin_unlock_bh(&ar->data_lock);
#endif
	return ret;
}
24207 
24208 #define ATH12K_MAC_SCAN_CMD_EVT_OVERHEAD		200 /* in msecs */
24209 
/*
 * Start a hardware scan on the first vdev: build a scan request
 * covering all configured channels (directed at the desired ESSID if
 * one is set, passive otherwise) and submit it to firmware.
 * Returns 0 on success, EBUSY if a scan is already in progress, or
 * another errno on failure.
 */
int
qwz_scan(struct qwz_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list);
	struct scan_req_params *arg = NULL;
	struct ieee80211_channel *chan, *lastc;
	int ret = 0, num_channels, i;
	uint32_t scan_timeout;

	if (arvif == NULL) {
		printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/*
	 * TODO Will we need separate scan iterations on devices with
	 * multiple radios?
	 */
	if (sc->num_radios > 1)
		printf("%s: TODO: only scanning with first vdev\n", __func__);

	/* Firmwares advertising the support of triggering 11D algorithm
	 * on the scan results of a regular scan expects driver to send
	 * WMI_11D_SCAN_START_CMDID before sending WMI_START_SCAN_CMDID.
	 * With this feature, separate 11D scan can be avoided since
	 * regdomain can be determined with the scan results of the
	 * regular scan.
	 */
	if (sc->state_11d == ATH12K_11D_PREPARING &&
	    isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN))
		qwz_mac_11d_scan_start(sc, arvif);
#ifdef notyet
	mutex_lock(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
#endif
	/* Claim the scan state machine; refuse if a scan is in flight. */
	switch (sc->scan.state) {
	case ATH12K_SCAN_IDLE:
		sc->scan.started = 0;
		sc->scan.completed = 0;
		sc->scan.state = ATH12K_SCAN_STARTING;
		sc->scan.is_roc = 0;
		sc->scan.vdev_id = arvif->vdev_id;
		ret = 0;
		break;
	case ATH12K_SCAN_STARTING:
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		ret = EBUSY;
		break;
	}
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
	if (ret)
		goto exit;

	arg = malloc(sizeof(*arg), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!arg) {
		ret = ENOMEM;
		goto exit;
	}

	qwz_wmi_start_scan_init(sc, arg);
	arg->vdev_id = arvif->vdev_id;
	arg->scan_id = ATH12K_SCAN_ID;

	/* Directed scan when an ESSID is configured; passive otherwise. */
	if (ic->ic_des_esslen != 0) {
		arg->num_ssids = 1;
		arg->ssid[0].length  = ic->ic_des_esslen;
		memcpy(&arg->ssid[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	} else
		arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;

	/* First pass: count channels net80211 has configured (flags != 0). */
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	num_channels = 0;
	for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
		if (chan->ic_flags == 0)
			continue;
		num_channels++;
	}
	if (num_channels) {
		arg->num_chan = num_channels;
		arg->chan_list = mallocarray(arg->num_chan,
		    sizeof(*arg->chan_list), M_DEVBUF, M_NOWAIT | M_ZERO);

		if (!arg->chan_list) {
			ret = ENOMEM;
			goto exit;
		}

		/* Second pass: fill in the channel frequency list. */
		i = 0;
		for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
			if (chan->ic_flags == 0)
				continue;
			if (isset(sc->wmi.svc_map,
			    WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL)) {
				arg->chan_list[i++] = chan->ic_freq &
				    WMI_SCAN_CONFIG_PER_CHANNEL_MASK;
#if 0
				/* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan
				 * flags, then scan all PSC channels in 6 GHz band and
				 * those non-PSC channels where RNR IE is found during
				 * the legacy 2.4/5 GHz scan.
				 * If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set,
				 * then all channels in 6 GHz will be scanned.
				 */
				if (req->channels[i]->band == NL80211_BAND_6GHZ &&
				    req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ &&
				    !cfg80211_channel_is_psc(req->channels[i]))
					arg->chan_list[i] |=
						WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND;
#endif
			} else {
				arg->chan_list[i++] = chan->ic_freq;
			}
		}
	}
#if 0
	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		arg->scan_f_add_spoofed_mac_in_probe = 1;
		ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
		ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
	}
#endif
	/*
	 * NOTE(review): scan_timeout is only consumed by the #if 0'd
	 * timeout_add_msec() call below; it is currently dead.
	 */
	scan_timeout = 5000;

	/* Add a margin to account for event/command processing */
	scan_timeout += ATH12K_MAC_SCAN_CMD_EVT_OVERHEAD;

	ret = qwz_start_scan(sc, arg);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to start hw scan: %d\n",
			    sc->sc_dev.dv_xname, ret);
		}
#ifdef notyet
		spin_lock_bh(&ar->data_lock);
#endif
		sc->scan.state = ATH12K_SCAN_IDLE;
#ifdef notyet
		spin_unlock_bh(&ar->data_lock);
#endif
	} else {
		/*
		 * The current mode might have been fixed during association.
		 * Ensure all channels get scanned.
		 */
		if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
			ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
	}
#if 0
	timeout_add_msec(&sc->scan.timeout, scan_timeout);
#endif
exit:
	/* The request and its channel list are ours to free in all cases. */
	if (arg) {
		free(arg->chan_list, M_DEVBUF,
		    arg->num_chan * sizeof(*arg->chan_list));
#if 0
		kfree(arg->extraie.ptr);
#endif
		free(arg, M_DEVBUF, sizeof(*arg));
	}
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	if (sc->state_11d == ATH12K_11D_PREPARING)
		qwz_mac_11d_scan_start(sc, arvif);

	return ret;
}
24383 
/*
 * Abort a running scan.  Only valid while the scan state machine is in
 * ATH12K_SCAN_RUNNING; other states are either already finished (idle)
 * or mid-transition (starting/aborting), in which case abortion is
 * refused.
 */
void
qwz_scan_abort(struct qwz_softc *sc)
{
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
#endif
	switch (sc->scan.state) {
	case ATH12K_SCAN_IDLE:
		/* This can happen if timeout worker kicked in and called
		 * abortion while scan completion was being processed.
		 */
		break;
	case ATH12K_SCAN_STARTING:
	case ATH12K_SCAN_ABORTING:
		printf("%s: refusing scan abortion due to invalid "
		    "scan state: %d\n", sc->sc_dev.dv_xname, sc->scan.state);
		break;
	case ATH12K_SCAN_RUNNING:
		sc->scan.state = ATH12K_SCAN_ABORTING;
#ifdef notyet
		spin_unlock_bh(&ar->data_lock);
#endif
		/* qwz_scan_stop() also cleans up scan state on failure. */
		ret = qwz_scan_stop(sc);
		if (ret)
			printf("%s: failed to abort scan: %d\n",
			    sc->sc_dev.dv_xname, ret);
#ifdef notyet
		spin_lock_bh(&ar->data_lock);
#endif
		break;
	}
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
}
24422 
24423 /*
24424  * Find a pdev which corresponds to a given channel.
24425  * This doesn't exactly match the semantics of the Linux driver
24426  * but because OpenBSD does not (yet) implement multi-bss mode
24427  * we can assume that only one PHY will be active in either the
24428  * 2 GHz or the 5 GHz band.
24429  */
24430 struct qwz_pdev *
24431 qwz_get_pdev_for_chan(struct qwz_softc *sc, struct ieee80211_channel *chan)
24432 {
24433 	struct qwz_pdev *pdev;
24434 	int i;
24435 
24436 	for (i = 0; i < sc->num_radios; i++) {
24437 		if ((sc->pdevs_active & (1 << i)) == 0)
24438 			continue;
24439 
24440 		pdev = &sc->pdevs[i];
24441 		if (IEEE80211_IS_CHAN_2GHZ(chan) &&
24442 		    (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP))
24443 			return pdev;
24444 		if (IEEE80211_IS_CHAN_5GHZ(chan) &&
24445 		    (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP))
24446 			return pdev;
24447 	}
24448 
24449 	return NULL;
24450 }
24451 
/*
 * Program the management-frame and beacon TX rates for the given vdev,
 * derived from the lowest basic rate of the current BSS.  Errors are
 * logged but not fatal.
 */
void
qwz_recalculate_mgmt_rate(struct qwz_softc *sc, struct ieee80211_node *ni,
    uint32_t vdev_id, uint32_t pdev_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int hw_rate_code;
	uint32_t vdev_param;
	int bitrate;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* Translate the lowest basic rate into the hardware rate code. */
	bitrate = ieee80211_min_basic_rate(ic);
	hw_rate_code = qwz_mac_get_rate_hw_value(ic, ni, bitrate);
	if (hw_rate_code < 0) {
		DPRINTF("%s: bitrate not supported %d\n",
		    sc->sc_dev.dv_xname, bitrate);
		return;
	}

	vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
	ret = qwz_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id,
	    vdev_param, hw_rate_code);
	if (ret)
		printf("%s: failed to set mgmt tx rate\n",
		    sc->sc_dev.dv_xname);
#if 0
	/* For WCN6855, firmware will clear this param when vdev starts, hence
	 * cache it here so that we can reconfigure it once vdev starts.
	 */
	ab->hw_rate_code = hw_rate_code;
#endif
	vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
	ret = qwz_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id, vdev_param,
	    hw_rate_code);
	if (ret)
		printf("%s: failed to set beacon tx rate\n",
		    sc->sc_dev.dv_xname);
}
24491 
/*
 * Transition helper towards the AUTH state: configure the beacon
 * interval and management rates for the vdev, add the BSS peer
 * station, and start the vdev on the BSS channel.
 * Returns 0 on success or an errno.
 */
int
qwz_auth(struct qwz_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	uint32_t param_id;
	struct qwz_vif *arvif;
	struct qwz_pdev *pdev;
	int ret;

	arvif = TAILQ_FIRST(&sc->vif_list);
	if (arvif == NULL) {
		printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	pdev = qwz_get_pdev_for_chan(sc, ni->ni_chan);
	if (pdev == NULL) {
		printf("%s: no pdev found for channel %d\n",
		    sc->sc_dev.dv_xname, ieee80211_chan2ieee(ic, ni->ni_chan));
		return EINVAL;
	}

	param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
	ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
	    param_id, ni->ni_intval);
	if (ret) {
		printf("%s: failed to set beacon interval for VDEV: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id);
		return ret;
	}

	qwz_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);
	ni->ni_txrate = 0; /* start at the lowest rate */

	ret = qwz_mac_station_add(sc, arvif, pdev->pdev_id, ni);
	if (ret)
		return ret;

	/* Start vdev. */
	ret = qwz_mac_vdev_start(sc, arvif, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to start MAC for VDEV: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id);
		return ret;
	}

	/*
	 * WCN6855 firmware clears basic-rate parameters when vdev starts.
	 * Set it once more.
	 */
	qwz_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);

	return ret;
}
24547 
/*
 * Tear down the association: stop the vdev, deauthorize the BSS peer
 * in firmware, and remove the peer station.
 * Returns 0 on success or an errno.
 */
int
qwz_deauth(struct qwz_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
	int ret;

	ret = qwz_mac_vdev_stop(sc, arvif, pdev_id);
	if (ret) {
		printf("%s: unable to stop vdev vdev_id %d: %d\n",
		   sc->sc_dev.dv_xname, arvif->vdev_id, ret);
		return ret;
	}

	/* Revoke the firmware-side authorization granted in qwz_run(). */
	ret = qwz_wmi_set_peer_param(sc, ni->ni_macaddr, arvif->vdev_id,
	    pdev_id, WMI_PEER_AUTHORIZE, 0);
	if (ret) {
		printf("%s: unable to deauthorize BSS peer: %d\n",
		   sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwz_mac_station_remove(sc, arvif, pdev_id, ni);
	if (ret)
		return ret;

	DNPRINTF(QWZ_D_MAC, "%s: disassociated from bssid %s aid %d\n",
	    __func__, ether_sprintf(ni->ni_bssid), arvif->aid);

	return 0;
}
24581 
24582 void
24583 qwz_peer_assoc_h_basic(struct qwz_softc *sc, struct qwz_vif *arvif,
24584     struct ieee80211_node *ni, struct peer_assoc_params *arg)
24585 {
24586 #ifdef notyet
24587 	lockdep_assert_held(&ar->conf_mutex);
24588 #endif
24589 
24590 	IEEE80211_ADDR_COPY(arg->peer_mac, ni->ni_macaddr);
24591 	arg->vdev_id = arvif->vdev_id;
24592 	arg->peer_associd = ni->ni_associd;
24593 	arg->auth_flag = 1;
24594 	arg->peer_listen_intval = ni->ni_intval;
24595 	arg->peer_nss = 1;
24596 	arg->peer_caps = ni->ni_capinfo;
24597 }
24598 
/*
 * Fill in the crypto-related flags of the peer-assoc parameters.
 * For RSN networks the 4-way (PTK) handshake flag is set; WPA1
 * additionally sets the 2-way (GTK) handshake flag.
 */
void
qwz_peer_assoc_h_crypto(struct qwz_softc *sc, struct qwz_vif *arvif,
    struct ieee80211_node *ni, struct peer_assoc_params *arg)
{
	struct ieee80211com *ic = &sc->sc_ic;

	if (ic->ic_flags & IEEE80211_F_RSNON) {
		arg->need_ptk_4_way = 1;
		if (ni->ni_rsnprotos == IEEE80211_PROTO_WPA)
			arg->need_gtk_2_way = 1;
	}
#if 0
	if (sta->mfp) {
		/* TODO: Need to check if FW supports PMF? */
		arg->is_pmf_enabled = true;
	}
#endif
}
24617 
/*
 * Return non-zero if 'rate' (in units of 500 kbit/s) is one of the
 * 802.11b CCK rates: 1, 2, 5.5 or 11 Mbit/s.
 */
int
qwz_mac_rate_is_cck(uint8_t rate)
{
	switch (rate) {
	case 2:		/* 1 Mbit/s */
	case 4:		/* 2 Mbit/s */
	case 11:	/* 5.5 Mbit/s */
	case 22:	/* 11 Mbit/s */
		return 1;
	default:
		return 0;
	}
}
24623 
24624 void
24625 qwz_peer_assoc_h_rates(struct ieee80211_node *ni, struct peer_assoc_params *arg)
24626 {
24627 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
24628 	struct ieee80211_rateset *rs = &ni->ni_rates;
24629 	int i;
24630 
24631 	for (i = 0, rateset->num_rates = 0;
24632 	    i < rs->rs_nrates && rateset->num_rates < nitems(rateset->rates);
24633 	    i++, rateset->num_rates++) {
24634 		uint8_t rate = rs->rs_rates[i] & IEEE80211_RATE_VAL;
24635 		if (qwz_mac_rate_is_cck(rate))
24636 			rate |= 0x80;
24637 		rateset->rates[rateset->num_rates] = rate;
24638 	}
24639 }
24640 
24641 void
24642 qwz_peer_assoc_h_phymode(struct qwz_softc *sc, struct ieee80211_node *ni,
24643     struct peer_assoc_params *arg)
24644 {
24645 	struct ieee80211com *ic = &sc->sc_ic;
24646 	enum wmi_phy_mode phymode;
24647 
24648 	switch (ic->ic_curmode) {
24649 	case IEEE80211_MODE_11A:
24650 		phymode = MODE_11A;
24651 		break;
24652 	case IEEE80211_MODE_11B:
24653 		phymode = MODE_11B;
24654 		break;
24655 	case IEEE80211_MODE_11G:
24656 		phymode = MODE_11G;
24657 		break;
24658 	default:
24659 		phymode = MODE_UNKNOWN;
24660 		break;
24661 	}
24662 
24663 	DNPRINTF(QWZ_D_MAC, "%s: peer %s phymode %s\n", __func__,
24664 	    ether_sprintf(ni->ni_macaddr), qwz_wmi_phymode_str(phymode));
24665 
24666 	arg->peer_phymode = phymode;
24667 }
24668 
/*
 * Build the complete WMI peer-assoc parameter block for 'ni'.
 * 'reassoc' distinguishes re-association from a fresh association.
 * Only legacy (non-HT/VHT/HE) parameters are filled in so far; the
 * remaining helpers are stubbed out below.
 */
void
qwz_peer_assoc_prepare(struct qwz_softc *sc, struct qwz_vif *arvif,
    struct ieee80211_node *ni, struct peer_assoc_params *arg, int reassoc)
{
	memset(arg, 0, sizeof(*arg));

	arg->peer_new_assoc = !reassoc;
	qwz_peer_assoc_h_basic(sc, arvif, ni, arg);
	qwz_peer_assoc_h_crypto(sc, arvif, ni, arg);
	qwz_peer_assoc_h_rates(ni, arg);
	qwz_peer_assoc_h_phymode(sc, ni, arg);
#if 0
	qwz_peer_assoc_h_ht(sc, arvif, ni, arg);
	qwz_peer_assoc_h_vht(sc, arvif, ni, arg);
	qwz_peer_assoc_h_he(sc, arvif, ni, arg);
	qwz_peer_assoc_h_he_6ghz(sc, arvif, ni, arg);
	qwz_peer_assoc_h_qos(sc, arvif, ni, arg);
	qwz_peer_assoc_h_smps(ni, arg);
#endif
#if 0
	arsta->peer_nss = arg->peer_nss;
#endif
	/* TODO: amsdu_disable req? */
}
24693 
24694 int
24695 qwz_run(struct qwz_softc *sc)
24696 {
24697 	struct ieee80211com *ic = &sc->sc_ic;
24698 	struct ieee80211_node *ni = ic->ic_bss;
24699 	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
24700 	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
24701 	struct peer_assoc_params peer_arg;
24702 	int ret;
24703 #ifdef notyet
24704 	lockdep_assert_held(&ar->conf_mutex);
24705 #endif
24706 
24707 	DNPRINTF(QWZ_D_MAC, "%s: vdev %i assoc bssid %pM aid %d\n",
24708 	    __func__, arvif->vdev_id, arvif->bssid, arvif->aid);
24709 
24710 	qwz_peer_assoc_prepare(sc, arvif, ni, &peer_arg, 0);
24711 
24712 	peer_arg.is_assoc = 1;
24713 
24714 	sc->peer_assoc_done = 0;
24715 	ret = qwz_wmi_send_peer_assoc_cmd(sc, pdev_id, &peer_arg);
24716 	if (ret) {
24717 		printf("%s: failed to run peer assoc for %s vdev %i: %d\n",
24718 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
24719 		    arvif->vdev_id, ret);
24720 		return ret;
24721 	}
24722 
24723 	while (!sc->peer_assoc_done) {
24724 		ret = tsleep_nsec(&sc->peer_assoc_done, 0, "qwzassoc",
24725 		    SEC_TO_NSEC(1));
24726 		if (ret) {
24727 			printf("%s: failed to get peer assoc conf event "
24728 			    "for %s vdev %i\n", sc->sc_dev.dv_xname,
24729 			    ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
24730 			return ret;
24731 		}
24732 	}
24733 #if 0
24734 	ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
24735 				     &sta->deflink.ht_cap,
24736 				     le16_to_cpu(sta->deflink.he_6ghz_capa.capa));
24737 	if (ret) {
24738 		ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
24739 			    arvif->vdev_id, ret);
24740 		return ret;
24741 	}
24742 
24743 	if (!ath12k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
24744 		ath12k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
24745 			    arvif->vdev_id, bss_conf->bssid);
24746 		return;
24747 	}
24748 
24749 	WARN_ON(arvif->is_up);
24750 #endif
24751 
24752 	arvif->aid = ni->ni_associd;
24753 	IEEE80211_ADDR_COPY(arvif->bssid, ni->ni_bssid);
24754 
24755 	ret = qwz_wmi_vdev_up(sc, arvif->vdev_id, pdev_id, arvif->aid,
24756 	    arvif->bssid, NULL, 0, 0);
24757 	if (ret) {
24758 		printf("%s: failed to set vdev %d up: %d\n",
24759 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
24760 		return ret;
24761 	}
24762 
24763 	arvif->is_up = 1;
24764 #if 0
24765 	arvif->rekey_data.enable_offload = 0;
24766 #endif
24767 
24768 	DNPRINTF(QWZ_D_MAC, "%s: vdev %d up (associated) bssid %s aid %d\n",
24769 	    __func__, arvif->vdev_id, ether_sprintf(ni->ni_bssid), arvif->aid);
24770 
24771 	ret = qwz_wmi_set_peer_param(sc, ni->ni_macaddr, arvif->vdev_id,
24772 	    pdev_id, WMI_PEER_AUTHORIZE, 1);
24773 	if (ret) {
24774 		printf("%s: unable to authorize BSS peer: %d\n",
24775 		   sc->sc_dev.dv_xname, ret);
24776 		return ret;
24777 	}
24778 
24779 	/* Enable "ext" IRQs for datapath. */
24780 	sc->ops.irq_enable(sc);
24781 
24782 	return 0;
24783 }
24784 
/*
 * Leave RUN state: disable datapath interrupts, reset station TX-rate
 * state, and bring the vdev down.  Returns 0 on success or an errno.
 */
int
qwz_run_stop(struct qwz_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
	struct qwz_node *nq = (void *)ic->ic_bss;
	int ret;

	sc->ops.irq_disable(sc);

	/* Reset per-node rate selection state in STA mode. */
	if (ic->ic_opmode == IEEE80211_M_STA) {
		ic->ic_bss->ni_txrate = 0;
		nq->flags = 0;
	}

	ret = qwz_wmi_vdev_down(sc, arvif->vdev_id, pdev_id);
	if (ret)
		return ret;

	arvif->is_up = 0;

	DNPRINTF(QWZ_D_MAC, "%s: vdev %d down\n", __func__, arvif->vdev_id);

	return 0;
}
24811 
24812 #if NBPFILTER > 0
/*
 * Attach the radiotap BPF tap and initialize the RX/TX radiotap
 * header templates.
 * NOTE(review): the "it_present" bitmaps reuse the iwx(4) driver's
 * IWX_RX/TX_RADIOTAP_PRESENT definitions -- confirm these match the
 * radiotap fields this driver actually fills in.
 */
void
qwz_radiotap_attach(struct qwz_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
}
24827 #endif
24828 
/*
 * Final driver attach step: set up tasks and timeouts, attach
 * radiotap, initialize per-pdev back-pointers, and bring the device
 * up once to complete attachment before powering it back down until
 * the interface is brought up.  Returns 0 on success or an errno.
 */
int
qwz_attach(struct qwz_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int error, i;

	task_set(&sc->init_task, qwz_init_task, sc);
	task_set(&sc->newstate_task, qwz_newstate_task, sc);
	task_set(&sc->setkey_task, qwz_setkey_task, sc);
	timeout_set_proc(&sc->scan.timeout, qwz_scan_timeout, sc);
#if NBPFILTER > 0
	qwz_radiotap_attach(sc);
#endif
	for (i = 0; i < nitems(sc->pdevs); i++)
		sc->pdevs[i].sc = sc;

	TAILQ_INIT(&sc->vif_list);

	error = qwz_init(ifp);
	if (error)
		return error;

	/* Turn device off until interface comes up. */
	qwz_core_deinit(sc);

	return 0;
}
24857 
24858 void
24859 qwz_detach(struct qwz_softc *sc)
24860 {
24861 	if (sc->fwmem) {
24862 		qwz_dmamem_free(sc->sc_dmat, sc->fwmem);
24863 		sc->fwmem = NULL;
24864 	}
24865 
24866 	if (sc->m3_mem) {
24867 		qwz_dmamem_free(sc->sc_dmat, sc->m3_mem);
24868 		sc->m3_mem = NULL;
24869 	}
24870 
24871 	qwz_free_firmware(sc);
24872 }
24873 
/*
 * Allocate a single-segment DMA-safe memory region of 'size' bytes
 * aligned to 'align', map it into kernel VA, and load it into a DMA
 * map.  The physical range is restricted to the low 32 bits of
 * address space.  Returns NULL on failure; partially acquired
 * resources are unwound via the goto cleanup chain.
 */
struct qwz_dmamem *
qwz_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t align)
{
	struct qwz_dmamem *adm;
	int nsegs;

	adm = malloc(sizeof(*adm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adm == NULL)
		return NULL;
	adm->size = size;

	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &adm->map) != 0)
		goto admfree;

	/* Restrict allocation to the low 4 GB of physical memory. */
	if (bus_dmamem_alloc_range(dmat, size, align, 0, &adm->seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 0, 0xffffffff) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, &adm->seg, nsegs, size,
	    &adm->kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load_raw(dmat, adm->map, &adm->seg, nsegs, size,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	/* NOTE(review): likely redundant with BUS_DMA_ZERO above. */
	bzero(adm->kva, size);

	return adm;

unmap:
	bus_dmamem_unmap(dmat, adm->kva, size);
free:
	bus_dmamem_free(dmat, &adm->seg, 1);
destroy:
	bus_dmamap_destroy(dmat, adm->map);
admfree:
	free(adm, M_DEVBUF, sizeof(*adm));

	return NULL;
}
24916 
/*
 * Release a region returned by qwz_dmamem_alloc(): unmap the kernel
 * VA, free the DMA segment, destroy the map, and free the descriptor.
 */
void
qwz_dmamem_free(bus_dma_tag_t dmat, struct qwz_dmamem *adm)
{
	bus_dmamem_unmap(dmat, adm->kva, adm->size);
	bus_dmamem_free(dmat, &adm->seg, 1);
	bus_dmamap_destroy(dmat, adm->map);
	free(adm, M_DEVBUF, sizeof(*adm));
}
24925 
/*
 * autoconf(9) power-management hook: stop the interface on quiesce
 * (suspend) and re-initialize it on wakeup if it was marked up.
 * Always returns 0; init errors on wakeup are only logged.
 */
int
qwz_activate(struct device *self, int act)
{
	struct qwz_softc *sc = (struct qwz_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int err = 0;

	switch (act) {
	case DVACT_QUIESCE:
		if (ifp->if_flags & IFF_RUNNING) {
			rw_enter_write(&sc->ioctl_rwl);
			qwz_stop(ifp);
			rw_exit(&sc->ioctl_rwl);
		}
		break;
	case DVACT_RESUME:
		break;
	case DVACT_WAKEUP:
		/* Interface was up but not running: bring it back up. */
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
			err = qwz_init(ifp);
			if (err)
				printf("%s: could not initialize hardware\n",
				    sc->sc_dev.dv_xname);
		}
		break;
	}

	return 0;
}
24955