xref: /freebsd-src/sys/contrib/dev/iwlwifi/mvm/utils.c (revision a4128aad8503277614f2d214011ef60a19447b83)
1bfcc09ddSBjoern A. Zeeb // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2bfcc09ddSBjoern A. Zeeb /*
3*a4128aadSBjoern A. Zeeb  * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4bfcc09ddSBjoern A. Zeeb  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
5bfcc09ddSBjoern A. Zeeb  * Copyright (C) 2015-2017 Intel Deutschland GmbH
6bfcc09ddSBjoern A. Zeeb  */
7bfcc09ddSBjoern A. Zeeb #if defined(__FreeBSD__)
8bfcc09ddSBjoern A. Zeeb #include <linux/math64.h>
9bfcc09ddSBjoern A. Zeeb #endif
10bfcc09ddSBjoern A. Zeeb #include <net/mac80211.h>
11bfcc09ddSBjoern A. Zeeb 
12bfcc09ddSBjoern A. Zeeb #include "iwl-debug.h"
13bfcc09ddSBjoern A. Zeeb #include "iwl-io.h"
14bfcc09ddSBjoern A. Zeeb #include "iwl-prph.h"
15bfcc09ddSBjoern A. Zeeb #include "iwl-csr.h"
16bfcc09ddSBjoern A. Zeeb #include "mvm.h"
17bfcc09ddSBjoern A. Zeeb #include "fw/api/rs.h"
18bfcc09ddSBjoern A. Zeeb #include "fw/img.h"
19bfcc09ddSBjoern A. Zeeb 
20bfcc09ddSBjoern A. Zeeb /*
21bfcc09ddSBjoern A. Zeeb  * Will return 0 even if the cmd failed when RFKILL is asserted unless
22bfcc09ddSBjoern A. Zeeb  * CMD_WANT_SKB is set in cmd->flags.
23bfcc09ddSBjoern A. Zeeb  */
24bfcc09ddSBjoern A. Zeeb int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
25bfcc09ddSBjoern A. Zeeb {
26bfcc09ddSBjoern A. Zeeb 	int ret;
27bfcc09ddSBjoern A. Zeeb 
28bfcc09ddSBjoern A. Zeeb #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
29bfcc09ddSBjoern A. Zeeb 	if (WARN_ON(mvm->d3_test_active))
30bfcc09ddSBjoern A. Zeeb 		return -EIO;
31bfcc09ddSBjoern A. Zeeb #endif
32bfcc09ddSBjoern A. Zeeb 
33bfcc09ddSBjoern A. Zeeb 	/*
34bfcc09ddSBjoern A. Zeeb 	 * Synchronous commands from this op-mode must hold
35bfcc09ddSBjoern A. Zeeb 	 * the mutex, this ensures we don't try to send two
36bfcc09ddSBjoern A. Zeeb 	 * (or more) synchronous commands at a time.
37bfcc09ddSBjoern A. Zeeb 	 */
38bfcc09ddSBjoern A. Zeeb 	if (!(cmd->flags & CMD_ASYNC))
39bfcc09ddSBjoern A. Zeeb 		lockdep_assert_held(&mvm->mutex);
40bfcc09ddSBjoern A. Zeeb 
41bfcc09ddSBjoern A. Zeeb 	ret = iwl_trans_send_cmd(mvm->trans, cmd);
42bfcc09ddSBjoern A. Zeeb 
43bfcc09ddSBjoern A. Zeeb 	/*
44bfcc09ddSBjoern A. Zeeb 	 * If the caller wants the SKB, then don't hide any problems, the
45bfcc09ddSBjoern A. Zeeb 	 * caller might access the response buffer which will be NULL if
46bfcc09ddSBjoern A. Zeeb 	 * the command failed.
47bfcc09ddSBjoern A. Zeeb 	 */
48bfcc09ddSBjoern A. Zeeb 	if (cmd->flags & CMD_WANT_SKB)
49bfcc09ddSBjoern A. Zeeb 		return ret;
50bfcc09ddSBjoern A. Zeeb 
51bfcc09ddSBjoern A. Zeeb 	/*
52bfcc09ddSBjoern A. Zeeb 	 * Silently ignore failures if RFKILL is asserted or
53bfcc09ddSBjoern A. Zeeb 	 * we are in the suspend/resume process
54bfcc09ddSBjoern A. Zeeb 	 */
55bfcc09ddSBjoern A. Zeeb 	if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
56bfcc09ddSBjoern A. Zeeb 		return 0;
57bfcc09ddSBjoern A. Zeeb 	return ret;
58bfcc09ddSBjoern A. Zeeb }
59bfcc09ddSBjoern A. Zeeb 
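/*
 * Illustrative sketch (editor's addition, not upstream code): a typical
 * fire-and-forget caller can rely on iwl_mvm_send_cmd() folding -ERFKILL /
 * -EHOSTDOWN into 0 when CMD_WANT_SKB is not set.  The command ID and
 * payload below are hypothetical:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = SOME_CMD_ID,			// hypothetical ID
 *		.len = { sizeof(payload), },
 *		.data = { &payload, },
 *	};
 *
 *	lockdep_assert_held(&mvm->mutex);		// synchronous command
 *	ret = iwl_mvm_send_cmd(mvm, &hcmd);		// 0 also under RFKILL
 */
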
60bfcc09ddSBjoern A. Zeeb int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
61bfcc09ddSBjoern A. Zeeb 			 u32 flags, u16 len, const void *data)
62bfcc09ddSBjoern A. Zeeb {
63bfcc09ddSBjoern A. Zeeb 	struct iwl_host_cmd cmd = {
64bfcc09ddSBjoern A. Zeeb 		.id = id,
65bfcc09ddSBjoern A. Zeeb 		.len = { len, },
66bfcc09ddSBjoern A. Zeeb 		.data = { data, },
67bfcc09ddSBjoern A. Zeeb 		.flags = flags,
68bfcc09ddSBjoern A. Zeeb 	};
69bfcc09ddSBjoern A. Zeeb 
70bfcc09ddSBjoern A. Zeeb 	return iwl_mvm_send_cmd(mvm, &cmd);
71bfcc09ddSBjoern A. Zeeb }
72bfcc09ddSBjoern A. Zeeb 
73bfcc09ddSBjoern A. Zeeb /*
74bfcc09ddSBjoern A. Zeeb  * We assume that the caller set the status to the success value
75bfcc09ddSBjoern A. Zeeb  */
76bfcc09ddSBjoern A. Zeeb int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
77bfcc09ddSBjoern A. Zeeb 			    u32 *status)
78bfcc09ddSBjoern A. Zeeb {
79bfcc09ddSBjoern A. Zeeb 	struct iwl_rx_packet *pkt;
80bfcc09ddSBjoern A. Zeeb 	struct iwl_cmd_response *resp;
81bfcc09ddSBjoern A. Zeeb 	int ret, resp_len;
82bfcc09ddSBjoern A. Zeeb 
83bfcc09ddSBjoern A. Zeeb 	lockdep_assert_held(&mvm->mutex);
84bfcc09ddSBjoern A. Zeeb 
85bfcc09ddSBjoern A. Zeeb #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
86bfcc09ddSBjoern A. Zeeb 	if (WARN_ON(mvm->d3_test_active))
87bfcc09ddSBjoern A. Zeeb 		return -EIO;
88bfcc09ddSBjoern A. Zeeb #endif
89bfcc09ddSBjoern A. Zeeb 
90bfcc09ddSBjoern A. Zeeb 	/*
91bfcc09ddSBjoern A. Zeeb 	 * Only synchronous commands can wait for status,
92bfcc09ddSBjoern A. Zeeb 	 * we use CMD_WANT_SKB internally, so the caller must not set it.
93bfcc09ddSBjoern A. Zeeb 	 */
94bfcc09ddSBjoern A. Zeeb 	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
95bfcc09ddSBjoern A. Zeeb 		      "cmd flags %x", cmd->flags))
96bfcc09ddSBjoern A. Zeeb 		return -EINVAL;
97bfcc09ddSBjoern A. Zeeb 
98bfcc09ddSBjoern A. Zeeb 	cmd->flags |= CMD_WANT_SKB;
99bfcc09ddSBjoern A. Zeeb 
100bfcc09ddSBjoern A. Zeeb 	ret = iwl_trans_send_cmd(mvm->trans, cmd);
101bfcc09ddSBjoern A. Zeeb 	if (ret == -ERFKILL) {
102bfcc09ddSBjoern A. Zeeb 		/*
103bfcc09ddSBjoern A. Zeeb 		 * The command failed because of RFKILL, don't update
104bfcc09ddSBjoern A. Zeeb 		 * the status, leave it as success and return 0.
105bfcc09ddSBjoern A. Zeeb 		 */
106bfcc09ddSBjoern A. Zeeb 		return 0;
107bfcc09ddSBjoern A. Zeeb 	} else if (ret) {
108bfcc09ddSBjoern A. Zeeb 		return ret;
109bfcc09ddSBjoern A. Zeeb 	}
110bfcc09ddSBjoern A. Zeeb 
111bfcc09ddSBjoern A. Zeeb 	pkt = cmd->resp_pkt;
112bfcc09ddSBjoern A. Zeeb 
113bfcc09ddSBjoern A. Zeeb 	resp_len = iwl_rx_packet_payload_len(pkt);
114bfcc09ddSBjoern A. Zeeb 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
115bfcc09ddSBjoern A. Zeeb 		ret = -EIO;
116bfcc09ddSBjoern A. Zeeb 		goto out_free_resp;
117bfcc09ddSBjoern A. Zeeb 	}
118bfcc09ddSBjoern A. Zeeb 
119bfcc09ddSBjoern A. Zeeb 	resp = (void *)pkt->data;
120bfcc09ddSBjoern A. Zeeb 	*status = le32_to_cpu(resp->status);
121bfcc09ddSBjoern A. Zeeb  out_free_resp:
122bfcc09ddSBjoern A. Zeeb 	iwl_free_resp(cmd);
123bfcc09ddSBjoern A. Zeeb 	return ret;
124bfcc09ddSBjoern A. Zeeb }
125bfcc09ddSBjoern A. Zeeb 
126bfcc09ddSBjoern A. Zeeb /*
127bfcc09ddSBjoern A. Zeeb  * We assume that the caller set the status to the success value
128bfcc09ddSBjoern A. Zeeb  */
129bfcc09ddSBjoern A. Zeeb int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
130bfcc09ddSBjoern A. Zeeb 				const void *data, u32 *status)
131bfcc09ddSBjoern A. Zeeb {
132bfcc09ddSBjoern A. Zeeb 	struct iwl_host_cmd cmd = {
133bfcc09ddSBjoern A. Zeeb 		.id = id,
134bfcc09ddSBjoern A. Zeeb 		.len = { len, },
135bfcc09ddSBjoern A. Zeeb 		.data = { data, },
136bfcc09ddSBjoern A. Zeeb 	};
137bfcc09ddSBjoern A. Zeeb 
138bfcc09ddSBjoern A. Zeeb 	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
139bfcc09ddSBjoern A. Zeeb }
140bfcc09ddSBjoern A. Zeeb 
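/*
 * Illustrative sketch (editor's addition): callers of the *_status() helpers
 * should pre-load *status with the success value, because an RFKILL failure
 * returns 0 without writing to it.  The command ID and success value below
 * are hypothetical:
 *
 *	u32 status = SOME_CMD_STATUS_SUCCESS;
 *
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, SOME_CMD_ID, sizeof(cmd),
 *					  &cmd, &status);
 *	if (ret || status != SOME_CMD_STATUS_SUCCESS)
 *		IWL_ERR(mvm, "cmd failed: ret=%d status=%u\n", ret, status);
 */
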
141bfcc09ddSBjoern A. Zeeb int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
142bfcc09ddSBjoern A. Zeeb 					  enum nl80211_band band)
143bfcc09ddSBjoern A. Zeeb {
144bfcc09ddSBjoern A. Zeeb 	int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
145bfcc09ddSBjoern A. Zeeb 	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
146bfcc09ddSBjoern A. Zeeb 	bool is_LB = band == NL80211_BAND_2GHZ;
147bfcc09ddSBjoern A. Zeeb 
148bfcc09ddSBjoern A. Zeeb 	if (format == RATE_MCS_LEGACY_OFDM_MSK)
149bfcc09ddSBjoern A. Zeeb 		return is_LB ? rate + IWL_FIRST_OFDM_RATE :
150bfcc09ddSBjoern A. Zeeb 			rate;
151bfcc09ddSBjoern A. Zeeb 
152bfcc09ddSBjoern A. Zeeb 	/* CCK is not allowed in HB */
153bfcc09ddSBjoern A. Zeeb 	return is_LB ? rate : -1;
154bfcc09ddSBjoern A. Zeeb }
155bfcc09ddSBjoern A. Zeeb 
156bfcc09ddSBjoern A. Zeeb int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
157bfcc09ddSBjoern A. Zeeb 					enum nl80211_band band)
158bfcc09ddSBjoern A. Zeeb {
159bfcc09ddSBjoern A. Zeeb 	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
160bfcc09ddSBjoern A. Zeeb 	int idx;
161bfcc09ddSBjoern A. Zeeb 	int band_offset = 0;
162bfcc09ddSBjoern A. Zeeb 
163bfcc09ddSBjoern A. Zeeb 	/* Legacy rate format, search for match in table */
164bfcc09ddSBjoern A. Zeeb 	if (band != NL80211_BAND_2GHZ)
165bfcc09ddSBjoern A. Zeeb 		band_offset = IWL_FIRST_OFDM_RATE;
166bfcc09ddSBjoern A. Zeeb 	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
167bfcc09ddSBjoern A. Zeeb 		if (iwl_fw_rate_idx_to_plcp(idx) == rate)
168bfcc09ddSBjoern A. Zeeb 			return idx - band_offset;
169bfcc09ddSBjoern A. Zeeb 
170bfcc09ddSBjoern A. Zeeb 	return -1;
171bfcc09ddSBjoern A. Zeeb }
172bfcc09ddSBjoern A. Zeeb 
173bfcc09ddSBjoern A. Zeeb u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
174bfcc09ddSBjoern A. Zeeb {
175d9836fb4SBjoern A. Zeeb 	if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
176bfcc09ddSBjoern A. Zeeb 		/* In the new rate format, legacy rates are indexed:
177bfcc09ddSBjoern A. Zeeb 		 * 0 - 3 for CCK and 0 - 7 for OFDM.
178bfcc09ddSBjoern A. Zeeb 		 */
179bfcc09ddSBjoern A. Zeeb 		return (rate_idx >= IWL_FIRST_OFDM_RATE ?
180bfcc09ddSBjoern A. Zeeb 			rate_idx - IWL_FIRST_OFDM_RATE :
181bfcc09ddSBjoern A. Zeeb 			rate_idx);
182bfcc09ddSBjoern A. Zeeb 
183bfcc09ddSBjoern A. Zeeb 	return iwl_fw_rate_idx_to_plcp(rate_idx);
184bfcc09ddSBjoern A. Zeeb }
185bfcc09ddSBjoern A. Zeeb 
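/*
 * Worked example (editor's addition): with a TX_CMD version of 8 or lower the
 * mac80211 rate index is converted to a PLCP value via
 * iwl_fw_rate_idx_to_plcp(); with newer firmware legacy rates are simply
 * re-based, e.g. rate_idx == IWL_FIRST_OFDM_RATE + 2 becomes OFDM index 2,
 * while CCK indices below IWL_FIRST_OFDM_RATE pass through unchanged.
 */
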
186bfcc09ddSBjoern A. Zeeb u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
187bfcc09ddSBjoern A. Zeeb {
188bfcc09ddSBjoern A. Zeeb 	static const u8 mac80211_ac_to_ucode_ac[] = {
189bfcc09ddSBjoern A. Zeeb 		AC_VO,
190bfcc09ddSBjoern A. Zeeb 		AC_VI,
191bfcc09ddSBjoern A. Zeeb 		AC_BE,
192bfcc09ddSBjoern A. Zeeb 		AC_BK
193bfcc09ddSBjoern A. Zeeb 	};
194bfcc09ddSBjoern A. Zeeb 
195bfcc09ddSBjoern A. Zeeb 	return mac80211_ac_to_ucode_ac[ac];
196bfcc09ddSBjoern A. Zeeb }
197bfcc09ddSBjoern A. Zeeb 
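/*
 * Worked example (editor's addition): mac80211 orders the ACs VO, VI, BE, BK
 * with IEEE80211_AC_VO == 0, so the table above just re-labels each index
 * with the firmware's AC_* constant, e.g.
 * iwl_mvm_mac80211_ac_to_ucode_ac(IEEE80211_AC_BE) == AC_BE.
 */
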
198bfcc09ddSBjoern A. Zeeb void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
199bfcc09ddSBjoern A. Zeeb {
200bfcc09ddSBjoern A. Zeeb 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
201bfcc09ddSBjoern A. Zeeb 	struct iwl_error_resp *err_resp = (void *)pkt->data;
202bfcc09ddSBjoern A. Zeeb 
203bfcc09ddSBjoern A. Zeeb 	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
204bfcc09ddSBjoern A. Zeeb 		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
205bfcc09ddSBjoern A. Zeeb 	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
206bfcc09ddSBjoern A. Zeeb 		le16_to_cpu(err_resp->bad_cmd_seq_num),
207bfcc09ddSBjoern A. Zeeb 		le32_to_cpu(err_resp->error_service));
208bfcc09ddSBjoern A. Zeeb 	IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
209bfcc09ddSBjoern A. Zeeb 		le64_to_cpu(err_resp->timestamp));
210bfcc09ddSBjoern A. Zeeb }
211bfcc09ddSBjoern A. Zeeb 
212bfcc09ddSBjoern A. Zeeb /*
213bfcc09ddSBjoern A. Zeeb  * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
214bfcc09ddSBjoern A. Zeeb  * The parameter should also be a combination of ANT_[ABC].
215bfcc09ddSBjoern A. Zeeb  */
216bfcc09ddSBjoern A. Zeeb u8 first_antenna(u8 mask)
217bfcc09ddSBjoern A. Zeeb {
218bfcc09ddSBjoern A. Zeeb 	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
219bfcc09ddSBjoern A. Zeeb 	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
220bfcc09ddSBjoern A. Zeeb 		return BIT(0);
221bfcc09ddSBjoern A. Zeeb 	return BIT(ffs(mask) - 1);
222bfcc09ddSBjoern A. Zeeb }
223bfcc09ddSBjoern A. Zeeb 
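/*
 * Worked example (editor's addition): first_antenna(ANT_A | ANT_B) yields
 * BIT(ffs(0x3) - 1) == BIT(0) == ANT_A, and first_antenna(ANT_B | ANT_C)
 * yields ANT_B; an (invalid) empty mask trips the WARN_ON_ONCE() and falls
 * back to ANT_A.
 */
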
224bfcc09ddSBjoern A. Zeeb #define MAX_ANT_NUM 2
225bfcc09ddSBjoern A. Zeeb /*
226bfcc09ddSBjoern A. Zeeb  * Toggles between TX antennas to send the probe request on.
227bfcc09ddSBjoern A. Zeeb  * Receives the bitmask of valid TX antennas and the *index* used
228bfcc09ddSBjoern A. Zeeb  * for the last TX, and returns the next valid *index* to use.
229bfcc09ddSBjoern A. Zeeb  * In order to set it in the tx_cmd, must do BIT(idx).
230bfcc09ddSBjoern A. Zeeb  */
231bfcc09ddSBjoern A. Zeeb u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
232bfcc09ddSBjoern A. Zeeb {
233bfcc09ddSBjoern A. Zeeb 	u8 ind = last_idx;
234bfcc09ddSBjoern A. Zeeb 	int i;
235bfcc09ddSBjoern A. Zeeb 
236bfcc09ddSBjoern A. Zeeb 	for (i = 0; i < MAX_ANT_NUM; i++) {
237bfcc09ddSBjoern A. Zeeb 		ind = (ind + 1) % MAX_ANT_NUM;
238bfcc09ddSBjoern A. Zeeb 		if (valid & BIT(ind))
239bfcc09ddSBjoern A. Zeeb 			return ind;
240bfcc09ddSBjoern A. Zeeb 	}
241bfcc09ddSBjoern A. Zeeb 
242bfcc09ddSBjoern A. Zeeb 	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
243bfcc09ddSBjoern A. Zeeb 	return last_idx;
244bfcc09ddSBjoern A. Zeeb }
245bfcc09ddSBjoern A. Zeeb 
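/*
 * Worked example (editor's addition): with valid == (ANT_A | ANT_B) and
 * last_idx == 0, iwl_mvm_next_antenna() returns 1 and the caller then puts
 * BIT(1) (ANT_B) into the TX command; if only a single antenna is valid,
 * the same index keeps being returned.
 */
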
246bfcc09ddSBjoern A. Zeeb /**
247bfcc09ddSBjoern A. Zeeb  * iwl_mvm_send_lq_cmd() - Send link quality command
248bfcc09ddSBjoern A. Zeeb  * @mvm: Driver data.
249bfcc09ddSBjoern A. Zeeb  * @lq: Link quality command to send.
250bfcc09ddSBjoern A. Zeeb  *
251bfcc09ddSBjoern A. Zeeb  * The link quality command is sent as the last step of station creation.
252bfcc09ddSBjoern A. Zeeb  * This is the special case in which init is set, and in that case we call a
253bfcc09ddSBjoern A. Zeeb  * callback to clear the state indicating that station creation is in
254bfcc09ddSBjoern A. Zeeb  * progress.
255*a4128aadSBjoern A. Zeeb  *
256*a4128aadSBjoern A. Zeeb  * Returns: an error code indicating success or failure
257bfcc09ddSBjoern A. Zeeb  */
258bfcc09ddSBjoern A. Zeeb int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
259bfcc09ddSBjoern A. Zeeb {
260bfcc09ddSBjoern A. Zeeb 	struct iwl_host_cmd cmd = {
261bfcc09ddSBjoern A. Zeeb 		.id = LQ_CMD,
262bfcc09ddSBjoern A. Zeeb 		.len = { sizeof(struct iwl_lq_cmd), },
263bfcc09ddSBjoern A. Zeeb 		.flags = CMD_ASYNC,
264bfcc09ddSBjoern A. Zeeb 		.data = { lq, },
265bfcc09ddSBjoern A. Zeeb 	};
266bfcc09ddSBjoern A. Zeeb 
267bfcc09ddSBjoern A. Zeeb 	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
268bfcc09ddSBjoern A. Zeeb 		    iwl_mvm_has_tlc_offload(mvm)))
269bfcc09ddSBjoern A. Zeeb 		return -EINVAL;
270bfcc09ddSBjoern A. Zeeb 
271bfcc09ddSBjoern A. Zeeb 	return iwl_mvm_send_cmd(mvm, &cmd);
272bfcc09ddSBjoern A. Zeeb }
273bfcc09ddSBjoern A. Zeeb 
274bfcc09ddSBjoern A. Zeeb /**
275bfcc09ddSBjoern A. Zeeb  * iwl_mvm_update_smps - Get a request to change the SMPS mode
276bfcc09ddSBjoern A. Zeeb  * @mvm: Driver data.
277bfcc09ddSBjoern A. Zeeb  * @vif: Pointer to the ieee80211_vif structure
278bfcc09ddSBjoern A. Zeeb  * @req_type: The part of the driver that called for the change.
279bfcc09ddSBjoern A. Zeeb  * @smps_request: The request to change the SMPS mode.
2809af1bba4SBjoern A. Zeeb  * @link_id: for MLO link_id, otherwise 0 (deflink)
281bfcc09ddSBjoern A. Zeeb  *
282bfcc09ddSBjoern A. Zeeb  * Get a request to change the SMPS mode,
283bfcc09ddSBjoern A. Zeeb  * and change it according to all other requests in the driver.
284bfcc09ddSBjoern A. Zeeb  */
285bfcc09ddSBjoern A. Zeeb void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
286bfcc09ddSBjoern A. Zeeb 			 enum iwl_mvm_smps_type_request req_type,
2879af1bba4SBjoern A. Zeeb 			 enum ieee80211_smps_mode smps_request,
2889af1bba4SBjoern A. Zeeb 			 unsigned int link_id)
289bfcc09ddSBjoern A. Zeeb {
290bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif;
291bfcc09ddSBjoern A. Zeeb 	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
292bfcc09ddSBjoern A. Zeeb 	int i;
293bfcc09ddSBjoern A. Zeeb 
294bfcc09ddSBjoern A. Zeeb 	lockdep_assert_held(&mvm->mutex);
295bfcc09ddSBjoern A. Zeeb 
296bfcc09ddSBjoern A. Zeeb 	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
297bfcc09ddSBjoern A. Zeeb 	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
298bfcc09ddSBjoern A. Zeeb 		return;
299bfcc09ddSBjoern A. Zeeb 
300bfcc09ddSBjoern A. Zeeb 	if (vif->type != NL80211_IFTYPE_STATION)
301bfcc09ddSBjoern A. Zeeb 		return;
302bfcc09ddSBjoern A. Zeeb 
303bfcc09ddSBjoern A. Zeeb 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
3049af1bba4SBjoern A. Zeeb 
3059af1bba4SBjoern A. Zeeb 	if (WARN_ON_ONCE(!mvmvif->link[link_id]))
3069af1bba4SBjoern A. Zeeb 		return;
3079af1bba4SBjoern A. Zeeb 
3089af1bba4SBjoern A. Zeeb 	mvmvif->link[link_id]->smps_requests[req_type] = smps_request;
309bfcc09ddSBjoern A. Zeeb 	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
3109af1bba4SBjoern A. Zeeb 		if (mvmvif->link[link_id]->smps_requests[i] ==
3119af1bba4SBjoern A. Zeeb 		    IEEE80211_SMPS_STATIC) {
312bfcc09ddSBjoern A. Zeeb 			smps_mode = IEEE80211_SMPS_STATIC;
313bfcc09ddSBjoern A. Zeeb 			break;
314bfcc09ddSBjoern A. Zeeb 		}
3159af1bba4SBjoern A. Zeeb 		if (mvmvif->link[link_id]->smps_requests[i] ==
3169af1bba4SBjoern A. Zeeb 		    IEEE80211_SMPS_DYNAMIC)
317bfcc09ddSBjoern A. Zeeb 			smps_mode = IEEE80211_SMPS_DYNAMIC;
318bfcc09ddSBjoern A. Zeeb 	}
319bfcc09ddSBjoern A. Zeeb 
3209af1bba4SBjoern A. Zeeb 	/* SMPS is disabled in eSR */
3219af1bba4SBjoern A. Zeeb 	if (mvmvif->esr_active)
3229af1bba4SBjoern A. Zeeb 		smps_mode = IEEE80211_SMPS_OFF;
3239af1bba4SBjoern A. Zeeb 
3249af1bba4SBjoern A. Zeeb 	ieee80211_request_smps(vif, link_id, smps_mode);
3259af1bba4SBjoern A. Zeeb }
3269af1bba4SBjoern A. Zeeb 
3279af1bba4SBjoern A. Zeeb void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm,
3289af1bba4SBjoern A. Zeeb 					 struct ieee80211_vif *vif,
3299af1bba4SBjoern A. Zeeb 					 enum iwl_mvm_smps_type_request req_type,
3309af1bba4SBjoern A. Zeeb 					 enum ieee80211_smps_mode smps_request)
3319af1bba4SBjoern A. Zeeb {
3329af1bba4SBjoern A. Zeeb 	struct ieee80211_bss_conf *link_conf;
3339af1bba4SBjoern A. Zeeb 	unsigned int link_id;
3349af1bba4SBjoern A. Zeeb 
3359af1bba4SBjoern A. Zeeb 	rcu_read_lock();
3369af1bba4SBjoern A. Zeeb 	for_each_vif_active_link(vif, link_conf, link_id)
3379af1bba4SBjoern A. Zeeb 		iwl_mvm_update_smps(mvm, vif, req_type, smps_request,
3389af1bba4SBjoern A. Zeeb 				    link_id);
3399af1bba4SBjoern A. Zeeb 	rcu_read_unlock();
340bfcc09ddSBjoern A. Zeeb }
341bfcc09ddSBjoern A. Zeeb 
342d9836fb4SBjoern A. Zeeb static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
343d9836fb4SBjoern A. Zeeb 				    struct iwl_rx_packet *pkt, void *data)
344d9836fb4SBjoern A. Zeeb {
345d9836fb4SBjoern A. Zeeb 	WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);
346d9836fb4SBjoern A. Zeeb 
347d9836fb4SBjoern A. Zeeb 	return true;
348d9836fb4SBjoern A. Zeeb }
349d9836fb4SBjoern A. Zeeb 
350*a4128aadSBjoern A. Zeeb #define PERIODIC_STAT_RATE 5
351*a4128aadSBjoern A. Zeeb 
352*a4128aadSBjoern A. Zeeb int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable)
353*a4128aadSBjoern A. Zeeb {
354*a4128aadSBjoern A. Zeeb 	u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
355*a4128aadSBjoern A. Zeeb 	u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
356*a4128aadSBjoern A. Zeeb 			     IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
357*a4128aadSBjoern A. Zeeb 	struct iwl_system_statistics_cmd system_cmd = {
358*a4128aadSBjoern A. Zeeb 		.cfg_mask = cpu_to_le32(flags),
359*a4128aadSBjoern A. Zeeb 		.config_time_sec = cpu_to_le32(enable ?
360*a4128aadSBjoern A. Zeeb 					       PERIODIC_STAT_RATE : 0),
361*a4128aadSBjoern A. Zeeb 		.type_id_mask = cpu_to_le32(type),
362*a4128aadSBjoern A. Zeeb 	};
363*a4128aadSBjoern A. Zeeb 
364*a4128aadSBjoern A. Zeeb 	return iwl_mvm_send_cmd_pdu(mvm,
365*a4128aadSBjoern A. Zeeb 				    WIDE_ID(SYSTEM_GROUP,
366*a4128aadSBjoern A. Zeeb 					    SYSTEM_STATISTICS_CMD),
367*a4128aadSBjoern A. Zeeb 				    0, sizeof(system_cmd), &system_cmd);
368*a4128aadSBjoern A. Zeeb }
369*a4128aadSBjoern A. Zeeb 
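/*
 * Illustrative note (editor's addition): calling
 * iwl_mvm_request_periodic_system_statistics(mvm, true) asks the firmware
 * for OPER / OPER_PART1 statistics notifications every PERIODIC_STAT_RATE
 * (5) seconds; calling it with false sets the DISABLE_NTFY flag and a zero
 * period to turn them off again.
 */
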
370*a4128aadSBjoern A. Zeeb static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
371*a4128aadSBjoern A. Zeeb 					     u8 cmd_ver)
372*a4128aadSBjoern A. Zeeb {
373*a4128aadSBjoern A. Zeeb 	struct iwl_system_statistics_cmd system_cmd = {
374*a4128aadSBjoern A. Zeeb 		.cfg_mask = clear ?
375*a4128aadSBjoern A. Zeeb 			    cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) :
376*a4128aadSBjoern A. Zeeb 			    cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK |
377*a4128aadSBjoern A. Zeeb 					IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK),
378*a4128aadSBjoern A. Zeeb 		.type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER |
379*a4128aadSBjoern A. Zeeb 					    IWL_STATS_NTFY_TYPE_ID_OPER_PART1),
380*a4128aadSBjoern A. Zeeb 	};
381*a4128aadSBjoern A. Zeeb 	struct iwl_host_cmd cmd = {
382*a4128aadSBjoern A. Zeeb 		.id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD),
383*a4128aadSBjoern A. Zeeb 		.len[0] = sizeof(system_cmd),
384*a4128aadSBjoern A. Zeeb 		.data[0] = &system_cmd,
385*a4128aadSBjoern A. Zeeb 	};
386*a4128aadSBjoern A. Zeeb 	struct iwl_notification_wait stats_wait;
387*a4128aadSBjoern A. Zeeb 	static const u16 stats_complete[] = {
388*a4128aadSBjoern A. Zeeb 		WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
389*a4128aadSBjoern A. Zeeb 	};
390*a4128aadSBjoern A. Zeeb 	int ret;
391*a4128aadSBjoern A. Zeeb 
392*a4128aadSBjoern A. Zeeb 	if (cmd_ver != 1) {
393*a4128aadSBjoern A. Zeeb 		IWL_FW_CHECK_FAILED(mvm,
394*a4128aadSBjoern A. Zeeb 				    "Invalid system statistics command version:%d\n",
395*a4128aadSBjoern A. Zeeb 				    cmd_ver);
396*a4128aadSBjoern A. Zeeb 		return -EOPNOTSUPP;
397*a4128aadSBjoern A. Zeeb 	}
398*a4128aadSBjoern A. Zeeb 
399*a4128aadSBjoern A. Zeeb 	iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
400*a4128aadSBjoern A. Zeeb 				   stats_complete, ARRAY_SIZE(stats_complete),
401*a4128aadSBjoern A. Zeeb 				   NULL, NULL);
402*a4128aadSBjoern A. Zeeb 
403*a4128aadSBjoern A. Zeeb 	mvm->statistics_clear = clear;
404*a4128aadSBjoern A. Zeeb 	ret = iwl_mvm_send_cmd(mvm, &cmd);
405*a4128aadSBjoern A. Zeeb 	if (ret) {
406*a4128aadSBjoern A. Zeeb 		iwl_remove_notification(&mvm->notif_wait, &stats_wait);
407*a4128aadSBjoern A. Zeeb 		return ret;
408*a4128aadSBjoern A. Zeeb 	}
409*a4128aadSBjoern A. Zeeb 
410*a4128aadSBjoern A. Zeeb 	/* 500ms for the OPERATIONAL, PART1 and END notifications should be enough
411*a4128aadSBjoern A. Zeeb 	 * for FW to collect data from all LMACs and send
412*a4128aadSBjoern A. Zeeb 	 * STATISTICS_NOTIFICATION to host
413*a4128aadSBjoern A. Zeeb 	 */
414*a4128aadSBjoern A. Zeeb 	ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2);
415*a4128aadSBjoern A. Zeeb 	if (ret)
416*a4128aadSBjoern A. Zeeb 		return ret;
417*a4128aadSBjoern A. Zeeb 
418*a4128aadSBjoern A. Zeeb 	if (clear)
419*a4128aadSBjoern A. Zeeb 		iwl_mvm_accu_radio_stats(mvm);
420*a4128aadSBjoern A. Zeeb 
421*a4128aadSBjoern A. Zeeb 	return ret;
422*a4128aadSBjoern A. Zeeb }
423*a4128aadSBjoern A. Zeeb 
424bfcc09ddSBjoern A. Zeeb int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
425bfcc09ddSBjoern A. Zeeb {
426bfcc09ddSBjoern A. Zeeb 	struct iwl_statistics_cmd scmd = {
427bfcc09ddSBjoern A. Zeeb 		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
428bfcc09ddSBjoern A. Zeeb 	};
429d9836fb4SBjoern A. Zeeb 
430bfcc09ddSBjoern A. Zeeb 	struct iwl_host_cmd cmd = {
431bfcc09ddSBjoern A. Zeeb 		.id = STATISTICS_CMD,
432bfcc09ddSBjoern A. Zeeb 		.len[0] = sizeof(scmd),
433bfcc09ddSBjoern A. Zeeb 		.data[0] = &scmd,
434bfcc09ddSBjoern A. Zeeb 	};
435*a4128aadSBjoern A. Zeeb 	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
436*a4128aadSBjoern A. Zeeb 					   WIDE_ID(SYSTEM_GROUP,
437*a4128aadSBjoern A. Zeeb 						   SYSTEM_STATISTICS_CMD),
438*a4128aadSBjoern A. Zeeb 					   IWL_FW_CMD_VER_UNKNOWN);
439bfcc09ddSBjoern A. Zeeb 	int ret;
440bfcc09ddSBjoern A. Zeeb 
441*a4128aadSBjoern A. Zeeb 	/*
442*a4128aadSBjoern A. Zeeb 	 * Don't request statistics during restart: they won't contain any useful
443*a4128aadSBjoern A. Zeeb 	 * information right after restart, nor is clearing needed.
444*a4128aadSBjoern A. Zeeb 	 */
445*a4128aadSBjoern A. Zeeb 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
446*a4128aadSBjoern A. Zeeb 		return 0;
447*a4128aadSBjoern A. Zeeb 
448*a4128aadSBjoern A. Zeeb 	if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
449*a4128aadSBjoern A. Zeeb 		return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);
450*a4128aadSBjoern A. Zeeb 
451d9836fb4SBjoern A. Zeeb 	/* From version 15 - STATISTICS_NOTIFICATION, the reply for
452d9836fb4SBjoern A. Zeeb 	 * STATISTICS_CMD is empty, and the response is with
453d9836fb4SBjoern A. Zeeb 	 * STATISTICS_NOTIFICATION notification
454d9836fb4SBjoern A. Zeeb 	 */
455d9836fb4SBjoern A. Zeeb 	if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
456d9836fb4SBjoern A. Zeeb 				    STATISTICS_NOTIFICATION, 0) < 15) {
457d9836fb4SBjoern A. Zeeb 		cmd.flags = CMD_WANT_SKB;
458d9836fb4SBjoern A. Zeeb 
459bfcc09ddSBjoern A. Zeeb 		ret = iwl_mvm_send_cmd(mvm, &cmd);
460bfcc09ddSBjoern A. Zeeb 		if (ret)
461bfcc09ddSBjoern A. Zeeb 			return ret;
462bfcc09ddSBjoern A. Zeeb 
463bfcc09ddSBjoern A. Zeeb 		iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
464bfcc09ddSBjoern A. Zeeb 		iwl_free_resp(&cmd);
465d9836fb4SBjoern A. Zeeb 	} else {
466d9836fb4SBjoern A. Zeeb 		struct iwl_notification_wait stats_wait;
467d9836fb4SBjoern A. Zeeb 		static const u16 stats_complete[] = {
468d9836fb4SBjoern A. Zeeb 			STATISTICS_NOTIFICATION,
469d9836fb4SBjoern A. Zeeb 		};
470d9836fb4SBjoern A. Zeeb 
471d9836fb4SBjoern A. Zeeb 		iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
472d9836fb4SBjoern A. Zeeb 					   stats_complete, ARRAY_SIZE(stats_complete),
473d9836fb4SBjoern A. Zeeb 					   iwl_wait_stats_complete, NULL);
474d9836fb4SBjoern A. Zeeb 
475d9836fb4SBjoern A. Zeeb 		ret = iwl_mvm_send_cmd(mvm, &cmd);
476d9836fb4SBjoern A. Zeeb 		if (ret) {
477d9836fb4SBjoern A. Zeeb 			iwl_remove_notification(&mvm->notif_wait, &stats_wait);
478d9836fb4SBjoern A. Zeeb 			return ret;
479d9836fb4SBjoern A. Zeeb 		}
480d9836fb4SBjoern A. Zeeb 
481d9836fb4SBjoern A. Zeeb 		/* 200ms should be enough for FW to collect data from all
482d9836fb4SBjoern A. Zeeb 		 * LMACs and send STATISTICS_NOTIFICATION to host
483d9836fb4SBjoern A. Zeeb 		 */
484d9836fb4SBjoern A. Zeeb 		ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
485d9836fb4SBjoern A. Zeeb 		if (ret)
486d9836fb4SBjoern A. Zeeb 			return ret;
487d9836fb4SBjoern A. Zeeb 	}
488bfcc09ddSBjoern A. Zeeb 
489bfcc09ddSBjoern A. Zeeb 	if (clear)
490bfcc09ddSBjoern A. Zeeb 		iwl_mvm_accu_radio_stats(mvm);
491bfcc09ddSBjoern A. Zeeb 
492bfcc09ddSBjoern A. Zeeb 	return 0;
493bfcc09ddSBjoern A. Zeeb }
494bfcc09ddSBjoern A. Zeeb 
495bfcc09ddSBjoern A. Zeeb void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
496bfcc09ddSBjoern A. Zeeb {
497bfcc09ddSBjoern A. Zeeb 	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
498bfcc09ddSBjoern A. Zeeb 	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
499bfcc09ddSBjoern A. Zeeb 	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
500bfcc09ddSBjoern A. Zeeb 	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
501bfcc09ddSBjoern A. Zeeb }
502bfcc09ddSBjoern A. Zeeb 
503bfcc09ddSBjoern A. Zeeb struct iwl_mvm_diversity_iter_data {
504bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_phy_ctxt *ctxt;
505bfcc09ddSBjoern A. Zeeb 	bool result;
506bfcc09ddSBjoern A. Zeeb };
507bfcc09ddSBjoern A. Zeeb 
508bfcc09ddSBjoern A. Zeeb static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
509bfcc09ddSBjoern A. Zeeb 				   struct ieee80211_vif *vif)
510bfcc09ddSBjoern A. Zeeb {
511bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
512bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_diversity_iter_data *data = _data;
5139af1bba4SBjoern A. Zeeb 	int i, link_id;
514bfcc09ddSBjoern A. Zeeb 
5159af1bba4SBjoern A. Zeeb 	for_each_mvm_vif_valid_link(mvmvif, link_id) {
5169af1bba4SBjoern A. Zeeb 		struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
5179af1bba4SBjoern A. Zeeb 
5189af1bba4SBjoern A. Zeeb 		if (link_info->phy_ctxt != data->ctxt)
5199af1bba4SBjoern A. Zeeb 			continue;
520bfcc09ddSBjoern A. Zeeb 
521bfcc09ddSBjoern A. Zeeb 		for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
5229af1bba4SBjoern A. Zeeb 			if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC ||
5239af1bba4SBjoern A. Zeeb 			    link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
524bfcc09ddSBjoern A. Zeeb 				data->result = false;
525bfcc09ddSBjoern A. Zeeb 				break;
526bfcc09ddSBjoern A. Zeeb 			}
527bfcc09ddSBjoern A. Zeeb 		}
528bfcc09ddSBjoern A. Zeeb 	}
5299af1bba4SBjoern A. Zeeb }
530bfcc09ddSBjoern A. Zeeb 
531bfcc09ddSBjoern A. Zeeb bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
532bfcc09ddSBjoern A. Zeeb 				  struct iwl_mvm_phy_ctxt *ctxt)
533bfcc09ddSBjoern A. Zeeb {
534bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_diversity_iter_data data = {
535bfcc09ddSBjoern A. Zeeb 		.ctxt = ctxt,
536bfcc09ddSBjoern A. Zeeb 		.result = true,
537bfcc09ddSBjoern A. Zeeb 	};
538bfcc09ddSBjoern A. Zeeb 
539bfcc09ddSBjoern A. Zeeb 	lockdep_assert_held(&mvm->mutex);
540bfcc09ddSBjoern A. Zeeb 
541bfcc09ddSBjoern A. Zeeb 	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
542bfcc09ddSBjoern A. Zeeb 		return false;
543bfcc09ddSBjoern A. Zeeb 
544bfcc09ddSBjoern A. Zeeb 	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
545bfcc09ddSBjoern A. Zeeb 		return false;
546bfcc09ddSBjoern A. Zeeb 
547bfcc09ddSBjoern A. Zeeb 	if (mvm->cfg->rx_with_siso_diversity)
548bfcc09ddSBjoern A. Zeeb 		return false;
549bfcc09ddSBjoern A. Zeeb 
550bfcc09ddSBjoern A. Zeeb 	ieee80211_iterate_active_interfaces_atomic(
551bfcc09ddSBjoern A. Zeeb 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
552bfcc09ddSBjoern A. Zeeb 			iwl_mvm_diversity_iter, &data);
553bfcc09ddSBjoern A. Zeeb 
554bfcc09ddSBjoern A. Zeeb 	return data.result;
555bfcc09ddSBjoern A. Zeeb }
556bfcc09ddSBjoern A. Zeeb 
557bfcc09ddSBjoern A. Zeeb void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
558bfcc09ddSBjoern A. Zeeb 				  bool low_latency, u16 mac_id)
559bfcc09ddSBjoern A. Zeeb {
560bfcc09ddSBjoern A. Zeeb 	struct iwl_mac_low_latency_cmd cmd = {
561bfcc09ddSBjoern A. Zeeb 		.mac_id = cpu_to_le32(mac_id)
562bfcc09ddSBjoern A. Zeeb 	};
563bfcc09ddSBjoern A. Zeeb 
564bfcc09ddSBjoern A. Zeeb 	if (!fw_has_capa(&mvm->fw->ucode_capa,
565bfcc09ddSBjoern A. Zeeb 			 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
566bfcc09ddSBjoern A. Zeeb 		return;
567bfcc09ddSBjoern A. Zeeb 
568bfcc09ddSBjoern A. Zeeb 	if (low_latency) {
569bfcc09ddSBjoern A. Zeeb 		/* currently we don't care about the direction */
570bfcc09ddSBjoern A. Zeeb 		cmd.low_latency_rx = 1;
571bfcc09ddSBjoern A. Zeeb 		cmd.low_latency_tx = 1;
572bfcc09ddSBjoern A. Zeeb 	}
573bfcc09ddSBjoern A. Zeeb 
574d9836fb4SBjoern A. Zeeb 	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
575bfcc09ddSBjoern A. Zeeb 				 0, sizeof(cmd), &cmd))
576bfcc09ddSBjoern A. Zeeb 		IWL_ERR(mvm, "Failed to send low latency command\n");
577bfcc09ddSBjoern A. Zeeb }
578bfcc09ddSBjoern A. Zeeb 
579bfcc09ddSBjoern A. Zeeb int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
580bfcc09ddSBjoern A. Zeeb 			       bool low_latency,
581bfcc09ddSBjoern A. Zeeb 			       enum iwl_mvm_low_latency_cause cause)
582bfcc09ddSBjoern A. Zeeb {
583bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
584bfcc09ddSBjoern A. Zeeb 	int res;
585bfcc09ddSBjoern A. Zeeb 	bool prev;
586bfcc09ddSBjoern A. Zeeb 
587bfcc09ddSBjoern A. Zeeb 	lockdep_assert_held(&mvm->mutex);
588bfcc09ddSBjoern A. Zeeb 
589bfcc09ddSBjoern A. Zeeb 	prev = iwl_mvm_vif_low_latency(mvmvif);
590bfcc09ddSBjoern A. Zeeb 	iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
591bfcc09ddSBjoern A. Zeeb 
592bfcc09ddSBjoern A. Zeeb 	low_latency = iwl_mvm_vif_low_latency(mvmvif);
593bfcc09ddSBjoern A. Zeeb 
594bfcc09ddSBjoern A. Zeeb 	if (low_latency == prev)
595bfcc09ddSBjoern A. Zeeb 		return 0;
596bfcc09ddSBjoern A. Zeeb 
597bfcc09ddSBjoern A. Zeeb 	iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
598bfcc09ddSBjoern A. Zeeb 
599bfcc09ddSBjoern A. Zeeb 	res = iwl_mvm_update_quotas(mvm, false, NULL);
600bfcc09ddSBjoern A. Zeeb 	if (res)
601bfcc09ddSBjoern A. Zeeb 		return res;
602bfcc09ddSBjoern A. Zeeb 
603bfcc09ddSBjoern A. Zeeb 	iwl_mvm_bt_coex_vif_change(mvm);
604bfcc09ddSBjoern A. Zeeb 
605bfcc09ddSBjoern A. Zeeb 	return iwl_mvm_power_update_mac(mvm);
606bfcc09ddSBjoern A. Zeeb }
607bfcc09ddSBjoern A. Zeeb 
608bfcc09ddSBjoern A. Zeeb struct iwl_mvm_low_latency_iter {
609bfcc09ddSBjoern A. Zeeb 	bool result;
610bfcc09ddSBjoern A. Zeeb 	bool result_per_band[NUM_NL80211_BANDS];
611bfcc09ddSBjoern A. Zeeb };
612bfcc09ddSBjoern A. Zeeb 
613bfcc09ddSBjoern A. Zeeb static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
614bfcc09ddSBjoern A. Zeeb {
615bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_low_latency_iter *result = _data;
616bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
617bfcc09ddSBjoern A. Zeeb 	enum nl80211_band band;
618bfcc09ddSBjoern A. Zeeb 
619bfcc09ddSBjoern A. Zeeb 	if (iwl_mvm_vif_low_latency(mvmvif)) {
620bfcc09ddSBjoern A. Zeeb 		result->result = true;
621bfcc09ddSBjoern A. Zeeb 
6229af1bba4SBjoern A. Zeeb 		if (!mvmvif->deflink.phy_ctxt)
623bfcc09ddSBjoern A. Zeeb 			return;
624bfcc09ddSBjoern A. Zeeb 
6259af1bba4SBjoern A. Zeeb 		band = mvmvif->deflink.phy_ctxt->channel->band;
626bfcc09ddSBjoern A. Zeeb 		result->result_per_band[band] = true;
627bfcc09ddSBjoern A. Zeeb 	}
628bfcc09ddSBjoern A. Zeeb }
629bfcc09ddSBjoern A. Zeeb 
630bfcc09ddSBjoern A. Zeeb bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
631bfcc09ddSBjoern A. Zeeb {
632bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_low_latency_iter data = {};
633bfcc09ddSBjoern A. Zeeb 
634bfcc09ddSBjoern A. Zeeb 	ieee80211_iterate_active_interfaces_atomic(
635bfcc09ddSBjoern A. Zeeb 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
636bfcc09ddSBjoern A. Zeeb 			iwl_mvm_ll_iter, &data);
637bfcc09ddSBjoern A. Zeeb 
638bfcc09ddSBjoern A. Zeeb 	return data.result;
639bfcc09ddSBjoern A. Zeeb }
640bfcc09ddSBjoern A. Zeeb 
641bfcc09ddSBjoern A. Zeeb bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
642bfcc09ddSBjoern A. Zeeb {
643bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_low_latency_iter data = {};
644bfcc09ddSBjoern A. Zeeb 
645bfcc09ddSBjoern A. Zeeb 	ieee80211_iterate_active_interfaces_atomic(
646bfcc09ddSBjoern A. Zeeb 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
647bfcc09ddSBjoern A. Zeeb 			iwl_mvm_ll_iter, &data);
648bfcc09ddSBjoern A. Zeeb 
649bfcc09ddSBjoern A. Zeeb 	return data.result_per_band[band];
650bfcc09ddSBjoern A. Zeeb }
651bfcc09ddSBjoern A. Zeeb 
652bfcc09ddSBjoern A. Zeeb struct iwl_bss_iter_data {
653bfcc09ddSBjoern A. Zeeb 	struct ieee80211_vif *vif;
654bfcc09ddSBjoern A. Zeeb 	bool error;
655bfcc09ddSBjoern A. Zeeb };
656bfcc09ddSBjoern A. Zeeb 
657bfcc09ddSBjoern A. Zeeb static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
658bfcc09ddSBjoern A. Zeeb 				       struct ieee80211_vif *vif)
659bfcc09ddSBjoern A. Zeeb {
660bfcc09ddSBjoern A. Zeeb 	struct iwl_bss_iter_data *data = _data;
661bfcc09ddSBjoern A. Zeeb 
662bfcc09ddSBjoern A. Zeeb 	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
663bfcc09ddSBjoern A. Zeeb 		return;
664bfcc09ddSBjoern A. Zeeb 
665bfcc09ddSBjoern A. Zeeb 	if (data->vif) {
666bfcc09ddSBjoern A. Zeeb 		data->error = true;
667bfcc09ddSBjoern A. Zeeb 		return;
668bfcc09ddSBjoern A. Zeeb 	}
669bfcc09ddSBjoern A. Zeeb 
670bfcc09ddSBjoern A. Zeeb 	data->vif = vif;
671bfcc09ddSBjoern A. Zeeb }
672bfcc09ddSBjoern A. Zeeb 
673bfcc09ddSBjoern A. Zeeb struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
674bfcc09ddSBjoern A. Zeeb {
675bfcc09ddSBjoern A. Zeeb 	struct iwl_bss_iter_data bss_iter_data = {};
676bfcc09ddSBjoern A. Zeeb 
677bfcc09ddSBjoern A. Zeeb 	ieee80211_iterate_active_interfaces_atomic(
678bfcc09ddSBjoern A. Zeeb 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
679bfcc09ddSBjoern A. Zeeb 		iwl_mvm_bss_iface_iterator, &bss_iter_data);
680bfcc09ddSBjoern A. Zeeb 
681bfcc09ddSBjoern A. Zeeb 	if (bss_iter_data.error) {
682bfcc09ddSBjoern A. Zeeb 		IWL_ERR(mvm, "More than one managed interface active!\n");
683bfcc09ddSBjoern A. Zeeb 		return ERR_PTR(-EINVAL);
684bfcc09ddSBjoern A. Zeeb 	}
685bfcc09ddSBjoern A. Zeeb 
686bfcc09ddSBjoern A. Zeeb 	return bss_iter_data.vif;
687bfcc09ddSBjoern A. Zeeb }
688bfcc09ddSBjoern A. Zeeb 
689bfcc09ddSBjoern A. Zeeb struct iwl_bss_find_iter_data {
690bfcc09ddSBjoern A. Zeeb 	struct ieee80211_vif *vif;
691bfcc09ddSBjoern A. Zeeb 	u32 macid;
692bfcc09ddSBjoern A. Zeeb };
693bfcc09ddSBjoern A. Zeeb 
694bfcc09ddSBjoern A. Zeeb static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
695bfcc09ddSBjoern A. Zeeb 					    struct ieee80211_vif *vif)
696bfcc09ddSBjoern A. Zeeb {
697bfcc09ddSBjoern A. Zeeb 	struct iwl_bss_find_iter_data *data = _data;
698bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
699bfcc09ddSBjoern A. Zeeb 
700bfcc09ddSBjoern A. Zeeb 	if (mvmvif->id == data->macid)
701bfcc09ddSBjoern A. Zeeb 		data->vif = vif;
702bfcc09ddSBjoern A. Zeeb }
703bfcc09ddSBjoern A. Zeeb 
704bfcc09ddSBjoern A. Zeeb struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
705bfcc09ddSBjoern A. Zeeb {
706bfcc09ddSBjoern A. Zeeb 	struct iwl_bss_find_iter_data data = {
707bfcc09ddSBjoern A. Zeeb 		.macid = macid,
708bfcc09ddSBjoern A. Zeeb 	};
709bfcc09ddSBjoern A. Zeeb 
710bfcc09ddSBjoern A. Zeeb 	lockdep_assert_held(&mvm->mutex);
711bfcc09ddSBjoern A. Zeeb 
712bfcc09ddSBjoern A. Zeeb 	ieee80211_iterate_active_interfaces_atomic(
713bfcc09ddSBjoern A. Zeeb 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
714bfcc09ddSBjoern A. Zeeb 		iwl_mvm_bss_find_iface_iterator, &data);
715bfcc09ddSBjoern A. Zeeb 
716bfcc09ddSBjoern A. Zeeb 	return data.vif;
717bfcc09ddSBjoern A. Zeeb }
718bfcc09ddSBjoern A. Zeeb 
719bfcc09ddSBjoern A. Zeeb struct iwl_sta_iter_data {
720bfcc09ddSBjoern A. Zeeb 	bool assoc;
721bfcc09ddSBjoern A. Zeeb };
722bfcc09ddSBjoern A. Zeeb 
723bfcc09ddSBjoern A. Zeeb static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
724bfcc09ddSBjoern A. Zeeb 				       struct ieee80211_vif *vif)
725bfcc09ddSBjoern A. Zeeb {
726bfcc09ddSBjoern A. Zeeb 	struct iwl_sta_iter_data *data = _data;
727bfcc09ddSBjoern A. Zeeb 
728bfcc09ddSBjoern A. Zeeb 	if (vif->type != NL80211_IFTYPE_STATION)
729bfcc09ddSBjoern A. Zeeb 		return;
730bfcc09ddSBjoern A. Zeeb 
7319af1bba4SBjoern A. Zeeb 	if (vif->cfg.assoc)
732bfcc09ddSBjoern A. Zeeb 		data->assoc = true;
733bfcc09ddSBjoern A. Zeeb }
734bfcc09ddSBjoern A. Zeeb 
735bfcc09ddSBjoern A. Zeeb bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
736bfcc09ddSBjoern A. Zeeb {
737bfcc09ddSBjoern A. Zeeb 	struct iwl_sta_iter_data data = {
738bfcc09ddSBjoern A. Zeeb 		.assoc = false,
739bfcc09ddSBjoern A. Zeeb 	};
740bfcc09ddSBjoern A. Zeeb 
741bfcc09ddSBjoern A. Zeeb 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
742bfcc09ddSBjoern A. Zeeb 						   IEEE80211_IFACE_ITER_NORMAL,
743bfcc09ddSBjoern A. Zeeb 						   iwl_mvm_sta_iface_iterator,
744bfcc09ddSBjoern A. Zeeb 						   &data);
745bfcc09ddSBjoern A. Zeeb 	return data.assoc;
746bfcc09ddSBjoern A. Zeeb }
747bfcc09ddSBjoern A. Zeeb 
748bfcc09ddSBjoern A. Zeeb unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
749bfcc09ddSBjoern A. Zeeb 				    struct ieee80211_vif *vif,
750bfcc09ddSBjoern A. Zeeb 				    bool tdls, bool cmd_q)
751bfcc09ddSBjoern A. Zeeb {
752bfcc09ddSBjoern A. Zeeb 	struct iwl_fw_dbg_trigger_tlv *trigger;
753bfcc09ddSBjoern A. Zeeb 	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
754bfcc09ddSBjoern A. Zeeb 	unsigned int default_timeout = cmd_q ?
755bfcc09ddSBjoern A. Zeeb 		IWL_DEF_WD_TIMEOUT :
756bfcc09ddSBjoern A. Zeeb 		mvm->trans->trans_cfg->base_params->wd_timeout;
757bfcc09ddSBjoern A. Zeeb 
758bfcc09ddSBjoern A. Zeeb 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
759bfcc09ddSBjoern A. Zeeb 		/*
760bfcc09ddSBjoern A. Zeeb 		 * We can't know when the station is asleep or awake, so we
761bfcc09ddSBjoern A. Zeeb 		 * must disable the queue hang detection.
762bfcc09ddSBjoern A. Zeeb 		 */
763bfcc09ddSBjoern A. Zeeb 		if (fw_has_capa(&mvm->fw->ucode_capa,
764bfcc09ddSBjoern A. Zeeb 				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
765bfcc09ddSBjoern A. Zeeb 		    vif && vif->type == NL80211_IFTYPE_AP)
766bfcc09ddSBjoern A. Zeeb 			return IWL_WATCHDOG_DISABLED;
767bfcc09ddSBjoern A. Zeeb 		return default_timeout;
768bfcc09ddSBjoern A. Zeeb 	}
769bfcc09ddSBjoern A. Zeeb 
770bfcc09ddSBjoern A. Zeeb 	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
771bfcc09ddSBjoern A. Zeeb 	txq_timer = (void *)trigger->data;
772bfcc09ddSBjoern A. Zeeb 
773bfcc09ddSBjoern A. Zeeb 	if (tdls)
774bfcc09ddSBjoern A. Zeeb 		return le32_to_cpu(txq_timer->tdls);
775bfcc09ddSBjoern A. Zeeb 
776bfcc09ddSBjoern A. Zeeb 	if (cmd_q)
777bfcc09ddSBjoern A. Zeeb 		return le32_to_cpu(txq_timer->command_queue);
778bfcc09ddSBjoern A. Zeeb 
779bfcc09ddSBjoern A. Zeeb 	if (WARN_ON(!vif))
780bfcc09ddSBjoern A. Zeeb 		return default_timeout;
781bfcc09ddSBjoern A. Zeeb 
782bfcc09ddSBjoern A. Zeeb 	switch (ieee80211_vif_type_p2p(vif)) {
783bfcc09ddSBjoern A. Zeeb 	case NL80211_IFTYPE_ADHOC:
784bfcc09ddSBjoern A. Zeeb 		return le32_to_cpu(txq_timer->ibss);
785bfcc09ddSBjoern A. Zeeb 	case NL80211_IFTYPE_STATION:
786bfcc09ddSBjoern A. Zeeb 		return le32_to_cpu(txq_timer->bss);
787bfcc09ddSBjoern A. Zeeb 	case NL80211_IFTYPE_AP:
788bfcc09ddSBjoern A. Zeeb 		return le32_to_cpu(txq_timer->softap);
789bfcc09ddSBjoern A. Zeeb 	case NL80211_IFTYPE_P2P_CLIENT:
790bfcc09ddSBjoern A. Zeeb 		return le32_to_cpu(txq_timer->p2p_client);
791bfcc09ddSBjoern A. Zeeb 	case NL80211_IFTYPE_P2P_GO:
792bfcc09ddSBjoern A. Zeeb 		return le32_to_cpu(txq_timer->p2p_go);
793bfcc09ddSBjoern A. Zeeb 	case NL80211_IFTYPE_P2P_DEVICE:
794bfcc09ddSBjoern A. Zeeb 		return le32_to_cpu(txq_timer->p2p_device);
795bfcc09ddSBjoern A. Zeeb 	case NL80211_IFTYPE_MONITOR:
796bfcc09ddSBjoern A. Zeeb 		return default_timeout;
797bfcc09ddSBjoern A. Zeeb 	default:
798bfcc09ddSBjoern A. Zeeb 		WARN_ON(1);
799bfcc09ddSBjoern A. Zeeb 		return mvm->trans->trans_cfg->base_params->wd_timeout;
800bfcc09ddSBjoern A. Zeeb 	}
801bfcc09ddSBjoern A. Zeeb }
802bfcc09ddSBjoern A. Zeeb 
803bfcc09ddSBjoern A. Zeeb void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
804bfcc09ddSBjoern A. Zeeb 			     const char *errmsg)
805bfcc09ddSBjoern A. Zeeb {
806bfcc09ddSBjoern A. Zeeb 	struct iwl_fw_dbg_trigger_tlv *trig;
807bfcc09ddSBjoern A. Zeeb 	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
808bfcc09ddSBjoern A. Zeeb 
809bfcc09ddSBjoern A. Zeeb 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
810bfcc09ddSBjoern A. Zeeb 				     FW_DBG_TRIGGER_MLME);
811bfcc09ddSBjoern A. Zeeb 	if (!trig)
812bfcc09ddSBjoern A. Zeeb 		goto out;
813bfcc09ddSBjoern A. Zeeb 
814bfcc09ddSBjoern A. Zeeb 	trig_mlme = (void *)trig->data;
815bfcc09ddSBjoern A. Zeeb 
816bfcc09ddSBjoern A. Zeeb 	if (trig_mlme->stop_connection_loss &&
817bfcc09ddSBjoern A. Zeeb 	    --trig_mlme->stop_connection_loss)
818bfcc09ddSBjoern A. Zeeb 		goto out;
819bfcc09ddSBjoern A. Zeeb 
820bfcc09ddSBjoern A. Zeeb 	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
821bfcc09ddSBjoern A. Zeeb 
822bfcc09ddSBjoern A. Zeeb out:
823bfcc09ddSBjoern A. Zeeb 	ieee80211_connection_loss(vif);
824bfcc09ddSBjoern A. Zeeb }
825bfcc09ddSBjoern A. Zeeb 
826bfcc09ddSBjoern A. Zeeb void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
827bfcc09ddSBjoern A. Zeeb 					  struct ieee80211_vif *vif,
828bfcc09ddSBjoern A. Zeeb 					  const struct ieee80211_sta *sta,
829bfcc09ddSBjoern A. Zeeb 					  u16 tid)
830bfcc09ddSBjoern A. Zeeb {
831bfcc09ddSBjoern A. Zeeb 	struct iwl_fw_dbg_trigger_tlv *trig;
832bfcc09ddSBjoern A. Zeeb 	struct iwl_fw_dbg_trigger_ba *ba_trig;
833bfcc09ddSBjoern A. Zeeb 
834bfcc09ddSBjoern A. Zeeb 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
835bfcc09ddSBjoern A. Zeeb 				     FW_DBG_TRIGGER_BA);
836bfcc09ddSBjoern A. Zeeb 	if (!trig)
837bfcc09ddSBjoern A. Zeeb 		return;
838bfcc09ddSBjoern A. Zeeb 
839bfcc09ddSBjoern A. Zeeb 	ba_trig = (void *)trig->data;
840bfcc09ddSBjoern A. Zeeb 
841bfcc09ddSBjoern A. Zeeb 	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
842bfcc09ddSBjoern A. Zeeb 		return;
843bfcc09ddSBjoern A. Zeeb 
844bfcc09ddSBjoern A. Zeeb 	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
845bfcc09ddSBjoern A. Zeeb 				"Frame from %pM timed out, tid %d",
846bfcc09ddSBjoern A. Zeeb 				sta->addr, tid);
847bfcc09ddSBjoern A. Zeeb }
848bfcc09ddSBjoern A. Zeeb 
849bfcc09ddSBjoern A. Zeeb u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
850bfcc09ddSBjoern A. Zeeb {
851bfcc09ddSBjoern A. Zeeb 	if (!elapsed)
852bfcc09ddSBjoern A. Zeeb 		return 0;
853bfcc09ddSBjoern A. Zeeb 
854bfcc09ddSBjoern A. Zeeb 	return (100 * airtime / elapsed) / USEC_PER_MSEC;
855bfcc09ddSBjoern A. Zeeb }
856bfcc09ddSBjoern A. Zeeb 
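/*
 * Worked example (editor's addition): airtime is counted in microseconds and
 * elapsed in milliseconds (hence the USEC_PER_MSEC division), so 250000 us of
 * airtime over a 1000 ms window gives (100 * 250000 / 1000) / 1000 == 25,
 * i.e. a 25% load.
 */
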
857bfcc09ddSBjoern A. Zeeb static enum iwl_mvm_traffic_load
858bfcc09ddSBjoern A. Zeeb iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
859bfcc09ddSBjoern A. Zeeb {
860bfcc09ddSBjoern A. Zeeb 	u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
861bfcc09ddSBjoern A. Zeeb 
862bfcc09ddSBjoern A. Zeeb 	if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
863bfcc09ddSBjoern A. Zeeb 		return IWL_MVM_TRAFFIC_HIGH;
864bfcc09ddSBjoern A. Zeeb 	if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
865bfcc09ddSBjoern A. Zeeb 		return IWL_MVM_TRAFFIC_MEDIUM;
866bfcc09ddSBjoern A. Zeeb 
867bfcc09ddSBjoern A. Zeeb 	return IWL_MVM_TRAFFIC_LOW;
868bfcc09ddSBjoern A. Zeeb }
869bfcc09ddSBjoern A. Zeeb 
870bfcc09ddSBjoern A. Zeeb static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
871bfcc09ddSBjoern A. Zeeb {
872bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm *mvm = _data;
873bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
874bfcc09ddSBjoern A. Zeeb 	bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
875bfcc09ddSBjoern A. Zeeb 
876bfcc09ddSBjoern A. Zeeb 	if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
877bfcc09ddSBjoern A. Zeeb 		return;
878bfcc09ddSBjoern A. Zeeb 
879bfcc09ddSBjoern A. Zeeb 	low_latency = mvm->tcm.result.low_latency[mvmvif->id];
880bfcc09ddSBjoern A. Zeeb 
881bfcc09ddSBjoern A. Zeeb 	if (!mvm->tcm.result.change[mvmvif->id] &&
882bfcc09ddSBjoern A. Zeeb 	    prev == low_latency) {
883bfcc09ddSBjoern A. Zeeb 		iwl_mvm_update_quotas(mvm, false, NULL);
884bfcc09ddSBjoern A. Zeeb 		return;
885bfcc09ddSBjoern A. Zeeb 	}
886bfcc09ddSBjoern A. Zeeb 
887bfcc09ddSBjoern A. Zeeb 	if (prev != low_latency) {
888bfcc09ddSBjoern A. Zeeb 		/* this sends traffic load and updates quota as well */
889bfcc09ddSBjoern A. Zeeb 		iwl_mvm_update_low_latency(mvm, vif, low_latency,
890bfcc09ddSBjoern A. Zeeb 					   LOW_LATENCY_TRAFFIC);
891bfcc09ddSBjoern A. Zeeb 	} else {
892bfcc09ddSBjoern A. Zeeb 		iwl_mvm_update_quotas(mvm, false, NULL);
893bfcc09ddSBjoern A. Zeeb 	}
894bfcc09ddSBjoern A. Zeeb }
895bfcc09ddSBjoern A. Zeeb 
896bfcc09ddSBjoern A. Zeeb static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
897bfcc09ddSBjoern A. Zeeb {
898*a4128aadSBjoern A. Zeeb 	guard(mvm)(mvm);
899bfcc09ddSBjoern A. Zeeb 
900bfcc09ddSBjoern A. Zeeb 	ieee80211_iterate_active_interfaces(
901bfcc09ddSBjoern A. Zeeb 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
902bfcc09ddSBjoern A. Zeeb 		iwl_mvm_tcm_iter, mvm);
903bfcc09ddSBjoern A. Zeeb 
904bfcc09ddSBjoern A. Zeeb 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
905bfcc09ddSBjoern A. Zeeb 		iwl_mvm_config_scan(mvm);
906bfcc09ddSBjoern A. Zeeb }
907bfcc09ddSBjoern A. Zeeb 
908bfcc09ddSBjoern A. Zeeb static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
909bfcc09ddSBjoern A. Zeeb {
910bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm *mvm;
911bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif;
912bfcc09ddSBjoern A. Zeeb 	struct ieee80211_vif *vif;
913bfcc09ddSBjoern A. Zeeb 
914bfcc09ddSBjoern A. Zeeb 	mvmvif = container_of(wk, struct iwl_mvm_vif,
915bfcc09ddSBjoern A. Zeeb 			      uapsd_nonagg_detected_wk.work);
916bfcc09ddSBjoern A. Zeeb 	vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
917bfcc09ddSBjoern A. Zeeb 	mvm = mvmvif->mvm;
918bfcc09ddSBjoern A. Zeeb 
919bfcc09ddSBjoern A. Zeeb 	if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
920bfcc09ddSBjoern A. Zeeb 		return;
921bfcc09ddSBjoern A. Zeeb 
922bfcc09ddSBjoern A. Zeeb 	/* remember that this AP is broken */
923bfcc09ddSBjoern A. Zeeb 	memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
924bfcc09ddSBjoern A. Zeeb 	       vif->bss_conf.bssid, ETH_ALEN);
925bfcc09ddSBjoern A. Zeeb 	mvm->uapsd_noagg_bssid_write_idx++;
926bfcc09ddSBjoern A. Zeeb 	if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
927bfcc09ddSBjoern A. Zeeb 		mvm->uapsd_noagg_bssid_write_idx = 0;
928bfcc09ddSBjoern A. Zeeb 
929bfcc09ddSBjoern A. Zeeb 	iwl_mvm_connection_loss(mvm, vif,
930bfcc09ddSBjoern A. Zeeb 				"AP isn't using AMPDU with uAPSD enabled");
931bfcc09ddSBjoern A. Zeeb }
932bfcc09ddSBjoern A. Zeeb 
933bfcc09ddSBjoern A. Zeeb static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
934bfcc09ddSBjoern A. Zeeb 					 struct ieee80211_vif *vif)
935bfcc09ddSBjoern A. Zeeb {
936bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
937bfcc09ddSBjoern A. Zeeb 
938bfcc09ddSBjoern A. Zeeb 	if (vif->type != NL80211_IFTYPE_STATION)
939bfcc09ddSBjoern A. Zeeb 		return;
940bfcc09ddSBjoern A. Zeeb 
9419af1bba4SBjoern A. Zeeb 	if (!vif->cfg.assoc)
942bfcc09ddSBjoern A. Zeeb 		return;
943bfcc09ddSBjoern A. Zeeb 
9449af1bba4SBjoern A. Zeeb 	if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd &&
9459af1bba4SBjoern A. Zeeb 	    !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd &&
9469af1bba4SBjoern A. Zeeb 	    !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd &&
9479af1bba4SBjoern A. Zeeb 	    !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd)
948bfcc09ddSBjoern A. Zeeb 		return;
949bfcc09ddSBjoern A. Zeeb 
950bfcc09ddSBjoern A. Zeeb 	if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
951bfcc09ddSBjoern A. Zeeb 		return;
952bfcc09ddSBjoern A. Zeeb 
953bfcc09ddSBjoern A. Zeeb 	mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
954bfcc09ddSBjoern A. Zeeb 	IWL_INFO(mvm,
955bfcc09ddSBjoern A. Zeeb 		 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
9569af1bba4SBjoern A. Zeeb 	schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk,
9579af1bba4SBjoern A. Zeeb 			      15 * HZ);
958bfcc09ddSBjoern A. Zeeb }
959bfcc09ddSBjoern A. Zeeb 
960bfcc09ddSBjoern A. Zeeb static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
961bfcc09ddSBjoern A. Zeeb 						 unsigned int elapsed,
962bfcc09ddSBjoern A. Zeeb 						 int mac)
963bfcc09ddSBjoern A. Zeeb {
964bfcc09ddSBjoern A. Zeeb 	u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
965bfcc09ddSBjoern A. Zeeb 	u64 tpt;
966bfcc09ddSBjoern A. Zeeb 	unsigned long rate;
967bfcc09ddSBjoern A. Zeeb 	struct ieee80211_vif *vif;
968bfcc09ddSBjoern A. Zeeb 
969bfcc09ddSBjoern A. Zeeb 	rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
970bfcc09ddSBjoern A. Zeeb 
971bfcc09ddSBjoern A. Zeeb 	if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
972bfcc09ddSBjoern A. Zeeb 	    mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
973bfcc09ddSBjoern A. Zeeb 		return;
974bfcc09ddSBjoern A. Zeeb 
975bfcc09ddSBjoern A. Zeeb 	if (iwl_mvm_has_new_rx_api(mvm)) {
976bfcc09ddSBjoern A. Zeeb 		tpt = 8 * bytes; /* kbps */
977bfcc09ddSBjoern A. Zeeb 		do_div(tpt, elapsed);
978bfcc09ddSBjoern A. Zeeb 		rate *= 1000; /* kbps */
979bfcc09ddSBjoern A. Zeeb 		if (tpt < 22 * rate / 100)
980bfcc09ddSBjoern A. Zeeb 			return;
981bfcc09ddSBjoern A. Zeeb 	} else {
982bfcc09ddSBjoern A. Zeeb 		/*
983bfcc09ddSBjoern A. Zeeb 		 * the rate here is actually the threshold, in 100Kbps units,
984bfcc09ddSBjoern A. Zeeb 		 * so do the needed conversion from bytes to 100Kbps:
985bfcc09ddSBjoern A. Zeeb 		 * 100kb = bits / (100 * 1000),
986bfcc09ddSBjoern A. Zeeb 		 * 100kbps = 100kb / (msecs / 1000) ==
987bfcc09ddSBjoern A. Zeeb 		 *           (bits / (100 * 1000)) / (msecs / 1000) ==
988bfcc09ddSBjoern A. Zeeb 		 *           bits / (100 * msecs)
989bfcc09ddSBjoern A. Zeeb 		 */
990bfcc09ddSBjoern A. Zeeb 		tpt = (8 * bytes);
991bfcc09ddSBjoern A. Zeeb 		do_div(tpt, elapsed * 100);
992bfcc09ddSBjoern A. Zeeb 		if (tpt < rate)
993bfcc09ddSBjoern A. Zeeb 			return;
994bfcc09ddSBjoern A. Zeeb 	}
995bfcc09ddSBjoern A. Zeeb 
996bfcc09ddSBjoern A. Zeeb 	rcu_read_lock();
997bfcc09ddSBjoern A. Zeeb 	vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
998bfcc09ddSBjoern A. Zeeb 	if (vif)
999bfcc09ddSBjoern A. Zeeb 		iwl_mvm_uapsd_agg_disconnect(mvm, vif);
1000bfcc09ddSBjoern A. Zeeb 	rcu_read_unlock();
1001bfcc09ddSBjoern A. Zeeb }
1002bfcc09ddSBjoern A. Zeeb 
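/*
 * Worked example (editor's addition) for the legacy branch above: with
 * bytes == 1000000 received over elapsed == 1000 ms and a threshold rate of
 * 100 (10 Mbps in 100 kbps units), tpt == 8 * 1000000 / (1000 * 100) == 80,
 * which is below the threshold, so the function bails out early; at
 * 1250000 bytes tpt reaches 100 and the U-APSD aggregation check proceeds
 * to iwl_mvm_uapsd_agg_disconnect().
 */
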
1003bfcc09ddSBjoern A. Zeeb static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
1004bfcc09ddSBjoern A. Zeeb 				 struct ieee80211_vif *vif)
1005bfcc09ddSBjoern A. Zeeb {
1006bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1007bfcc09ddSBjoern A. Zeeb 	u32 *band = _data;
1008bfcc09ddSBjoern A. Zeeb 
10099af1bba4SBjoern A. Zeeb 	if (!mvmvif->deflink.phy_ctxt)
1010bfcc09ddSBjoern A. Zeeb 		return;
1011bfcc09ddSBjoern A. Zeeb 
10129af1bba4SBjoern A. Zeeb 	band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band;
1013bfcc09ddSBjoern A. Zeeb }
1014bfcc09ddSBjoern A. Zeeb 
1015bfcc09ddSBjoern A. Zeeb static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
1016bfcc09ddSBjoern A. Zeeb 					    unsigned long ts,
1017bfcc09ddSBjoern A. Zeeb 					    bool handle_uapsd)
1018bfcc09ddSBjoern A. Zeeb {
1019bfcc09ddSBjoern A. Zeeb 	unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
1020bfcc09ddSBjoern A. Zeeb 	unsigned int uapsd_elapsed =
1021bfcc09ddSBjoern A. Zeeb 		jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
1022bfcc09ddSBjoern A. Zeeb 	u32 total_airtime = 0;
1023bfcc09ddSBjoern A. Zeeb 	u32 band_airtime[NUM_NL80211_BANDS] = {0};
1024bfcc09ddSBjoern A. Zeeb 	u32 band[NUM_MAC_INDEX_DRIVER] = {0};
1025bfcc09ddSBjoern A. Zeeb 	int ac, mac, i;
1026bfcc09ddSBjoern A. Zeeb 	bool low_latency = false;
1027bfcc09ddSBjoern A. Zeeb 	enum iwl_mvm_traffic_load load, band_load;
1028bfcc09ddSBjoern A. Zeeb 	bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
1029bfcc09ddSBjoern A. Zeeb 
1030bfcc09ddSBjoern A. Zeeb 	if (handle_ll)
1031bfcc09ddSBjoern A. Zeeb 		mvm->tcm.ll_ts = ts;
1032bfcc09ddSBjoern A. Zeeb 	if (handle_uapsd)
1033bfcc09ddSBjoern A. Zeeb 		mvm->tcm.uapsd_nonagg_ts = ts;
1034bfcc09ddSBjoern A. Zeeb 
1035bfcc09ddSBjoern A. Zeeb 	mvm->tcm.result.elapsed = elapsed;
1036bfcc09ddSBjoern A. Zeeb 
1037bfcc09ddSBjoern A. Zeeb 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1038bfcc09ddSBjoern A. Zeeb 						   IEEE80211_IFACE_ITER_NORMAL,
1039bfcc09ddSBjoern A. Zeeb 						   iwl_mvm_tcm_iterator,
1040bfcc09ddSBjoern A. Zeeb 						   &band);
1041bfcc09ddSBjoern A. Zeeb 
1042bfcc09ddSBjoern A. Zeeb 	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1043bfcc09ddSBjoern A. Zeeb 		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1044bfcc09ddSBjoern A. Zeeb 		u32 vo_vi_pkts = 0;
1045bfcc09ddSBjoern A. Zeeb 		u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
1046bfcc09ddSBjoern A. Zeeb 
1047bfcc09ddSBjoern A. Zeeb 		total_airtime += airtime;
1048bfcc09ddSBjoern A. Zeeb 		band_airtime[band[mac]] += airtime;
1049bfcc09ddSBjoern A. Zeeb 
1050bfcc09ddSBjoern A. Zeeb 		load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
1051bfcc09ddSBjoern A. Zeeb 		mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
1052bfcc09ddSBjoern A. Zeeb 		mvm->tcm.result.load[mac] = load;
1053bfcc09ddSBjoern A. Zeeb 		mvm->tcm.result.airtime[mac] = airtime;
1054bfcc09ddSBjoern A. Zeeb 
1055bfcc09ddSBjoern A. Zeeb 		for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
1056bfcc09ddSBjoern A. Zeeb 			vo_vi_pkts += mdata->rx.pkts[ac] +
1057bfcc09ddSBjoern A. Zeeb 				      mdata->tx.pkts[ac];
1058bfcc09ddSBjoern A. Zeeb 
1059bfcc09ddSBjoern A. Zeeb 		/* enable immediately with enough packets but defer disabling */
1060bfcc09ddSBjoern A. Zeeb 		if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
1061bfcc09ddSBjoern A. Zeeb 			mvm->tcm.result.low_latency[mac] = true;
1062bfcc09ddSBjoern A. Zeeb 		else if (handle_ll)
1063bfcc09ddSBjoern A. Zeeb 			mvm->tcm.result.low_latency[mac] = false;
1064bfcc09ddSBjoern A. Zeeb 
1065bfcc09ddSBjoern A. Zeeb 		if (handle_ll) {
1066bfcc09ddSBjoern A. Zeeb 			/* clear old data */
1067bfcc09ddSBjoern A. Zeeb 			memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1068bfcc09ddSBjoern A. Zeeb 			memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1069bfcc09ddSBjoern A. Zeeb 		}
1070bfcc09ddSBjoern A. Zeeb 		low_latency |= mvm->tcm.result.low_latency[mac];
1071bfcc09ddSBjoern A. Zeeb 
1072bfcc09ddSBjoern A. Zeeb 		if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
1073bfcc09ddSBjoern A. Zeeb 			iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
1074bfcc09ddSBjoern A. Zeeb 							     mac);
1075bfcc09ddSBjoern A. Zeeb 		/* clear old data */
1076bfcc09ddSBjoern A. Zeeb 		if (handle_uapsd)
1077bfcc09ddSBjoern A. Zeeb 			mdata->uapsd_nonagg_detect.rx_bytes = 0;
1078bfcc09ddSBjoern A. Zeeb 		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1079bfcc09ddSBjoern A. Zeeb 		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1080bfcc09ddSBjoern A. Zeeb 	}
1081bfcc09ddSBjoern A. Zeeb 
1082bfcc09ddSBjoern A. Zeeb 	load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
1083bfcc09ddSBjoern A. Zeeb 	mvm->tcm.result.global_load = load;
1084bfcc09ddSBjoern A. Zeeb 
1085bfcc09ddSBjoern A. Zeeb 	for (i = 0; i < NUM_NL80211_BANDS; i++) {
1086bfcc09ddSBjoern A. Zeeb 		band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
1087bfcc09ddSBjoern A. Zeeb 		mvm->tcm.result.band_load[i] = band_load;
1088bfcc09ddSBjoern A. Zeeb 	}
1089bfcc09ddSBjoern A. Zeeb 
1090bfcc09ddSBjoern A. Zeeb 	/*
1091bfcc09ddSBjoern A. Zeeb 	 * If the current load isn't low we need to force re-evaluation
1092bfcc09ddSBjoern A. Zeeb 	 * after the TCM period, so that we can return to low load if there
1093bfcc09ddSBjoern A. Zeeb 	 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
1094bfcc09ddSBjoern A. Zeeb 	 * triggered by traffic).
1095bfcc09ddSBjoern A. Zeeb 	 */
1096bfcc09ddSBjoern A. Zeeb 	if (load != IWL_MVM_TRAFFIC_LOW)
1097bfcc09ddSBjoern A. Zeeb 		return MVM_TCM_PERIOD;
1098bfcc09ddSBjoern A. Zeeb 	/*
1099bfcc09ddSBjoern A. Zeeb 	 * If low-latency is active we need to force re-evaluation after
1100bfcc09ddSBjoern A. Zeeb 	 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
1101bfcc09ddSBjoern A. Zeeb 	 * when there's no traffic at all.
1102bfcc09ddSBjoern A. Zeeb 	 */
1103bfcc09ddSBjoern A. Zeeb 	if (low_latency)
1104bfcc09ddSBjoern A. Zeeb 		return MVM_LL_PERIOD;
1105bfcc09ddSBjoern A. Zeeb 	/*
1106bfcc09ddSBjoern A. Zeeb 	 * Otherwise, we don't need to run the work struct because we're
1107bfcc09ddSBjoern A. Zeeb 	 * in the default "idle" state - traffic indication is low (which
1108bfcc09ddSBjoern A. Zeeb 	 * also covers the "no traffic" case) and low-latency is disabled
1109bfcc09ddSBjoern A. Zeeb 	 * so there's no state that may need to be disabled when there's
1110bfcc09ddSBjoern A. Zeeb 	 * no traffic at all.
1111bfcc09ddSBjoern A. Zeeb 	 *
1112bfcc09ddSBjoern A. Zeeb 	 * Note that this has no impact on the regular scheduling of the
1113bfcc09ddSBjoern A. Zeeb 	 * updates triggered by traffic - those happen whenever one of the
1114bfcc09ddSBjoern A. Zeeb 	 * two timeouts expire (if there's traffic at all.)
1115bfcc09ddSBjoern A. Zeeb 	 */
1116bfcc09ddSBjoern A. Zeeb 	return 0;
1117bfcc09ddSBjoern A. Zeeb }
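
/*
 * Return-value summary for iwl_mvm_calc_tcm_stats() (derived from the code
 * above, for reference only): MVM_TCM_PERIOD when the global load is not
 * low, MVM_LL_PERIOD when the load is low but low-latency is still set on
 * some MAC, and 0 when the driver is idle.  The caller treats a non-zero
 * value as the delay for re-scheduling mvm->tcm.work.
 */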
1118bfcc09ddSBjoern A. Zeeb 
1119bfcc09ddSBjoern A. Zeeb void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
1120bfcc09ddSBjoern A. Zeeb {
1121bfcc09ddSBjoern A. Zeeb 	unsigned long ts = jiffies;
1122bfcc09ddSBjoern A. Zeeb 	bool handle_uapsd =
1123bfcc09ddSBjoern A. Zeeb 		time_after(ts, mvm->tcm.uapsd_nonagg_ts +
1124bfcc09ddSBjoern A. Zeeb 			       msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
1125bfcc09ddSBjoern A. Zeeb 
1126bfcc09ddSBjoern A. Zeeb 	spin_lock(&mvm->tcm.lock);
1127bfcc09ddSBjoern A. Zeeb 	if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1128bfcc09ddSBjoern A. Zeeb 		spin_unlock(&mvm->tcm.lock);
1129bfcc09ddSBjoern A. Zeeb 		return;
1130bfcc09ddSBjoern A. Zeeb 	}
1131bfcc09ddSBjoern A. Zeeb 	spin_unlock(&mvm->tcm.lock);
1132bfcc09ddSBjoern A. Zeeb 
1133bfcc09ddSBjoern A. Zeeb 	if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
1134*a4128aadSBjoern A. Zeeb 		guard(mvm)(mvm);
1135bfcc09ddSBjoern A. Zeeb 		if (iwl_mvm_request_statistics(mvm, true))
1136bfcc09ddSBjoern A. Zeeb 			handle_uapsd = false;
1137bfcc09ddSBjoern A. Zeeb 	}
1138bfcc09ddSBjoern A. Zeeb 
1139bfcc09ddSBjoern A. Zeeb 	spin_lock(&mvm->tcm.lock);
1140bfcc09ddSBjoern A. Zeeb 	/* re-check if somebody else won the recheck race */
1141bfcc09ddSBjoern A. Zeeb 	if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1142bfcc09ddSBjoern A. Zeeb 		/* calculate statistics */
1143bfcc09ddSBjoern A. Zeeb 		unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
1144bfcc09ddSBjoern A. Zeeb 								  handle_uapsd);
1145bfcc09ddSBjoern A. Zeeb 
1146bfcc09ddSBjoern A. Zeeb 		/* the memset needs to be visible before the timestamp */
1147bfcc09ddSBjoern A. Zeeb 		smp_mb();
1148bfcc09ddSBjoern A. Zeeb 		mvm->tcm.ts = ts;
1149bfcc09ddSBjoern A. Zeeb 		if (work_delay)
1150bfcc09ddSBjoern A. Zeeb 			schedule_delayed_work(&mvm->tcm.work, work_delay);
1151bfcc09ddSBjoern A. Zeeb 	}
1152bfcc09ddSBjoern A. Zeeb 	spin_unlock(&mvm->tcm.lock);
1153bfcc09ddSBjoern A. Zeeb 
1154bfcc09ddSBjoern A. Zeeb 	iwl_mvm_tcm_results(mvm);
1155bfcc09ddSBjoern A. Zeeb }
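
/*
 * Note (derived from the code above): callers do not need to rate-limit
 * calls to iwl_mvm_recalc_tcm() themselves; the paused/time_after() check
 * at the top returns early whenever a recalculation is not yet due, and the
 * second check taken under tcm.lock resolves races between concurrent
 * callers.
 */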
1156bfcc09ddSBjoern A. Zeeb 
1157bfcc09ddSBjoern A. Zeeb void iwl_mvm_tcm_work(struct work_struct *work)
1158bfcc09ddSBjoern A. Zeeb {
1159bfcc09ddSBjoern A. Zeeb 	struct delayed_work *delayed_work = to_delayed_work(work);
1160bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
1161bfcc09ddSBjoern A. Zeeb 					   tcm.work);
1162bfcc09ddSBjoern A. Zeeb 
1163bfcc09ddSBjoern A. Zeeb 	iwl_mvm_recalc_tcm(mvm);
1164bfcc09ddSBjoern A. Zeeb }
1165bfcc09ddSBjoern A. Zeeb 
1166bfcc09ddSBjoern A. Zeeb void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
1167bfcc09ddSBjoern A. Zeeb {
1168bfcc09ddSBjoern A. Zeeb 	spin_lock_bh(&mvm->tcm.lock);
1169bfcc09ddSBjoern A. Zeeb 	mvm->tcm.paused = true;
1170bfcc09ddSBjoern A. Zeeb 	spin_unlock_bh(&mvm->tcm.lock);
1171bfcc09ddSBjoern A. Zeeb 	if (with_cancel)
1172bfcc09ddSBjoern A. Zeeb 		cancel_delayed_work_sync(&mvm->tcm.work);
1173bfcc09ddSBjoern A. Zeeb }
1174bfcc09ddSBjoern A. Zeeb 
1175bfcc09ddSBjoern A. Zeeb void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
1176bfcc09ddSBjoern A. Zeeb {
1177bfcc09ddSBjoern A. Zeeb 	int mac;
1178bfcc09ddSBjoern A. Zeeb 	bool low_latency = false;
1179bfcc09ddSBjoern A. Zeeb 
1180bfcc09ddSBjoern A. Zeeb 	spin_lock_bh(&mvm->tcm.lock);
1181bfcc09ddSBjoern A. Zeeb 	mvm->tcm.ts = jiffies;
1182bfcc09ddSBjoern A. Zeeb 	mvm->tcm.ll_ts = jiffies;
1183bfcc09ddSBjoern A. Zeeb 	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1184bfcc09ddSBjoern A. Zeeb 		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1185bfcc09ddSBjoern A. Zeeb 
1186bfcc09ddSBjoern A. Zeeb 		memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1187bfcc09ddSBjoern A. Zeeb 		memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1188bfcc09ddSBjoern A. Zeeb 		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1189bfcc09ddSBjoern A. Zeeb 		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1190bfcc09ddSBjoern A. Zeeb 
1191bfcc09ddSBjoern A. Zeeb 		if (mvm->tcm.result.low_latency[mac])
1192bfcc09ddSBjoern A. Zeeb 			low_latency = true;
1193bfcc09ddSBjoern A. Zeeb 	}
1194bfcc09ddSBjoern A. Zeeb 	/* The TCM data needs to be reset before the "paused" flag changes */
1195bfcc09ddSBjoern A. Zeeb 	smp_mb();
1196bfcc09ddSBjoern A. Zeeb 	mvm->tcm.paused = false;
1197bfcc09ddSBjoern A. Zeeb 
1198bfcc09ddSBjoern A. Zeeb 	/*
1199bfcc09ddSBjoern A. Zeeb 	 * If the current load is not low or low-latency is active, force
1200bfcc09ddSBjoern A. Zeeb 	 * re-evaluation to cover the case of no traffic.
1201bfcc09ddSBjoern A. Zeeb 	 */
1202bfcc09ddSBjoern A. Zeeb 	if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
1203bfcc09ddSBjoern A. Zeeb 		schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
1204bfcc09ddSBjoern A. Zeeb 	else if (low_latency)
1205bfcc09ddSBjoern A. Zeeb 		schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
1206bfcc09ddSBjoern A. Zeeb 
1207bfcc09ddSBjoern A. Zeeb 	spin_unlock_bh(&mvm->tcm.lock);
1208bfcc09ddSBjoern A. Zeeb }
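
/*
 * Minimal usage sketch (hypothetical call site, not part of this file):
 *
 *	iwl_mvm_pause_tcm(mvm, true);	// also cancels tcm.work
 *	... period during which traffic accounting is meaningless ...
 *	iwl_mvm_resume_tcm(mvm);
 *
 * Resuming clears the per-MAC counters and re-arms the work item when the
 * previous global load was above low or low-latency was still active.
 */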
1209bfcc09ddSBjoern A. Zeeb 
1210bfcc09ddSBjoern A. Zeeb void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1211bfcc09ddSBjoern A. Zeeb {
1212bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1213bfcc09ddSBjoern A. Zeeb 
1214bfcc09ddSBjoern A. Zeeb 	INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
1215bfcc09ddSBjoern A. Zeeb 			  iwl_mvm_tcm_uapsd_nonagg_detected_wk);
1216bfcc09ddSBjoern A. Zeeb }
1217bfcc09ddSBjoern A. Zeeb 
1218bfcc09ddSBjoern A. Zeeb void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1219bfcc09ddSBjoern A. Zeeb {
1220bfcc09ddSBjoern A. Zeeb 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1221bfcc09ddSBjoern A. Zeeb 
1222bfcc09ddSBjoern A. Zeeb 	cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
1223bfcc09ddSBjoern A. Zeeb }
1224bfcc09ddSBjoern A. Zeeb 
1225bfcc09ddSBjoern A. Zeeb u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
1226bfcc09ddSBjoern A. Zeeb {
1227bfcc09ddSBjoern A. Zeeb 	u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
1228bfcc09ddSBjoern A. Zeeb 
1229bfcc09ddSBjoern A. Zeeb 	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
1230bfcc09ddSBjoern A. Zeeb 	    mvm->trans->cfg->gp2_reg_addr)
1231bfcc09ddSBjoern A. Zeeb 		reg_addr = mvm->trans->cfg->gp2_reg_addr;
1232bfcc09ddSBjoern A. Zeeb 
1233bfcc09ddSBjoern A. Zeeb 	return iwl_read_prph(mvm->trans, reg_addr);
1234bfcc09ddSBjoern A. Zeeb }
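
/*
 * Note (derived from the code above): on device families from 22000 on, the
 * GP2 (device system time) register may live at a family-specific address
 * supplied via cfg->gp2_reg_addr; older devices use DEVICE_SYSTEM_TIME_REG.
 * Callers that need a stable reading, such as iwl_mvm_get_sync_time()
 * below, disable power save around the read.
 */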
1235bfcc09ddSBjoern A. Zeeb 
1236bfcc09ddSBjoern A. Zeeb void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
1237bfcc09ddSBjoern A. Zeeb 			   u32 *gp2, u64 *boottime, ktime_t *realtime)
1238bfcc09ddSBjoern A. Zeeb {
1239bfcc09ddSBjoern A. Zeeb 	bool ps_disabled;
1240bfcc09ddSBjoern A. Zeeb 
1241bfcc09ddSBjoern A. Zeeb 	lockdep_assert_held(&mvm->mutex);
1242bfcc09ddSBjoern A. Zeeb 
1243bfcc09ddSBjoern A. Zeeb 	/* Disable power save when reading GP2 */
1244bfcc09ddSBjoern A. Zeeb 	ps_disabled = mvm->ps_disabled;
1245bfcc09ddSBjoern A. Zeeb 	if (!ps_disabled) {
1246bfcc09ddSBjoern A. Zeeb 		mvm->ps_disabled = true;
1247bfcc09ddSBjoern A. Zeeb 		iwl_mvm_power_update_device(mvm);
1248bfcc09ddSBjoern A. Zeeb 	}
1249bfcc09ddSBjoern A. Zeeb 
1250bfcc09ddSBjoern A. Zeeb 	*gp2 = iwl_mvm_get_systime(mvm);
1251bfcc09ddSBjoern A. Zeeb 
1252bfcc09ddSBjoern A. Zeeb 	if (clock_type == CLOCK_BOOTTIME && boottime)
1253bfcc09ddSBjoern A. Zeeb 		*boottime = ktime_get_boottime_ns();
1254bfcc09ddSBjoern A. Zeeb 	else if (clock_type == CLOCK_REALTIME && realtime)
1255bfcc09ddSBjoern A. Zeeb 		*realtime = ktime_get_real();
1256bfcc09ddSBjoern A. Zeeb 
1257bfcc09ddSBjoern A. Zeeb 	if (!ps_disabled) {
1258bfcc09ddSBjoern A. Zeeb 		mvm->ps_disabled = ps_disabled;
1259bfcc09ddSBjoern A. Zeeb 		iwl_mvm_power_update_device(mvm);
1260bfcc09ddSBjoern A. Zeeb 	}
1261bfcc09ddSBjoern A. Zeeb }
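
/*
 * Usage sketch (illustrative; the caller must hold mvm->mutex, as the
 * lockdep assertion above requires):
 *
 *	u32 gp2;
 *	u64 boottime;
 *
 *	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &gp2, &boottime, NULL);
 *
 * *gp2 is always written; of boottime/realtime, only the pointer matching
 * clock_type is filled in, so the unused one may be NULL.
 */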
12629af1bba4SBjoern A. Zeeb 
12639af1bba4SBjoern A. Zeeb /* Find whether at least two links from different vifs use the same channel.
12649af1bba4SBjoern A. Zeeb  * FIXME: consider having a refcount array in struct iwl_mvm_vif for
12659af1bba4SBjoern A. Zeeb  * used phy_ctxt ids.
12669af1bba4SBjoern A. Zeeb  */
12679af1bba4SBjoern A. Zeeb bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1,
12689af1bba4SBjoern A. Zeeb 				     struct iwl_mvm_vif *vif2)
12699af1bba4SBjoern A. Zeeb {
12709af1bba4SBjoern A. Zeeb 	unsigned int i, j;
12719af1bba4SBjoern A. Zeeb 
12729af1bba4SBjoern A. Zeeb 	for_each_mvm_vif_valid_link(vif1, i) {
12739af1bba4SBjoern A. Zeeb 		for_each_mvm_vif_valid_link(vif2, j) {
12749af1bba4SBjoern A. Zeeb 			if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt)
12759af1bba4SBjoern A. Zeeb 				return true;
12769af1bba4SBjoern A. Zeeb 		}
12779af1bba4SBjoern A. Zeeb 	}
12789af1bba4SBjoern A. Zeeb 
12799af1bba4SBjoern A. Zeeb 	return false;
12809af1bba4SBjoern A. Zeeb }
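
/*
 * Note (derived from the code above): "same channel" here means the two
 * links literally share a PHY context pointer.  Illustrative check
 * (hypothetical call site):
 *
 *	if (iwl_mvm_have_links_same_channel(mvmvif_a, mvmvif_b))
 *		... at least one link of each vif uses the same phy_ctxt ...
 */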
12819af1bba4SBjoern A. Zeeb 
12829af1bba4SBjoern A. Zeeb bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif)
12839af1bba4SBjoern A. Zeeb {
12849af1bba4SBjoern A. Zeeb 	unsigned int i;
12859af1bba4SBjoern A. Zeeb 
12869af1bba4SBjoern A. Zeeb 	/* FIXME: can it fail when phy_ctxt is assigned? */
12879af1bba4SBjoern A. Zeeb 	for_each_mvm_vif_valid_link(mvmvif, i) {
12889af1bba4SBjoern A. Zeeb 		if (mvmvif->link[i]->phy_ctxt &&
12899af1bba4SBjoern A. Zeeb 		    mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX)
12909af1bba4SBjoern A. Zeeb 			return true;
12919af1bba4SBjoern A. Zeeb 	}
12929af1bba4SBjoern A. Zeeb 
12939af1bba4SBjoern A. Zeeb 	return false;
12949af1bba4SBjoern A. Zeeb }
1295