/* mt76x02_mac.c */
  1. // SPDX-License-Identifier: ISC
  2. /*
  3. * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  4. * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
  5. */
  6. #include "mt76x02.h"
  7. #include "mt76x02_trace.h"
  8. #include "trace.h"
/* Reset the hardware MAC statistics counters.
 *
 * The RX/TX statistics registers are read and the results discarded —
 * presumably they are clear-on-read (TODO: confirm against datasheet).
 * The per-queue aggregation counters and TX status FIFO are drained the
 * same way, and the cached software aggregation stats are zeroed to match.
 */
void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
{
	int i;

	mt76_rr(dev, MT_RX_STAT_0);
	mt76_rr(dev, MT_RX_STAT_1);
	mt76_rr(dev, MT_RX_STAT_2);
	mt76_rr(dev, MT_TX_STA_0);
	mt76_rr(dev, MT_TX_STA_1);
	mt76_rr(dev, MT_TX_STA_2);

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	/* drain up to 16 pending entries from the TX status FIFO */
	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_STAT_FIFO);

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
  25. static enum mt76x02_cipher_type
  26. mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
  27. {
  28. memset(key_data, 0, 32);
  29. if (!key)
  30. return MT_CIPHER_NONE;
  31. if (key->keylen > 32)
  32. return MT_CIPHER_NONE;
  33. memcpy(key_data, key->key, key->keylen);
  34. switch (key->cipher) {
  35. case WLAN_CIPHER_SUITE_WEP40:
  36. return MT_CIPHER_WEP40;
  37. case WLAN_CIPHER_SUITE_WEP104:
  38. return MT_CIPHER_WEP104;
  39. case WLAN_CIPHER_SUITE_TKIP:
  40. return MT_CIPHER_TKIP;
  41. case WLAN_CIPHER_SUITE_CCMP:
  42. return MT_CIPHER_AES_CCMP;
  43. default:
  44. return MT_CIPHER_NONE;
  45. }
  46. }
/* Program a per-VIF shared (group) key slot.
 *
 * A NULL @key clears the slot (cipher NONE with zeroed key material).
 * Returns -EOPNOTSUPP when a key is given but its cipher is not
 * supported by the hardware, 0 otherwise.
 */
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
				 u8 key_idx, struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	/* update only this key's mode field within the shared mode register */
	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
/* Read the hardware TX IV/EIV registers for WCID @idx and resync the
 * mac80211 key's tx_pn with them.
 *
 * The extended IV word always carries PN bits 16..47.  For TKIP the low
 * 16 bits are reassembled from the byte-swizzled IV word; for CCMP they
 * sit in the low 16 bits of the IV word directly.  For other ciphers
 * there is no PN to sync, so the key is left untouched.
 */
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
			      struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 iv, eiv;
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	iv = mt76_rr(dev, MT_WCID_IV(idx));
	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);

	/* EIV register holds PN bits 16..47 */
	pn = (u64)eiv << 16;
	if (cipher == MT_CIPHER_TKIP) {
		pn |= (iv >> 16) & 0xff;
		pn |= (iv & 0xff) << 8;
	} else if (cipher >= MT_CIPHER_AES_CCMP) {
		pn |= iv & 0xffff;
	} else {
		return;
	}

	atomic64_set(&key->tx_pn, pn);
}
/* Program a per-station key for WCID @idx and initialize the hardware
 * IV/EIV registers from the key's current tx_pn.
 *
 * A NULL @key clears the entry (cipher NONE, zeroed key and IV).
 * Returns -EOPNOTSUPP when a key is given but its cipher is not
 * supported by the hardware, 0 otherwise.
 */
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
			     struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));

		pn = atomic64_read(&key->tx_pn);

		/* key id goes into bits 6..7 of the fourth IV byte */
		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT_CIPHER_TKIP) {
			/* Ext IV bit plus PN bits 16..47 in the EIV bytes */
			iv_data[3] |= 0x20;
			put_unaligned_le32(pn >> 16, &iv_data[4]);
		}

		if (cipher == MT_CIPHER_TKIP) {
			/* TKIP byte layout: TSC1, WEP-seed-style byte, TSC0 */
			iv_data[0] = (pn >> 8) & 0xff;
			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
			iv_data[2] = pn & 0xff;
		} else if (cipher >= MT_CIPHER_AES_CCMP) {
			/* CCMP: PN0/PN1 in the first two bytes */
			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
		}
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}
/* Initialize WCID table entry @idx: bind it to BSS index @vif_idx and,
 * for entries that have an address slot, program the station MAC
 * address (zeroed when @mac is NULL).
 */
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
			    u8 vif_idx, u8 *mac)
{
	struct mt76_wcid_addr addr = {};
	u32 attr;

	/* BSS index is split: low 3 bits plus a separate extension bit */
	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));

	mt76_wr(dev, MT_WCID_ATTR(idx), attr);

	/* only the first 128 WCID entries have an address table slot */
	if (idx >= 128)
		return;

	if (mac)
		memcpy(addr.macaddr, mac, ETH_ALEN);

	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
  134. void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
  135. {
  136. u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
  137. u32 bit = MT_WCID_DROP_MASK(idx);
  138. /* prevent unnecessary writes */
  139. if ((val & bit) != (bit * drop))
  140. mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
  141. }
  142. static __le16
  143. mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
  144. const struct ieee80211_tx_rate *rate, u8 *nss_val)
  145. {
  146. u8 phy, rate_idx, nss, bw = 0;
  147. u16 rateval;
  148. if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
  149. rate_idx = rate->idx;
  150. nss = 1 + (rate->idx >> 4);
  151. phy = MT_PHY_TYPE_VHT;
  152. if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
  153. bw = 2;
  154. else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
  155. bw = 1;
  156. } else if (rate->flags & IEEE80211_TX_RC_MCS) {
  157. rate_idx = rate->idx;
  158. nss = 1 + (rate->idx >> 3);
  159. phy = MT_PHY_TYPE_HT;
  160. if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
  161. phy = MT_PHY_TYPE_HT_GF;
  162. if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
  163. bw = 1;
  164. } else {
  165. const struct ieee80211_rate *r;
  166. int band = dev->mphy.chandef.chan->band;
  167. u16 val;
  168. r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
  169. if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
  170. val = r->hw_value_short;
  171. else
  172. val = r->hw_value;
  173. phy = val >> 8;
  174. rate_idx = val & 0xff;
  175. nss = 1;
  176. }
  177. rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
  178. rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
  179. rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
  180. if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
  181. rateval |= MT_RXWI_RATE_SGI;
  182. *nss_val = nss;
  183. return cpu_to_le16(rateval);
  184. }
  185. void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
  186. const struct ieee80211_tx_rate *rate)
  187. {
  188. s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
  189. __le16 rateval;
  190. u32 tx_info;
  191. s8 nss;
  192. rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
  193. tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
  194. FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
  195. FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
  196. MT_WCID_TX_INFO_SET;
  197. wcid->tx_info = tx_info;
  198. }
  199. void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
  200. {
  201. if (enable)
  202. mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
  203. else
  204. mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
  205. }
/* Pop one entry from the hardware TX status FIFO into @stat.
 *
 * NOTE(review): the extended word (MT_TX_STAT_FIFO_EXT) is read before
 * MT_TX_STAT_FIFO — presumably reading the base register advances the
 * FIFO, so this order matters; confirm against the datasheet before
 * reordering.
 *
 * Returns true (and fills @stat) when a valid entry was available.
 */
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
				struct mt76x02_tx_status *stat)
{
	u32 stat1, stat2;

	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);

	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
	if (!stat->valid)
		return false;

	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);

	trace_mac_txstat_fetch(dev, stat);

	return true;
}
  225. static int
  226. mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
  227. enum nl80211_band band)
  228. {
  229. u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
  230. txrate->idx = 0;
  231. txrate->flags = 0;
  232. txrate->count = 1;
  233. switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
  234. case MT_PHY_TYPE_OFDM:
  235. if (band == NL80211_BAND_2GHZ)
  236. idx += 4;
  237. txrate->idx = idx;
  238. return 0;
  239. case MT_PHY_TYPE_CCK:
  240. if (idx >= 8)
  241. idx -= 8;
  242. txrate->idx = idx;
  243. return 0;
  244. case MT_PHY_TYPE_HT_GF:
  245. txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
  246. fallthrough;
  247. case MT_PHY_TYPE_HT:
  248. txrate->flags |= IEEE80211_TX_RC_MCS;
  249. txrate->idx = idx;
  250. break;
  251. case MT_PHY_TYPE_VHT:
  252. txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
  253. txrate->idx = idx;
  254. break;
  255. default:
  256. return -EINVAL;
  257. }
  258. switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
  259. case MT_PHY_BW_20:
  260. break;
  261. case MT_PHY_BW_40:
  262. txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
  263. break;
  264. case MT_PHY_BW_80:
  265. txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
  266. break;
  267. default:
  268. return -EINVAL;
  269. }
  270. if (rate & MT_RXWI_RATE_SGI)
  271. txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
  272. return 0;
  273. }
/* Fill the hardware TX descriptor (TXWI) for @skb.
 *
 * Picks the rate either from mac80211's rate table or from the cached
 * per-WCID rate info, encodes power adjustment, stream configuration,
 * ACK/sequence control and A-MPDU parameters, and (for sw_iv stations)
 * builds the CCMP IV/EIV words.  @len is the on-air frame length.
 */
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta, int len)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_key_conf *key = info->control.hw_key;
	u32 wcid_tx_info;
	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
	u16 txwi_flags = 0;
	u8 nss;
	s8 txpwr_adj, max_txpwr_adj;
	u8 ccmp_pn[8], nstreams = dev->mphy.chainmask & 0xf;

	memset(txwi, 0, sizeof(*txwi));

	mt76_tx_check_agg_ssn(sta, skb);

	/* protected frame without a hardware key for this station:
	 * presumably must not use the WCID key slot — drop the wcid
	 * binding and refetch rates (NOTE(review): confirm intent)
	 */
	if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
	    ieee80211_has_protected(hdr->frame_control)) {
		wcid = NULL;
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);
	}

	if (wcid)
		txwi->wcid = wcid->idx;
	else
		txwi->wcid = 0xff;

	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		/* build the CCMP IV/EIV bytes for software IV insertion */
		ccmp_pn[0] = pn;
		ccmp_pn[1] = pn >> 8;
		ccmp_pn[2] = 0;
		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
		ccmp_pn[4] = pn >> 16;
		ccmp_pn[5] = pn >> 24;
		ccmp_pn[6] = pn >> 32;
		ccmp_pn[7] = pn >> 40;
		txwi->iv = *((__le32 *)&ccmp_pn[0]);
		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
	}

	if (wcid && (rate->idx < 0 || !rate->count)) {
		/* no explicit rate: use the cached per-WCID rate info */
		wcid_tx_info = wcid->tx_info;
		txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
		max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
					  wcid_tx_info);
		nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
	} else {
		txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
		max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	}

	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
					     max_txpwr_adj);
	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);

	/* stream config magic values depend on chip revision;
	 * E3 only applies it to non-HT rates (rate_ht_mask)
	 */
	if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
		txwi->txstream = 0x13;
	else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
		txwi->txstream = 0x93;

	if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	/* STBC only with a single spatial stream */
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
		u8 ampdu_density = sta->ht_cap.ampdu_density;

		/* BA window capped to the 6-bit hardware field (max 63) */
		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		/* enforce a minimum MPDU density — presumably a hardware
		 * requirement (NOTE(review): confirm against datasheet)
		 */
		if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
			ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
			 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, ampdu_density);
	}

	/* probe responses and beacons carry a hardware timestamp */
	if (ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_beacon(hdr->frame_control))
		txwi_flags |= MT_TXWI_FLAGS_TS;

	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
  361. static void
  362. mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
  363. {
  364. u8 mcs, nss;
  365. if (!idx)
  366. return;
  367. rates += idx - 1;
  368. rates[1] = rates[0];
  369. switch (phy) {
  370. case MT_PHY_TYPE_VHT:
  371. mcs = ieee80211_rate_get_vht_mcs(rates);
  372. nss = ieee80211_rate_get_vht_nss(rates);
  373. if (mcs == 0)
  374. nss = max_t(int, nss - 1, 1);
  375. else
  376. mcs--;
  377. ieee80211_rate_set_vht(rates + 1, mcs, nss);
  378. break;
  379. case MT_PHY_TYPE_HT_GF:
  380. case MT_PHY_TYPE_HT:
  381. /* MCS 8 falls back to MCS 0 */
  382. if (rates[0].idx == 8) {
  383. rates[1].idx = 0;
  384. break;
  385. }
  386. fallthrough;
  387. default:
  388. rates[1].idx = max_t(int, rates[0].idx - 1, 0);
  389. break;
  390. }
  391. }
/* Translate one hardware TX status report into mac80211 tx_info.
 *
 * @n_frames: number of MPDUs covered by this status (aggregated).
 *
 * The rate table is reconstructed: the first entry comes from the
 * packet id (when it carries rate bits), from the cached per-station
 * rate, or is kept as-is; intermediate entries follow the fallback
 * chain; the final attempted rate is the one the hardware reported.
 */
static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
			   struct ieee80211_tx_info *info,
			   struct mt76x02_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	struct ieee80211_tx_rate last_rate;
	u16 first_rate;
	int retry = st->retry;
	int phy;
	int i;

	if (!n_frames)
		return;

	phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);
	if (st->pktid & MT_PACKET_ID_HAS_RATE) {
		/* recover the first attempted rate: PHY/flag bits from the
		 * reported rate, index bits from the packet id
		 */
		first_rate = st->rate & ~MT_PKTID_RATE;
		first_rate |= st->pktid & MT_PKTID_RATE;

		mt76x02_mac_process_tx_rate(&rate[0], first_rate,
					    dev->mphy.chandef.chan->band);
	} else if (rate[0].idx < 0) {
		if (!msta)
			return;

		mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
					    dev->mphy.chandef.chan->band);
	}

	mt76x02_mac_process_tx_rate(&last_rate, st->rate,
				    dev->mphy.chandef.chan->band);

	for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		retry--;
		if (i + 1 == ARRAY_SIZE(info->status.rates)) {
			/* out of slots: the last slot takes the final rate
			 * with all remaining retries attributed to it
			 */
			info->status.rates[i] = last_rate;
			info->status.rates[i].count = max_t(int, retry, 1);
			break;
		}

		mt76x02_tx_rate_fallback(info->status.rates, i, phy);
		if (info->status.rates[i].idx == last_rate.idx)
			break;
	}

	/* terminate the rate table when it was not filled completely */
	if (i + 1 < ARRAY_SIZE(info->status.rates)) {
		info->status.rates[i + 1].idx = -1;
		info->status.rates[i + 1].count = 0;
	}

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}
/* Report a hardware TX status event to mac80211 and account airtime.
 *
 * Identical consecutive aggregated statuses for the same station are
 * batched (counted in msta->n_frames, up to 32) and flushed as a single
 * report when the stream changes; @update is the cross-call flag that
 * forces a flush.  Runs under RCU (for the wcid lookup) and the mt76
 * TX status lock (for the pending-skb list).
 */
void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			    struct mt76x02_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_tx_status status = {
		.info = &info
	};
	/* map access category to a representative TID for airtime
	 * accounting
	 */
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt76_wcid *wcid = NULL;
	struct mt76x02_sta *msta = NULL;
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	u32 duration = 0;
	u8 cur_pktid;
	u32 ac = 0;
	int len = 0;

	if (stat->pktid == MT_PACKET_ID_NO_ACK)
		return;

	rcu_read_lock();

	if (stat->wcid < MT76x02_N_WCIDS)
		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

	if (wcid && wcid->sta) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		status.sta = container_of(priv, struct ieee80211_sta,
					  drv_priv);
	}

	mt76_tx_status_lock(mdev, &list);

	if (wcid) {
		if (mt76_is_skb_pktid(stat->pktid))
			status.skb = mt76_tx_status_skb_get(mdev, wcid,
							    stat->pktid, &list);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}

	/* without a matching skb we can only report rate-carrying pktids */
	if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
		mt76_tx_status_unlock(mdev, &list);
		goto out;
	}

	if (msta && stat->aggr && !status.skb) {
		u32 stat_val, stat_cache;

		/* compare rate+retry against the cached status to detect
		 * a continuation of the same aggregate
		 */
		stat_val = stat->rate;
		stat_val |= ((u32)stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32)msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			mt76_tx_status_unlock(mdev, &list);
			goto out;
		}

		/* stream changed: flush the batched status, cache this one */
		cur_pktid = msta->status.pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info,
					   &msta->status, msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		cur_pktid = stat->pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
		*update = 1;
	}

	if (status.skb) {
		info = *status.info;
		len = status.skb->len;
		ac = skb_get_queue_mapping(status.skb);
		mt76_tx_status_skb_done(mdev, status.skb, &list);
	} else if (msta) {
		/* no skb: estimate airtime length from the moving average
		 * packet length times the number of aggregated frames
		 */
		len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
		ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
	}

	mt76_tx_status_unlock(mdev, &list);

	if (!status.skb)
		ieee80211_tx_status_ext(mt76_hw(dev), &status);

	if (!len)
		goto out;

	duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->tx_airtime += duration;
	spin_unlock_bh(&dev->mt76.cc_lock);

	if (msta)
		ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac], duration, 0);

out:
	rcu_read_unlock();
}
/* Decode the RXWI rate word into mac80211 rx status fields (rate
 * index, encoding, NSS, bandwidth, guard interval, coding flags).
 *
 * Returns 0 on success, -EINVAL for an unknown PHY type.
 */
static int
mt76x02_mac_process_rate(struct mt76x02_dev *dev,
			 struct mt76_rx_status *status,
			 u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		/* clamp out-of-range indices to 0 */
		if (idx >= 8)
			idx = 0;

		/* on 2 GHz the OFDM rates follow the four CCK rates */
		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		/* indices 8..15 are the short-preamble CCK variants */
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		fallthrough;
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT: {
		u8 n_rxstream = dev->mphy.chainmask & 0xf;

		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		/* clamp NSS to the number of receive chains */
		status->nss = min_t(u8, n_rxstream,
				    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
		break;
	}
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}
/* Program the device MAC address and BSSID registers.
 *
 * An invalid @addr is replaced by a random locally-administered one.
 * Multi-BSSID mode is enabled (8 APs + 8 STAs) and all 16 BSSID slots
 * are cleared.
 */
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
	static const u8 null_addr[ETH_ALEN] = {};
	int i;

	ether_addr_copy(dev->mphy.macaddr, addr);

	if (!is_valid_ether_addr(dev->mphy.macaddr)) {
		eth_random_addr(dev->mphy.macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->mphy.macaddr);
	}

	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mphy.macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1,
		get_unaligned_le16(dev->mphy.macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));

	mt76_wr(dev, MT_MAC_BSSID_DW0,
		get_unaligned_le32(dev->mphy.macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1,
		get_unaligned_le16(dev->mphy.macaddr + 4) |
		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
	/* enable 7 additional beacon slots and control them with bypass mask */
	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);

	for (i = 0; i < 16; i++)
		mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
/* Convert a raw per-chain RSSI sample into a calibrated signal value by
 * applying the per-chain RX calibration offset and removing the LNA
 * gain.  Intermediate arithmetic deliberately stays in s8 like the raw
 * hardware value.
 */
static int
mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
{
	struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;

	rssi += cal->rssi_offset[chain];
	rssi -= cal->lna_gain;

	return rssi;
}
/* Parse the RX descriptor (RXWI) of a received frame and fill the
 * mt76_rx_status stored in skb->cb.
 *
 * Returns 0 on success, or a negative error when the frame should be
 * dropped (device not running, bogus length, unknown rate encoding).
 */
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
			   void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr;
	struct mt76x02_rxwi *rxwi = rxi;
	struct mt76x02_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0, nstreams = dev->mphy.chainmask & 0xf;
	s8 signal;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
		return -EINVAL;

	/* L2 pad: 2 extra bytes inserted after the 802.11 header */
	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
	status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		/* reassemble the PN for mac80211 replay checking;
		 * byte order appears to follow the CCMP header layout
		 */
		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			/* strip the IV from the payload */
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x02_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	if (rxinfo & MT_RXINFO_AMPDU) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;
		status->ampdu_ref = dev->ampdu_ref;

		/*
		 * When receiving an A-MPDU subframe and RSSI info is not valid,
		 * we can assume that more subframes belonging to the same A-MPDU
		 * are coming. The last one will have valid RSSI info
		 */
		if (rxinfo & MT_RXINFO_RSSI) {
			/* bump the ref for the next A-MPDU, skipping 0 */
			if (!++dev->ampdu_ref)
				dev->ampdu_ref++;
		}
	}

	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	pskb_trim(skb, len);

	status->chains = BIT(0);
	signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[0] = signal;
	if (nstreams > 1) {
		status->chains |= BIT(1);
		status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
							       rxwi->rssi[1],
							       1);
		/* report the strongest chain as the overall signal */
		signal = max_t(s8, signal, status->chain_signal[1]);
	}
	status->signal = signal;
	status->freq = dev->mphy.chandef.chan->center_freq;
	status->band = dev->mphy.chandef.chan->band;

	hdr = (struct ieee80211_hdr *)skb->data;
	status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	return mt76x02_mac_process_rate(dev, status, rate);
}
/* Drain the hardware TX status FIFO.
 *
 * @irq: when true, entries are queued into dev->txstatus_fifo for
 * deferred processing instead of being reported inline — presumably
 * because reporting does too much work for hard-IRQ context
 * (NOTE(review): confirm).  The FIFO lock is only trylock'd so
 * concurrent pollers back off instead of spinning.
 */
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
	struct mt76x02_tx_status stat = {};
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
		return;

	trace_mac_txstat_poll(dev);

	/* in IRQ context, stop once the deferred kfifo is full */
	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		if (!spin_trylock(&dev->txstatus_fifo_lock))
			break;

		ret = mt76x02_mac_load_tx_status(dev, &stat);
		spin_unlock(&dev->txstatus_fifo_lock);

		if (!ret)
			break;

		if (!irq) {
			mt76x02_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}
  741. void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
  742. {
  743. struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
  744. struct mt76x02_txwi *txwi;
  745. u8 *txwi_ptr;
  746. if (!e->txwi) {
  747. dev_kfree_skb_any(e->skb);
  748. return;
  749. }
  750. mt76x02_mac_poll_tx_status(dev, false);
  751. txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
  752. txwi = (struct mt76x02_txwi *)txwi_ptr;
  753. trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);
  754. mt76_tx_complete_skb(mdev, e->wcid, e->skb);
  755. }
  756. EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
/* Program the RTS threshold.  A value of ~0 disables RTS/CTS: the
 * protection control bits are then cleared in both the CCK and OFDM
 * protection config registers.
 */
void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}
/* Configure the six legacy/HT protection registers (starting at
 * MT_CCK_PROT_CFG) and the three VHT protection registers (starting at
 * MT_TX_PROT_CFG6) according to the HT operation mode from mac80211.
 *
 * @legacy_prot: protect against non-HT stations — CTS-to-self and CCK
 * protection rates; otherwise OFDM protection rates are used.
 * @ht_mode: the HT operation mode field (protection mode + non-GF bit).
 */
void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
				   int ht_mode)
{
	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
	u32 prot[6];
	u32 vht_prot[3];
	int i;
	u16 rts_thr;

	/* start from the current register values with the control bits
	 * (and, for entries 2+, the rate bits) cleared
	 */
	for (i = 0; i < ARRAY_SIZE(prot); i++) {
		prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
		prot[i] &= ~MT_PROT_CFG_CTRL;
		if (i >= 2)
			prot[i] &= ~MT_PROT_CFG_RATE;
	}

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
		vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
		vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
	}

	rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);

	/* RTS/CTS for CCK frames whenever an RTS threshold is configured */
	if (rts_thr != 0xffff)
		prot[0] |= MT_PROT_CTRL_RTS_CTS;

	if (legacy_prot) {
		prot[1] |= MT_PROT_CTRL_CTS2SELF;

		prot[2] |= MT_PROT_RATE_CCK_11;
		prot[3] |= MT_PROT_RATE_CCK_11;
		prot[4] |= MT_PROT_RATE_CCK_11;
		prot[5] |= MT_PROT_RATE_CCK_11;

		vht_prot[0] |= MT_PROT_RATE_CCK_11;
		vht_prot[1] |= MT_PROT_RATE_CCK_11;
		vht_prot[2] |= MT_PROT_RATE_CCK_11;
	} else {
		if (rts_thr != 0xffff)
			prot[1] |= MT_PROT_CTRL_RTS_CTS;

		prot[2] |= MT_PROT_RATE_OFDM_24;
		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
		prot[4] |= MT_PROT_RATE_OFDM_24;
		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;

		vht_prot[0] |= MT_PROT_RATE_OFDM_24;
		vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
		vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
	}

	switch (mode) {
	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
		/* protect all HT and VHT transmissions */
		prot[2] |= MT_PROT_CTRL_RTS_CTS;
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;

		vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
		/* only the 40 MHz entries need protection here */
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;

		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	}

	if (non_gf) {
		/* non-greenfield STAs present: protect GF entries too */
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
	}

	for (i = 0; i < ARRAY_SIZE(prot); i++)
		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
		mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}
  838. void mt76x02_update_channel(struct mt76_dev *mdev)
  839. {
  840. struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
  841. struct mt76_channel_state *state;
  842. state = mdev->phy.chan_state;
  843. state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
  844. spin_lock_bh(&dev->mt76.cc_lock);
  845. state->cc_tx += dev->tx_airtime;
  846. dev->tx_airtime = 0;
  847. spin_unlock_bh(&dev->mt76.cc_lock);
  848. }
  849. EXPORT_SYMBOL_GPL(mt76x02_update_channel);
  850. static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
  851. {
  852. u32 val = mt76_rr(dev, 0x10f4);
  853. if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
  854. return;
  855. dev_err(dev->mt76.dev, "mac specific condition occurred\n");
  856. mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
  857. udelay(10);
  858. mt76_wr(dev, MT_MAC_SYS_CTRL,
  859. MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
  860. }
  861. static void
  862. mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
  863. {
  864. if (enable) {
  865. u32 data;
  866. mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
  867. mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
  868. /* enable pa-lna */
  869. data = mt76_rr(dev, MT_TX_PIN_CFG);
  870. data |= MT_TX_PIN_CFG_TXANT |
  871. MT_TX_PIN_CFG_RXANT |
  872. MT_TX_PIN_RFTR_EN |
  873. MT_TX_PIN_TRSW_EN;
  874. mt76_wr(dev, MT_TX_PIN_CFG, data);
  875. } else {
  876. mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
  877. mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
  878. /* disable pa-lna */
  879. mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
  880. mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
  881. }
  882. dev->ed_tx_blocked = !enable;
  883. }
/* (Re)initialize energy-detect CCA: reset the trigger/silence counters,
 * program the ED thresholds and TXOP blocking for the current channel,
 * unblock TX and restart learning mode.
 */
void mt76x02_edcca_init(struct mt76x02_dev *dev)
{
	dev->ed_trigger = 0;
	dev->ed_silent = 0;

	if (dev->ed_monitor) {
		struct ieee80211_channel *chan = dev->mphy.chandef.chan;
		/* band-dependent energy-detect threshold */
		u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;

		mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		/* low byte: primary threshold, high byte: secondary */
		mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
			 ed_th << 8 | ed_th);
		mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
	} else {
		mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		/* chip-specific default AGC thresholds */
		if (is_mt76x2(dev)) {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
			mt76_set(dev, MT_TXOP_HLDR_ET,
				 MT_TXOP_HLDR_TX40M_BLK_EN);
		} else {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
			mt76_clear(dev, MT_TXOP_HLDR_ET,
				   MT_TXOP_HLDR_TX40M_BLK_EN);
		}
	}
	mt76x02_edcca_tx_enable(dev, true);
	dev->ed_monitor_learning = true;

	/* clear previous CCA timer value */
	mt76_rr(dev, MT_ED_CCA_TIMER);
	dev->ed_time = ktime_get_boottime();
}
EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
  916. #define MT_EDCCA_TH 92
  917. #define MT_EDCCA_BLOCK_TH 2
  918. #define MT_EDCCA_LEARN_TH 50
  919. #define MT_EDCCA_LEARN_CCA 180
  920. #define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
  921. static void mt76x02_edcca_check(struct mt76x02_dev *dev)
  922. {
  923. ktime_t cur_time;
  924. u32 active, val, busy;
  925. cur_time = ktime_get_boottime();
  926. val = mt76_rr(dev, MT_ED_CCA_TIMER);
  927. active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
  928. dev->ed_time = cur_time;
  929. busy = (val * 100) / active;
  930. busy = min_t(u32, busy, 100);
  931. if (busy > MT_EDCCA_TH) {
  932. dev->ed_trigger++;
  933. dev->ed_silent = 0;
  934. } else {
  935. dev->ed_silent++;
  936. dev->ed_trigger = 0;
  937. }
  938. if (dev->cal.agc_lowest_gain &&
  939. dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
  940. dev->ed_trigger > MT_EDCCA_LEARN_TH) {
  941. dev->ed_monitor_learning = false;
  942. dev->ed_trigger_timeout = jiffies + 20 * HZ;
  943. } else if (!dev->ed_monitor_learning &&
  944. time_is_after_jiffies(dev->ed_trigger_timeout)) {
  945. dev->ed_monitor_learning = true;
  946. mt76x02_edcca_tx_enable(dev, true);
  947. }
  948. if (dev->ed_monitor_learning)
  949. return;
  950. if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
  951. mt76x02_edcca_tx_enable(dev, false);
  952. else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
  953. mt76x02_edcca_tx_enable(dev, true);
  954. }
  955. void mt76x02_mac_work(struct work_struct *work)
  956. {
  957. struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
  958. mphy.mac_work.work);
  959. int i, idx;
  960. mutex_lock(&dev->mt76.mutex);
  961. mt76_update_survey(&dev->mt76);
  962. for (i = 0, idx = 0; i < 16; i++) {
  963. u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
  964. dev->mt76.aggr_stats[idx++] += val & 0xffff;
  965. dev->mt76.aggr_stats[idx++] += val >> 16;
  966. }
  967. if (!dev->mt76.beacon_mask)
  968. mt76x02_check_mac_err(dev);
  969. if (dev->ed_monitor)
  970. mt76x02_edcca_check(dev);
  971. mutex_unlock(&dev->mt76.mutex);
  972. mt76_tx_status_check(&dev->mt76, NULL, false);
  973. ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
  974. MT_MAC_WORK_INTERVAL);
  975. }
  976. void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
  977. {
  978. dev->mphy.survey_time = ktime_get_boottime();
  979. mt76_wr(dev, MT_CH_TIME_CFG,
  980. MT_CH_TIME_CFG_TIMER_EN |
  981. MT_CH_TIME_CFG_TX_AS_BUSY |
  982. MT_CH_TIME_CFG_RX_AS_BUSY |
  983. MT_CH_TIME_CFG_NAV_AS_BUSY |
  984. MT_CH_TIME_CFG_EIFS_AS_BUSY |
  985. MT_CH_CCA_RC_EN |
  986. FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
  987. /* channel cycle counters read-and-clear */
  988. mt76_rr(dev, MT_CH_BUSY);
  989. mt76_rr(dev, MT_CH_IDLE);
  990. }
  991. EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);
  992. void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
  993. {
  994. idx &= 7;
  995. mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
  996. mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
  997. get_unaligned_le16(addr + 4));
  998. }