mt76x02_mac.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220
  1. // SPDX-License-Identifier: ISC
  2. /*
  3. * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  4. * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
  5. */
  6. #include "mt76x02.h"
  7. #include "mt76x02_trace.h"
  8. #include "trace.h"
  9. void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
  10. {
  11. int i;
  12. mt76_rr(dev, MT_RX_STAT_0);
  13. mt76_rr(dev, MT_RX_STAT_1);
  14. mt76_rr(dev, MT_RX_STAT_2);
  15. mt76_rr(dev, MT_TX_STA_0);
  16. mt76_rr(dev, MT_TX_STA_1);
  17. mt76_rr(dev, MT_TX_STA_2);
  18. for (i = 0; i < 16; i++)
  19. mt76_rr(dev, MT_TX_AGG_CNT(i));
  20. for (i = 0; i < 16; i++)
  21. mt76_rr(dev, MT_TX_STAT_FIFO);
  22. memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
  23. }
  24. EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
  25. static enum mt76x02_cipher_type
  26. mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
  27. {
  28. memset(key_data, 0, 32);
  29. if (!key)
  30. return MT76X02_CIPHER_NONE;
  31. if (key->keylen > 32)
  32. return MT76X02_CIPHER_NONE;
  33. memcpy(key_data, key->key, key->keylen);
  34. switch (key->cipher) {
  35. case WLAN_CIPHER_SUITE_WEP40:
  36. return MT76X02_CIPHER_WEP40;
  37. case WLAN_CIPHER_SUITE_WEP104:
  38. return MT76X02_CIPHER_WEP104;
  39. case WLAN_CIPHER_SUITE_TKIP:
  40. return MT76X02_CIPHER_TKIP;
  41. case WLAN_CIPHER_SUITE_CCMP:
  42. return MT76X02_CIPHER_AES_CCMP;
  43. default:
  44. return MT76X02_CIPHER_NONE;
  45. }
  46. }
  47. int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
  48. u8 key_idx, struct ieee80211_key_conf *key)
  49. {
  50. enum mt76x02_cipher_type cipher;
  51. u8 key_data[32];
  52. u32 val;
  53. cipher = mt76x02_mac_get_key_info(key, key_data);
  54. if (cipher == MT76X02_CIPHER_NONE && key)
  55. return -EOPNOTSUPP;
  56. val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
  57. val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
  58. val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
  59. mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
  60. mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
  61. sizeof(key_data));
  62. return 0;
  63. }
  64. EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
/* Read back the hardware IV/EIV words for WCID @idx and seed the
 * mac80211 key's TX packet number (PN) from them, so PN generation
 * continues monotonically (e.g. across a restart).  Only TKIP and
 * CCMP-class ciphers carry a PN; for anything else the key is left
 * untouched.
 */
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
			      struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 iv, eiv;
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	iv = mt76_rr(dev, MT_WCID_IV(idx));
	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);

	/* The extended IV word supplies the upper 32 PN bits for both
	 * cipher families; the low 16 bits come from the IV word, with a
	 * cipher-specific byte layout.
	 */
	pn = (u64)eiv << 16;
	if (cipher == MT76X02_CIPHER_TKIP) {
		/* TKIP packs the two low PN bytes non-contiguously in the
		 * IV word (byte 2 -> bits 0..7, byte 0 -> bits 8..15).
		 */
		pn |= (iv >> 16) & 0xff;
		pn |= (iv & 0xff) << 8;
	} else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
		/* CCMP keeps PN0/PN1 in the low 16 bits of the IV word */
		pn |= iv & 0xffff;
	} else {
		return;
	}

	atomic64_set(&key->tx_pn, pn);
}
/* Program the pairwise key and initial IV for WCID entry @idx.
 * A NULL @key clears the key material and IV.  Returns 0 on success or
 * -EOPNOTSUPP when the cipher cannot be offloaded.
 */
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
			     struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT76X02_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));

		/* Seed the on-chip IV from the key's current TX PN so the
		 * hardware continues the sequence started in software.
		 */
		pn = atomic64_read(&key->tx_pn);

		/* Byte 3 carries the key index; ciphers with an extended
		 * IV (TKIP and above) also set the Ext-IV bit (0x20) and
		 * place the upper PN bytes in the EIV half.
		 */
		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT76X02_CIPHER_TKIP) {
			iv_data[3] |= 0x20;
			put_unaligned_le32(pn >> 16, &iv_data[4]);
		}

		if (cipher == MT76X02_CIPHER_TKIP) {
			/* TKIP low-PN layout: TSC1, WEP seed, TSC0 */
			iv_data[0] = (pn >> 8) & 0xff;
			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
			iv_data[2] = pn & 0xff;
		} else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
			/* CCMP: PN0/PN1 little-endian in bytes 0..1 */
			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
		}
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}
  119. void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
  120. u8 vif_idx, u8 *mac)
  121. {
  122. struct mt76_wcid_addr addr = {};
  123. u32 attr;
  124. attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
  125. FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
  126. mt76_wr(dev, MT_WCID_ATTR(idx), attr);
  127. if (idx >= 128)
  128. return;
  129. if (mac)
  130. memcpy(addr.macaddr, mac, ETH_ALEN);
  131. mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
  132. }
  133. EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
  134. void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
  135. {
  136. u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
  137. u32 bit = MT_WCID_DROP_MASK(idx);
  138. /* prevent unnecessary writes */
  139. if ((val & bit) != (bit * drop))
  140. mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
  141. }
/* Encode a mac80211 TX rate into the 16-bit hardware rate word
 * (MT_RXWI_RATE_* layout) and report the spatial-stream count through
 * @nss_val.
 */
static u16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
			const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u8 phy, rate_idx, nss, bw = 0;
	u16 rateval;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		/* VHT: mac80211 packs NSS-1 in the upper nibble of idx */
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		/* HT: 8 MCS indexes per spatial stream */
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		/* Legacy CCK/OFDM: the driver's hw_value encodes the PHY
		 * type in the high byte and the rate index in the low byte.
		 */
		const struct ieee80211_rate *r;
		int band = dev->mphy.chandef.chan->band;
		u16 val;

		r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		nss = 1;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return rateval;
}
  185. void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
  186. const struct ieee80211_tx_rate *rate)
  187. {
  188. s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
  189. u16 rateval;
  190. u32 tx_info;
  191. s8 nss;
  192. rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
  193. tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
  194. FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
  195. FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
  196. MT_WCID_TX_INFO_SET;
  197. wcid->tx_info = tx_info;
  198. }
  199. void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
  200. {
  201. if (enable)
  202. mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
  203. else
  204. mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
  205. }
/* Pop one entry from the hardware TX status FIFO into @stat.
 * Returns false when the FIFO is empty (entry not valid).
 */
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
				struct mt76x02_tx_status *stat)
{
	u32 stat1, stat2;

	/* NOTE(review): the EXT register is read before the FIFO register;
	 * reading MT_TX_STAT_FIFO appears to advance the FIFO, so this
	 * order looks intentional — keep it.
	 */
	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);

	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
	if (!stat->valid)
		return false;

	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);

	trace_mac_txstat_fetch(dev, stat);

	return true;
}
/* Decode a hardware rate word (MT_RXWI_RATE_* layout) into a mac80211
 * ieee80211_tx_rate.  Returns 0 on success, -EINVAL for an unknown PHY
 * type or bandwidth.
 */
static int
mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
			    enum nl80211_band band)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	txrate->idx = 0;
	txrate->flags = 0;
	txrate->count = 1;

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		/* On 2.4 GHz the bitrate table starts with 4 CCK rates,
		 * so OFDM indexes are shifted up by 4.
		 */
		if (band == NL80211_BAND_2GHZ)
			idx += 4;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		/* Indexes >= 8 are the short-preamble variants */
		if (idx >= 8)
			idx -= 8;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
		fallthrough;
	case MT_PHY_TYPE_HT:
		txrate->flags |= IEEE80211_TX_RC_MCS;
		txrate->idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
		txrate->idx = idx;
		break;
	default:
		return -EINVAL;
	}

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case MT_PHY_BW_80:
		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_SGI)
		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;

	return 0;
}
  274. void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
  275. struct sk_buff *skb, struct mt76_wcid *wcid,
  276. struct ieee80211_sta *sta, int len)
  277. {
  278. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  279. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  280. struct ieee80211_tx_rate *rate = &info->control.rates[0];
  281. struct ieee80211_key_conf *key = info->control.hw_key;
  282. u32 wcid_tx_info;
  283. u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
  284. u16 txwi_flags = 0, rateval;
  285. u8 nss;
  286. s8 txpwr_adj, max_txpwr_adj;
  287. u8 ccmp_pn[8], nstreams = dev->mphy.chainmask & 0xf;
  288. memset(txwi, 0, sizeof(*txwi));
  289. mt76_tx_check_agg_ssn(sta, skb);
  290. if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
  291. ieee80211_has_protected(hdr->frame_control)) {
  292. wcid = NULL;
  293. ieee80211_get_tx_rates(info->control.vif, sta, skb,
  294. info->control.rates, 1);
  295. }
  296. if (wcid)
  297. txwi->wcid = wcid->idx;
  298. else
  299. txwi->wcid = 0xff;
  300. if (wcid && wcid->sw_iv && key) {
  301. u64 pn = atomic64_inc_return(&key->tx_pn);
  302. ccmp_pn[0] = pn;
  303. ccmp_pn[1] = pn >> 8;
  304. ccmp_pn[2] = 0;
  305. ccmp_pn[3] = 0x20 | (key->keyidx << 6);
  306. ccmp_pn[4] = pn >> 16;
  307. ccmp_pn[5] = pn >> 24;
  308. ccmp_pn[6] = pn >> 32;
  309. ccmp_pn[7] = pn >> 40;
  310. txwi->iv = *((__le32 *)&ccmp_pn[0]);
  311. txwi->eiv = *((__le32 *)&ccmp_pn[4]);
  312. }
  313. if (wcid && (rate->idx < 0 || !rate->count)) {
  314. wcid_tx_info = wcid->tx_info;
  315. rateval = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
  316. max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
  317. wcid_tx_info);
  318. nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
  319. } else {
  320. rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
  321. max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
  322. }
  323. txwi->rate = cpu_to_le16(rateval);
  324. txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
  325. max_txpwr_adj);
  326. txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
  327. if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
  328. txwi->txstream = 0x13;
  329. else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
  330. !(txwi->rate & cpu_to_le16(rate_ht_mask)))
  331. txwi->txstream = 0x93;
  332. if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
  333. txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
  334. if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
  335. txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
  336. if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
  337. txwi_flags |= MT_TXWI_FLAGS_MMPS;
  338. if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
  339. txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
  340. if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
  341. txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
  342. if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
  343. u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
  344. u8 ampdu_density = sta->ht_cap.ampdu_density;
  345. ba_size <<= sta->ht_cap.ampdu_factor;
  346. ba_size = min_t(int, 63, ba_size - 1);
  347. if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
  348. ba_size = 0;
  349. txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
  350. if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
  351. ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
  352. txwi_flags |= MT_TXWI_FLAGS_AMPDU |
  353. FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, ampdu_density);
  354. }
  355. if (ieee80211_is_probe_resp(hdr->frame_control) ||
  356. ieee80211_is_beacon(hdr->frame_control))
  357. txwi_flags |= MT_TXWI_FLAGS_TS;
  358. txwi->flags |= cpu_to_le16(txwi_flags);
  359. txwi->len_ctl = cpu_to_le16(len);
  360. }
  361. EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
/* Derive the fallback rate for status slot @idx + 1 from slot @idx by
 * stepping one rate down within the same PHY mode.  No-op for idx == 0
 * (slot 0 already holds the primary rate).
 */
static void
mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
{
	u8 mcs, nss;

	if (!idx)
		return;

	rates += idx - 1;
	rates[1] = rates[0];
	switch (phy) {
	case MT_PHY_TYPE_VHT:
		mcs = ieee80211_rate_get_vht_mcs(rates);
		nss = ieee80211_rate_get_vht_nss(rates);

		/* At MCS 0 reduce the stream count instead (min 1 NSS) */
		if (mcs == 0)
			nss = max_t(int, nss - 1, 1);
		else
			mcs--;

		ieee80211_rate_set_vht(rates + 1, mcs, nss);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		/* MCS 8 falls back to MCS 0 */
		if (rates[0].idx == 8) {
			rates[1].idx = 0;
			break;
		}

		fallthrough;
	default:
		/* Legacy (and remaining HT): just step the index down */
		rates[1].idx = max_t(int, rates[0].idx - 1, 0);
		break;
	}
}
/* Translate a hardware TX status entry into mac80211 status info:
 * reconstruct the attempted rate chain, retry counts, A-MPDU length
 * and ACK flags for @n_frames aggregated frames.
 */
static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
			   struct ieee80211_tx_info *info,
			   struct mt76x02_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	struct ieee80211_tx_rate last_rate;
	u16 first_rate;
	int retry = st->retry;
	int phy;
	int i;

	if (!n_frames)
		return;

	phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);

	if (st->pktid & MT_PACKET_ID_HAS_RATE) {
		/* The packet id carries the originally requested rate
		 * index; combine it with the PHY/BW bits of the status.
		 */
		first_rate = st->rate & ~MT_PKTID_RATE;
		first_rate |= st->pktid & MT_PKTID_RATE;

		mt76x02_mac_process_tx_rate(&rate[0], first_rate,
					    dev->mphy.chandef.chan->band);
	} else if (rate[0].idx < 0) {
		/* No rate info in the skb: use the WCID default rate */
		if (!msta)
			return;

		mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
					    dev->mphy.chandef.chan->band);
	}

	mt76x02_mac_process_tx_rate(&last_rate, st->rate,
				    dev->mphy.chandef.chan->band);

	/* Fill intermediate slots via rate fallback until we reach the
	 * rate the hardware actually reported, charging one retry per
	 * slot; the final slot absorbs the remaining retries.
	 */
	for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		retry--;
		if (i + 1 == ARRAY_SIZE(info->status.rates)) {
			info->status.rates[i] = last_rate;
			info->status.rates[i].count = max_t(int, retry, 1);
			break;
		}

		mt76x02_tx_rate_fallback(info->status.rates, i, phy);
		if (info->status.rates[i].idx == last_rate.idx)
			break;
	}

	/* Terminate the rate chain */
	if (i + 1 < ARRAY_SIZE(info->status.rates)) {
		info->status.rates[i + 1].idx = -1;
		info->status.rates[i + 1].count = 0;
	}

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}
/* Report one hardware TX status entry to mac80211.  Batches status for
 * consecutive aggregated frames of the same station (up to 32) before
 * flushing, and accounts TX airtime for the reported frames.  @update
 * is in/out state shared with the caller's polling loop: it forces a
 * flush of the batched status when set.
 */
void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			    struct mt76x02_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_tx_status status = {
		.info = &info
	};
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt76_wcid *wcid = NULL;
	struct mt76x02_sta *msta = NULL;
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	u32 duration = 0;
	u8 cur_pktid;
	u32 ac = 0;
	int len = 0;

	if (stat->pktid == MT_PACKET_ID_NO_ACK)
		return;

	rcu_read_lock();

	if (stat->wcid < MT76x02_N_WCIDS)
		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

	if (wcid && wcid->sta) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		status.sta = container_of(priv, struct ieee80211_sta,
					  drv_priv);
	}

	mt76_tx_status_lock(mdev, &list);

	/* Match the status entry back to a queued skb via its packet id,
	 * if the id refers to a tracked skb.
	 */
	if (wcid) {
		if (mt76_is_skb_pktid(stat->pktid))
			status.skb = mt76_tx_status_skb_get(mdev, wcid,
							    stat->pktid, &list);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}

	/* Without a matching skb and without rate info in the packet id
	 * there is nothing useful to report.
	 */
	if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
		mt76_tx_status_unlock(mdev, &list);
		goto out;
	}

	if (msta && stat->aggr && !status.skb) {
		/* Aggregate identical per-station status entries instead
		 * of reporting each subframe separately.
		 */
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
		stat_val |= ((u32)stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32)msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			mt76_tx_status_unlock(mdev, &list);
			goto out;
		}

		/* Status changed (or batch full): flush the cached batch
		 * and start a new one with the current entry.
		 */
		cur_pktid = msta->status.pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info,
					   &msta->status, msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		cur_pktid = stat->pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
		*update = 1;
	}

	if (status.skb) {
		info = *status.info;
		len = status.skb->len;
		ac = skb_get_queue_mapping(status.skb);
		mt76_tx_status_skb_done(mdev, status.skb, &list);
	} else if (msta) {
		/* No skb: estimate the batch length from the station's
		 * running average packet length.
		 */
		len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
		ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
	}

	mt76_tx_status_unlock(mdev, &list);

	if (!status.skb)
		ieee80211_tx_status_ext(mt76_hw(dev), &status);

	if (!len)
		goto out;

	/* Account the estimated airtime to the device and the station */
	duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->tx_airtime += duration;
	spin_unlock_bh(&dev->mt76.cc_lock);

	if (msta)
		ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac], duration, 0);

out:
	rcu_read_unlock();
}
/* Decode the RXWI rate word into mt76 RX status fields (rate index,
 * encoding, NSS, bandwidth and flags).  Returns 0 on success or
 * -EINVAL for an unknown PHY type.
 */
static int
mt76x02_mac_process_rate(struct mt76x02_dev *dev,
			 struct mt76_rx_status *status,
			 u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		/* Clamp out-of-range indexes to the lowest OFDM rate */
		if (idx >= 8)
			idx = 0;

		/* On 2.4 GHz the bitrate table has 4 CCK entries first */
		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		/* Indexes >= 8 are the short-preamble variants */
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		/* Clamp out-of-range indexes to the lowest CCK rate */
		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		fallthrough;
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT: {
		u8 n_rxstream = dev->mphy.chainmask & 0xf;

		/* VHT index packs MCS and NSS; cap NSS at the number of
		 * RX chains actually present.
		 */
		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = min_t(u8, n_rxstream,
				    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
		break;
	}
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}
/* Program the device MAC address and BSSID registers from @addr,
 * falling back to a random address when @addr is invalid, and clear
 * all 16 multi-BSSID slots.
 */
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
	static const u8 null_addr[ETH_ALEN] = {};
	int i;

	ether_addr_copy(dev->mphy.macaddr, addr);

	if (!is_valid_ether_addr(dev->mphy.macaddr)) {
		eth_random_addr(dev->mphy.macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->mphy.macaddr);
	}

	/* Own address: low 4 bytes in DW0, high 2 bytes plus the
	 * unicast-to-me mask in DW1.
	 */
	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mphy.macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1,
		get_unaligned_le16(dev->mphy.macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));

	mt76_wr(dev, MT_MAC_BSSID_DW0,
		get_unaligned_le32(dev->mphy.macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1,
		get_unaligned_le16(dev->mphy.macaddr + 4) |
		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
	/* enable 7 additional beacon slots and control them with bypass mask */
	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);

	for (i = 0; i < 16; i++)
		mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
/* Convert a raw per-chain RSSI sample into a calibrated value by
 * applying the chain's calibration offset and removing the LNA gain.
 * Note: the intermediate arithmetic intentionally stays in s8.
 */
static int
mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
{
	struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;

	rssi += cal->rssi_offset[chain];
	rssi -= cal->lna_gain;

	return rssi;
}
/* Parse the RX descriptor (@rxi, an mt76x02_rxwi) for @skb and populate
 * the mt76_rx_status in skb->cb: decryption/IV state, station mapping,
 * A-MPDU info, RSSI and rate.  Strips header padding and trims the skb
 * to the MPDU length.  Returns 0 on success or a negative error.
 */
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
			   void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr;
	struct mt76x02_rxwi *rxwi = rxi;
	struct mt76x02_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0, nstreams = dev->mphy.chainmask & 0xf;
	s8 signal;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
		return -EINVAL;

	/* Hardware inserts 2 bytes of L2 padding after the 802.11 header */
	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
	status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		/* The PN still sits after the 802.11 header; save it for
		 * replay checking before it is stripped below.
		 */
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x02_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	if (rxinfo & MT_RXINFO_AMPDU) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;
		status->ampdu_ref = dev->ampdu_ref;

		/*
		 * When receiving an A-MPDU subframe and RSSI info is not valid,
		 * we can assume that more subframes belonging to the same A-MPDU
		 * are coming. The last one will have valid RSSI info
		 */
		if (rxinfo & MT_RXINFO_RSSI) {
			/* bump the reference, skipping over 0 on wrap */
			if (!++dev->ampdu_ref)
				dev->ampdu_ref++;
		}
	}

	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	pskb_trim(skb, len);

	status->chains = BIT(0);
	signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[0] = signal;
	if (nstreams > 1) {
		status->chains |= BIT(1);
		status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
							       rxwi->rssi[1],
							       1);
		/* report the strongest chain as the overall signal */
		signal = max_t(s8, signal, status->chain_signal[1]);
	}
	status->signal = signal;
	status->freq = dev->mphy.chandef.chan->center_freq;
	status->band = dev->mphy.chandef.chan->band;

	hdr = (struct ieee80211_hdr *)skb->data;
	status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	return mt76x02_mac_process_rate(dev, status, rate);
}
/* Drain the hardware TX status FIFO.  From IRQ context (@irq true) the
 * entries are only queued into dev->txstatus_fifo for deferred
 * processing; otherwise they are reported to mac80211 immediately.
 */
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
	struct mt76x02_tx_status stat = {};
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
		return;

	trace_mac_txstat_poll(dev);

	/* In IRQ context, stop once the software FIFO is full so no
	 * entry is lost; elsewhere keep draining until hardware is empty.
	 */
	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		/* trylock: give up rather than spin against a concurrent
		 * poller.
		 */
		if (!spin_trylock(&dev->txstatus_fifo_lock))
			break;

		ret = mt76x02_mac_load_tx_status(dev, &stat);
		spin_unlock(&dev->txstatus_fifo_lock);

		if (!ret)
			break;

		if (!irq) {
			mt76x02_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}
  742. void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
  743. {
  744. struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
  745. struct mt76x02_txwi *txwi;
  746. u8 *txwi_ptr;
  747. if (!e->txwi) {
  748. dev_kfree_skb_any(e->skb);
  749. return;
  750. }
  751. mt76x02_mac_poll_tx_status(dev, false);
  752. txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
  753. txwi = (struct mt76x02_txwi *)txwi_ptr;
  754. trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);
  755. mt76_tx_complete_skb(mdev, e->wcid, e->skb);
  756. }
  757. EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
  758. void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
  759. {
  760. u32 data = 0;
  761. if (val != ~0)
  762. data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
  763. MT_PROT_CFG_RTS_THRESH;
  764. mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
  765. mt76_rmw(dev, MT_CCK_PROT_CFG,
  766. MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
  767. mt76_rmw(dev, MT_OFDM_PROT_CFG,
  768. MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
  769. }
/* Configure the six legacy protection registers (prot[0..5]: CCK, OFDM,
 * MM20, MM40, GF20, GF40) and the three VHT protection registers from
 * the current legacy-protection flag and HT operation mode bits.
 */
void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
				   int ht_mode)
{
	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
	u32 prot[6];
	u32 vht_prot[3];
	int i;
	u16 rts_thr;

	/* Start from the current register values with the control (and,
	 * for HT/VHT entries, rate) fields cleared.
	 */
	for (i = 0; i < ARRAY_SIZE(prot); i++) {
		prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
		prot[i] &= ~MT_PROT_CFG_CTRL;
		if (i >= 2)
			prot[i] &= ~MT_PROT_CFG_RATE;
	}

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
		vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
		vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
	}

	rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);

	/* An active RTS threshold forces RTS/CTS on the CCK entry */
	if (rts_thr != 0xffff)
		prot[0] |= MT_PROT_CTRL_RTS_CTS;

	if (legacy_prot) {
		/* Legacy stations present: CTS-to-self on OFDM and CCK 11M
		 * protection rate everywhere.
		 */
		prot[1] |= MT_PROT_CTRL_CTS2SELF;

		prot[2] |= MT_PROT_RATE_CCK_11;
		prot[3] |= MT_PROT_RATE_CCK_11;
		prot[4] |= MT_PROT_RATE_CCK_11;
		prot[5] |= MT_PROT_RATE_CCK_11;

		vht_prot[0] |= MT_PROT_RATE_CCK_11;
		vht_prot[1] |= MT_PROT_RATE_CCK_11;
		vht_prot[2] |= MT_PROT_RATE_CCK_11;
	} else {
		if (rts_thr != 0xffff)
			prot[1] |= MT_PROT_CTRL_RTS_CTS;

		/* OFDM 24M protection rates (duplicate variant for 40 MHz) */
		prot[2] |= MT_PROT_RATE_OFDM_24;
		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
		prot[4] |= MT_PROT_RATE_OFDM_24;
		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;

		vht_prot[0] |= MT_PROT_RATE_OFDM_24;
		vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
		vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
	}

	switch (mode) {
	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
		/* Protect all HT and VHT transmissions */
		prot[2] |= MT_PROT_CTRL_RTS_CTS;
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;

		vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
		/* Only 40 MHz transmissions need protection */
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;

		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	}

	/* Non-greenfield stations present: protect greenfield entries */
	if (non_gf) {
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
	}

	for (i = 0; i < ARRAY_SIZE(prot); i++)
		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
		mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}
  839. void mt76x02_update_channel(struct mt76_phy *mphy)
  840. {
  841. struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
  842. struct mt76_channel_state *state;
  843. state = mphy->chan_state;
  844. state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
  845. spin_lock_bh(&dev->mt76.cc_lock);
  846. state->cc_tx += dev->tx_airtime;
  847. dev->tx_airtime = 0;
  848. spin_unlock_bh(&dev->mt76.cc_lock);
  849. }
  850. EXPORT_SYMBOL_GPL(mt76x02_update_channel);
  851. static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
  852. {
  853. u32 val = mt76_rr(dev, 0x10f4);
  854. if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
  855. return;
  856. dev_err(dev->mt76.dev, "mac specific condition occurred\n");
  857. mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
  858. udelay(10);
  859. mt76_wr(dev, MT_MAC_SYS_CTRL,
  860. MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
  861. }
  862. static void
  863. mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
  864. {
  865. if (enable) {
  866. u32 data;
  867. mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
  868. mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
  869. /* enable pa-lna */
  870. data = mt76_rr(dev, MT_TX_PIN_CFG);
  871. data |= MT_TX_PIN_CFG_TXANT |
  872. MT_TX_PIN_CFG_RXANT |
  873. MT_TX_PIN_RFTR_EN |
  874. MT_TX_PIN_TRSW_EN;
  875. mt76_wr(dev, MT_TX_PIN_CFG, data);
  876. } else {
  877. mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
  878. mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
  879. /* disable pa-lna */
  880. mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
  881. mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
  882. }
  883. dev->ed_tx_blocked = !enable;
  884. }
  885. void mt76x02_edcca_init(struct mt76x02_dev *dev)
  886. {
  887. dev->ed_trigger = 0;
  888. dev->ed_silent = 0;
  889. if (dev->ed_monitor) {
  890. struct ieee80211_channel *chan = dev->mphy.chandef.chan;
  891. u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
  892. mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
  893. mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
  894. mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
  895. ed_th << 8 | ed_th);
  896. mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
  897. } else {
  898. mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
  899. mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
  900. if (is_mt76x2(dev)) {
  901. mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
  902. mt76_set(dev, MT_TXOP_HLDR_ET,
  903. MT_TXOP_HLDR_TX40M_BLK_EN);
  904. } else {
  905. mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
  906. mt76_clear(dev, MT_TXOP_HLDR_ET,
  907. MT_TXOP_HLDR_TX40M_BLK_EN);
  908. }
  909. }
  910. mt76x02_edcca_tx_enable(dev, true);
  911. dev->ed_monitor_learning = true;
  912. /* clear previous CCA timer value */
  913. mt76_rr(dev, MT_ED_CCA_TIMER);
  914. dev->ed_time = ktime_get_boottime();
  915. }
  916. EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
  917. #define MT_EDCCA_TH 92
  918. #define MT_EDCCA_BLOCK_TH 2
  919. #define MT_EDCCA_LEARN_TH 50
  920. #define MT_EDCCA_LEARN_CCA 180
  921. #define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
  922. static void mt76x02_edcca_check(struct mt76x02_dev *dev)
  923. {
  924. ktime_t cur_time;
  925. u32 active, val, busy;
  926. cur_time = ktime_get_boottime();
  927. val = mt76_rr(dev, MT_ED_CCA_TIMER);
  928. active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
  929. dev->ed_time = cur_time;
  930. busy = (val * 100) / active;
  931. busy = min_t(u32, busy, 100);
  932. if (busy > MT_EDCCA_TH) {
  933. dev->ed_trigger++;
  934. dev->ed_silent = 0;
  935. } else {
  936. dev->ed_silent++;
  937. dev->ed_trigger = 0;
  938. }
  939. if (dev->cal.agc_lowest_gain &&
  940. dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
  941. dev->ed_trigger > MT_EDCCA_LEARN_TH) {
  942. dev->ed_monitor_learning = false;
  943. dev->ed_trigger_timeout = jiffies + 20 * HZ;
  944. } else if (!dev->ed_monitor_learning &&
  945. time_is_after_jiffies(dev->ed_trigger_timeout)) {
  946. dev->ed_monitor_learning = true;
  947. mt76x02_edcca_tx_enable(dev, true);
  948. }
  949. if (dev->ed_monitor_learning)
  950. return;
  951. if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
  952. mt76x02_edcca_tx_enable(dev, false);
  953. else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
  954. mt76x02_edcca_tx_enable(dev, true);
  955. }
  956. void mt76x02_mac_work(struct work_struct *work)
  957. {
  958. struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
  959. mphy.mac_work.work);
  960. int i, idx;
  961. mutex_lock(&dev->mt76.mutex);
  962. mt76_update_survey(&dev->mphy);
  963. for (i = 0, idx = 0; i < 16; i++) {
  964. u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
  965. dev->mt76.aggr_stats[idx++] += val & 0xffff;
  966. dev->mt76.aggr_stats[idx++] += val >> 16;
  967. }
  968. if (!dev->mt76.beacon_mask)
  969. mt76x02_check_mac_err(dev);
  970. if (dev->ed_monitor)
  971. mt76x02_edcca_check(dev);
  972. mutex_unlock(&dev->mt76.mutex);
  973. mt76_tx_status_check(&dev->mt76, NULL, false);
  974. ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
  975. MT_MAC_WORK_INTERVAL);
  976. }
  977. void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
  978. {
  979. dev->mphy.survey_time = ktime_get_boottime();
  980. mt76_wr(dev, MT_CH_TIME_CFG,
  981. MT_CH_TIME_CFG_TIMER_EN |
  982. MT_CH_TIME_CFG_TX_AS_BUSY |
  983. MT_CH_TIME_CFG_RX_AS_BUSY |
  984. MT_CH_TIME_CFG_NAV_AS_BUSY |
  985. MT_CH_TIME_CFG_EIFS_AS_BUSY |
  986. MT_CH_CCA_RC_EN |
  987. FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
  988. /* channel cycle counters read-and-clear */
  989. mt76_rr(dev, MT_CH_BUSY);
  990. mt76_rr(dev, MT_CH_IDLE);
  991. }
  992. EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);
  993. void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
  994. {
  995. idx &= 7;
  996. mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
  997. mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
  998. get_unaligned_le16(addr + 4));
  999. }