mac80211.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286
  1. // SPDX-License-Identifier: ISC
  2. /*
  3. * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  4. */
  5. #include <linux/sched.h>
  6. #include <linux/of.h>
  7. #include "mt76.h"
/* Initializer for a 2.4 GHz ieee80211_channel table entry.
 * @_idx:  hardware channel number (hw_value)
 * @_freq: center frequency in MHz
 * max_power is a fixed 30 dBm upper bound for every entry.
 */
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

/* Same as CHAN2G, but marks the entry as a 5 GHz band channel */
#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}
/* Common 2.4 GHz channel list shared by all mt76 drivers:
 * channels 1-14 (2412-2484 MHz). Regulatory/OF limits may disable
 * entries later; see mt76_check_sband().
 */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
/* Common 5 GHz channel list shared by all mt76 drivers:
 * UNII-1 (36-48), UNII-2/2e (52-144) and UNII-3 (149-173).
 */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),
	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),
	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};
/* Throughput-to-blink-rate map for the mac80211 tpt LED trigger:
 * the faster the traffic, the shorter the blink interval (ms).
 * Units of .throughput follow the mac80211 tpt trigger convention
 * (presumably Kbit/s given the *1024 factors — confirm against
 * ieee80211_create_tpt_led_trigger() docs).
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
  77. static int mt76_led_init(struct mt76_dev *dev)
  78. {
  79. struct device_node *np = dev->dev->of_node;
  80. struct ieee80211_hw *hw = dev->hw;
  81. int led_pin;
  82. if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
  83. return 0;
  84. snprintf(dev->led_name, sizeof(dev->led_name),
  85. "mt76-%s", wiphy_name(hw->wiphy));
  86. dev->led_cdev.name = dev->led_name;
  87. dev->led_cdev.default_trigger =
  88. ieee80211_create_tpt_led_trigger(hw,
  89. IEEE80211_TPT_LEDTRIG_FL_RADIO,
  90. mt76_tpt_blink,
  91. ARRAY_SIZE(mt76_tpt_blink));
  92. np = of_get_child_by_name(np, "led");
  93. if (np) {
  94. if (!of_property_read_u32(np, "led-sources", &led_pin))
  95. dev->led_pin = led_pin;
  96. dev->led_al = of_property_read_bool(np, "led-active-low");
  97. }
  98. return led_classdev_register(dev->dev, &dev->led_cdev);
  99. }
  100. static void mt76_led_cleanup(struct mt76_dev *dev)
  101. {
  102. if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
  103. return;
  104. led_classdev_unregister(&dev->led_cdev);
  105. }
/* Program the HT (and, when @vht is set, VHT) stream-dependent
 * capabilities for @sband based on the number of spatial streams
 * implied by phy->antenna_mask.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	/* TX STBC requires more than one TX chain */
	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* one full MCS byte (8 MCS indexes) per available stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	/* VHT MCS map: 2 bits per stream — MCS 0-9 for usable streams,
	 * "not supported" for the rest
	 */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}
  137. void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
  138. {
  139. if (phy->cap.has_2ghz)
  140. mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
  141. if (phy->cap.has_5ghz)
  142. mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
  143. }
  144. EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
/* Common per-band setup: duplicate the channel table, allocate the
 * per-channel state array and fill in the default HT (and optionally
 * VHT) capabilities.
 *
 * All allocations are devm-managed, so no explicit unwind is needed
 * on the error paths. Returns 0 or -ENOMEM.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* the shared const tables get mutated by regulatory code, so
	 * each device needs its own writable copy
	 */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* stream-count dependent bits (MCS masks, TX STBC) */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
  190. static int
  191. mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
  192. int n_rates)
  193. {
  194. phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
  195. return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
  196. ARRAY_SIZE(mt76_channels_2ghz), rates,
  197. n_rates, false);
  198. }
  199. static int
  200. mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
  201. int n_rates, bool vht)
  202. {
  203. phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
  204. return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
  205. ARRAY_SIZE(mt76_channels_5ghz), rates,
  206. n_rates, vht);
  207. }
  208. static void
  209. mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
  210. enum nl80211_band band)
  211. {
  212. struct ieee80211_supported_band *sband = &msband->sband;
  213. bool found = false;
  214. int i;
  215. if (!sband)
  216. return;
  217. for (i = 0; i < sband->n_channels; i++) {
  218. if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
  219. continue;
  220. found = true;
  221. break;
  222. }
  223. if (found) {
  224. phy->chandef.chan = &sband->channels[0];
  225. phy->chan_state = &msband->chan[0];
  226. return;
  227. }
  228. sband->n_channels = 0;
  229. phy->hw->wiphy->bands[band] = NULL;
  230. }
/* Common wiphy/hw capability setup shared by the primary and any
 * secondary phy. Only advertises features; no hardware access here.
 */
static void
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	/* NOTE(review): antennas are read from the primary phy
	 * (dev->phy), not from @phy — presumably intentional for
	 * multi-phy devices; confirm
	 */
	wiphy->available_antennas_tx = dev->phy.antenna_mask;
	wiphy->available_antennas_rx = dev->phy.antenna_mask;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	/* keep a driver-provided fragment limit if one was set */
	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	/* software A-MSDU only when the hardware doesn't offload it */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);
}
  276. struct mt76_phy *
  277. mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
  278. const struct ieee80211_ops *ops)
  279. {
  280. struct ieee80211_hw *hw;
  281. unsigned int phy_size;
  282. struct mt76_phy *phy;
  283. phy_size = ALIGN(sizeof(*phy), 8);
  284. hw = ieee80211_alloc_hw(size + phy_size, ops);
  285. if (!hw)
  286. return NULL;
  287. phy = hw->priv;
  288. phy->dev = dev;
  289. phy->hw = hw;
  290. phy->priv = hw->priv + phy_size;
  291. return phy;
  292. }
  293. EXPORT_SYMBOL_GPL(mt76_alloc_phy);
/* Register a secondary phy with mac80211: set up capabilities and
 * bands, apply OF frequency limits, then register the hw. On success
 * the phy is published as dev->phy2.
 *
 * Returns 0 or a negative errno; band allocations are devm-managed
 * and need no unwind on failure.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	mt76_phy_init(phy, phy->hw);

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the first 4 rate entries for 5 GHz — presumably
		 * the CCK rates; confirm against the drivers' rate tables
		 */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	phy->dev->phy2 = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
/* Unregister a secondary phy: flush pending tx-status entries first,
 * then detach the hw from mac80211 and clear the dev->phy2 pointer.
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	mt76_tx_status_check(dev, NULL, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phy2 = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
/* Allocate the primary mac80211 hw plus mt76_dev and initialize all
 * software state (locks, queues, worker, workqueue). No hardware
 * access happens here.
 *
 * Returns the new device or NULL on allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;

	/* the built-in primary phy shares the hw with the device */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);
	skb_queue_head_init(&dev->status_list);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	INIT_LIST_HEAD(&dev->txwi_cache);

	/* NOTE(review): loop bound is ARRAY_SIZE(q_rx) but it inits
	 * rx_skb[] — assumes both arrays have the same length; confirm
	 * in mt76.h
	 */
	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
  369. int mt76_register_device(struct mt76_dev *dev, bool vht,
  370. struct ieee80211_rate *rates, int n_rates)
  371. {
  372. struct ieee80211_hw *hw = dev->hw;
  373. struct mt76_phy *phy = &dev->phy;
  374. int ret;
  375. dev_set_drvdata(dev->dev, dev);
  376. mt76_phy_init(phy, hw);
  377. if (phy->cap.has_2ghz) {
  378. ret = mt76_init_sband_2g(phy, rates, n_rates);
  379. if (ret)
  380. return ret;
  381. }
  382. if (phy->cap.has_5ghz) {
  383. ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
  384. if (ret)
  385. return ret;
  386. }
  387. wiphy_read_of_freq_limits(hw->wiphy);
  388. mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
  389. mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
  390. if (IS_ENABLED(CONFIG_MT76_LEDS)) {
  391. ret = mt76_led_init(dev);
  392. if (ret)
  393. return ret;
  394. }
  395. ret = ieee80211_register_hw(hw);
  396. if (ret)
  397. return ret;
  398. WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
  399. sched_set_fifo_low(dev->tx_worker.task);
  400. return 0;
  401. }
  402. EXPORT_SYMBOL_GPL(mt76_register_device);
  403. void mt76_unregister_device(struct mt76_dev *dev)
  404. {
  405. struct ieee80211_hw *hw = dev->hw;
  406. if (IS_ENABLED(CONFIG_MT76_LEDS))
  407. mt76_led_cleanup(dev);
  408. mt76_tx_status_check(dev, NULL, true);
  409. ieee80211_unregister_hw(hw);
  410. }
  411. EXPORT_SYMBOL_GPL(mt76_unregister_device);
  412. void mt76_free_device(struct mt76_dev *dev)
  413. {
  414. mt76_worker_teardown(&dev->tx_worker);
  415. if (dev->wq) {
  416. destroy_workqueue(dev->wq);
  417. dev->wq = NULL;
  418. }
  419. ieee80211_free_hw(dev->hw);
  420. }
  421. EXPORT_SYMBOL_GPL(mt76_free_device);
/* Hand the A-MSDU currently being reassembled for rx queue @q over to
 * the device rx queue and reset the per-phy reassembly slot. The
 * subframes collected so far hang off head's frag_list.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
/* Collect rx frames into A-MSDU chains. A new frame that is not part
 * of the A-MSDU currently being assembled (different seqno, not an
 * A-MSDU at all, or explicitly the first subframe) flushes the
 * pending chain first. Non-A-MSDU frames and final subframes release
 * immediately.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* frame doesn't continue the pending A-MSDU -> flush it */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* start a new chain; tail points into head's frag_list */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* append subframe and advance the tail pointer */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
/* Driver entry point for a received frame. Drops the frame if the
 * owning phy is not running, accounts testmode rx statistics, then
 * feeds the frame into A-MSDU burst collection. The rx status lives
 * in skb->cb (as struct mt76_rx_status).
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	/* testmode rx counters: total packets and FCS errors per queue */
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
  467. bool mt76_has_tx_pending(struct mt76_phy *phy)
  468. {
  469. struct mt76_queue *q;
  470. int i;
  471. for (i = 0; i < __MT_TXQ_MAX; i++) {
  472. q = phy->q_tx[i];
  473. if (q && q->queued)
  474. return true;
  475. }
  476. return false;
  477. }
  478. EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
  479. static struct mt76_channel_state *
  480. mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
  481. {
  482. struct mt76_sband *msband;
  483. int idx;
  484. if (c->band == NL80211_BAND_2GHZ)
  485. msband = &phy->sband_2g;
  486. else
  487. msband = &phy->sband_5g;
  488. idx = c - &msband->sband.channels[0];
  489. return &msband->chan[idx];
  490. }
  491. void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
  492. {
  493. struct mt76_channel_state *state = phy->chan_state;
  494. state->cc_active += ktime_to_us(ktime_sub(time,
  495. phy->survey_time));
  496. phy->survey_time = time;
  497. }
  498. EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
/* Refresh channel survey statistics: let the driver update its
 * hardware counters, account active time on both phys, and fold the
 * software-tracked BSS rx airtime into the channel state when the
 * driver uses software rx airtime accounting.
 */
void mt76_update_survey(struct mt76_dev *dev)
{
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(dev);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(&dev->phy, cur_time);
	if (dev->phy2)
		mt76_update_survey_active_time(dev->phy2, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = dev->phy.chan_state;

		/* cc_lock protects cur_cc_bss_rx against the rx path */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
/* Software side of a channel switch: wait (bounded to 200 ms) for tx
 * queues to drain, close out the survey interval on the old channel,
 * then switch the phy's chandef and channel state. Off-channel visits
 * (e.g. scanning) don't update main_chan; channel state is reset when
 * moving away from the main channel.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(dev);

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	/* stats on a non-main channel start from scratch */
	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
/* mac80211 .get_survey callback: report channel occupancy statistics
 * for channel index @idx, counting 2.4 GHz channels first, then
 * 5 GHz. Returns -ENOENT past the last channel (mac80211 iterates
 * until that happens).
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);

	/* refresh hardware counters once per iteration sweep */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(dev);

	/* flat index: 2.4 GHz channels first, then 5 GHz */
	sband = &phy->sband_2g;
	if (idx >= sband->sband.n_channels) {
		idx -= sband->sband.n_channels;
		sband = &phy->sband_5g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* counters are kept in microseconds; survey wants milliseconds */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are updated under cc_lock by the rx/tx paths */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
  581. void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
  582. struct ieee80211_key_conf *key)
  583. {
  584. struct ieee80211_key_seq seq;
  585. int i;
  586. wcid->rx_check_pn = false;
  587. if (!key)
  588. return;
  589. if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
  590. return;
  591. wcid->rx_check_pn = true;
  592. for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
  593. ieee80211_get_key_rx_seq(key, i, &seq);
  594. memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
  595. }
  596. }
  597. EXPORT_SYMBOL(mt76_wcid_key_setup);
/* Convert the driver-private rx status stored in skb->cb into the
 * mac80211 ieee80211_rx_status expected there, and resolve the
 * destination hw/sta for the frame. The mt76 status must be copied
 * out first because both structs live in the same cb area.
 */
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct mt76_rx_status mstat;

	/* save a copy before cb is overwritten below */
	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;

	/* both structs share skb->cb — mt76's must not outgrow it */
	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.ext_phy);
}
/* Validate the CCMP packet number of a decrypted rx frame against the
 * last PN seen for its TID. Returns 0 to accept the frame, -EINVAL to
 * drop it as a replay (PN less than or equal to the stored one).
 */
static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		hdr = mt76_skb_get_hdr(skb);
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	/* big-endian PN bytes: memcmp gives a valid ordering; a PN
	 * equal to the stored one also counts as a replay (<=)
	 */
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[tidno],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv));

	/* when the IV was stripped, mac80211 can't re-check the PN */
	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}
/* Estimate the airtime consumed by a received frame (or aggregate) of
 * @len bytes from its rate info, add it to the device's BSS rx
 * airtime counter, and report it to mac80211's per-station airtime
 * fairness accounting when the frame belongs to a known station.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);

	/* cc_lock guards cur_cc_bss_rx against the survey path */
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	/* wcid is embedded in the station's drv_priv area */
	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
/* Flush the airtime accumulated for the A-MPDU currently being
 * tracked: re-resolve the wcid from its index (it may have been freed
 * since the first subframe) and report the total length in one shot,
 * then reset the tracking state.
 */
static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;

	dev->rx_ampdu_status.wcid = wcid;
	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}
/* Software rx airtime accounting entry point. Frames within one
 * A-MPDU share a single airtime estimate, so their lengths are
 * accumulated and reported together when the A-MPDU reference
 * changes; standalone frames are reported immediately. Frames not
 * addressed to us and not from a known station are ignored.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		/* 802.3-decapped frames have no 802.11 header to check */
		if (status->flag & RX_FLAG_8023)
			return;

		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a frame outside the tracked A-MPDU closes the previous one */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* first subframe: snapshot the status; remember
			 * the wcid by index since it may be freed before
			 * the flush (0xff = none)
			 */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
/* Per-frame station bookkeeping on the rx path: resolve the station
 * for PS-poll frames, account airtime, track RSSI and inactivity,
 * and mirror the station's 802.11 power-save state (including U-APSD
 * triggers) into mac80211 for drivers that need software PS tracking.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->ext_phy);

	/* PS-poll frames may arrive without a resolved wcid; look the
	 * station up by its transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* signal is a dBm value (<= 0); store its magnitude in the EWMA */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	/* decapped 802.3 frames carry no PM/QoS bits to inspect */
	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* PM state is only trusted on complete mgmt/data frames */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	/* QoS data/null frames from a dozing station act as U-APSD
	 * triggers for their TID
	 */
	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* only propagate actual PS state transitions */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
	else
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);
	ieee80211_sta_ps_transition(sta, ps);
}
/* Deliver reordered rx frames to mac80211. Each queued skb may carry
 * additional A-MSDU subframes on its frag_list; those are detached
 * and delivered individually. Frames failing the CCMP PN check are
 * dropped. The resulting list goes through GRO when a NAPI context is
 * available, plain netif delivery otherwise.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		/* detach the subframe chain before handing skb over */
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
  820. void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
  821. struct napi_struct *napi)
  822. {
  823. struct sk_buff_head frames;
  824. struct sk_buff *skb;
  825. __skb_queue_head_init(&frames);
  826. while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
  827. mt76_check_sta(dev, skb);
  828. mt76_rx_aggr_reorder(skb, &frames);
  829. }
  830. mt76_rx_complete(dev, &frames, napi);
  831. }
  832. EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
  833. static int
  834. mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
  835. struct ieee80211_sta *sta, bool ext_phy)
  836. {
  837. struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
  838. int ret;
  839. int i;
  840. mutex_lock(&dev->mutex);
  841. ret = dev->drv->sta_add(dev, vif, sta);
  842. if (ret)
  843. goto out;
  844. for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
  845. struct mt76_txq *mtxq;
  846. if (!sta->txq[i])
  847. continue;
  848. mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
  849. mtxq->wcid = wcid;
  850. }
  851. ewma_signal_init(&wcid->rssi);
  852. if (ext_phy)
  853. mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
  854. wcid->ext_phy = ext_phy;
  855. rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
  856. out:
  857. mutex_unlock(&dev->mutex);
  858. return ret;
  859. }
  860. void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
  861. struct ieee80211_sta *sta)
  862. {
  863. struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
  864. int i, idx = wcid->idx;
  865. for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
  866. mt76_rx_aggr_stop(dev, wcid, i);
  867. if (dev->drv->sta_remove)
  868. dev->drv->sta_remove(dev, vif, sta);
  869. mt76_tx_status_check(dev, wcid, true);
  870. mt76_wcid_mask_clear(dev->wcid_mask, idx);
  871. mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
  872. }
  873. EXPORT_SYMBOL_GPL(__mt76_sta_remove);
  874. static void
  875. mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
  876. struct ieee80211_sta *sta)
  877. {
  878. mutex_lock(&dev->mutex);
  879. __mt76_sta_remove(dev, vif, sta);
  880. mutex_unlock(&dev->mutex);
  881. }
  882. int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  883. struct ieee80211_sta *sta,
  884. enum ieee80211_sta_state old_state,
  885. enum ieee80211_sta_state new_state)
  886. {
  887. struct mt76_phy *phy = hw->priv;
  888. struct mt76_dev *dev = phy->dev;
  889. bool ext_phy = phy != &dev->phy;
  890. if (old_state == IEEE80211_STA_NOTEXIST &&
  891. new_state == IEEE80211_STA_NONE)
  892. return mt76_sta_add(dev, vif, sta, ext_phy);
  893. if (old_state == IEEE80211_STA_AUTH &&
  894. new_state == IEEE80211_STA_ASSOC &&
  895. dev->drv->sta_assoc)
  896. dev->drv->sta_assoc(dev, vif, sta);
  897. if (old_state == IEEE80211_STA_NONE &&
  898. new_state == IEEE80211_STA_NOTEXIST)
  899. mt76_sta_remove(dev, vif, sta);
  900. return 0;
  901. }
  902. EXPORT_SYMBOL_GPL(mt76_sta_state);
  903. void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  904. struct ieee80211_sta *sta)
  905. {
  906. struct mt76_phy *phy = hw->priv;
  907. struct mt76_dev *dev = phy->dev;
  908. struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
  909. mutex_lock(&dev->mutex);
  910. rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
  911. mutex_unlock(&dev->mutex);
  912. }
  913. EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
  914. int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  915. int *dbm)
  916. {
  917. struct mt76_phy *phy = hw->priv;
  918. int n_chains = hweight8(phy->antenna_mask);
  919. int delta = mt76_tx_power_nss_delta(n_chains);
  920. *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
  921. return 0;
  922. }
  923. EXPORT_SYMBOL_GPL(mt76_get_txpower);
  924. static void
  925. __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
  926. {
  927. if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
  928. ieee80211_csa_finish(vif);
  929. }
  930. void mt76_csa_finish(struct mt76_dev *dev)
  931. {
  932. if (!dev->csa_complete)
  933. return;
  934. ieee80211_iterate_active_interfaces_atomic(dev->hw,
  935. IEEE80211_IFACE_ITER_RESUME_ALL,
  936. __mt76_csa_finish, dev);
  937. dev->csa_complete = 0;
  938. }
  939. EXPORT_SYMBOL_GPL(mt76_csa_finish);
  940. static void
  941. __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
  942. {
  943. struct mt76_dev *dev = priv;
  944. if (!vif->csa_active)
  945. return;
  946. dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
  947. }
  948. void mt76_csa_check(struct mt76_dev *dev)
  949. {
  950. ieee80211_iterate_active_interfaces_atomic(dev->hw,
  951. IEEE80211_IFACE_ITER_RESUME_ALL,
  952. __mt76_csa_check, dev);
  953. }
  954. EXPORT_SYMBOL_GPL(mt76_csa_check);
  955. int
  956. mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
  957. {
  958. return 0;
  959. }
  960. EXPORT_SYMBOL_GPL(mt76_set_tim);
  961. void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
  962. {
  963. struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
  964. int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
  965. u8 *hdr, *pn = status->iv;
  966. __skb_push(skb, 8);
  967. memmove(skb->data, skb->data + 8, hdr_len);
  968. hdr = skb->data + hdr_len;
  969. hdr[0] = pn[5];
  970. hdr[1] = pn[4];
  971. hdr[2] = 0;
  972. hdr[3] = 0x20 | (key_id << 6);
  973. hdr[4] = pn[3];
  974. hdr[5] = pn[2];
  975. hdr[6] = pn[1];
  976. hdr[7] = pn[0];
  977. status->flag &= ~RX_FLAG_IV_STRIPPED;
  978. }
  979. EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
  980. int mt76_get_rate(struct mt76_dev *dev,
  981. struct ieee80211_supported_band *sband,
  982. int idx, bool cck)
  983. {
  984. int i, offset = 0, len = sband->n_bitrates;
  985. if (cck) {
  986. if (sband == &dev->phy.sband_5g.sband)
  987. return 0;
  988. idx &= ~BIT(2); /* short preamble */
  989. } else if (sband == &dev->phy.sband_2g.sband) {
  990. offset = 4;
  991. }
  992. for (i = offset; i < len; i++) {
  993. if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
  994. return i;
  995. }
  996. return 0;
  997. }
  998. EXPORT_SYMBOL_GPL(mt76_get_rate);
  999. void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  1000. const u8 *mac)
  1001. {
  1002. struct mt76_phy *phy = hw->priv;
  1003. set_bit(MT76_SCANNING, &phy->state);
  1004. }
  1005. EXPORT_SYMBOL_GPL(mt76_sw_scan);
  1006. void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
  1007. {
  1008. struct mt76_phy *phy = hw->priv;
  1009. clear_bit(MT76_SCANNING, &phy->state);
  1010. }
  1011. EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
  1012. int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
  1013. {
  1014. struct mt76_phy *phy = hw->priv;
  1015. struct mt76_dev *dev = phy->dev;
  1016. mutex_lock(&dev->mutex);
  1017. *tx_ant = phy->antenna_mask;
  1018. *rx_ant = phy->antenna_mask;
  1019. mutex_unlock(&dev->mutex);
  1020. return 0;
  1021. }
  1022. EXPORT_SYMBOL_GPL(mt76_get_antenna);
  1023. struct mt76_queue *
  1024. mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
  1025. int ring_base)
  1026. {
  1027. struct mt76_queue *hwq;
  1028. int err;
  1029. hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
  1030. if (!hwq)
  1031. return ERR_PTR(-ENOMEM);
  1032. err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
  1033. if (err < 0)
  1034. return ERR_PTR(err);
  1035. return hwq;
  1036. }
  1037. EXPORT_SYMBOL_GPL(mt76_init_queue);