mac80211.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287
  1. // SPDX-License-Identifier: ISC
  2. /*
  3. * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  4. */
  5. #include <linux/sched.h>
  6. #include <linux/of.h>
  7. #include "mt76.h"
/* Initializer helpers for ieee80211_channel table entries.
 * _idx becomes the hw_value (channel number), _freq the center
 * frequency in MHz; every entry starts with a 30 dBm max_power
 * default (regulatory handling elsewhere may lower it — not shown
 * in this file).
 */
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}
/* 2.4 GHz channel template: channels 1-14 (2412-2484 MHz).
 * Copied per-device by mt76_init_sband() so the runtime list can be
 * modified without touching this const table.
 */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
/* 5 GHz channel template: channels 36-173 (5180-5865 MHz),
 * covering UNII-1 through UNII-3 plus the upper channels.
 * Copied per-device by mt76_init_sband().
 */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),
	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),
	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};
/* Throughput-to-blink-rate table for the mac80211 throughput LED
 * trigger: higher throughput selects a shorter blink_time (faster
 * blinking).  Units follow struct ieee80211_tpt_blink — blink_time
 * is presumably in ms; see the mac80211 LED trigger API to confirm.
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
  77. static int mt76_led_init(struct mt76_dev *dev)
  78. {
  79. struct device_node *np = dev->dev->of_node;
  80. struct ieee80211_hw *hw = dev->hw;
  81. int led_pin;
  82. if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
  83. return 0;
  84. snprintf(dev->led_name, sizeof(dev->led_name),
  85. "mt76-%s", wiphy_name(hw->wiphy));
  86. dev->led_cdev.name = dev->led_name;
  87. dev->led_cdev.default_trigger =
  88. ieee80211_create_tpt_led_trigger(hw,
  89. IEEE80211_TPT_LEDTRIG_FL_RADIO,
  90. mt76_tpt_blink,
  91. ARRAY_SIZE(mt76_tpt_blink));
  92. np = of_get_child_by_name(np, "led");
  93. if (np) {
  94. if (!of_property_read_u32(np, "led-sources", &led_pin))
  95. dev->led_pin = led_pin;
  96. dev->led_al = of_property_read_bool(np, "led-active-low");
  97. }
  98. return led_classdev_register(dev->dev, &dev->led_cdev);
  99. }
  100. static void mt76_led_cleanup(struct mt76_dev *dev)
  101. {
  102. if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
  103. return;
  104. led_classdev_unregister(&dev->led_cdev);
  105. }
  106. static void mt76_init_stream_cap(struct mt76_phy *phy,
  107. struct ieee80211_supported_band *sband,
  108. bool vht)
  109. {
  110. struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
  111. int i, nstream = hweight8(phy->antenna_mask);
  112. struct ieee80211_sta_vht_cap *vht_cap;
  113. u16 mcs_map = 0;
  114. if (nstream > 1)
  115. ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
  116. else
  117. ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
  118. for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
  119. ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
  120. if (!vht)
  121. return;
  122. vht_cap = &sband->vht_cap;
  123. if (nstream > 1)
  124. vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
  125. else
  126. vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
  127. for (i = 0; i < 8; i++) {
  128. if (i < nstream)
  129. mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
  130. else
  131. mcs_map |=
  132. (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
  133. }
  134. vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
  135. vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
  136. }
  137. void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
  138. {
  139. if (phy->cap.has_2ghz)
  140. mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
  141. if (phy->cap.has_5ghz)
  142. mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
  143. }
  144. EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
  145. static int
  146. mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
  147. const struct ieee80211_channel *chan, int n_chan,
  148. struct ieee80211_rate *rates, int n_rates, bool vht)
  149. {
  150. struct ieee80211_supported_band *sband = &msband->sband;
  151. struct ieee80211_sta_vht_cap *vht_cap;
  152. struct ieee80211_sta_ht_cap *ht_cap;
  153. struct mt76_dev *dev = phy->dev;
  154. void *chanlist;
  155. int size;
  156. size = n_chan * sizeof(*chan);
  157. chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
  158. if (!chanlist)
  159. return -ENOMEM;
  160. msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
  161. GFP_KERNEL);
  162. if (!msband->chan)
  163. return -ENOMEM;
  164. sband->channels = chanlist;
  165. sband->n_channels = n_chan;
  166. sband->bitrates = rates;
  167. sband->n_bitrates = n_rates;
  168. ht_cap = &sband->ht_cap;
  169. ht_cap->ht_supported = true;
  170. ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
  171. IEEE80211_HT_CAP_GRN_FLD |
  172. IEEE80211_HT_CAP_SGI_20 |
  173. IEEE80211_HT_CAP_SGI_40 |
  174. (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
  175. ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
  176. ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
  177. mt76_init_stream_cap(phy, sband, vht);
  178. if (!vht)
  179. return 0;
  180. vht_cap = &sband->vht_cap;
  181. vht_cap->vht_supported = true;
  182. vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
  183. IEEE80211_VHT_CAP_RXSTBC_1 |
  184. IEEE80211_VHT_CAP_SHORT_GI_80 |
  185. IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
  186. IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
  187. (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
  188. return 0;
  189. }
  190. static int
  191. mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
  192. int n_rates)
  193. {
  194. phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
  195. return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
  196. ARRAY_SIZE(mt76_channels_2ghz), rates,
  197. n_rates, false);
  198. }
  199. static int
  200. mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
  201. int n_rates, bool vht)
  202. {
  203. phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
  204. return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
  205. ARRAY_SIZE(mt76_channels_5ghz), rates,
  206. n_rates, vht);
  207. }
  208. static void
  209. mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
  210. enum nl80211_band band)
  211. {
  212. struct ieee80211_supported_band *sband = &msband->sband;
  213. bool found = false;
  214. int i;
  215. if (!sband)
  216. return;
  217. for (i = 0; i < sband->n_channels; i++) {
  218. if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
  219. continue;
  220. found = true;
  221. break;
  222. }
  223. if (found) {
  224. phy->chandef.chan = &sband->channels[0];
  225. phy->chan_state = &msband->chan[0];
  226. return;
  227. }
  228. sband->n_channels = 0;
  229. phy->hw->wiphy->bands[band] = NULL;
  230. }
/* Common mac80211 hw/wiphy setup shared by the primary and secondary
 * phy registration paths: device binding, MAC address, feature flags,
 * hw capabilities and supported interface types.
 */
static void
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	/* NOTE(review): antenna info comes from the primary phy (dev->phy)
	 * even for a secondary phy — confirm this is intentional */
	wiphy->available_antennas_tx = dev->phy.antenna_mask;
	wiphy->available_antennas_rx = dev->phy.antenna_mask;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	/* software A-MSDU aggregation only when the hw doesn't offload it */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);
}
  276. struct mt76_phy *
  277. mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
  278. const struct ieee80211_ops *ops)
  279. {
  280. struct ieee80211_hw *hw;
  281. unsigned int phy_size;
  282. struct mt76_phy *phy;
  283. phy_size = ALIGN(sizeof(*phy), 8);
  284. hw = ieee80211_alloc_hw(size + phy_size, ops);
  285. if (!hw)
  286. return NULL;
  287. phy = hw->priv;
  288. phy->dev = dev;
  289. phy->hw = hw;
  290. phy->priv = hw->priv + phy_size;
  291. return phy;
  292. }
  293. EXPORT_SYMBOL_GPL(mt76_alloc_phy);
/* Initialize and register a secondary phy with mac80211.
 * The sequence is order-sensitive: band init must precede the OF
 * frequency-limit read and band validation, which must precede
 * ieee80211_register_hw().  On success the phy is published as
 * dev->phy2.  Returns 0 or a negative errno.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	mt76_phy_init(phy, phy->hw);

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* 5 GHz skips the first four rate entries — presumably
		 * the 2.4 GHz-only CCK rates; confirm against the
		 * driver's rate table */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	phy->dev->phy2 = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
  319. void mt76_unregister_phy(struct mt76_phy *phy)
  320. {
  321. struct mt76_dev *dev = phy->dev;
  322. mt76_tx_status_check(dev, NULL, true);
  323. ieee80211_unregister_hw(phy->hw);
  324. dev->phy2 = NULL;
  325. }
  326. EXPORT_SYMBOL_GPL(mt76_unregister_phy);
/* Allocate the mt76 device, its embedded primary phy and the mac80211
 * hw, and initialize all locks, queues and the ordered workqueue.
 * 'size' is extra driver-private space requested from mac80211.
 * Returns NULL on allocation failure (the hw is freed on the
 * workqueue-allocation error path).
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;

	/* the primary phy is embedded and shares the main hw */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);
	skb_queue_head_init(&dev->status_list);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);

	dev->tx_worker.fn = mt76_tx_worker;

	INIT_LIST_HEAD(&dev->txwi_cache);

	/* NOTE(review): iterates by q_rx count but indexes rx_skb —
	 * assumes both arrays have the same length; verify in mt76.h */
	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
  367. int mt76_register_device(struct mt76_dev *dev, bool vht,
  368. struct ieee80211_rate *rates, int n_rates)
  369. {
  370. struct ieee80211_hw *hw = dev->hw;
  371. struct mt76_phy *phy = &dev->phy;
  372. int ret;
  373. dev_set_drvdata(dev->dev, dev);
  374. mt76_phy_init(phy, hw);
  375. if (phy->cap.has_2ghz) {
  376. ret = mt76_init_sband_2g(phy, rates, n_rates);
  377. if (ret)
  378. return ret;
  379. }
  380. if (phy->cap.has_5ghz) {
  381. ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
  382. if (ret)
  383. return ret;
  384. }
  385. wiphy_read_of_freq_limits(hw->wiphy);
  386. mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
  387. mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
  388. if (IS_ENABLED(CONFIG_MT76_LEDS)) {
  389. ret = mt76_led_init(dev);
  390. if (ret)
  391. return ret;
  392. }
  393. ret = ieee80211_register_hw(hw);
  394. if (ret)
  395. return ret;
  396. WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
  397. sched_set_fifo_low(dev->tx_worker.task);
  398. return 0;
  399. }
  400. EXPORT_SYMBOL_GPL(mt76_register_device);
  401. void mt76_unregister_device(struct mt76_dev *dev)
  402. {
  403. struct ieee80211_hw *hw = dev->hw;
  404. if (IS_ENABLED(CONFIG_MT76_LEDS))
  405. mt76_led_cleanup(dev);
  406. mt76_tx_status_check(dev, NULL, true);
  407. ieee80211_unregister_hw(hw);
  408. }
  409. EXPORT_SYMBOL_GPL(mt76_unregister_device);
  410. void mt76_free_device(struct mt76_dev *dev)
  411. {
  412. mt76_worker_teardown(&dev->tx_worker);
  413. if (dev->wq) {
  414. destroy_workqueue(dev->wq);
  415. dev->wq = NULL;
  416. }
  417. ieee80211_free_hw(dev->hw);
  418. }
  419. EXPORT_SYMBOL_GPL(mt76_free_device);
/* Collect A-MSDU subframes belonging to one burst into a single skb
 * via its frag_list, releasing completed or interrupted bursts onto
 * the per-queue rx list.  State lives in dev->rx_amsdu[q]:
 * head = burst head skb, tail = next frag_list link, seqno = burst id.
 */
static void mt76_rx_release_burst(struct mt76_dev *dev, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct sk_buff *nskb = dev->rx_amsdu[q].head;

	/* first amsdu subframe: start a new burst with this skb as head */
	if (status->first_amsdu) {
		dev->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		dev->rx_amsdu[q].seqno = status->seqno;
		dev->rx_amsdu[q].head = skb;
		goto enqueue;
	}

	/* ampdu or out-of-order amsdu subframes: the burst is broken */
	if (!status->amsdu || status->seqno != dev->rx_amsdu[q].seqno) {
		/* release pending frames (the incomplete burst head) */
		if (dev->rx_amsdu[q].head)
			__skb_queue_tail(&dev->rx_skb[q],
					 dev->rx_amsdu[q].head);
		/* this non-matching skb is enqueued on its own */
		nskb = skb;
		goto reset_burst;
	}

	/* trailing amsdu subframes: chain onto the head's frag_list */
	*dev->rx_amsdu[q].tail = skb;
	if (!status->last_amsdu) {
		dev->rx_amsdu[q].tail = &skb->next;
		return;
	}

reset_burst:
	dev->rx_amsdu[q].head = NULL;
	dev->rx_amsdu[q].tail = NULL;

enqueue:
	/* nskb is the burst head on the first-subframe path, or the
	 * stand-alone skb on the broken-burst path */
	if (nskb)
		__skb_queue_tail(&dev->rx_skb[q], nskb);
}
/* Driver rx entry point: drop frames while the phy is not running,
 * account testmode rx statistics when enabled, then feed the frame
 * into the A-MSDU burst collector for queue q.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	/* testmode rx counters: total packets and FCS errors per queue */
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(dev, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
  472. bool mt76_has_tx_pending(struct mt76_phy *phy)
  473. {
  474. struct mt76_queue *q;
  475. int i;
  476. for (i = 0; i < __MT_TXQ_MAX; i++) {
  477. q = phy->q_tx[i];
  478. if (q && q->queued)
  479. return true;
  480. }
  481. return false;
  482. }
  483. EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
  484. static struct mt76_channel_state *
  485. mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
  486. {
  487. struct mt76_sband *msband;
  488. int idx;
  489. if (c->band == NL80211_BAND_2GHZ)
  490. msband = &phy->sband_2g;
  491. else
  492. msband = &phy->sband_5g;
  493. idx = c - &msband->sband.channels[0];
  494. return &msband->chan[idx];
  495. }
  496. void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
  497. {
  498. struct mt76_channel_state *state = phy->chan_state;
  499. state->cc_active += ktime_to_us(ktime_sub(time,
  500. phy->survey_time));
  501. phy->survey_time = time;
  502. }
  503. EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
  504. void mt76_update_survey(struct mt76_dev *dev)
  505. {
  506. ktime_t cur_time;
  507. if (dev->drv->update_survey)
  508. dev->drv->update_survey(dev);
  509. cur_time = ktime_get_boottime();
  510. mt76_update_survey_active_time(&dev->phy, cur_time);
  511. if (dev->phy2)
  512. mt76_update_survey_active_time(dev->phy2, cur_time);
  513. if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
  514. struct mt76_channel_state *state = dev->phy.chan_state;
  515. spin_lock_bh(&dev->cc_lock);
  516. state->cc_bss_rx += dev->cur_cc_bss_rx;
  517. dev->cur_cc_bss_rx = 0;
  518. spin_unlock_bh(&dev->cc_lock);
  519. }
  520. }
  521. EXPORT_SYMBOL_GPL(mt76_update_survey);
/* Switch the phy to the channel in hw->conf: wait (bounded) for tx to
 * drain, close out survey accounting on the old channel, then update
 * chandef/chan_state.  Off-channel visits keep main_chan unchanged;
 * stats of a channel other than the main one are reset on entry.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	/* give pending tx up to 200ms to drain before switching */
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(dev);

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	/* fresh stats for temporary (scan/off-channel) visits */
	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
/* mac80211 .get_survey callback: report per-channel survey data for
 * global channel index idx (2.4 GHz channels first, then 5 GHz).
 * Returns -ENOENT past the last channel, 0 otherwise.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);

	/* refresh counters once per survey dump (first index only) */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(dev);

	/* translate the flat index into (band, channel) */
	sband = &phy->sband_2g;
	if (idx >= sband->sband.n_channels) {
		idx -= sband->sband.n_channels;
		sband = &phy->sband_5g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* counters are kept in us; survey reports ms */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_lock protects the software-accounted bss-rx/tx counters */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
  586. void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
  587. struct ieee80211_key_conf *key)
  588. {
  589. struct ieee80211_key_seq seq;
  590. int i;
  591. wcid->rx_check_pn = false;
  592. if (!key)
  593. return;
  594. if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
  595. return;
  596. wcid->rx_check_pn = true;
  597. for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
  598. ieee80211_get_key_rx_seq(key, i, &seq);
  599. memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
  600. }
  601. }
  602. EXPORT_SYMBOL(mt76_wcid_key_setup);
/* Convert the driver's mt76_rx_status stored in skb->cb into the
 * mac80211 ieee80211_rx_status layout expected in the same cb area,
 * and resolve the destination hw and station for the frame.  A local
 * copy is taken first because both structs alias skb->cb.
 */
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct mt76_rx_status mstat;

	/* copy out before memset — mstat and status share skb->cb */
	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;

	/* compile-time guards that both layouts fit/match skb->cb */
	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.ext_phy);
}
/* Software CCMP packet-number replay check for decrypted frames.
 * Returns 0 when the frame may be accepted (or the check does not
 * apply), -EINVAL when the PN did not advance (replay).  On success
 * the stored per-TID PN is advanced and, for IV-stripped frames,
 * RX_FLAG_PN_VALIDATED is set so mac80211 skips its own check.
 */
static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		hdr = mt76_skb_get_hdr(skb);
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* big-endian PN bytes: memcmp order equals numeric order;
	 * <= 0 means the PN did not increase — treat as replay */
	ret = memcmp(status->iv, wcid->rx_key_pn[tidno],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}
  666. static void
  667. mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
  668. int len)
  669. {
  670. struct mt76_wcid *wcid = status->wcid;
  671. struct ieee80211_rx_status info = {
  672. .enc_flags = status->enc_flags,
  673. .rate_idx = status->rate_idx,
  674. .encoding = status->encoding,
  675. .band = status->band,
  676. .nss = status->nss,
  677. .bw = status->bw,
  678. };
  679. struct ieee80211_sta *sta;
  680. u32 airtime;
  681. u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
  682. airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
  683. spin_lock(&dev->cc_lock);
  684. dev->cur_cc_bss_rx += airtime;
  685. spin_unlock(&dev->cc_lock);
  686. if (!wcid || !wcid->sta)
  687. return;
  688. sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
  689. ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
  690. }
  691. static void
  692. mt76_airtime_flush_ampdu(struct mt76_dev *dev)
  693. {
  694. struct mt76_wcid *wcid;
  695. int wcid_idx;
  696. if (!dev->rx_ampdu_len)
  697. return;
  698. wcid_idx = dev->rx_ampdu_status.wcid_idx;
  699. if (wcid_idx < ARRAY_SIZE(dev->wcid))
  700. wcid = rcu_dereference(dev->wcid[wcid_idx]);
  701. else
  702. wcid = NULL;
  703. dev->rx_ampdu_status.wcid = wcid;
  704. mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
  705. dev->rx_ampdu_len = 0;
  706. dev->rx_ampdu_ref = 0;
  707. }
/* Software rx airtime accounting hook, called per frame.  Frames that
 * are part of an A-MPDU are accumulated (one airtime calculation per
 * burst); everything else is reported immediately.  Only active when
 * the driver sets MT_DRV_SW_RX_AIRTIME.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* only count frames addressed to us when no sta is known */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a non-ampdu frame or a new ampdu reference closes the
	 * previously accumulated burst */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* first subframe of a new burst: snapshot its
			 * status; 0xff marks "no wcid" */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
/* Per-frame station bookkeeping on the rx path: resolve the station
 * for PS-Poll frames, run airtime accounting, update RSSI/inactivity,
 * and track the station's powersave state (including U-APSD trigger
 * delivery), notifying the driver and mac80211 on PS transitions.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->ext_phy);
	/* hw may not have matched a wcid for PS-Poll; look the station
	 * up by transmitter address (802.11 frames only) */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* signal is stored as dBm (<= 0); the ewma takes the magnitude */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	/* 802.3-decapped frames carry no 802.11 PM bits */
	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* ignore fragments and frames that are neither mgmt nor data */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	/* QoS data/null with PM set acts as a U-APSD trigger frame */
	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no change in PS state: nothing further to do */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
	else
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);
	ieee80211_sta_ps_transition(sta, ps);
}
/* Deliver a batch of rx frames to mac80211.  A-MSDU subframes were
 * chained on the head skb's frag_list by mt76_rx_release_burst(); they
 * are unchained here and delivered individually.  Frames failing the
 * CCMP PN check are dropped.  With a napi context the resulting list
 * goes through GRO, otherwise through netif_receive_skb_list().
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* replay-check only the burst head; drop on failure
		 * (chained subframes are freed along with it) */
		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		/* detach the subframe chain before handing off the head */
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
  823. void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
  824. struct napi_struct *napi)
  825. {
  826. struct sk_buff_head frames;
  827. struct sk_buff *skb;
  828. __skb_queue_head_init(&frames);
  829. while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
  830. mt76_check_sta(dev, skb);
  831. mt76_rx_aggr_reorder(skb, &frames);
  832. }
  833. mt76_rx_complete(dev, &frames, napi);
  834. }
  835. EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
/*
 * Add a station: let the driver allocate its per-station state, wire the
 * station's TX queues to its WCID, then publish the WCID so the RX path
 * can look it up.  Returns 0 or the driver's negative error code.
 */
static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta, bool ext_phy)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* Point every per-TID TX queue of this station at its WCID */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid;
	}

	ewma_signal_init(&wcid->rssi);
	if (ext_phy)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->ext_phy = ext_phy;
	/* Publish last, once the WCID is fully initialized, so RCU readers
	 * never see a half-constructed entry */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
/*
 * Tear down a station: stop all RX aggregation sessions, let the driver
 * release its per-station state, flush pending TX status, and free the
 * WCID index from both allocation masks.
 *
 * NOTE(review): callers appear expected to hold dev->mutex (see the
 * mt76_sta_remove() wrapper below) — confirm against other call sites.
 */
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	/* Stop RX BA sessions for every TID before driver teardown */
	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	/* Flush any outstanding TX status entries for this WCID */
	mt76_tx_status_check(dev, wcid, true);
	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
  877. static void
  878. mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
  879. struct ieee80211_sta *sta)
  880. {
  881. mutex_lock(&dev->mutex);
  882. __mt76_sta_remove(dev, vif, sta);
  883. mutex_unlock(&dev->mutex);
  884. }
  885. int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  886. struct ieee80211_sta *sta,
  887. enum ieee80211_sta_state old_state,
  888. enum ieee80211_sta_state new_state)
  889. {
  890. struct mt76_phy *phy = hw->priv;
  891. struct mt76_dev *dev = phy->dev;
  892. bool ext_phy = phy != &dev->phy;
  893. if (old_state == IEEE80211_STA_NOTEXIST &&
  894. new_state == IEEE80211_STA_NONE)
  895. return mt76_sta_add(dev, vif, sta, ext_phy);
  896. if (old_state == IEEE80211_STA_AUTH &&
  897. new_state == IEEE80211_STA_ASSOC &&
  898. dev->drv->sta_assoc)
  899. dev->drv->sta_assoc(dev, vif, sta);
  900. if (old_state == IEEE80211_STA_NONE &&
  901. new_state == IEEE80211_STA_NOTEXIST)
  902. mt76_sta_remove(dev, vif, sta);
  903. return 0;
  904. }
  905. EXPORT_SYMBOL_GPL(mt76_sta_state);
  906. void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  907. struct ieee80211_sta *sta)
  908. {
  909. struct mt76_phy *phy = hw->priv;
  910. struct mt76_dev *dev = phy->dev;
  911. struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
  912. mutex_lock(&dev->mutex);
  913. rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
  914. mutex_unlock(&dev->mutex);
  915. }
  916. EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
  917. int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  918. int *dbm)
  919. {
  920. struct mt76_phy *phy = hw->priv;
  921. int n_chains = hweight8(phy->antenna_mask);
  922. int delta = mt76_tx_power_nss_delta(n_chains);
  923. *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
  924. return 0;
  925. }
  926. EXPORT_SYMBOL_GPL(mt76_get_txpower);
  927. static void
  928. __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
  929. {
  930. if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
  931. ieee80211_csa_finish(vif);
  932. }
  933. void mt76_csa_finish(struct mt76_dev *dev)
  934. {
  935. if (!dev->csa_complete)
  936. return;
  937. ieee80211_iterate_active_interfaces_atomic(dev->hw,
  938. IEEE80211_IFACE_ITER_RESUME_ALL,
  939. __mt76_csa_finish, dev);
  940. dev->csa_complete = 0;
  941. }
  942. EXPORT_SYMBOL_GPL(mt76_csa_finish);
  943. static void
  944. __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
  945. {
  946. struct mt76_dev *dev = priv;
  947. if (!vif->csa_active)
  948. return;
  949. dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
  950. }
  951. void mt76_csa_check(struct mt76_dev *dev)
  952. {
  953. ieee80211_iterate_active_interfaces_atomic(dev->hw,
  954. IEEE80211_IFACE_ITER_RESUME_ALL,
  955. __mt76_csa_check, dev);
  956. }
  957. EXPORT_SYMBOL_GPL(mt76_csa_check);
  958. int
  959. mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
  960. {
  961. return 0;
  962. }
  963. EXPORT_SYMBOL_GPL(mt76_set_tim);
  964. void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
  965. {
  966. struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
  967. int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
  968. u8 *hdr, *pn = status->iv;
  969. __skb_push(skb, 8);
  970. memmove(skb->data, skb->data + 8, hdr_len);
  971. hdr = skb->data + hdr_len;
  972. hdr[0] = pn[5];
  973. hdr[1] = pn[4];
  974. hdr[2] = 0;
  975. hdr[3] = 0x20 | (key_id << 6);
  976. hdr[4] = pn[3];
  977. hdr[5] = pn[2];
  978. hdr[6] = pn[1];
  979. hdr[7] = pn[0];
  980. status->flag &= ~RX_FLAG_IV_STRIPPED;
  981. }
  982. EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
  983. int mt76_get_rate(struct mt76_dev *dev,
  984. struct ieee80211_supported_band *sband,
  985. int idx, bool cck)
  986. {
  987. int i, offset = 0, len = sband->n_bitrates;
  988. if (cck) {
  989. if (sband == &dev->phy.sband_5g.sband)
  990. return 0;
  991. idx &= ~BIT(2); /* short preamble */
  992. } else if (sband == &dev->phy.sband_2g.sband) {
  993. offset = 4;
  994. }
  995. for (i = offset; i < len; i++) {
  996. if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
  997. return i;
  998. }
  999. return 0;
  1000. }
  1001. EXPORT_SYMBOL_GPL(mt76_get_rate);
  1002. void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  1003. const u8 *mac)
  1004. {
  1005. struct mt76_phy *phy = hw->priv;
  1006. set_bit(MT76_SCANNING, &phy->state);
  1007. }
  1008. EXPORT_SYMBOL_GPL(mt76_sw_scan);
  1009. void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
  1010. {
  1011. struct mt76_phy *phy = hw->priv;
  1012. clear_bit(MT76_SCANNING, &phy->state);
  1013. }
  1014. EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
  1015. int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
  1016. {
  1017. struct mt76_phy *phy = hw->priv;
  1018. struct mt76_dev *dev = phy->dev;
  1019. mutex_lock(&dev->mutex);
  1020. *tx_ant = phy->antenna_mask;
  1021. *rx_ant = phy->antenna_mask;
  1022. mutex_unlock(&dev->mutex);
  1023. return 0;
  1024. }
  1025. EXPORT_SYMBOL_GPL(mt76_get_antenna);
  1026. struct mt76_queue *
  1027. mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
  1028. int ring_base)
  1029. {
  1030. struct mt76_queue *hwq;
  1031. int err;
  1032. hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
  1033. if (!hwq)
  1034. return ERR_PTR(-ENOMEM);
  1035. err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
  1036. if (err < 0)
  1037. return ERR_PTR(err);
  1038. return hwq;
  1039. }
  1040. EXPORT_SYMBOL_GPL(mt76_init_queue);