mt76x02_usb_core.c

// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include "mt76x02_usb.h"

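/* Each USB TX frame carries a TXWI descriptor plus MT_DMA_HDR_LEN of DMA/USB
 * header in front of the 802.11 frame; strip both on completion, along with
 * the 2-byte alignment pad that is present when the 802.11 header length is
 * not a multiple of 4.
 */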
static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
{
	int hdr_len;

	skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN);
	hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	if (hdr_len % 4)
		mt76x02_remove_hdr_pad(skb, 2);
}

void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	mt76x02u_remove_dma_hdr(e->skb);
	mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);

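/* Bring the MAC up for USB operation: enable TX, wait for WPDMA to become
 * ready, program the RX filter, then enable RX as well.
 */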
int mt76x02u_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_mac_reset_counters(dev);

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
		return -ETIMEDOUT;

	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);

	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	if (!mt76x02_wait_for_wpdma(&dev->mt76, 50))
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02u_mac_start);

int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
	u32 info, pad;

	/* Buffer layout:
	 *	|   4B   | xfer len |      pad       |  4B  |
	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
	 *
	 * length field of TXINFO should be set to 'xfer len'.
	 */
	info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
	       FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
	put_unaligned_le32(info, skb_push(skb, sizeof(info)));

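	/* Pad the payload up to a 4-byte boundary and append one extra zero
	 * dword, matching the trailing 4B of zeros in the layout above.
	 */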
	pad = round_up(skb->len, 4) + 4 - skb->len;
	return mt76_skb_adjust_pad(skb, pad);
}

int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
			    enum mt76_txq_id qid, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta,
			    struct mt76_tx_info *tx_info)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	int pid, len = tx_info->skb->len, ep = q2ep(dev->mphy.q_tx[qid]->hw_idx);
	struct mt76x02_txwi *txwi;
	bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
	enum mt76_qsel qsel;
	u32 flags;

	mt76_insert_hdr_pad(tx_info->skb);

	txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi));
	mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
	skb_push(tx_info->skb, sizeof(*txwi));

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	/* encode packet rate for no-skb packet id to fix up status reporting */
	if (pid == MT_PACKET_ID_NO_SKB)
		pid = MT_PACKET_ID_HAS_RATE |
		      (le16_to_cpu(txwi->rate) & MT_PKTID_RATE) |
		      FIELD_PREP(MT_PKTID_AC,
				 skb_get_queue_mapping(tx_info->skb));

	txwi->pktid = pid;

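	/* Frames with a status-tracking pktid inside an A-MPDU session, and
	 * anything going out on the HCCA endpoint, are tagged with the
	 * management queue selector; everything else uses EDCA.
	 */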
	if ((mt76_is_skb_pktid(pid) && ampdu) || ep == MT_EP_OUT_HCCA)
		qsel = MT_QSEL_MGMT;
	else
		qsel = MT_QSEL_EDCA;

	flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
		MT_TXD_INFO_80211;
	if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
		flags |= MT_TXD_INFO_WIV;

	if (sta) {
		struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;

		ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
	}

	return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);

/* Trigger pre-TBTT event 8 ms before TBTT */
#define PRE_TBTT_USEC 8000

/* Beacon SRAM memory is limited to 8 kB. We need to send PS buffered frames
 * (which can be up to 1500 bytes) via beacon memory, which limits the number
 * of slots to 5. TODO: dynamically calculate offsets in beacon SRAM.
 */
#define N_BCN_SLOTS 5

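/* Without a pre-TBTT hardware interrupt on the USB bus, beacon updates are
 * driven from an hrtimer armed to fire PRE_TBTT_USEC before the next TBTT,
 * which then schedules pre_tbtt_work.
 */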
static void mt76x02u_start_pre_tbtt_timer(struct mt76x02_dev *dev)
{
	u64 time;
	u32 tbtt;

	/* Get remaining TBTT in usec */
	tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
	tbtt *= 32;

	if (tbtt <= PRE_TBTT_USEC) {
		queue_work(system_highpri_wq, &dev->pre_tbtt_work);
		return;
	}

	time = (tbtt - PRE_TBTT_USEC) * 1000ull;
	hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
}

static void mt76x02u_restart_pre_tbtt_timer(struct mt76x02_dev *dev)
{
	u32 tbtt, dw0, dw1;
	u64 tsf, time;

	/* Get remaining TBTT in usec */
	tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
	tbtt *= 32;

	dw0 = mt76_rr(dev, MT_TSF_TIMER_DW0);
	dw1 = mt76_rr(dev, MT_TSF_TIMER_DW1);
	/* DW0 holds the low 32 bits of the TSF, DW1 the high 32 bits */
	tsf = (u64)dw1 << 32 | dw0;
	dev_dbg(dev->mt76.dev, "TSF: %llu us TBTT %u us\n", tsf, tbtt);

	/* Convert beacon interval in TU (1024 usec) to nsec */
	time = ((1000000000ull * dev->mt76.beacon_int) >> 10);

	/* Adjust time to trigger hrtimer 8ms before TBTT */
	if (tbtt < PRE_TBTT_USEC)
		time -= (PRE_TBTT_USEC - tbtt) * 1000ull;
	else
		time += (tbtt - PRE_TBTT_USEC) * 1000ull;

	hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
}

static void mt76x02u_stop_pre_tbtt_timer(struct mt76x02_dev *dev)
{
	do {
		hrtimer_cancel(&dev->pre_tbtt_timer);
		cancel_work_sync(&dev->pre_tbtt_work);
		/* Timer can be rearmed by work. */
	} while (hrtimer_active(&dev->pre_tbtt_timer));
}

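/* Runs shortly before TBTT: resync the beacon timer, mask all beacon slots
 * while they are rewritten, copy the current beacons and PS-buffered frames
 * into beacon SRAM, unmask the slots that were filled and rearm the timer.
 */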
static void mt76x02u_pre_tbtt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev =
		container_of(work, struct mt76x02_dev, pre_tbtt_work);
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int nbeacons;

	if (!dev->mt76.beacon_mask)
		return;

	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	mt76x02_resync_beacon_timer(dev);

	/* Prevent corrupt transmissions during update */
	mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
	dev->beacon_data_count = 0;

	ieee80211_iterate_active_interfaces(mt76_hw(dev),
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt76x02_update_beacon_iter, dev);

	mt76_csa_check(&dev->mt76);

	if (dev->mt76.csa_complete) {
		mt76_csa_finish(&dev->mt76);
		goto out;
	}

	nbeacons = hweight8(dev->mt76.beacon_mask);
	mt76x02_enqueue_buffered_bc(dev, &data, N_BCN_SLOTS - nbeacons);

	while ((skb = __skb_dequeue(&data.q)) != NULL)
		mt76x02_mac_set_beacon(dev, skb);

out:
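	/* Leave only the slots filled above enabled; keep bypassing the rest
	 * so stale SRAM contents are not transmitted.
	 */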
	mt76_wr(dev, MT_BCN_BYPASS_MASK,
		0xff00 | ~(0xff00 >> dev->beacon_data_count));

	mt76x02u_restart_pre_tbtt_timer(dev);
}

static enum hrtimer_restart mt76x02u_pre_tbtt_interrupt(struct hrtimer *timer)
{
	struct mt76x02_dev *dev =
		container_of(timer, struct mt76x02_dev, pre_tbtt_timer);

	queue_work(system_highpri_wq, &dev->pre_tbtt_work);

	return HRTIMER_NORESTART;
}

static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	if (en && dev->mt76.beacon_mask &&
	    !hrtimer_active(&dev->pre_tbtt_timer))
		mt76x02u_start_pre_tbtt_timer(dev);

	if (!en)
		mt76x02u_stop_pre_tbtt_timer(dev);
}

static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	if (WARN_ON_ONCE(!dev->mt76.beacon_int))
		return;

	if (en)
		mt76x02u_start_pre_tbtt_timer(dev);
}

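/* Register the USB beacon ops: the 8 kB beacon SRAM is split into N_BCN_SLOTS
 * slots, each rounded down to a 64-byte boundary, and beacon updates are
 * driven by the pre-TBTT hrtimer and work set up below.
 */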
void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
{
	static const struct mt76x02_beacon_ops beacon_ops = {
		.nslots = N_BCN_SLOTS,
		.slot_size = (8192 / N_BCN_SLOTS) & ~63,
		.pre_tbtt_enable = mt76x02u_pre_tbtt_enable,
		.beacon_enable = mt76x02u_beacon_enable,
	};

	dev->beacon_ops = &beacon_ops;

	hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
	INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);

	mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config);

void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
{
	if (!test_bit(MT76_REMOVED, &dev->mphy.state))
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_TIMER_EN |
			   MT_BEACON_TIME_CFG_SYNC_MODE |
			   MT_BEACON_TIME_CFG_TBTT_EN |
			   MT_BEACON_TIME_CFG_BEACON_TX);

	mt76x02u_stop_pre_tbtt_timer(dev);
}
EXPORT_SYMBOL_GPL(mt76x02u_exit_beacon_config);