mt76x02_txrx.c

// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/kernel.h>

#include "mt76x02.h"
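
/*
 * mac80211 .tx handler: select the hardware WCID for the destination
 * station (or the per-vif group WCID for non-station frames, falling back
 * to the global WCID) and hand the frame to the common mt76 TX path.
 */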
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76x02_dev *dev = hw->priv;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_wcid *wcid = &dev->mt76.global_wcid;

	if (control->sta) {
		struct mt76x02_sta *msta;

		msta = (struct mt76x02_sta *)control->sta->drv_priv;
		wcid = &msta->wcid;
	} else if (vif) {
		struct mt76x02_vif *mvif;

		mvif = (struct mt76x02_vif *)vif->drv_priv;
		wcid = &mvif->group_wcid;
	}

	mt76_tx(&dev->mphy, control->sta, wcid, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx);
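
/*
 * RX dispatch from the bus completion path: MCU queue frames are forwarded
 * to the MCU event handler; data frames have their RXWI descriptor stripped
 * and parsed, and are dropped on parse errors before reaching mt76_rx().
 */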
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			  struct sk_buff *skb)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	void *rxwi = skb->data;

	if (q == MT_RXQ_MCU) {
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	}

	skb_pull(skb, sizeof(struct mt76x02_rxwi));
	if (mt76x02_mac_process_rx(dev, skb, rxwi)) {
		dev_kfree_skb(skb);
		return;
	}

	mt76_rx(mdev, q, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);
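
/*
 * Look up the per-rate power limit from the calibrated rate power tables for
 * the given TX rate. VHT MCS 8/9 use a dedicated entry; other VHT rates
 * reuse the HT table indexed by (nss - 1) * 8 + mcs; legacy 2.4 GHz rates
 * pick the CCK or OFDM table based on the bitrate's preamble flag.
 */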
s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
				const struct ieee80211_tx_rate *rate)
{
	s8 max_txpwr;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		u8 mcs = ieee80211_rate_get_vht_mcs(rate);

		if (mcs == 8 || mcs == 9) {
			max_txpwr = dev->mt76.rate_power.vht[8];
		} else {
			u8 nss, idx;

			nss = ieee80211_rate_get_vht_nss(rate);
			idx = ((nss - 1) << 3) + mcs;
			max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
		}
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
	} else {
		enum nl80211_band band = dev->mphy.chandef.chan->band;

		if (band == NL80211_BAND_2GHZ) {
			const struct ieee80211_rate *r;
			struct wiphy *wiphy = dev->mt76.hw->wiphy;
			struct mt76_rate_power *rp = &dev->mt76.rate_power;

			r = &wiphy->bands[band]->bitrates[rate->idx];
			if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
				max_txpwr = rp->cck[r->hw_value & 0x3];
			else
				max_txpwr = rp->ofdm[r->hw_value & 0x7];
		} else {
			max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
		}
	}

	return max_txpwr;
}
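
/*
 * Convert the requested TX power into the adjustment value used by the
 * hardware: clamp to the configured power and the per-rate limit, subtract
 * the calibrated target power, then return 0 when TPC is disabled, 0..7 for
 * non-negative deltas, and (txpwr + 32) / 2 (saturating at 8 below -16) for
 * negative ones.
 */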
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{
	txpwr = min_t(s8, txpwr, dev->txpower_conf);
	txpwr -= (dev->target_power + dev->target_power_delta[0]);
	txpwr = min_t(s8, txpwr, max_txpwr_adj);

	if (!dev->enable_tpc)
		return 0;
	else if (txpwr >= 0)
		return min_t(s8, txpwr, 7);
	else
		return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
}
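
/*
 * Program the protection and auto TX power adjustment fields of
 * MT_PROT_AUTO_TX_CFG, using the adjustment computed against the OFDM
 * rate power entry ofdm[4].
 */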
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
{
	s8 txpwr_adj;

	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
					     dev->mt76.rate_power.ofdm[4]);
	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
		       MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
		       MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto);
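
/*
 * mt76 .tx_status_data callback: load one pending TX status entry from the
 * hardware and feed it into status reporting; returns false once no entry
 * is available.
 */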
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76x02_tx_status stat;

	if (!mt76x02_mac_load_tx_status(dev, &stat))
		return false;

	mt76x02_send_tx_status(dev, &stat, update);

	return true;
}
EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
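
/*
 * Per-frame TX preparation: write the TXWI descriptor, allocate a packet ID
 * for status tracking (encoding rate and AC when no skb is tracked), pick
 * the DMA queue selector, and set the 802.11/WIV info flags. Frames sent on
 * the PS queue first clear the drop bit of the target WCID, and the
 * station's average packet length EWMA is updated.
 */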
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
			   struct ieee80211_sta *sta,
			   struct mt76_tx_info *tx_info)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt76x02_txwi *txwi = txwi_ptr;
	bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
	int hdrlen, len, pid, qsel = MT_QSEL_EDCA;

	if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
		mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	len = tx_info->skb->len - (hdrlen & 2);
	mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	/* encode packet rate for no-skb packet id to fix up status reporting */
	if (pid == MT_PACKET_ID_NO_SKB)
		pid = MT_PACKET_ID_HAS_RATE |
		      (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX) |
		      FIELD_PREP(MT_PKTID_AC,
				 skb_get_queue_mapping(tx_info->skb));

	txwi->pktid = pid;

	if (mt76_is_skb_pktid(pid) && ampdu)
		qsel = MT_QSEL_MGMT;

	tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
			MT_TXD_INFO_80211;

	if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
		tx_info->info |= MT_TXD_INFO_WIV;

	if (sta) {
		struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;

		ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb);