/* mt76_connac_mac.c */
  1. // SPDX-License-Identifier: ISC
  2. /* Copyright (C) 2020 MediaTek Inc. */
  3. #include "mt76_connac.h"
  4. int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
  5. {
  6. struct mt76_dev *dev = phy->dev;
  7. if (!pm->enable)
  8. return 0;
  9. if (!mt76_is_mmio(dev))
  10. return 0;
  11. if (!test_bit(MT76_STATE_PM, &phy->state))
  12. return 0;
  13. if (test_bit(MT76_HW_SCANNING, &phy->state) ||
  14. test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
  15. return 0;
  16. if (queue_work(dev->wq, &pm->wake_work))
  17. reinit_completion(&pm->wake_cmpl);
  18. if (!wait_for_completion_timeout(&pm->wake_cmpl, 3 * HZ)) {
  19. ieee80211_wake_queues(phy->hw);
  20. return -ETIMEDOUT;
  21. }
  22. return 0;
  23. }
  24. EXPORT_SYMBOL_GPL(mt76_connac_pm_wake);
  25. void mt76_connac_power_save_sched(struct mt76_phy *phy,
  26. struct mt76_connac_pm *pm)
  27. {
  28. struct mt76_dev *dev = phy->dev;
  29. if (!mt76_is_mmio(dev))
  30. return;
  31. if (!pm->enable || !test_bit(MT76_STATE_RUNNING, &phy->state))
  32. return;
  33. pm->last_activity = jiffies;
  34. if (test_bit(MT76_HW_SCANNING, &phy->state) ||
  35. test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
  36. return;
  37. if (!test_bit(MT76_STATE_PM, &phy->state))
  38. queue_delayed_work(dev->wq, &pm->ps_work, pm->idle_timeout);
  39. }
  40. EXPORT_SYMBOL_GPL(mt76_connac_power_save_sched);
  41. void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm,
  42. struct mt76_wcid *wcid)
  43. {
  44. int i;
  45. spin_lock_bh(&pm->txq_lock);
  46. for (i = 0; i < IEEE80211_NUM_ACS; i++) {
  47. if (wcid && pm->tx_q[i].wcid != wcid)
  48. continue;
  49. dev_kfree_skb(pm->tx_q[i].skb);
  50. pm->tx_q[i].skb = NULL;
  51. }
  52. spin_unlock_bh(&pm->txq_lock);
  53. }
  54. EXPORT_SYMBOL_GPL(mt76_connac_free_pending_tx_skbs);
  55. void mt76_connac_pm_queue_skb(struct ieee80211_hw *hw,
  56. struct mt76_connac_pm *pm,
  57. struct mt76_wcid *wcid,
  58. struct sk_buff *skb)
  59. {
  60. int qid = skb_get_queue_mapping(skb);
  61. struct mt76_phy *phy = hw->priv;
  62. spin_lock_bh(&pm->txq_lock);
  63. if (!pm->tx_q[qid].skb) {
  64. ieee80211_stop_queues(hw);
  65. pm->tx_q[qid].wcid = wcid;
  66. pm->tx_q[qid].skb = skb;
  67. queue_work(phy->dev->wq, &pm->wake_work);
  68. } else {
  69. dev_kfree_skb(skb);
  70. }
  71. spin_unlock_bh(&pm->txq_lock);
  72. }
  73. EXPORT_SYMBOL_GPL(mt76_connac_pm_queue_skb);
  74. void mt76_connac_pm_dequeue_skbs(struct mt76_phy *phy,
  75. struct mt76_connac_pm *pm)
  76. {
  77. int i;
  78. spin_lock_bh(&pm->txq_lock);
  79. for (i = 0; i < IEEE80211_NUM_ACS; i++) {
  80. struct mt76_wcid *wcid = pm->tx_q[i].wcid;
  81. struct ieee80211_sta *sta = NULL;
  82. if (!pm->tx_q[i].skb)
  83. continue;
  84. if (wcid && wcid->sta)
  85. sta = container_of((void *)wcid, struct ieee80211_sta,
  86. drv_priv);
  87. mt76_tx(phy, sta, wcid, pm->tx_q[i].skb);
  88. pm->tx_q[i].skb = NULL;
  89. }
  90. spin_unlock_bh(&pm->txq_lock);
  91. mt76_worker_schedule(&phy->dev->tx_worker);
  92. }
  93. EXPORT_SYMBOL_GPL(mt76_connac_pm_dequeue_skbs);