// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt76_connac.h"
  4. int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
  5. {
  6. struct mt76_dev *dev = phy->dev;
  7. if (!pm->enable)
  8. return 0;
  9. if (!mt76_is_mmio(dev))
  10. return 0;
  11. cancel_delayed_work_sync(&pm->ps_work);
  12. if (!test_bit(MT76_STATE_PM, &phy->state))
  13. return 0;
  14. queue_work(dev->wq, &pm->wake_work);
  15. if (!wait_event_timeout(pm->wait,
  16. !test_bit(MT76_STATE_PM, &phy->state),
  17. 3 * HZ)) {
  18. ieee80211_wake_queues(phy->hw);
  19. return -ETIMEDOUT;
  20. }
  21. return 0;
  22. }
  23. EXPORT_SYMBOL_GPL(mt76_connac_pm_wake);
  24. void mt76_connac_power_save_sched(struct mt76_phy *phy,
  25. struct mt76_connac_pm *pm)
  26. {
  27. struct mt76_dev *dev = phy->dev;
  28. if (!mt76_is_mmio(dev))
  29. return;
  30. if (!pm->enable)
  31. return;
  32. pm->last_activity = jiffies;
  33. if (!test_bit(MT76_STATE_PM, &phy->state)) {
  34. cancel_delayed_work(&phy->mac_work);
  35. queue_delayed_work(dev->wq, &pm->ps_work, pm->idle_timeout);
  36. }
  37. }
  38. EXPORT_SYMBOL_GPL(mt76_connac_power_save_sched);
  39. void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm,
  40. struct mt76_wcid *wcid)
  41. {
  42. int i;
  43. spin_lock_bh(&pm->txq_lock);
  44. for (i = 0; i < IEEE80211_NUM_ACS; i++) {
  45. if (wcid && pm->tx_q[i].wcid != wcid)
  46. continue;
  47. dev_kfree_skb(pm->tx_q[i].skb);
  48. pm->tx_q[i].skb = NULL;
  49. }
  50. spin_unlock_bh(&pm->txq_lock);
  51. }
  52. EXPORT_SYMBOL_GPL(mt76_connac_free_pending_tx_skbs);
  53. void mt76_connac_pm_queue_skb(struct ieee80211_hw *hw,
  54. struct mt76_connac_pm *pm,
  55. struct mt76_wcid *wcid,
  56. struct sk_buff *skb)
  57. {
  58. int qid = skb_get_queue_mapping(skb);
  59. struct mt76_phy *phy = hw->priv;
  60. spin_lock_bh(&pm->txq_lock);
  61. if (!pm->tx_q[qid].skb) {
  62. ieee80211_stop_queues(hw);
  63. pm->tx_q[qid].wcid = wcid;
  64. pm->tx_q[qid].skb = skb;
  65. queue_work(phy->dev->wq, &pm->wake_work);
  66. } else {
  67. dev_kfree_skb(skb);
  68. }
  69. spin_unlock_bh(&pm->txq_lock);
  70. }
  71. EXPORT_SYMBOL_GPL(mt76_connac_pm_queue_skb);
  72. void mt76_connac_pm_dequeue_skbs(struct mt76_phy *phy,
  73. struct mt76_connac_pm *pm)
  74. {
  75. int i;
  76. spin_lock_bh(&pm->txq_lock);
  77. for (i = 0; i < IEEE80211_NUM_ACS; i++) {
  78. struct mt76_wcid *wcid = pm->tx_q[i].wcid;
  79. struct ieee80211_sta *sta = NULL;
  80. if (!pm->tx_q[i].skb)
  81. continue;
  82. if (wcid && wcid->sta)
  83. sta = container_of((void *)wcid, struct ieee80211_sta,
  84. drv_priv);
  85. mt76_tx(phy, sta, wcid, pm->tx_q[i].skb);
  86. pm->tx_q[i].skb = NULL;
  87. }
  88. spin_unlock_bh(&pm->txq_lock);
  89. mt76_worker_schedule(&phy->dev->tx_worker);
  90. }
  91. EXPORT_SYMBOL_GPL(mt76_connac_pm_dequeue_skbs);