sdio.c

// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"
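/* Allocate the entry ring for an RX queue and reset its head/tail bookkeeping. */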
static int
mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->head = q->tail = 0;
	q->queued = 0;

	return 0;
}
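/* Allocate a TX queue and its entry ring; both allocations are device-managed. */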
static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
{
	struct mt76_queue *q;

	q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_TX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return ERR_PTR(-ENOMEM);

	q->ndesc = MT_NUM_TX_ENTRIES;

	return q;
}
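/*
 * Allocate one TX queue per data queue up to MT_TXQ_PSD, plus a dedicated
 * queue for WM MCU commands.
 */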
static int mt76s_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		q = mt76s_alloc_tx_queue(dev);
		if (IS_ERR(q))
			return PTR_ERR(q);

		q->qid = i;
		dev->phy.q_tx[i] = q;
	}

	q = mt76s_alloc_tx_queue(dev);
	if (IS_ERR(q))
		return PTR_ERR(q);

	q->qid = MT_MCUQ_WM;
	dev->q_mcu[MT_MCUQ_WM] = q;

	return 0;
}
int mt76s_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76s_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76s_alloc_queues);
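/* Pop the oldest pending entry from an RX queue, or return NULL if it is empty. */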
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76_queue_entry *e = NULL;

	spin_lock_bh(&q->lock);
	if (q->queued > 0) {
		e = &q->entry[q->tail];
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

	return e;
}
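/*
 * Hand queued RX skbs to the driver's rx_skb() hook, then signal poll
 * completion on the main RX queue.
 */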
static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int nframes = 0;

	while (true) {
		struct mt76_queue_entry *e;

		if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
			break;

		e = mt76s_get_next_rx_entry(q);
		if (!e || !e->skb)
			break;

		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
		e->skb = NULL;
		nframes++;
	}

	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	return nframes;
}
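/* RX worker: keep looping over all RX queues until a full pass processes no frames. */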
static void mt76s_net_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      net_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes;

	do {
		nframes = 0;

		local_bh_disable();
		rcu_read_lock();

		mt76_for_each_q_rx(dev, i)
			nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

		rcu_read_unlock();
		local_bh_enable();
	} while (nframes > 0);
}
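/*
 * Reap completed TX entries from a queue: MCU skbs are freed directly,
 * while data frames are completed through mt76_queue_tx_complete() and
 * the queue is rescheduled afterwards.
 */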
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_queue_entry entry;
	int nframes = 0;
	bool mcu;

	if (!q)
		return 0;

	mcu = q == dev->q_mcu[MT_MCUQ_WM];
	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		if (mcu) {
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		mt76_queue_tx_complete(dev, q, &entry);
		nframes++;
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);

	if (!mcu)
		mt76_txq_schedule(&dev->phy, q->qid);

	return nframes;
}
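/*
 * TX completion worker: process MCU and data queue completions and, if
 * supported by the driver, schedule TX status stats collection.
 */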
static void mt76s_status_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      status_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes;

	do {
		nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

		for (i = 0; i <= MT_TXQ_PSD; i++)
			nframes += mt76s_process_tx_queue(dev,
							  dev->phy.q_tx[i]);

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
			queue_work(dev->wq, &dev->sdio.stat_work);
	} while (nframes > 0);
}
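/* Poll the driver's tx_status_data() hook until no more TX status data is pending. */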
static void mt76s_tx_status_data(struct work_struct *work)
{
	struct mt76_sdio *sdio;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	sdio = container_of(work, struct mt76_sdio, stat_work);
	dev = container_of(sdio, struct mt76_dev, sdio);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &sdio->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}
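/*
 * Queue a data frame for transmission: let the driver build the TX
 * descriptor via tx_prepare_skb(), then add the skb to the ring for the
 * txrx worker to push out.
 */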
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int err, len = skb->len;
	u16 idx = q->head;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	q->entry[q->head].skb = tx_info.skb;
	q->entry[q->head].buf_sz = len;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}
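/* Queue a raw (MCU) frame: pad it to a 4-byte boundary and add it to the ring. */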
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
		       struct sk_buff *skb, u32 tx_info)
{
	int ret = -ENOSPC, len = skb->len, pad;

	if (q->queued == q->ndesc)
		goto error;

	pad = round_up(skb->len, 4) - skb->len;
	ret = mt76_skb_adjust_pad(skb, pad);
	if (ret)
		goto error;

	spin_lock_bh(&q->lock);

	q->entry[q->head].buf_sz = len;
	q->entry[q->head].skb = skb;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);

	return ret;
}
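/* Wake the txrx worker so queued frames are pushed out over the SDIO bus. */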
static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_sdio *sdio = &dev->sdio;

	mt76_worker_schedule(&sdio->txrx_worker);
}

static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
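/*
 * Tear down the SDIO transport: stop the workers, release the SDIO IRQ
 * and free any skbs still left in the RX queues.
 */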
void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	mt76_worker_teardown(&sdio->txrx_worker);
	mt76_worker_teardown(&sdio->status_worker);
	mt76_worker_teardown(&sdio->net_worker);

	cancel_work_sync(&sdio->stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, NULL, true);

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);
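/*
 * Initialize the SDIO transport: spawn the status and net workers and
 * hook up the queue and bus operations for the given SDIO function.
 */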
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int err;

	err = mt76_worker_setup(dev->hw, &sdio->status_worker,
				mt76s_status_worker, "sdio-status");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
				"sdio-net");
	if (err)
		return err;

	sched_set_fifo_low(sdio->status_worker.task);
	sched_set_fifo_low(sdio->net_worker.task);

	INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);

	dev->queue_ops = &sdio_queue_ops;
	dev->bus = bus_ops;
	dev->sdio.func = func;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_init);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");