mt76x02_mmio.c

// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "trace.h"
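
/*
 * Pre-TBTT tasklet: resync the beacon timer, rewrite the beacon slots for
 * all active interfaces and push buffered broadcast/multicast frames to
 * the PSD queue ahead of the next TBTT.
 */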
static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
{
	struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet);
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i;

	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	mt76x02_resync_beacon_timer(dev);

	/* Prevent corrupt transmissions during update */
	mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
	dev->beacon_data_count = 0;

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	mt76_wr(dev, MT_BCN_BYPASS_MASK,
		0xff00 | ~(0xff00 >> dev->beacon_data_count));

	mt76_csa_check(mdev);
	if (mdev->csa_complete)
		return;

	mt76x02_enqueue_buffered_bc(dev, &data, 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;
		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL);
	}
	spin_unlock_bh(&q->lock);
}

static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	if (en)
		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	else
		tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
}

static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
	if (en)
		mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
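
/*
 * Beacon setup for the MMIO (PCIe) flavour of the driver: 8 beacon slots
 * of 1024 bytes each, with the pre-TBTT interrupt fired 8 ms ahead of TBTT.
 */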
void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
{
	static const struct mt76x02_beacon_ops beacon_ops = {
		.nslots = 8,
		.slot_size = 1024,
		.pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
		.beacon_enable = mt76x02e_beacon_enable,
	};

	dev->beacon_ops = &beacon_ops;

	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
		       8 << 4);
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
		       MT_DFS_GP_INTERVAL);
	mt76_wr(dev, MT_INT_TIMER_EN, 0);

	mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int err;

	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
			       MT_RX_RING_BASE);
	if (err < 0)
		return err;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

static void mt76x02_tx_worker(struct mt76_worker *w)
{
	struct mt76x02_dev *dev;

	dev = container_of(w, struct mt76x02_dev, mt76.tx_worker);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_process_tx_status_fifo(dev);

	mt76_txq_schedule_all(&dev->mphy);
}
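
/*
 * TX completion NAPI handler: reap TX status, clean up the MCU and data
 * queues, re-enable the TX-done interrupts and kick the tx worker.
 */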
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
					       mt76.tx_napi);
	int i;

	mt76x02_mac_poll_tx_status(dev, false);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
	for (i = MT_TXQ_PSD; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

	if (napi_complete_done(napi, 0))
		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
	for (i = MT_TXQ_PSD; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	return 0;
}
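
/*
 * Allocate the TX status FIFO, bring up all TX, MCU and RX DMA rings and
 * register the TX completion NAPI handler.
 */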
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	dev->mt76.tx_worker.fn = mt76x02_tx_worker;
	tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet);

	spin_lock_init(&dev->txstatus_fifo_lock);
	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
					 MT76x02_TX_RING_SIZE,
					 MT_TX_RING_BASE);
		if (ret)
			return ret;
	}

	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
				 MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU,
				  MT_MCU_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev,
			   MT_INT_TX_DONE(IEEE80211_AC_VO) |
			   MT_INT_TX_DONE(IEEE80211_AC_VI) |
			   MT_INT_TX_DONE(IEEE80211_AC_BE) |
			   MT_INT_TX_DONE(IEEE80211_AC_BK) |
			   MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) |
			   MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU));

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev);
	if (ret)
		return ret;

	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
			  mt76x02_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);
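
/*
 * Top-half interrupt handler: acknowledge the pending sources, mask the
 * ones that are handled in deferred context and schedule the matching
 * NAPI instances and tasklets.
 */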
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr, mask;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	mask = intr & (MT_INT_RX_DONE_ALL | MT_INT_GPTIMER);
	if (intr & (MT_INT_TX_DONE_ALL | MT_INT_TX_STAT))
		mask |= MT_INT_TX_DONE_ALL;

	mt76x02_irq_disable(dev, mask);

	if (intr & MT_INT_RX_DONE(0))
		napi_schedule(&dev->mt76.napi[0]);

	if (intr & MT_INT_RX_DONE(1))
		napi_schedule(&dev->mt76.napi[1]);

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]);
	}

	if (intr & MT_INT_TX_STAT)
		mt76x02_mac_poll_tx_status(dev, true);

	if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL))
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & MT_INT_GPTIMER)
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}
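
/*
 * Stop TX/RX DMA, preserving the burst size, endianness and header
 * segment length configuration.
 */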
void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_mac_reset_counters(dev);
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);
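
/*
 * Detect a stalled TX ring: returns true if any non-empty AC queue has not
 * advanced its DMA index since the previous check.
 */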
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mphy.q_tx[i];

		if (!q->queued)
			continue;

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (prev_dma_idx == dma_idx)
			break;
	}

	return i < 4;
}

static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key, void *data)
{
	struct mt76x02_dev *dev = hw->priv;
	struct mt76_wcid *wcid;

	if (!sta)
		return;

	wcid = (struct mt76_wcid *)sta->drv_priv;

	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
		return;

	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}
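
/*
 * Drop driver state before a full restart: sync packet numbers for
 * hardware keys, remove all stations and clear the vif and beacon masks.
 */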
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);

	rcu_read_lock();
	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
	rcu_read_unlock();

	for (i = 0; i < MT76x02_N_WCIDS; i++) {
		struct ieee80211_sta *sta;
		struct ieee80211_vif *vif;
		struct mt76x02_sta *msta;
		struct mt76_wcid *wcid;
		void *priv;

		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
				lockdep_is_held(&dev->mt76.mutex));
		if (!wcid)
			continue;

		rcu_assign_pointer(dev->mt76.wcid[i], NULL);

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta, drv_priv);

		priv = msta->vif;
		vif = container_of(priv, struct ieee80211_vif, drv_priv);

		__mt76_sta_remove(&dev->mt76, vif, sta);
		memset(msta, 0, sizeof(*msta));
	}

	dev->mt76.vif_mask = 0;
	dev->mt76.beacon_mask = 0;
}
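
/*
 * Full watchdog recovery: quiesce TX/RX processing, reset the MAC and DMA
 * engines, optionally restart the MCU and bring the queues back up.
 */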
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	bool restart = dev->mt76.mcu_ops->mcu_restart;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mphy.state);

	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.tx_napi);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_disable(&dev->mt76.napi[i]);
	}

	mutex_lock(&dev->mt76.mutex);

	dev->mcu_timeout = 0;
	if (restart)
		mt76x02_reset_state(dev);

	if (dev->mt76.beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	if (restart)
		mt76_mcu_restart(dev);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		mt76_queue_rx_reset(dev, i);
	}

	mt76x02_mac_start(dev);

	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->mt76.beacon_mask && !restart)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mphy.state);

	mt76_worker_enable(&dev->mt76.tx_worker);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	if (restart) {
		set_bit(MT76_RESTART, &dev->mphy.state);
		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
		ieee80211_restart_hw(dev->mt76.hw);
	} else {
		ieee80211_wake_queues(dev->mt76.hw);
		mt76_txq_schedule_all(&dev->mphy);
	}
}

void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
			       enum ieee80211_reconfig_type reconfig_type)
{
	struct mt76x02_dev *dev = hw->priv;

	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
		return;

	clear_bit(MT76_RESTART, &dev->mphy.state);
}
EXPORT_SYMBOL_GPL(mt76x02_reconfig_complete);
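
/*
 * Trigger a watchdog reset after MT_TX_HANG_TH consecutive stalled checks
 * or after an MCU command timeout.
 */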
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (test_bit(MT76_RESTART, &dev->mphy.state))
		return;

	if (mt76x02_tx_hang(dev)) {
		if (++dev->tx_hang_check >= MT_TX_HANG_TH)
			goto restart;
	} else {
		dev->tx_hang_check = 0;
	}

	if (dev->mcu_timeout)
		goto restart;

	return;

restart:
	mt76x02_watchdog_reset(dev);

	dev->tx_hang_reset++;
	dev->tx_hang_check = 0;
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}
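
/* Periodic watchdog work, rescheduled every MT_WATCHDOG_TIME */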
void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}