// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
#include "mt76.h"

static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
	[MT76_TM_ATTR_RESET] = { .type = NLA_FLAG },
	[MT76_TM_ATTR_STATE] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 },
	/* MT76_TM_ATTR_TX_LENGTH is read in mt76_testmode_cmd(); validate it too */
	[MT76_TM_ATTR_TX_LENGTH] = { .type = NLA_U32 },
	[MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_RATE_SGI] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_POWER] = { .type = NLA_NESTED },
	[MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
	[MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
	[MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
	[MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
};
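
/*
 * Refill the TX queue with copies of the pre-built test frame. Called
 * from the TX worker; stops once the pending count is drained, the
 * queued-vs-done window exceeds tx_queued_limit (default 1000), or the
 * hardware queue is half full.
 */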
void mt76_testmode_tx_pending(struct mt76_phy *phy)
{
	struct mt76_testmode_data *td = &phy->test;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = &dev->global_wcid;
	struct sk_buff *skb = td->tx_skb;
	struct mt76_queue *q;
	u16 tx_queued_limit;
	int qid;

	if (!skb || !td->tx_pending)
		return;

	qid = skb_get_queue_mapping(skb);
	q = phy->q_tx[qid];

	tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;

	spin_lock_bh(&q->lock);

	while (td->tx_pending > 0 &&
	       td->tx_queued - td->tx_done < tx_queued_limit &&
	       q->queued < q->ndesc / 2) {
		int ret;

		ret = dev->queue_ops->tx_queue_skb(dev, q, skb_get(skb), wcid,
						   NULL);
		if (ret < 0)
			break;

		td->tx_pending--;
		td->tx_queued++;
	}

	dev->queue_ops->kick(dev, q);

	spin_unlock_bh(&q->lock);
}
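
/*
 * Build the template frame that mt76_testmode_tx_pending() clones: a
 * zero-filled data frame addressed to/from the PHY's own MAC address,
 * with rate, bandwidth and flags derived from the configured testmode
 * parameters. Legacy/HT/VHT rates are validated here; rate modes beyond
 * VHT are left for the driver to fill in.
 */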
static int
mt76_testmode_tx_init(struct mt76_phy *phy)
{
	struct mt76_testmode_data *td = &phy->test;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
		 IEEE80211_FCTL_FROMDS;
	struct ieee80211_tx_rate *rate;
	u8 max_nss = hweight8(phy->antenna_mask);
	bool ext_phy = phy != &phy->dev->phy;

	if (td->tx_antenna_mask)
		max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));

	skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	dev_kfree_skb(td->tx_skb);
	td->tx_skb = skb;
	hdr = __skb_put_zero(skb, td->tx_msdu_len);
	hdr->frame_control = cpu_to_le16(fc);
	memcpy(hdr->addr1, phy->macaddr, sizeof(phy->macaddr));
	memcpy(hdr->addr2, phy->macaddr, sizeof(phy->macaddr));
	memcpy(hdr->addr3, phy->macaddr, sizeof(phy->macaddr));

	info = IEEE80211_SKB_CB(skb);
	info->flags = IEEE80211_TX_CTL_INJECTED |
		      IEEE80211_TX_CTL_NO_ACK |
		      IEEE80211_TX_CTL_NO_PS_BUFFER;

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	if (td->tx_rate_mode > MT76_TM_TX_MODE_VHT)
		goto out;

	rate = &info->control.rates[0];
	rate->count = 1;
	rate->idx = td->tx_rate_idx;

	switch (td->tx_rate_mode) {
	case MT76_TM_TX_MODE_CCK:
		if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
			return -EINVAL;

		if (rate->idx > 4)
			return -EINVAL;
		break;
	case MT76_TM_TX_MODE_OFDM:
		if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
			break;

		if (rate->idx > 8)
			return -EINVAL;

		rate->idx += 4;
		break;
	case MT76_TM_TX_MODE_HT:
		if (rate->idx > 8 * max_nss &&
		    !(rate->idx == 32 &&
		      phy->chandef.width >= NL80211_CHAN_WIDTH_40))
			return -EINVAL;

		rate->flags |= IEEE80211_TX_RC_MCS;
		break;
	case MT76_TM_TX_MODE_VHT:
		if (rate->idx > 9)
			return -EINVAL;

		if (td->tx_rate_nss > max_nss)
			return -EINVAL;

		ieee80211_rate_set_vht(rate, td->tx_rate_idx, td->tx_rate_nss);
		rate->flags |= IEEE80211_TX_RC_VHT_MCS;
		break;
	default:
		break;
	}

	if (td->tx_rate_sgi)
		rate->flags |= IEEE80211_TX_RC_SHORT_GI;

	if (td->tx_rate_ldpc)
		info->flags |= IEEE80211_TX_CTL_LDPC;

	if (td->tx_rate_stbc)
		info->flags |= IEEE80211_TX_CTL_STBC;

	if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT) {
		switch (phy->chandef.width) {
		case NL80211_CHAN_WIDTH_40:
			rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
			break;
		case NL80211_CHAN_WIDTH_80:
			rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
			break;
		case NL80211_CHAN_WIDTH_80P80:
		case NL80211_CHAN_WIDTH_160:
			rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
			break;
		default:
			break;
		}
	}

out:
	skb_set_queue_mapping(skb, IEEE80211_AC_BE);
	return 0;
}
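
/*
 * Arm the TX burst (tx_start) and tear it down again (tx_stop).
 * Stopping clears the pending count with the worker disabled, then
 * waits up to MT76_TM_TIMEOUT seconds for the already-queued frames to
 * complete before freeing the template skb.
 */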
static void
mt76_testmode_tx_start(struct mt76_phy *phy)
{
	struct mt76_testmode_data *td = &phy->test;
	struct mt76_dev *dev = phy->dev;

	td->tx_queued = 0;
	td->tx_done = 0;
	td->tx_pending = td->tx_count;
	mt76_worker_schedule(&dev->tx_worker);
}

static void
mt76_testmode_tx_stop(struct mt76_phy *phy)
{
	struct mt76_testmode_data *td = &phy->test;
	struct mt76_dev *dev = phy->dev;

	mt76_worker_disable(&dev->tx_worker);

	td->tx_pending = 0;

	mt76_worker_enable(&dev->tx_worker);

	wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
			   MT76_TM_TIMEOUT * HZ);

	dev_kfree_skb(td->tx_skb);
	td->tx_skb = NULL;
}
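
/*
 * Track which testmode attributes userspace has explicitly configured,
 * one bit per attribute in td->param_set, so that the dump callback
 * only reports parameters that were actually set.
 */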
static inline void
mt76_testmode_param_set(struct mt76_testmode_data *td, u16 idx)
{
	td->param_set[idx / 32] |= BIT(idx % 32);
}

static inline bool
mt76_testmode_param_present(struct mt76_testmode_data *td, u16 idx)
{
	return td->param_set[idx / 32] & BIT(idx % 32);
}

static void
mt76_testmode_init_defaults(struct mt76_phy *phy)
{
	struct mt76_testmode_data *td = &phy->test;

	if (td->tx_msdu_len > 0)
		return;

	td->tx_msdu_len = 1024;
	td->tx_count = 1;
	td->tx_rate_mode = MT76_TM_TX_MODE_OFDM;
	td->tx_rate_nss = 1;
}
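
/*
 * State transitions: __mt76_testmode_set_state() stops any running TX
 * burst, prepares the new state's resources, and hands the transition
 * to the driver's set_state op. The exported wrapper additionally
 * requires the PHY to be running in monitor mode and routes non-trivial
 * transitions through MT76_TM_STATE_IDLE first.
 */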
static int
__mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
{
	enum mt76_testmode_state prev_state = phy->test.state;
	struct mt76_dev *dev = phy->dev;
	int err;

	if (prev_state == MT76_TM_STATE_TX_FRAMES)
		mt76_testmode_tx_stop(phy);

	if (state == MT76_TM_STATE_TX_FRAMES) {
		err = mt76_testmode_tx_init(phy);
		if (err)
			return err;
	}

	err = dev->test_ops->set_state(phy, state);
	if (err) {
		if (state == MT76_TM_STATE_TX_FRAMES)
			mt76_testmode_tx_stop(phy);

		return err;
	}

	if (state == MT76_TM_STATE_TX_FRAMES)
		mt76_testmode_tx_start(phy);
	else if (state == MT76_TM_STATE_RX_FRAMES)
		memset(&phy->test.rx_stats, 0, sizeof(phy->test.rx_stats));

	phy->test.state = state;

	return 0;
}

int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
{
	struct mt76_testmode_data *td = &phy->test;
	struct ieee80211_hw *hw = phy->hw;

	if (state == td->state && state == MT76_TM_STATE_OFF)
		return 0;

	if (state > MT76_TM_STATE_OFF &&
	    (!test_bit(MT76_STATE_RUNNING, &phy->state) ||
	     !(hw->conf.flags & IEEE80211_CONF_MONITOR)))
		return -ENOTCONN;

	if (state != MT76_TM_STATE_IDLE &&
	    td->state != MT76_TM_STATE_IDLE) {
		int ret;

		ret = __mt76_testmode_set_state(phy, MT76_TM_STATE_IDLE);
		if (ret)
			return ret;
	}

	return __mt76_testmode_set_state(phy, state);
}
EXPORT_SYMBOL(mt76_testmode_set_state);
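
/*
 * Fetch an optional u8 attribute and range-check it; leaves *dest
 * untouched when the attribute is absent.
 */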
static int
mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
{
	u8 val;

	if (!attr)
		return 0;

	val = nla_get_u8(attr);
	if (val < min || val > max)
		return -EINVAL;

	*dest = val;
	return 0;
}
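
/*
 * NL80211_CMD_TESTMODE handler: parse and validate the testmode
 * attributes under dev->mutex, record which parameters were set, give
 * the driver a chance to apply its own parameters, and finally switch
 * state if requested.
 */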
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_testmode_data *td = &phy->test;
	struct nlattr *tb[NUM_MT76_TM_ATTRS];
	bool ext_phy = phy != &dev->phy;
	u32 state;
	int err;
	int i;

	if (!dev->test_ops)
		return -EOPNOTSUPP;

	err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
				   mt76_tm_policy, NULL);
	if (err)
		return err;

	err = -EINVAL;

	mutex_lock(&dev->mutex);

	if (tb[MT76_TM_ATTR_RESET]) {
		mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
		memset(td, 0, sizeof(*td));
	}

	mt76_testmode_init_defaults(phy);

	if (tb[MT76_TM_ATTR_TX_COUNT])
		td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]);

	if (tb[MT76_TM_ATTR_TX_LENGTH]) {
		u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]);

		if (val > IEEE80211_MAX_FRAME_LEN ||
		    val < sizeof(struct ieee80211_hdr))
			goto out;

		td->tx_msdu_len = val;
	}

	if (tb[MT76_TM_ATTR_TX_RATE_IDX])
		td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]);

	if (mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_MODE], &td->tx_rate_mode,
			   0, MT76_TM_TX_MODE_MAX) ||
	    mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_NSS], &td->tx_rate_nss,
			   1, hweight8(phy->antenna_mask)) ||
	    mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_SGI], &td->tx_rate_sgi, 0, 2) ||
	    mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
	    mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_STBC], &td->tx_rate_stbc, 0, 1) ||
	    mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_LTF], &td->tx_ltf, 0, 2) ||
	    mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask,
			   1 << (ext_phy * 2), phy->antenna_mask << (ext_phy * 2)) ||
	    mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_SPE_IDX], &td->tx_spe_idx, 0, 27) ||
	    mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
			   &td->tx_duty_cycle, 0, 99) ||
	    mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
			   &td->tx_power_control, 0, 1))
		goto out;

	if (tb[MT76_TM_ATTR_TX_IPG])
		td->tx_ipg = nla_get_u32(tb[MT76_TM_ATTR_TX_IPG]);

	if (tb[MT76_TM_ATTR_TX_TIME])
		td->tx_time = nla_get_u32(tb[MT76_TM_ATTR_TX_TIME]);

	if (tb[MT76_TM_ATTR_FREQ_OFFSET])
		td->freq_offset = nla_get_u32(tb[MT76_TM_ATTR_FREQ_OFFSET]);

	if (tb[MT76_TM_ATTR_STATE]) {
		state = nla_get_u32(tb[MT76_TM_ATTR_STATE]);
		if (state > MT76_TM_STATE_MAX)
			goto out;
	} else {
		state = td->state;
	}

	if (tb[MT76_TM_ATTR_TX_POWER]) {
		struct nlattr *cur;
		int idx = 0;
		int rem;

		nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
			if (nla_len(cur) != 1 ||
			    idx >= ARRAY_SIZE(td->tx_power))
				goto out;

			td->tx_power[idx++] = nla_get_u8(cur);
		}
	}

	if (dev->test_ops->set_params) {
		err = dev->test_ops->set_params(phy, tb, state);
		if (err)
			goto out;
	}

	for (i = MT76_TM_ATTR_STATE; i < ARRAY_SIZE(tb); i++)
		if (tb[i])
			mt76_testmode_param_set(td, i);

	err = 0;
	if (tb[MT76_TM_ATTR_STATE])
		err = mt76_testmode_set_state(phy, state);

out:
	mutex_unlock(&dev->mutex);

	return err;
}
EXPORT_SYMBOL(mt76_testmode_cmd);
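
/*
 * Emit the common TX/RX counters into the stats nest; per-chain RX
 * counts are summed before reporting, and the driver may append its own
 * statistics via the dump_stats op.
 */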
static int
mt76_testmode_dump_stats(struct mt76_phy *phy, struct sk_buff *msg)
{
	struct mt76_testmode_data *td = &phy->test;
	struct mt76_dev *dev = phy->dev;
	u64 rx_packets = 0;
	u64 rx_fcs_error = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) {
		rx_packets += td->rx_stats.packets[i];
		rx_fcs_error += td->rx_stats.fcs_error[i];
	}

	if (nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_PENDING, td->tx_pending) ||
	    nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_QUEUED, td->tx_queued) ||
	    nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_DONE, td->tx_done) ||
	    nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_PACKETS, rx_packets,
			      MT76_TM_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_FCS_ERROR, rx_fcs_error,
			      MT76_TM_STATS_ATTR_PAD))
		return -EMSGSIZE;

	if (dev->test_ops->dump_stats)
		return dev->test_ops->dump_stats(phy, msg);

	return 0;
}
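
/*
 * NL80211_CMD_TESTMODE_DUMP handler: on a stats request, nest the
 * counters from mt76_testmode_dump_stats(); otherwise report the
 * current state and every parameter that userspace has configured.
 */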
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
		       struct netlink_callback *cb, void *data, int len)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_testmode_data *td = &phy->test;
	struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
	int err = 0;
	void *a;
	int i;

	if (!dev->test_ops)
		return -EOPNOTSUPP;

	if (cb->args[2]++ > 0)
		return -ENOENT;

	if (data) {
		err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
					   mt76_tm_policy, NULL);
		if (err)
			return err;
	}

	mutex_lock(&dev->mutex);

	if (tb[MT76_TM_ATTR_STATS]) {
		err = -EINVAL;

		a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
		if (a) {
			err = mt76_testmode_dump_stats(phy, msg);
			nla_nest_end(msg, a);
		}

		goto out;
	}

	mt76_testmode_init_defaults(phy);

	err = -EMSGSIZE;
	if (nla_put_u32(msg, MT76_TM_ATTR_STATE, td->state))
		goto out;

	if (dev->test_mtd.name &&
	    (nla_put_string(msg, MT76_TM_ATTR_MTD_PART, dev->test_mtd.name) ||
	     nla_put_u32(msg, MT76_TM_ATTR_MTD_OFFSET, dev->test_mtd.offset)))
		goto out;

	if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
	    nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) ||
	    nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
	    nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
	    nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
	    nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
	    nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
	    nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
	    (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
	     nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
	    (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_ANTENNA) &&
	     nla_put_u8(msg, MT76_TM_ATTR_TX_ANTENNA, td->tx_antenna_mask)) ||
	    (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_SPE_IDX) &&
	     nla_put_u8(msg, MT76_TM_ATTR_TX_SPE_IDX, td->tx_spe_idx)) ||
	    (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_DUTY_CYCLE) &&
	     nla_put_u8(msg, MT76_TM_ATTR_TX_DUTY_CYCLE, td->tx_duty_cycle)) ||
	    (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_IPG) &&
	     nla_put_u32(msg, MT76_TM_ATTR_TX_IPG, td->tx_ipg)) ||
	    (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_TIME) &&
	     nla_put_u32(msg, MT76_TM_ATTR_TX_TIME, td->tx_time)) ||
	    (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) &&
	     nla_put_u8(msg, MT76_TM_ATTR_TX_POWER_CONTROL, td->tx_power_control)) ||
	    (mt76_testmode_param_present(td, MT76_TM_ATTR_FREQ_OFFSET) &&
	     /* freq_offset is a u32 attribute; dumping it as a u8 would truncate */
	     nla_put_u32(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
		goto out;

	if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
		a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
		if (!a)
			goto out;

		for (i = 0; i < ARRAY_SIZE(td->tx_power); i++)
			if (nla_put_u8(msg, i, td->tx_power[i]))
				goto out;

		nla_nest_end(msg, a);
	}

	err = 0;

out:
	mutex_unlock(&dev->mutex);

	return err;
}
EXPORT_SYMBOL(mt76_testmode_dump);