mt76_connac_mcu.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121
  1. // SPDX-License-Identifier: ISC
  2. /* Copyright (C) 2020 MediaTek Inc. */
  3. #include "mt76_connac_mcu.h"
  4. int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
  5. {
  6. struct {
  7. __le32 option;
  8. __le32 addr;
  9. } req = {
  10. .option = cpu_to_le32(option),
  11. .addr = cpu_to_le32(addr),
  12. };
  13. return mt76_mcu_send_msg(dev, MCU_CMD_FW_START_REQ, &req, sizeof(req),
  14. true);
  15. }
  16. EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_firmware);
  17. int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get)
  18. {
  19. u32 op = get ? PATCH_SEM_GET : PATCH_SEM_RELEASE;
  20. struct {
  21. __le32 op;
  22. } req = {
  23. .op = cpu_to_le32(op),
  24. };
  25. return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_SEM_CONTROL, &req,
  26. sizeof(req), true);
  27. }
  28. EXPORT_SYMBOL_GPL(mt76_connac_mcu_patch_sem_ctrl);
  29. int mt76_connac_mcu_start_patch(struct mt76_dev *dev)
  30. {
  31. struct {
  32. u8 check_crc;
  33. u8 reserved[3];
  34. } req = {
  35. .check_crc = 0,
  36. };
  37. return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_FINISH_REQ, &req,
  38. sizeof(req), true);
  39. }
  40. EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_patch);
  41. #define MCU_PATCH_ADDRESS 0x200000
  42. int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
  43. u32 mode)
  44. {
  45. struct {
  46. __le32 addr;
  47. __le32 len;
  48. __le32 mode;
  49. } req = {
  50. .addr = cpu_to_le32(addr),
  51. .len = cpu_to_le32(len),
  52. .mode = cpu_to_le32(mode),
  53. };
  54. int cmd;
  55. if (is_mt7921(dev) &&
  56. (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000))
  57. cmd = MCU_CMD_PATCH_START_REQ;
  58. else
  59. cmd = MCU_CMD_TARGET_ADDRESS_LEN_REQ;
  60. return mt76_mcu_send_msg(dev, cmd, &req, sizeof(req), true);
  61. }
  62. EXPORT_SYMBOL_GPL(mt76_connac_mcu_init_download);
/* Push the regulatory channel domain (alpha2 plus the list of enabled 2 GHz
 * and 5 GHz channels) to the MCU.
 *
 * The message layout is: header (alpha2, bandwidth caps, channel counts)
 * followed by one mt76_connac_mcu_chan entry per enabled channel. The header
 * is written last via __skb_push() because the channel counts are only known
 * after iterating both bands.
 *
 * Returns 0 on success or a negative error code.
 */
int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_connac_mcu_channel_domain {
		u8 alpha2[4]; /* regulatory_request.alpha2 */
		u8 bw_2g; /* BW_20_40M		0
			   * BW_20M		1
			   * BW_20_40_80M	2
			   * BW_20_40_80_160M	3
			   * BW_20_40_80_8080M	4
			   */
		u8 bw_5g;
		__le16 pad;
		u8 n_2ch;
		u8 n_5ch;
		__le16 pad2;
	} __packed hdr = {
		/* 2 GHz: 20/40 MHz; 5 GHz: up to 160 MHz (see table above) */
		.bw_2g = 0,
		.bw_5g = 3,
	};
	struct mt76_connac_mcu_chan {
		__le16 hw_value;
		__le16 pad;
		__le32 flags;
	} __packed channel;
	int len, i, n_max_channels, n_2ch = 0, n_5ch = 0;
	struct ieee80211_channel *chan;
	struct sk_buff *skb;

	/* Worst case: every channel of both bands is enabled */
	n_max_channels = phy->sband_2g.sband.n_channels +
			 phy->sband_5g.sband.n_channels;
	len = sizeof(hdr) + n_max_channels * sizeof(channel);

	skb = mt76_mcu_msg_alloc(dev, NULL, len);
	if (!skb)
		return -ENOMEM;

	/* Leave headroom for the header, pushed after the counts are known */
	skb_reserve(skb, sizeof(hdr));

	for (i = 0; i < phy->sband_2g.sband.n_channels; i++) {
		chan = &phy->sband_2g.sband.channels[i];
		if (chan->flags & IEEE80211_CHAN_DISABLED)
			continue;

		channel.hw_value = cpu_to_le16(chan->hw_value);
		channel.flags = cpu_to_le32(chan->flags);
		channel.pad = 0;

		skb_put_data(skb, &channel, sizeof(channel));
		n_2ch++;
	}
	for (i = 0; i < phy->sband_5g.sband.n_channels; i++) {
		chan = &phy->sband_5g.sband.channels[i];
		if (chan->flags & IEEE80211_CHAN_DISABLED)
			continue;

		channel.hw_value = cpu_to_le16(chan->hw_value);
		channel.flags = cpu_to_le32(chan->flags);
		channel.pad = 0;

		skb_put_data(skb, &channel, sizeof(channel));
		n_5ch++;
	}

	BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(hdr.alpha2));
	memcpy(hdr.alpha2, dev->alpha2, sizeof(dev->alpha2));
	hdr.n_2ch = n_2ch;
	hdr.n_5ch = n_5ch;

	/* Prepend the now-complete header into the reserved headroom */
	memcpy(__skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));

	return mt76_mcu_skb_send_msg(dev, skb, MCU_CMD_SET_CHAN_DOMAIN, false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_channel_domain);
/* Enable or disable the MAC for the given band via MAC_INIT_CTRL.
 *
 * @dev: mt76 device
 * @band: band index the request applies to
 * @enable: true to enable the MAC, false to disable it
 * @hdr_trans: NOTE(review): accepted but never used in this body — confirm
 *             whether a header-translation request is supposed to be sent
 *             here or whether the parameter is kept only for API symmetry.
 *
 * Returns 0 on success or a negative error code.
 */
int mt76_connac_mcu_set_mac_enable(struct mt76_dev *dev, int band, bool enable,
				   bool hdr_trans)
{
	struct {
		u8 enable;
		u8 band;
		u8 rsv[2];
	} __packed req_mac = {
		.enable = enable,
		.band = band,
	};

	return mt76_mcu_send_msg(dev, MCU_EXT_CMD_MAC_INIT_CTRL, &req_mac,
				 sizeof(req_mac), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_mac_enable);
  141. int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
  142. {
  143. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  144. struct {
  145. u8 bss_idx;
  146. u8 ps_state; /* 0: device awake
  147. * 1: static power save
  148. * 2: dynamic power saving
  149. */
  150. } req = {
  151. .bss_idx = mvif->idx,
  152. .ps_state = vif->bss_conf.ps ? 2 : 0,
  153. };
  154. if (vif->type != NL80211_IFTYPE_STATION)
  155. return -EOPNOTSUPP;
  156. return mt76_mcu_send_msg(dev, MCU_CMD_SET_PS_PROFILE, &req,
  157. sizeof(req), false);
  158. }
  159. EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_vif_ps);
  160. int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band)
  161. {
  162. struct {
  163. u8 prot_idx;
  164. u8 band;
  165. u8 rsv[2];
  166. __le32 len_thresh;
  167. __le32 pkt_thresh;
  168. } __packed req = {
  169. .prot_idx = 1,
  170. .band = band,
  171. .len_thresh = cpu_to_le32(val),
  172. .pkt_thresh = cpu_to_le32(0x2),
  173. };
  174. return mt76_mcu_send_msg(dev, MCU_EXT_CMD_PROTECT_CTRL, &req,
  175. sizeof(req), true);
  176. }
  177. EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rts_thresh);
  178. void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
  179. struct ieee80211_vif *vif)
  180. {
  181. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  182. struct mt76_connac_beacon_loss_event *event = priv;
  183. if (mvif->idx != event->bss_idx)
  184. return;
  185. if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
  186. return;
  187. ieee80211_beacon_loss(vif);
  188. }
  189. EXPORT_SYMBOL_GPL(mt76_connac_mcu_beacon_loss_iter);
  190. struct tlv *
  191. mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
  192. void *sta_ntlv, void *sta_wtbl)
  193. {
  194. struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
  195. struct tlv *sta_hdr = sta_wtbl;
  196. struct tlv *ptlv, tlv = {
  197. .tag = cpu_to_le16(tag),
  198. .len = cpu_to_le16(len),
  199. };
  200. u16 ntlv;
  201. ptlv = skb_put(skb, len);
  202. memcpy(ptlv, &tlv, sizeof(tlv));
  203. ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
  204. ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
  205. if (sta_hdr) {
  206. u16 size = le16_to_cpu(sta_hdr->len);
  207. sta_hdr->len = cpu_to_le16(size + len);
  208. }
  209. return ptlv;
  210. }
  211. EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);
  212. struct sk_buff *
  213. mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
  214. struct mt76_wcid *wcid)
  215. {
  216. struct sta_req_hdr hdr = {
  217. .bss_idx = mvif->idx,
  218. .muar_idx = wcid ? mvif->omac_idx : 0,
  219. .is_tlv_append = 1,
  220. };
  221. struct sk_buff *skb;
  222. mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
  223. &hdr.wlan_idx_hi);
  224. skb = mt76_mcu_msg_alloc(dev, NULL, MT76_CONNAC_STA_UPDATE_MAX_SIZE);
  225. if (!skb)
  226. return ERR_PTR(-ENOMEM);
  227. skb_put_data(skb, &hdr, sizeof(hdr));
  228. return skb;
  229. }
  230. EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_sta_req);
/* Append a wtbl request header to *skb, allocating the skb if needed.
 *
 * @dev: mt76 device
 * @wcid: target station entry for wlan_idx lookup
 * @cmd: wtbl operation (e.g. WTBL_SET)
 * @sta_wtbl: optional outer STA_REC_WTBL TLV; when set, its length is
 *            initialized to the wtbl header size
 * @skb: in/out message buffer; when *skb is NULL a new message of
 *       MT76_CONNAC_WTBL_UPDATE_MAX_SIZE is allocated and stored back
 *
 * Returns a pointer to the appended header inside *skb, or
 * ERR_PTR(-ENOMEM) if the allocation failed (only possible when *skb was
 * NULL on entry; an existing *skb is left untouched by the error path).
 */
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
			       int cmd, void *sta_wtbl, struct sk_buff **skb)
{
	struct tlv *sta_hdr = sta_wtbl;
	struct wtbl_req_hdr hdr = {
		.operation = cmd,
	};
	struct sk_buff *nskb = *skb;

	mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
				     &hdr.wlan_idx_hi);
	if (!nskb) {
		nskb = mt76_mcu_msg_alloc(dev, NULL,
					  MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
		if (!nskb)
			return ERR_PTR(-ENOMEM);

		*skb = nskb;
	}

	/* Seed the outer TLV length; nested TLVs will grow it further */
	if (sta_hdr)
		sta_hdr->len = cpu_to_le16(sizeof(hdr));

	return skb_put_data(nskb, &hdr, sizeof(hdr));
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
/* Append the STA_REC_BASIC TLV describing a peer's connection state.
 *
 * @skb: sta-record message being built
 * @vif: owning interface
 * @sta: peer station, or NULL for the broadcast/group entry
 * @enable: true when (re-)adding the peer, false when disconnecting
 *
 * Note the connection types are expressed from the peer's point of view:
 * an AP/mesh interface records its peers as STA/GC, a station interface
 * records its peer as AP/GO.
 */
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   bool enable)
{
	struct sta_rec_basic *basic;
	struct tlv *tlv;
	int conn_type;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));

	basic = (struct sta_rec_basic *)tlv;
	basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);

	if (enable) {
		basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
		basic->conn_state = CONN_STATE_PORT_SECURE;
	} else {
		basic->conn_state = CONN_STATE_DISCONNECT;
	}

	/* NULL sta denotes the broadcast entry */
	if (!sta) {
		basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
		eth_broadcast_addr(basic->peer_addr);
		return;
	}

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_AP:
		if (vif->p2p)
			conn_type = CONNECTION_P2P_GC;
		else
			conn_type = CONNECTION_INFRA_STA;
		basic->conn_type = cpu_to_le32(conn_type);
		basic->aid = cpu_to_le16(sta->aid);
		break;
	case NL80211_IFTYPE_STATION:
		if (vif->p2p)
			conn_type = CONNECTION_P2P_GO;
		else
			conn_type = CONNECTION_INFRA_AP;
		basic->conn_type = cpu_to_le32(conn_type);
		/* Our own association ID, assigned by the peer AP */
		basic->aid = cpu_to_le16(vif->bss_conf.aid);
		break;
	case NL80211_IFTYPE_ADHOC:
		basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
		basic->aid = cpu_to_le16(sta->aid);
		break;
	default:
		WARN_ON(1);
		break;
	}

	memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
	basic->qos = sta->wme;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);
  306. static void
  307. mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
  308. struct ieee80211_sta *sta)
  309. {
  310. struct sta_rec_uapsd *uapsd;
  311. struct tlv *tlv;
  312. if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
  313. return;
  314. tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
  315. uapsd = (struct sta_rec_uapsd *)tlv;
  316. if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
  317. uapsd->dac_map |= BIT(3);
  318. uapsd->tac_map |= BIT(3);
  319. }
  320. if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
  321. uapsd->dac_map |= BIT(2);
  322. uapsd->tac_map |= BIT(2);
  323. }
  324. if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
  325. uapsd->dac_map |= BIT(1);
  326. uapsd->tac_map |= BIT(1);
  327. }
  328. if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
  329. uapsd->dac_map |= BIT(0);
  330. uapsd->tac_map |= BIT(0);
  331. }
  332. uapsd->max_sp = sta->max_sp;
  333. }
  334. void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
  335. struct ieee80211_vif *vif,
  336. struct mt76_wcid *wcid,
  337. void *sta_wtbl, void *wtbl_tlv)
  338. {
  339. struct wtbl_hdr_trans *htr;
  340. struct tlv *tlv;
  341. tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS,
  342. sizeof(*htr),
  343. wtbl_tlv, sta_wtbl);
  344. htr = (struct wtbl_hdr_trans *)tlv;
  345. htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
  346. if (vif->type == NL80211_IFTYPE_STATION)
  347. htr->to_ds = true;
  348. else
  349. htr->from_ds = true;
  350. if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
  351. htr->to_ds = true;
  352. htr->from_ds = true;
  353. }
  354. }
  355. EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_hdr_trans_tlv);
  356. int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
  357. struct ieee80211_vif *vif,
  358. struct mt76_wcid *wcid, int cmd)
  359. {
  360. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  361. struct wtbl_req_hdr *wtbl_hdr;
  362. struct tlv *sta_wtbl;
  363. struct sk_buff *skb;
  364. skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
  365. if (IS_ERR(skb))
  366. return PTR_ERR(skb);
  367. sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
  368. sizeof(struct tlv));
  369. wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
  370. sta_wtbl, &skb);
  371. if (IS_ERR(wtbl_hdr))
  372. return PTR_ERR(wtbl_hdr);
  373. mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, sta_wtbl, wtbl_hdr);
  374. return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
  375. }
  376. EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
/* Append the generic, RX and (non-mt7921) spatial-extension wtbl TLVs for
 * a station entry.
 *
 * @dev: mt76 device
 * @skb: wtbl message being built
 * @vif: owning interface
 * @sta: peer station, or NULL for the broadcast/group entry
 * @sta_wtbl: outer STA_REC_WTBL TLV (may be NULL)
 * @wtbl_tlv: wtbl request header whose tlv_num is maintained
 */
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
				      struct sk_buff *skb,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta,
				      void *sta_wtbl, void *wtbl_tlv)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct wtbl_generic *generic;
	struct wtbl_rx *rx;
	struct wtbl_spe *spe;
	struct tlv *tlv;

	tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_GENERIC,
					     sizeof(*generic),
					     wtbl_tlv, sta_wtbl);

	generic = (struct wtbl_generic *)tlv;

	if (sta) {
		/* partial AID: ours when we are the station, the peer's
		 * otherwise
		 */
		if (vif->type == NL80211_IFTYPE_STATION)
			generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
		else
			generic->partial_aid = cpu_to_le16(sta->aid);
		memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
		generic->muar_idx = mvif->omac_idx;
		generic->qos = sta->wme;
	} else {
		/* Broadcast entry: mt7921 stations key it on the BSSID */
		if (is_mt7921(dev) &&
		    vif->type == NL80211_IFTYPE_STATION)
			memcpy(generic->peer_addr, vif->bss_conf.bssid,
			       ETH_ALEN);
		else
			eth_broadcast_addr(generic->peer_addr);

		generic->muar_idx = 0xe;
	}

	tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
					     wtbl_tlv, sta_wtbl);

	rx = (struct wtbl_rx *)tlv;
	rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
	rx->rca2 = 1;
	rx->rv = 1;

	/* mt7921 firmware has no WTBL_SPE TLV */
	if (is_mt7921(dev))
		return;

	tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
					     wtbl_tlv, sta_wtbl);

	spe = (struct wtbl_spe *)tlv;
	spe->spe_idx = 24;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_generic_tlv);
/* Append the STA_REC_HW_AMSDU TLV enabling hardware A-MSDU aggregation.
 *
 * Only applied on AP/station interfaces for peers advertising a non-zero
 * max A-MSDU length. Also marks the wcid so the TX path knows hardware
 * A-MSDU is active.
 */
static void
mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
			      struct ieee80211_vif *vif)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct sta_rec_amsdu *amsdu;
	struct tlv *tlv;

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!sta->max_amsdu_len)
		return;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));

	amsdu = (struct sta_rec_amsdu *)tlv;
	amsdu->max_amsdu_num = 8;
	amsdu->amsdu_en = true;
	/* max_mpdu_size encodes the peer limit as 0 (3895) or 1 (>= 7991);
	 * NOTE(review): peers advertising 11454 also map to 1 here — confirm
	 * against the firmware's expected encoding.
	 */
	amsdu->max_mpdu_size = sta->max_amsdu_len >=
			       IEEE80211_MAX_MPDU_LEN_VHT_7991;

	wcid->amsdu = true;
}
/* Extract a field from an HE PHY/MAC capability byte by its mask */
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
/* Append the STA_REC_HE TLV, translating the peer's HE capability element
 * into the firmware's he_cap bitmap, per-bandwidth MCS maps and assorted
 * PHY/MAC fields.
 */
static void
mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
	struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
	struct sta_rec_he *he;
	struct tlv *tlv;
	u32 cap = 0;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));

	he = (struct sta_rec_he *)tlv;

	/* MAC capabilities */
	if (elem->mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
		cap |= STA_REC_HE_CAP_HTC;

	if (elem->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
		cap |= STA_REC_HE_CAP_BSR;

	if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
		cap |= STA_REC_HE_CAP_OM;

	if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU)
		cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU;

	if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
		cap |= STA_REC_HE_CAP_BQR;

	/* PHY capabilities */
	if (elem->phy_cap_info[0] &
	    (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G |
	     IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
		cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;

	if (elem->phy_cap_info[1] &
	    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
		cap |= STA_REC_HE_CAP_LDPC;

	if (elem->phy_cap_info[1] &
	    IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US)
		cap |= STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI;

	if (elem->phy_cap_info[2] &
	    IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US)
		cap |= STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI;

	if (elem->phy_cap_info[2] &
	    IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ)
		cap |= STA_REC_HE_CAP_LE_EQ_80M_TX_STBC;

	if (elem->phy_cap_info[2] &
	    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
		cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC;

	if (elem->phy_cap_info[6] &
	    IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE)
		cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE;

	if (elem->phy_cap_info[7] &
	    IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
		cap |= STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI;

	if (elem->phy_cap_info[7] &
	    IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ)
		cap |= STA_REC_HE_CAP_GT_80M_TX_STBC;

	if (elem->phy_cap_info[7] &
	    IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
		cap |= STA_REC_HE_CAP_GT_80M_RX_STBC;

	if (elem->phy_cap_info[8] &
	    IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
		cap |= STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI;

	if (elem->phy_cap_info[8] &
	    IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI)
		cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI;

	if (elem->phy_cap_info[9] &
	    IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK)
		cap |= STA_REC_HE_CAP_TRIG_CQI_FK;

	if (elem->phy_cap_info[9] &
	    IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU)
		cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242;

	if (elem->phy_cap_info[9] &
	    IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU)
		cap |= STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242;

	he->he_cap = cpu_to_le32(cap);

	/* RX MCS maps: wider bandwidths fill extra slots, then fall through
	 * to the common <= 80 MHz map
	 */
	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		if (elem->phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			he->max_nss_mcs[CMD_HE_MCS_BW8080] =
				he_cap->he_mcs_nss_supp.rx_mcs_80p80;

		he->max_nss_mcs[CMD_HE_MCS_BW160] =
				he_cap->he_mcs_nss_supp.rx_mcs_160;
		fallthrough;
	default:
		he->max_nss_mcs[CMD_HE_MCS_BW80] =
				he_cap->he_mcs_nss_supp.rx_mcs_80;
		break;
	}

	he->t_frame_dur =
		HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
	he->max_ampdu_exp =
		HE_MAC(CAP3_MAX_AMPDU_LEN_EXP_MASK, elem->mac_cap_info[3]);
	he->bw_set =
		HE_PHY(CAP0_CHANNEL_WIDTH_SET_MASK, elem->phy_cap_info[0]);
	he->device_class =
		HE_PHY(CAP1_DEVICE_CLASS_A, elem->phy_cap_info[1]);
	he->punc_pream_rx =
		HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
	he->dcm_tx_mode =
		HE_PHY(CAP3_DCM_MAX_CONST_TX_MASK, elem->phy_cap_info[3]);
	he->dcm_tx_max_nss =
		HE_PHY(CAP3_DCM_MAX_TX_NSS_2, elem->phy_cap_info[3]);
	he->dcm_rx_mode =
		HE_PHY(CAP3_DCM_MAX_CONST_RX_MASK, elem->phy_cap_info[3]);
	he->dcm_rx_max_nss =
		HE_PHY(CAP3_DCM_MAX_RX_NSS_2, elem->phy_cap_info[3]);
	/* NOTE(review): this second write overwrites the value just stored
	 * above with the CAP8 DCM-max-RU field; it looks like it was meant
	 * for a separate dcm_max_ru-style member of struct sta_rec_he —
	 * confirm against the struct definition and fix the field name.
	 */
	he->dcm_rx_max_nss =
		HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]);
	he->pkt_ext = 2;
}
  548. static u8
  549. mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
  550. enum nl80211_band band, struct ieee80211_sta *sta)
  551. {
  552. struct ieee80211_sta_ht_cap *ht_cap;
  553. struct ieee80211_sta_vht_cap *vht_cap;
  554. const struct ieee80211_sta_he_cap *he_cap;
  555. u8 mode = 0;
  556. if (sta) {
  557. ht_cap = &sta->ht_cap;
  558. vht_cap = &sta->vht_cap;
  559. he_cap = &sta->he_cap;
  560. } else {
  561. struct ieee80211_supported_band *sband;
  562. sband = mphy->hw->wiphy->bands[band];
  563. ht_cap = &sband->ht_cap;
  564. vht_cap = &sband->vht_cap;
  565. he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
  566. }
  567. if (band == NL80211_BAND_2GHZ) {
  568. mode |= PHY_TYPE_BIT_HR_DSSS | PHY_TYPE_BIT_ERP;
  569. if (ht_cap->ht_supported)
  570. mode |= PHY_TYPE_BIT_HT;
  571. if (he_cap->has_he)
  572. mode |= PHY_TYPE_BIT_HE;
  573. } else if (band == NL80211_BAND_5GHZ) {
  574. mode |= PHY_TYPE_BIT_OFDM;
  575. if (ht_cap->ht_supported)
  576. mode |= PHY_TYPE_BIT_HT;
  577. if (vht_cap->vht_supported)
  578. mode |= PHY_TYPE_BIT_VHT;
  579. if (he_cap->has_he)
  580. mode |= PHY_TYPE_BIT_HE;
  581. }
  582. return mode;
  583. }
/* Append the per-station capability TLVs (HT, VHT, U-APSD, and on mt7921
 * additionally A-MSDU, HE, PHY, RA and STATE) to a sta-record message.
 *
 * @mphy: mt76 phy (provides the current channel's band)
 * @skb: sta-record message being built
 * @sta: peer station
 * @vif: owning interface
 * @rcpi: received channel power indicator to seed rate adaptation with
 */
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
			     struct ieee80211_sta *sta,
			     struct ieee80211_vif *vif,
			     u8 rcpi)
{
	struct cfg80211_chan_def *chandef = &mphy->chandef;
	enum nl80211_band band = chandef->chan->band;
	struct mt76_dev *dev = mphy->dev;
	struct sta_rec_ra_info *ra_info;
	struct sta_rec_state *state;
	struct sta_rec_phy *phy;
	struct tlv *tlv;

	/* starec ht */
	if (sta->ht_cap.ht_supported) {
		struct sta_rec_ht *ht;

		tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
		ht = (struct sta_rec_ht *)tlv;
		ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
	}

	/* starec vht */
	if (sta->vht_cap.vht_supported) {
		struct sta_rec_vht *vht;
		int len;

		/* Non-mt7921 firmware expects the TLV without its last
		 * 4 bytes
		 */
		len = is_mt7921(dev) ? sizeof(*vht) : sizeof(*vht) - 4;
		tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, len);
		vht = (struct sta_rec_vht *)tlv;
		vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
		vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
		vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
	}

	/* starec uapsd */
	mt76_connac_mcu_sta_uapsd(skb, vif, sta);

	/* Everything below is mt7921-only */
	if (!is_mt7921(dev))
		return;

	if (sta->ht_cap.ht_supported)
		mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif);

	/* starec he */
	if (sta->he_cap.has_he)
		mt76_connac_mcu_sta_he_tlv(skb, sta);

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
	phy = (struct sta_rec_phy *)tlv;
	phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
	phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
	phy->rcpi = rcpi;
	phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
				sta->ht_cap.ampdu_factor) |
		     FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
				sta->ht_cap.ampdu_density);

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
	ra_info = (struct sta_rec_ra_info *)tlv;
	ra_info->legacy = cpu_to_le16((u16)sta->supp_rates[band]);

	if (sta->ht_cap.ht_supported)
		memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask,
		       HT_MCS_MASK_NUM);

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
	state = (struct sta_rec_state *)tlv;
	state->state = 2;

	if (sta->vht_cap.vht_supported) {
		/* Encode bandwidth + RX NSS like a VHT opmode notification */
		state->vht_opmode = sta->bandwidth;
		state->vht_opmode |= (sta->rx_nss - 1) <<
			IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
	}
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_tlv);
  648. static void
  649. mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
  650. void *sta_wtbl, void *wtbl_tlv)
  651. {
  652. struct wtbl_smps *smps;
  653. struct tlv *tlv;
  654. tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
  655. wtbl_tlv, sta_wtbl);
  656. smps = (struct wtbl_smps *)tlv;
  657. if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
  658. smps->smps = true;
  659. }
/* Append the HT/VHT/SMPS wtbl TLVs, plus (on non-mt7921) a raw DW5 patch
 * programming the short-GI bits.
 *
 * @dev: mt76 device
 * @skb: wtbl message being built
 * @sta: peer station
 * @sta_wtbl: outer STA_REC_WTBL TLV (may be NULL)
 * @wtbl_tlv: wtbl request header whose tlv_num is maintained
 */
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
				 struct ieee80211_sta *sta, void *sta_wtbl,
				 void *wtbl_tlv)
{
	struct wtbl_ht *ht = NULL;
	struct tlv *tlv;
	u32 flags = 0;

	if (sta->ht_cap.ht_supported) {
		tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
						     wtbl_tlv, sta_wtbl);
		ht = (struct wtbl_ht *)tlv;
		ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
		ht->af = sta->ht_cap.ampdu_factor;
		ht->mm = sta->ht_cap.ampdu_density;
		ht->ht = true;
	}

	if (sta->vht_cap.vht_supported) {
		struct wtbl_vht *vht;
		u8 af;

		tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_VHT,
						     sizeof(*vht), wtbl_tlv,
						     sta_wtbl);
		vht = (struct wtbl_vht *)tlv;
		vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
		vht->vht = true;

		af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
			       sta->vht_cap.cap);
		/* Keep the larger of the HT and VHT A-MPDU factors */
		if (ht)
			ht->af = max(ht->af, af);
	}

	mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);

	if (!is_mt7921(dev) && sta->ht_cap.ht_supported) {
		/* sgi */
		u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
			  MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
		struct wtbl_raw *raw;

		tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_RAW_DATA,
						     sizeof(*raw), wtbl_tlv,
						     sta_wtbl);

		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
			flags |= MT_WTBL_W5_SHORT_GI_20;
		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
			flags |= MT_WTBL_W5_SHORT_GI_40;

		if (sta->vht_cap.vht_supported) {
			if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
				flags |= MT_WTBL_W5_SHORT_GI_80;
			if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
				flags |= MT_WTBL_W5_SHORT_GI_160;
		}

		/* Read-modify-write wtbl DW5: only the short-GI bits (msk)
		 * are replaced with the computed flags
		 */
		raw = (struct wtbl_raw *)tlv;
		raw->val = cpu_to_le32(flags);
		raw->msk = cpu_to_le32(~msk);
		raw->wtbl_idx = 1;
		raw->dw = 5;
	}
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);
  717. int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
  718. struct mt76_sta_cmd_info *info)
  719. {
  720. struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
  721. struct mt76_dev *dev = phy->dev;
  722. struct wtbl_req_hdr *wtbl_hdr;
  723. struct tlv *sta_wtbl;
  724. struct sk_buff *skb;
  725. skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid);
  726. if (IS_ERR(skb))
  727. return PTR_ERR(skb);
  728. mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta, info->enable);
  729. if (!info->enable)
  730. goto out;
  731. if (info->sta)
  732. mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif,
  733. info->rcpi);
  734. sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
  735. sizeof(struct tlv));
  736. wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid,
  737. WTBL_RESET_AND_SET,
  738. sta_wtbl, &skb);
  739. if (IS_ERR(wtbl_hdr))
  740. return PTR_ERR(wtbl_hdr);
  741. mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif, info->sta,
  742. sta_wtbl, wtbl_hdr);
  743. mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
  744. sta_wtbl, wtbl_hdr);
  745. if (info->sta)
  746. mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta, sta_wtbl,
  747. wtbl_hdr);
  748. out:
  749. return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
  750. }
  751. EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_sta_cmd);
  752. void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
  753. struct ieee80211_ampdu_params *params,
  754. bool enable, bool tx, void *sta_wtbl,
  755. void *wtbl_tlv)
  756. {
  757. struct wtbl_ba *ba;
  758. struct tlv *tlv;
  759. tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
  760. wtbl_tlv, sta_wtbl);
  761. ba = (struct wtbl_ba *)tlv;
  762. ba->tid = params->tid;
  763. if (tx) {
  764. ba->ba_type = MT_BA_TYPE_ORIGINATOR;
  765. ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
  766. ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
  767. ba->ba_en = enable;
  768. } else {
  769. memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
  770. ba->ba_type = MT_BA_TYPE_RECIPIENT;
  771. ba->rst_ba_tid = params->tid;
  772. ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
  773. ba->rst_ba_sb = 1;
  774. }
  775. if (is_mt7921(dev))
  776. return;
  777. if (enable && tx) {
  778. u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
  779. int i;
  780. for (i = 7; i > 0; i--) {
  781. if (params->buf_size >= ba_range[i])
  782. break;
  783. }
  784. ba->ba_winsize_idx = i;
  785. }
  786. }
  787. EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv);
/* Enable or disable the OMAC/BSS pair backing @vif using the unified
 * MCU commands. On enable, DEV_INFO is sent before BSS_INFO; on disable
 * the order is reversed so the BSS is torn down first.
 *
 * Returns 0 on success or a negative error code.
 */
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
				struct ieee80211_vif *vif,
				struct mt76_wcid *wcid,
				bool enable)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_dev *dev = phy->dev;
	/* wire format of the DEV_INFO_UPDATE request */
	struct {
		struct {
			u8 omac_idx;
			u8 band_idx;
			__le16 pad;
		} __packed hdr;
		struct req_tlv {
			__le16 tag;
			__le16 len;
			u8 active;
			u8 pad;
			u8 omac_addr[ETH_ALEN];
		} __packed tlv;
	} dev_req = {
		.hdr = {
			.omac_idx = mvif->omac_idx,
			.band_idx = mvif->band_idx,
		},
		.tlv = {
			.tag = cpu_to_le16(DEV_INFO_ACTIVE),
			.len = cpu_to_le16(sizeof(struct req_tlv)),
			.active = enable,
		},
	};
	/* wire format of the BSS_INFO_UPDATE (basic TLV only) request */
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_bss_basic_tlv basic;
	} basic_req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.basic = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
			.len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
			.omac_idx = mvif->omac_idx,
			.band_idx = mvif->band_idx,
			.wmm_idx = mvif->wmm_idx,
			.active = enable,
			.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx),
			.sta_idx = cpu_to_le16(wcid->idx),
			.conn_state = 1,
		},
	};
	int err, idx, cmd, len;
	void *data;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_MONITOR:
	case NL80211_IFTYPE_AP:
		basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
		break;
	case NL80211_IFTYPE_STATION:
		basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
		break;
	case NL80211_IFTYPE_ADHOC:
		basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
		break;
	default:
		/* unexpected iftype: warn and fall through with
		 * conn_type left at its zero-initialized value
		 */
		WARN_ON(1);
		break;
	}

	/* extended BSSIDs share hardware slot 0 */
	idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
	basic_req.basic.hw_bss_idx = idx;

	memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);

	/* first message: DEV_INFO when enabling, BSS_INFO when disabling */
	cmd = enable ? MCU_UNI_CMD_DEV_INFO_UPDATE : MCU_UNI_CMD_BSS_INFO_UPDATE;
	data = enable ? (void *)&dev_req : (void *)&basic_req;
	len = enable ? sizeof(dev_req) : sizeof(basic_req);

	err = mt76_mcu_send_msg(dev, cmd, data, len, true);
	if (err < 0)
		return err;

	/* second message: the other half of the pair */
	cmd = enable ? MCU_UNI_CMD_BSS_INFO_UPDATE : MCU_UNI_CMD_DEV_INFO_UPDATE;
	data = enable ? (void *)&basic_req : (void *)&dev_req;
	len = enable ? sizeof(basic_req) : sizeof(dev_req);

	return mt76_mcu_send_msg(dev, cmd, data, len, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_dev);
  874. void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
  875. struct ieee80211_ampdu_params *params,
  876. bool enable, bool tx)
  877. {
  878. struct sta_rec_ba *ba;
  879. struct tlv *tlv;
  880. tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
  881. ba = (struct sta_rec_ba *)tlv;
  882. ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
  883. ba->winsize = cpu_to_le16(params->buf_size);
  884. ba->ssn = cpu_to_le16(params->ssn);
  885. ba->ba_en = enable << params->tid;
  886. ba->amsdu = params->amsdu;
  887. ba->tid = params->tid;
  888. }
  889. EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
  890. int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
  891. struct ieee80211_ampdu_params *params,
  892. bool enable, bool tx)
  893. {
  894. struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
  895. struct wtbl_req_hdr *wtbl_hdr;
  896. struct tlv *sta_wtbl;
  897. struct sk_buff *skb;
  898. int ret;
  899. skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
  900. if (IS_ERR(skb))
  901. return PTR_ERR(skb);
  902. sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
  903. sizeof(struct tlv));
  904. wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
  905. sta_wtbl, &skb);
  906. if (IS_ERR(wtbl_hdr))
  907. return PTR_ERR(wtbl_hdr);
  908. mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
  909. wtbl_hdr);
  910. ret = mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE, true);
  911. if (ret)
  912. return ret;
  913. skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
  914. if (IS_ERR(skb))
  915. return PTR_ERR(skb);
  916. mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
  917. return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE,
  918. true);
  919. }
  920. EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
  921. static u8
  922. mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
  923. enum nl80211_band band,
  924. struct ieee80211_sta *sta)
  925. {
  926. struct mt76_dev *dev = phy->dev;
  927. const struct ieee80211_sta_he_cap *he_cap;
  928. struct ieee80211_sta_vht_cap *vht_cap;
  929. struct ieee80211_sta_ht_cap *ht_cap;
  930. u8 mode = 0;
  931. if (!is_mt7921(dev))
  932. return 0x38;
  933. if (sta) {
  934. ht_cap = &sta->ht_cap;
  935. vht_cap = &sta->vht_cap;
  936. he_cap = &sta->he_cap;
  937. } else {
  938. struct ieee80211_supported_band *sband;
  939. sband = phy->hw->wiphy->bands[band];
  940. ht_cap = &sband->ht_cap;
  941. vht_cap = &sband->vht_cap;
  942. he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
  943. }
  944. if (band == NL80211_BAND_2GHZ) {
  945. mode |= PHY_MODE_B | PHY_MODE_G;
  946. if (ht_cap->ht_supported)
  947. mode |= PHY_MODE_GN;
  948. if (he_cap->has_he)
  949. mode |= PHY_MODE_AX_24G;
  950. } else if (band == NL80211_BAND_5GHZ) {
  951. mode |= PHY_MODE_A;
  952. if (ht_cap->ht_supported)
  953. mode |= PHY_MODE_AN;
  954. if (vht_cap->vht_supported)
  955. mode |= PHY_MODE_AC;
  956. if (he_cap->has_he)
  957. mode |= PHY_MODE_AX_5G;
  958. }
  959. return mode;
  960. }
  961. static const struct ieee80211_sta_he_cap *
  962. mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
  963. {
  964. enum nl80211_band band = phy->chandef.chan->band;
  965. struct ieee80211_supported_band *sband;
  966. sband = phy->hw->wiphy->bands[band];
  967. return ieee80211_get_he_iftype_cap(sband, vif->type);
  968. }
#define DEFAULT_HE_PE_DURATION		4
#define DEFAULT_HE_DURATION_RTS_THRES	1023

/* Fill the HE BSS TLV (@tlv points at a struct bss_info_uni_he) from the
 * vif's BSS configuration, applying defaults when values are unset.
 *
 * NOTE(review): @cap comes from mt76_connac_get_he_phy_cap() and is
 * dereferenced unconditionally — the caller only invokes this when
 * bss_conf.he_support is set, presumably guaranteeing non-NULL; confirm.
 */
static void
mt76_connac_mcu_uni_bss_he_tlv(struct mt76_phy *phy, struct ieee80211_vif *vif,
			       struct tlv *tlv)
{
	const struct ieee80211_sta_he_cap *cap;
	struct bss_info_uni_he *he;

	cap = mt76_connac_get_he_phy_cap(phy, vif);

	he = (struct bss_info_uni_he *)tlv;
	he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
	if (!he->he_pe_duration)
		he->he_pe_duration = DEFAULT_HE_PE_DURATION;

	he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
	if (!he->he_rts_thres)
		he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);

	/* TX MCS maps for each supported bandwidth */
	he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80;
	he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160;
	he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
}
/* Push the full BSS description for @vif to the firmware via unified
 * BSS_INFO_UPDATE commands: basic+QoS TLVs, an optional HE TLV, then
 * the RLM (channel/bandwidth) TLV.
 *
 * Returns 0 on success or a negative error code.
 */
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
				struct ieee80211_vif *vif,
				struct mt76_wcid *wcid,
				bool enable)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct cfg80211_chan_def *chandef = &phy->chandef;
	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
	enum nl80211_band band = chandef->chan->band;
	struct mt76_dev *mdev = phy->dev;
	/* wire format: basic + QoS TLVs in one message */
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_bss_basic_tlv basic;
		struct mt76_connac_bss_qos_tlv qos;
	} basic_req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.basic = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
			.len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
			.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
			.dtim_period = vif->bss_conf.dtim_period,
			.omac_idx = mvif->omac_idx,
			.band_idx = mvif->band_idx,
			.wmm_idx = mvif->wmm_idx,
			.active = true, /* keep bss deactivated */
			.phymode = mt76_connac_get_phy_mode(phy, vif, band, NULL),
		},
		.qos = {
			.tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
			.len = cpu_to_le16(sizeof(struct mt76_connac_bss_qos_tlv)),
			.qos = vif->bss_conf.qos,
		},
	};
	/* wire format: RLM (channel / bandwidth / streams) TLV */
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct rlm_tlv {
			__le16 tag;
			__le16 len;
			u8 control_channel;
			u8 center_chan;
			u8 center_chan2;
			u8 bw;
			u8 tx_streams;
			u8 rx_streams;
			u8 short_st;
			u8 ht_op_info;
			u8 sco;
			u8 pad[3];
		} __packed rlm;
	} __packed rlm_req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.rlm = {
			.tag = cpu_to_le16(UNI_BSS_INFO_RLM),
			.len = cpu_to_le16(sizeof(struct rlm_tlv)),
			.control_channel = chandef->chan->hw_value,
			.center_chan = ieee80211_frequency_to_channel(freq1),
			.center_chan2 = ieee80211_frequency_to_channel(freq2),
			.tx_streams = hweight8(phy->antenna_mask),
			.ht_op_info = 4, /* set HT 40M allowed */
			.rx_streams = phy->chainmask,
			.short_st = true,
		},
	};
	int err, conn_type;
	u8 idx;

	/* extended BSSIDs share hardware slot 0 */
	idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
	basic_req.basic.hw_bss_idx = idx;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_AP:
		if (vif->p2p)
			conn_type = CONNECTION_P2P_GO;
		else
			conn_type = CONNECTION_INFRA_AP;
		basic_req.basic.conn_type = cpu_to_le32(conn_type);
		break;
	case NL80211_IFTYPE_STATION:
		if (vif->p2p)
			conn_type = CONNECTION_P2P_GC;
		else
			conn_type = CONNECTION_INFRA_STA;
		basic_req.basic.conn_type = cpu_to_le32(conn_type);
		break;
	case NL80211_IFTYPE_ADHOC:
		basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
		break;
	default:
		/* unexpected iftype: warn, conn_type stays zero */
		WARN_ON(1);
		break;
	}

	memcpy(basic_req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN);
	basic_req.basic.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx);
	basic_req.basic.sta_idx = cpu_to_le16(wcid->idx);
	basic_req.basic.conn_state = !enable;

	err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &basic_req,
				sizeof(basic_req), true);
	if (err < 0)
		return err;

	if (vif->bss_conf.he_support) {
		/* HE parameters go in their own BSS_INFO_UPDATE message */
		struct {
			struct {
				u8 bss_idx;
				u8 pad[3];
			} __packed hdr;
			struct bss_info_uni_he he;
		} he_req = {
			.hdr = {
				.bss_idx = mvif->idx,
			},
			.he = {
				.tag = cpu_to_le16(UNI_BSS_INFO_HE_BASIC),
				.len = cpu_to_le16(sizeof(struct bss_info_uni_he)),
			},
		};

		mt76_connac_mcu_uni_bss_he_tlv(phy, vif,
					       (struct tlv *)&he_req.he);

		err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE,
					&he_req, sizeof(he_req), true);
		if (err < 0)
			return err;
	}

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_40:
		rlm_req.rlm.bw = CMD_CBW_40MHZ;
		break;
	case NL80211_CHAN_WIDTH_80:
		rlm_req.rlm.bw = CMD_CBW_80MHZ;
		break;
	case NL80211_CHAN_WIDTH_80P80:
		rlm_req.rlm.bw = CMD_CBW_8080MHZ;
		break;
	case NL80211_CHAN_WIDTH_160:
		rlm_req.rlm.bw = CMD_CBW_160MHZ;
		break;
	case NL80211_CHAN_WIDTH_5:
		rlm_req.rlm.bw = CMD_CBW_5MHZ;
		break;
	case NL80211_CHAN_WIDTH_10:
		rlm_req.rlm.bw = CMD_CBW_10MHZ;
		break;
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	default:
		rlm_req.rlm.bw = CMD_CBW_20MHZ;
		rlm_req.rlm.ht_op_info = 0;
		break;
	}

	/* secondary channel offset relative to the center channel */
	if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
		rlm_req.rlm.sco = 1; /* SCA */
	else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
		rlm_req.rlm.sco = 3; /* SCB */

	return mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &rlm_req,
				 sizeof(rlm_req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
  1154. #define MT76_CONNAC_SCAN_CHANNEL_TIME 60
  1155. int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
  1156. struct ieee80211_scan_request *scan_req)
  1157. {
  1158. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  1159. struct cfg80211_scan_request *sreq = &scan_req->req;
  1160. int n_ssids = 0, err, i, duration;
  1161. int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
  1162. struct ieee80211_channel **scan_list = sreq->channels;
  1163. struct mt76_dev *mdev = phy->dev;
  1164. bool ext_phy = phy == mdev->phy2;
  1165. struct mt76_connac_mcu_scan_channel *chan;
  1166. struct mt76_connac_hw_scan_req *req;
  1167. struct sk_buff *skb;
  1168. skb = mt76_mcu_msg_alloc(mdev, NULL, sizeof(*req));
  1169. if (!skb)
  1170. return -ENOMEM;
  1171. set_bit(MT76_HW_SCANNING, &phy->state);
  1172. mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
  1173. req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
  1174. req->seq_num = mvif->scan_seq_num | ext_phy << 7;
  1175. req->bss_idx = mvif->idx;
  1176. req->scan_type = sreq->n_ssids ? 1 : 0;
  1177. req->probe_req_num = sreq->n_ssids ? 2 : 0;
  1178. req->version = 1;
  1179. for (i = 0; i < sreq->n_ssids; i++) {
  1180. if (!sreq->ssids[i].ssid_len)
  1181. continue;
  1182. req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
  1183. memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
  1184. sreq->ssids[i].ssid_len);
  1185. n_ssids++;
  1186. }
  1187. req->ssid_type = n_ssids ? BIT(2) : BIT(0);
  1188. req->ssid_type_ext = n_ssids ? BIT(0) : 0;
  1189. req->ssids_num = n_ssids;
  1190. duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
  1191. /* increase channel time for passive scan */
  1192. if (!sreq->n_ssids)
  1193. duration *= 2;
  1194. req->timeout_value = cpu_to_le16(sreq->n_channels * duration);
  1195. req->channel_min_dwell_time = cpu_to_le16(duration);
  1196. req->channel_dwell_time = cpu_to_le16(duration);
  1197. req->channels_num = min_t(u8, sreq->n_channels, 32);
  1198. req->ext_channels_num = min_t(u8, ext_channels_num, 32);
  1199. for (i = 0; i < req->channels_num + req->ext_channels_num; i++) {
  1200. if (i >= 32)
  1201. chan = &req->ext_channels[i - 32];
  1202. else
  1203. chan = &req->channels[i];
  1204. chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
  1205. chan->channel_num = scan_list[i]->hw_value;
  1206. }
  1207. req->channel_type = sreq->n_channels ? 4 : 0;
  1208. if (sreq->ie_len > 0) {
  1209. memcpy(req->ies, sreq->ie, sreq->ie_len);
  1210. req->ies_len = cpu_to_le16(sreq->ie_len);
  1211. }
  1212. if (is_mt7921(phy->dev))
  1213. req->scan_func |= SCAN_FUNC_SPLIT_SCAN;
  1214. memcpy(req->bssid, sreq->bssid, ETH_ALEN);
  1215. if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
  1216. get_random_mask_addr(req->random_mac, sreq->mac_addr,
  1217. sreq->mac_addr_mask);
  1218. req->scan_func |= SCAN_FUNC_RANDOM_MAC;
  1219. }
  1220. err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_START_HW_SCAN, false);
  1221. if (err < 0)
  1222. clear_bit(MT76_HW_SCANNING, &phy->state);
  1223. return err;
  1224. }
  1225. EXPORT_SYMBOL_GPL(mt76_connac_mcu_hw_scan);
  1226. int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
  1227. struct ieee80211_vif *vif)
  1228. {
  1229. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  1230. struct {
  1231. u8 seq_num;
  1232. u8 is_ext_channel;
  1233. u8 rsv[2];
  1234. } __packed req = {
  1235. .seq_num = mvif->scan_seq_num,
  1236. };
  1237. if (test_and_clear_bit(MT76_HW_SCANNING, &phy->state)) {
  1238. struct cfg80211_scan_info info = {
  1239. .aborted = true,
  1240. };
  1241. ieee80211_scan_completed(phy->hw, &info);
  1242. }
  1243. return mt76_mcu_send_msg(phy->dev, MCU_CMD_CANCEL_HW_SCAN, &req,
  1244. sizeof(req), false);
  1245. }
  1246. EXPORT_SYMBOL_GPL(mt76_connac_mcu_cancel_hw_scan);
/* Program a scheduled (offloaded) scan described by @sreq into the
 * firmware: SSIDs, match sets, channel list, scan-plan intervals and
 * optional extra IEs appended after the fixed request.
 *
 * Returns 0 on success or a negative error code.
 */
int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
				   struct ieee80211_vif *vif,
				   struct cfg80211_sched_scan_request *sreq)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct ieee80211_channel **scan_list = sreq->channels;
	struct mt76_connac_mcu_scan_channel *chan;
	struct mt76_connac_sched_scan_req *req;
	struct mt76_dev *mdev = phy->dev;
	bool ext_phy = phy == mdev->phy2;
	struct cfg80211_match_set *match;
	struct cfg80211_ssid *ssid;
	struct sk_buff *skb;
	int i;

	skb = mt76_mcu_msg_alloc(mdev, NULL, sizeof(*req) + sreq->ie_len);
	if (!skb)
		return -ENOMEM;

	mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;

	req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
	req->version = 1;
	/* bit7 of the sequence number marks the secondary phy */
	req->seq_num = mvif->scan_seq_num | ext_phy << 7;

	/* chip-specific fields of the request */
	if (is_mt7663(phy->dev) &&
	    (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) {
		get_random_mask_addr(req->mt7663.random_mac, sreq->mac_addr,
				     sreq->mac_addr_mask);
		req->scan_func = 1;
	} else if (is_mt7921(phy->dev)) {
		req->mt7921.bss_idx = mvif->idx;
	}

	req->ssids_num = sreq->n_ssids;
	for (i = 0; i < req->ssids_num; i++) {
		ssid = &sreq->ssids[i];
		memcpy(req->ssids[i].ssid, ssid->ssid, ssid->ssid_len);
		req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len);
	}

	req->match_num = sreq->n_match_sets;
	for (i = 0; i < req->match_num; i++) {
		match = &sreq->match_sets[i];
		memcpy(req->match[i].ssid, match->ssid.ssid,
		       match->ssid.ssid_len);
		req->match[i].rssi_th = cpu_to_le32(match->rssi_thold);
		req->match[i].ssid_len = match->ssid.ssid_len;
	}

	req->channel_type = sreq->n_channels ? 4 : 0;
	/* the request carries at most 64 channels */
	req->channels_num = min_t(u8, sreq->n_channels, 64);
	for (i = 0; i < req->channels_num; i++) {
		chan = &req->channels[i];
		chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
		chan->channel_num = scan_list[i]->hw_value;
	}

	req->intervals_num = sreq->n_scan_plans;
	for (i = 0; i < req->intervals_num; i++)
		req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval);

	if (sreq->ie_len > 0) {
		req->ie_len = cpu_to_le16(sreq->ie_len);
		memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len);
	}

	return mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_SCHED_SCAN_REQ, false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_req);
  1307. int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
  1308. struct ieee80211_vif *vif,
  1309. bool enable)
  1310. {
  1311. struct {
  1312. u8 active; /* 0: enabled 1: disabled */
  1313. u8 rsv[3];
  1314. } __packed req = {
  1315. .active = !enable,
  1316. };
  1317. if (enable)
  1318. set_bit(MT76_HW_SCHED_SCANNING, &phy->state);
  1319. else
  1320. clear_bit(MT76_HW_SCHED_SCANNING, &phy->state);
  1321. return mt76_mcu_send_msg(phy->dev, MCU_CMD_SCHED_SCAN_ENABLE, &req,
  1322. sizeof(req), false);
  1323. }
  1324. EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_enable);
  1325. int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
  1326. {
  1327. struct mt76_connac_config req = {
  1328. .resp_type = 0,
  1329. };
  1330. memcpy(req.data, "assert", 7);
  1331. return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
  1332. false);
  1333. }
  1334. EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config);
  1335. int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
  1336. {
  1337. struct mt76_connac_config req = {
  1338. .resp_type = 0,
  1339. };
  1340. snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !enable);
  1341. return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
  1342. false);
  1343. }
  1344. EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);
  1345. int mt76_connac_sta_state_dp(struct mt76_dev *dev,
  1346. enum ieee80211_sta_state old_state,
  1347. enum ieee80211_sta_state new_state)
  1348. {
  1349. if ((old_state == IEEE80211_STA_ASSOC &&
  1350. new_state == IEEE80211_STA_AUTHORIZED) ||
  1351. (old_state == IEEE80211_STA_NONE &&
  1352. new_state == IEEE80211_STA_NOTEXIST))
  1353. mt76_connac_mcu_set_deep_sleep(dev, true);
  1354. if ((old_state == IEEE80211_STA_NOTEXIST &&
  1355. new_state == IEEE80211_STA_NONE) ||
  1356. (old_state == IEEE80211_STA_AUTHORIZED &&
  1357. new_state == IEEE80211_STA_ASSOC))
  1358. mt76_connac_mcu_set_deep_sleep(dev, false);
  1359. return 0;
  1360. }
  1361. EXPORT_SYMBOL_GPL(mt76_connac_sta_state_dp);
  1362. void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
  1363. struct mt76_connac_coredump *coredump)
  1364. {
  1365. spin_lock_bh(&dev->lock);
  1366. __skb_queue_tail(&coredump->msg_list, skb);
  1367. spin_unlock_bh(&dev->lock);
  1368. coredump->last_activity = jiffies;
  1369. queue_delayed_work(dev->wq, &coredump->work,
  1370. MT76_CONNAC_COREDUMP_TIMEOUT);
  1371. }
  1372. EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);
/* Serialize @limits into the firmware SKU power table @sku
 * (MT_SKU_POWER_LIMIT bytes): CCK (2 GHz only), OFDM, HT, VHT and —
 * on mt7921 only — HE RU entries. Unused slots are pre-filled with the
 * chip's maximum power value.
 */
static void
mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
			  struct mt76_power_limits *limits,
			  enum nl80211_band band)
{
	int max_power = is_mt7921(dev) ? 127 : 63;
	int i, offset = sizeof(limits->cck);

	memset(sku, max_power, MT_SKU_POWER_LIMIT);

	if (band == NL80211_BAND_2GHZ) {
		/* cck */
		memcpy(sku, limits->cck, sizeof(limits->cck));
	}

	/* ofdm */
	memcpy(&sku[offset], limits->ofdm, sizeof(limits->ofdm));
	offset += sizeof(limits->ofdm);

	/* ht */
	for (i = 0; i < 2; i++) {
		memcpy(&sku[offset], limits->mcs[i], 8);
		offset += 8;
	}
	/* one extra HT slot reusing MCS0 — presumably MCS32; confirm
	 * against the firmware table layout
	 */
	sku[offset++] = limits->mcs[0][0];

	/* vht */
	for (i = 0; i < ARRAY_SIZE(limits->mcs); i++) {
		memcpy(&sku[offset], limits->mcs[i],
		       ARRAY_SIZE(limits->mcs[i]));
		/* firmware slot is 12 bytes wide regardless of the
		 * source row size
		 */
		offset += 12;
	}

	if (!is_mt7921(dev))
		return;

	/* he */
	for (i = 0; i < ARRAY_SIZE(limits->ru); i++) {
		memcpy(&sku[offset], limits->ru[i], ARRAY_SIZE(limits->ru[i]));
		offset += ARRAY_SIZE(limits->ru[i]);
	}
}
  1408. static int
  1409. mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
  1410. enum nl80211_band band)
  1411. {
  1412. struct mt76_dev *dev = phy->dev;
  1413. int sku_len, batch_len = is_mt7921(dev) ? 8 : 16;
  1414. static const u8 chan_list_2ghz[] = {
  1415. 1, 2, 3, 4, 5, 6, 7,
  1416. 8, 9, 10, 11, 12, 13, 14
  1417. };
  1418. static const u8 chan_list_5ghz[] = {
  1419. 36, 38, 40, 42, 44, 46, 48,
  1420. 50, 52, 54, 56, 58, 60, 62,
  1421. 64, 100, 102, 104, 106, 108, 110,
  1422. 112, 114, 116, 118, 120, 122, 124,
  1423. 126, 128, 132, 134, 136, 138, 140,
  1424. 142, 144, 149, 151, 153, 155, 157,
  1425. 159, 161, 165
  1426. };
  1427. struct mt76_connac_sku_tlv sku_tlbv;
  1428. int i, n_chan, batch_size, idx = 0;
  1429. struct mt76_power_limits limits;
  1430. const u8 *ch_list;
  1431. sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
  1432. if (band == NL80211_BAND_2GHZ) {
  1433. n_chan = ARRAY_SIZE(chan_list_2ghz);
  1434. ch_list = chan_list_2ghz;
  1435. } else {
  1436. n_chan = ARRAY_SIZE(chan_list_5ghz);
  1437. ch_list = chan_list_5ghz;
  1438. }
  1439. batch_size = DIV_ROUND_UP(n_chan, batch_len);
  1440. for (i = 0; i < batch_size; i++) {
  1441. bool last_msg = i == batch_size - 1;
  1442. int num_ch = last_msg ? n_chan % batch_len : batch_len;
  1443. struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {
  1444. .band = band == NL80211_BAND_2GHZ ? 1 : 2,
  1445. .n_chan = num_ch,
  1446. .last_msg = last_msg,
  1447. };
  1448. struct sk_buff *skb;
  1449. int j, err, msg_len;
  1450. msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
  1451. skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
  1452. if (!skb)
  1453. return -ENOMEM;
  1454. BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2));
  1455. memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2));
  1456. skb_put_data(skb, &tx_power_tlv, sizeof(tx_power_tlv));
  1457. for (j = 0; j < num_ch; j++, idx++) {
  1458. struct ieee80211_channel chan = {
  1459. .hw_value = ch_list[idx],
  1460. .band = band,
  1461. };
  1462. mt76_get_rate_power_limits(phy, &chan, &limits, 127);
  1463. sku_tlbv.channel = ch_list[idx];
  1464. mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit,
  1465. &limits, band);
  1466. skb_put_data(skb, &sku_tlbv, sku_len);
  1467. }
  1468. err = mt76_mcu_skb_send_msg(dev, skb,
  1469. MCU_CMD_SET_RATE_TX_POWER, false);
  1470. if (err < 0)
  1471. return err;
  1472. }
  1473. return 0;
  1474. }
  1475. int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
  1476. {
  1477. int err;
  1478. err = mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_2GHZ);
  1479. if (err < 0)
  1480. return err;
  1481. return mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_5GHZ);
  1482. }
  1483. EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);
  1484. int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
  1485. struct mt76_vif *vif,
  1486. struct ieee80211_bss_conf *info)
  1487. {
  1488. struct sk_buff *skb;
  1489. int i, len = min_t(int, info->arp_addr_cnt,
  1490. IEEE80211_BSS_ARP_ADDR_LIST_LEN);
  1491. struct {
  1492. struct {
  1493. u8 bss_idx;
  1494. u8 pad[3];
  1495. } __packed hdr;
  1496. struct mt76_connac_arpns_tlv arp;
  1497. } req_hdr = {
  1498. .hdr = {
  1499. .bss_idx = vif->idx,
  1500. },
  1501. .arp = {
  1502. .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
  1503. .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
  1504. .ips_num = len,
  1505. .mode = 2, /* update */
  1506. .option = 1,
  1507. },
  1508. };
  1509. skb = mt76_mcu_msg_alloc(dev, NULL,
  1510. sizeof(req_hdr) + len * sizeof(__be32));
  1511. if (!skb)
  1512. return -ENOMEM;
  1513. skb_put_data(skb, &req_hdr, sizeof(req_hdr));
  1514. for (i = 0; i < len; i++) {
  1515. u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
  1516. memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
  1517. }
  1518. return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_OFFLOAD, true);
  1519. }
  1520. EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter);
#ifdef CONFIG_PM

/* Wake-on-WLAN capabilities advertised to cfg80211: magic packet,
 * disconnect, GTK rekey and net-detect triggers, with a single
 * wake pattern slot.
 */
const struct wiphy_wowlan_support mt76_connac_wowlan_support = {
	.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_NET_DETECT,
	.n_patterns = 1,
	.pattern_min_len = 1,
	.pattern_max_len = MT76_CONNAC_WOW_PATTEN_MAX_LEN,
	.max_nd_match_sets = 10,
};
EXPORT_SYMBOL_GPL(mt76_connac_wowlan_support);
  1531. static void
  1532. mt76_connac_mcu_key_iter(struct ieee80211_hw *hw,
  1533. struct ieee80211_vif *vif,
  1534. struct ieee80211_sta *sta,
  1535. struct ieee80211_key_conf *key,
  1536. void *data)
  1537. {
  1538. struct mt76_connac_gtk_rekey_tlv *gtk_tlv = data;
  1539. u32 cipher;
  1540. if (key->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
  1541. key->cipher != WLAN_CIPHER_SUITE_CCMP &&
  1542. key->cipher != WLAN_CIPHER_SUITE_TKIP)
  1543. return;
  1544. if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
  1545. gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1);
  1546. cipher = BIT(3);
  1547. } else {
  1548. gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2);
  1549. cipher = BIT(4);
  1550. }
  1551. /* we are assuming here to have a single pairwise key */
  1552. if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
  1553. gtk_tlv->pairwise_cipher = cpu_to_le32(cipher);
  1554. gtk_tlv->group_cipher = cpu_to_le32(cipher);
  1555. gtk_tlv->keyid = key->keyidx;
  1556. }
  1557. }
  1558. int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
  1559. struct ieee80211_vif *vif,
  1560. struct cfg80211_gtk_rekey_data *key)
  1561. {
  1562. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  1563. struct mt76_connac_gtk_rekey_tlv *gtk_tlv;
  1564. struct mt76_phy *phy = hw->priv;
  1565. struct sk_buff *skb;
  1566. struct {
  1567. u8 bss_idx;
  1568. u8 pad[3];
  1569. } __packed hdr = {
  1570. .bss_idx = mvif->idx,
  1571. };
  1572. skb = mt76_mcu_msg_alloc(phy->dev, NULL,
  1573. sizeof(hdr) + sizeof(*gtk_tlv));
  1574. if (!skb)
  1575. return -ENOMEM;
  1576. skb_put_data(skb, &hdr, sizeof(hdr));
  1577. gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
  1578. sizeof(*gtk_tlv));
  1579. gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
  1580. gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
  1581. gtk_tlv->rekey_mode = 2;
  1582. gtk_tlv->option = 1;
  1583. rcu_read_lock();
  1584. ieee80211_iter_keys_rcu(hw, vif, mt76_connac_mcu_key_iter, gtk_tlv);
  1585. rcu_read_unlock();
  1586. memcpy(gtk_tlv->kek, key->kek, NL80211_KEK_LEN);
  1587. memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN);
  1588. memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN);
  1589. return mt76_mcu_skb_send_msg(phy->dev, skb, MCU_UNI_CMD_OFFLOAD, true);
  1590. }
  1591. EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_gtk_rekey);
  1592. static int
  1593. mt76_connac_mcu_set_arp_filter(struct mt76_dev *dev, struct ieee80211_vif *vif,
  1594. bool suspend)
  1595. {
  1596. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  1597. struct {
  1598. struct {
  1599. u8 bss_idx;
  1600. u8 pad[3];
  1601. } __packed hdr;
  1602. struct mt76_connac_arpns_tlv arpns;
  1603. } req = {
  1604. .hdr = {
  1605. .bss_idx = mvif->idx,
  1606. },
  1607. .arpns = {
  1608. .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
  1609. .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
  1610. .mode = suspend,
  1611. },
  1612. };
  1613. return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
  1614. true);
  1615. }
  1616. static int
  1617. mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif,
  1618. bool suspend)
  1619. {
  1620. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  1621. struct {
  1622. struct {
  1623. u8 bss_idx;
  1624. u8 pad[3];
  1625. } __packed hdr;
  1626. struct mt76_connac_gtk_rekey_tlv gtk_tlv;
  1627. } __packed req = {
  1628. .hdr = {
  1629. .bss_idx = mvif->idx,
  1630. },
  1631. .gtk_tlv = {
  1632. .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY),
  1633. .len = cpu_to_le16(sizeof(struct mt76_connac_gtk_rekey_tlv)),
  1634. .rekey_mode = !suspend,
  1635. },
  1636. };
  1637. return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
  1638. true);
  1639. }
  1640. static int
  1641. mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev,
  1642. struct ieee80211_vif *vif,
  1643. bool enable, u8 mdtim,
  1644. bool wow_suspend)
  1645. {
  1646. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  1647. struct {
  1648. struct {
  1649. u8 bss_idx;
  1650. u8 pad[3];
  1651. } __packed hdr;
  1652. struct mt76_connac_suspend_tlv suspend_tlv;
  1653. } req = {
  1654. .hdr = {
  1655. .bss_idx = mvif->idx,
  1656. },
  1657. .suspend_tlv = {
  1658. .tag = cpu_to_le16(UNI_SUSPEND_MODE_SETTING),
  1659. .len = cpu_to_le16(sizeof(struct mt76_connac_suspend_tlv)),
  1660. .enable = enable,
  1661. .mdtim = mdtim,
  1662. .wow_suspend = wow_suspend,
  1663. },
  1664. };
  1665. return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req),
  1666. true);
  1667. }
  1668. static int
  1669. mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
  1670. struct ieee80211_vif *vif,
  1671. u8 index, bool enable,
  1672. struct cfg80211_pkt_pattern *pattern)
  1673. {
  1674. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  1675. struct mt76_connac_wow_pattern_tlv *ptlv;
  1676. struct sk_buff *skb;
  1677. struct req_hdr {
  1678. u8 bss_idx;
  1679. u8 pad[3];
  1680. } __packed hdr = {
  1681. .bss_idx = mvif->idx,
  1682. };
  1683. skb = mt76_mcu_msg_alloc(dev, NULL, sizeof(hdr) + sizeof(*ptlv));
  1684. if (!skb)
  1685. return -ENOMEM;
  1686. skb_put_data(skb, &hdr, sizeof(hdr));
  1687. ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
  1688. ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
  1689. ptlv->len = cpu_to_le16(sizeof(*ptlv));
  1690. ptlv->data_len = pattern->pattern_len;
  1691. ptlv->enable = enable;
  1692. ptlv->index = index;
  1693. memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
  1694. memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8));
  1695. return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
  1696. }
/* Build and send the UNI_SUSPEND WoW control message: a bss header, a
 * wow_ctrl TLV carrying the wake triggers and the wakeup interface,
 * and a gpio_param TLV. @suspend selects cmd 1 (enter WoW) vs 2
 * (leave WoW); the trigger bits mirror the cfg80211_wowlan config.
 */
static int
mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
			     bool suspend, struct cfg80211_wowlan *wowlan)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_dev *dev = phy->dev;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_wow_ctrl_tlv wow_ctrl_tlv;
		struct mt76_connac_wow_gpio_param_tlv gpio_tlv;
	} req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.wow_ctrl_tlv = {
			.tag = cpu_to_le16(UNI_SUSPEND_WOW_CTRL),
			.len = cpu_to_le16(sizeof(struct mt76_connac_wow_ctrl_tlv)),
			.cmd = suspend ? 1 : 2,
		},
		.gpio_tlv = {
			.tag = cpu_to_le16(UNI_SUSPEND_WOW_GPIO_PARAM),
			.len = cpu_to_le16(sizeof(struct mt76_connac_wow_gpio_param_tlv)),
			.gpio_pin = 0xff, /* follow fw about GPIO pin */
		},
	};

	if (wowlan->magic_pkt)
		req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC;
	if (wowlan->disconnect)
		req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT |
					     UNI_WOW_DETECT_TYPE_BCN_LOST);
	if (wowlan->nd_config) {
		/* program the scheduled scan used for net-detect before
		 * arming the sched-scan-hit trigger
		 */
		mt76_connac_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
		req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT;
		mt76_connac_mcu_sched_scan_enable(phy, vif, suspend);
	}
	if (wowlan->n_patterns)
		req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP;

	/* tell the firmware which host interface to wake; wakeup_hif is
	 * left zero-initialized if the bus type is not recognized
	 */
	if (mt76_is_mmio(dev))
		req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
	else if (mt76_is_usb(dev))
		req.wow_ctrl_tlv.wakeup_hif = WOW_USB;
	else if (mt76_is_sdio(dev))
		req.wow_ctrl_tlv.wakeup_hif = WOW_GPIO;

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req),
				 true);
}
  1746. int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
  1747. {
  1748. struct {
  1749. struct {
  1750. u8 hif_type; /* 0x0: HIF_SDIO
  1751. * 0x1: HIF_USB
  1752. * 0x2: HIF_PCIE
  1753. */
  1754. u8 pad[3];
  1755. } __packed hdr;
  1756. struct hif_suspend_tlv {
  1757. __le16 tag;
  1758. __le16 len;
  1759. u8 suspend;
  1760. } __packed hif_suspend;
  1761. } req = {
  1762. .hif_suspend = {
  1763. .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
  1764. .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
  1765. .suspend = suspend,
  1766. },
  1767. };
  1768. if (mt76_is_mmio(dev))
  1769. req.hdr.hif_type = 2;
  1770. else if (mt76_is_usb(dev))
  1771. req.hdr.hif_type = 1;
  1772. else if (mt76_is_sdio(dev))
  1773. req.hdr.hif_type = 0;
  1774. return mt76_mcu_send_msg(dev, MCU_UNI_CMD_HIF_CTRL, &req, sizeof(req),
  1775. true);
  1776. }
  1777. EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_hif_suspend);
  1778. void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
  1779. struct ieee80211_vif *vif)
  1780. {
  1781. struct mt76_phy *phy = priv;
  1782. bool suspend = test_bit(MT76_STATE_SUSPEND, &phy->state);
  1783. struct ieee80211_hw *hw = phy->hw;
  1784. struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
  1785. int i;
  1786. mt76_connac_mcu_set_gtk_rekey(phy->dev, vif, suspend);
  1787. mt76_connac_mcu_set_arp_filter(phy->dev, vif, suspend);
  1788. mt76_connac_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true);
  1789. for (i = 0; i < wowlan->n_patterns; i++)
  1790. mt76_connac_mcu_set_wow_pattern(phy->dev, vif, i, suspend,
  1791. &wowlan->patterns[i]);
  1792. mt76_connac_mcu_set_wow_ctrl(phy, vif, suspend, wowlan);
  1793. }
  1794. EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_suspend_iter);
  1795. #endif /* CONFIG_PM */
  1796. MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
  1797. MODULE_LICENSE("Dual BSD/GPL");