// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt76_connac_mcu.h"

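/* Firmware download helpers: kick firmware execution, acquire/release the
 * ROM patch semaphore, finalize a patch download and announce the target
 * address/length/mode of the next download chunk.
 */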
int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
{
	struct {
		__le32 option;
		__le32 addr;
	} req = {
		.option = cpu_to_le32(option),
		.addr = cpu_to_le32(addr),
	};

	return mt76_mcu_send_msg(dev, MCU_CMD_FW_START_REQ, &req, sizeof(req),
				 true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_firmware);

int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get)
{
	u32 op = get ? PATCH_SEM_GET : PATCH_SEM_RELEASE;
	struct {
		__le32 op;
	} req = {
		.op = cpu_to_le32(op),
	};

	return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_SEM_CONTROL, &req,
				 sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_patch_sem_ctrl);

int mt76_connac_mcu_start_patch(struct mt76_dev *dev)
{
	struct {
		u8 check_crc;
		u8 reserved[3];
	} req = {
		.check_crc = 0,
	};

	return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_FINISH_REQ, &req,
				 sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_patch);

#define MCU_PATCH_ADDRESS	0x200000

int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
				  u32 mode)
{
	struct {
		__le32 addr;
		__le32 len;
		__le32 mode;
	} req = {
		.addr = cpu_to_le32(addr),
		.len = cpu_to_le32(len),
		.mode = cpu_to_le32(mode),
	};
	int cmd;

	if (is_mt7921(dev) &&
	    (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000))
		cmd = MCU_CMD_PATCH_START_REQ;
	else
		cmd = MCU_CMD_TARGET_ADDRESS_LEN_REQ;

	return mt76_mcu_send_msg(dev, cmd, &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_init_download);

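/* Build the list of enabled 2 GHz and 5 GHz channels for the current
 * regulatory domain (dev->alpha2) and push it to the firmware with
 * MCU_CMD_SET_CHAN_DOMAIN.
 */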
int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_connac_mcu_channel_domain {
		u8 alpha2[4]; /* regulatory_request.alpha2 */
		u8 bw_2g; /* BW_20_40M		0
			   * BW_20M		1
			   * BW_20_40_80M	2
			   * BW_20_40_80_160M	3
			   * BW_20_40_80_8080M	4
			   */
		u8 bw_5g;
		__le16 pad;
		u8 n_2ch;
		u8 n_5ch;
		__le16 pad2;
	} __packed hdr = {
		.bw_2g = 0,
		.bw_5g = 3,
	};
	struct mt76_connac_mcu_chan {
		__le16 hw_value;
		__le16 pad;
		__le32 flags;
	} __packed channel;
	int len, i, n_max_channels, n_2ch = 0, n_5ch = 0;
	struct ieee80211_channel *chan;
	struct sk_buff *skb;

	n_max_channels = phy->sband_2g.sband.n_channels +
			 phy->sband_5g.sband.n_channels;
	len = sizeof(hdr) + n_max_channels * sizeof(channel);

	skb = mt76_mcu_msg_alloc(dev, NULL, len);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, sizeof(hdr));

	for (i = 0; i < phy->sband_2g.sband.n_channels; i++) {
		chan = &phy->sband_2g.sband.channels[i];
		if (chan->flags & IEEE80211_CHAN_DISABLED)
			continue;

		channel.hw_value = cpu_to_le16(chan->hw_value);
		channel.flags = cpu_to_le32(chan->flags);
		channel.pad = 0;

		skb_put_data(skb, &channel, sizeof(channel));
		n_2ch++;
	}
	for (i = 0; i < phy->sband_5g.sband.n_channels; i++) {
		chan = &phy->sband_5g.sband.channels[i];
		if (chan->flags & IEEE80211_CHAN_DISABLED)
			continue;

		channel.hw_value = cpu_to_le16(chan->hw_value);
		channel.flags = cpu_to_le32(chan->flags);
		channel.pad = 0;

		skb_put_data(skb, &channel, sizeof(channel));
		n_5ch++;
	}

	BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(hdr.alpha2));
	memcpy(hdr.alpha2, dev->alpha2, sizeof(dev->alpha2));
	hdr.n_2ch = n_2ch;
	hdr.n_5ch = n_5ch;

	memcpy(__skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));

	return mt76_mcu_skb_send_msg(dev, skb, MCU_CMD_SET_CHAN_DOMAIN, false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_channel_domain);

int mt76_connac_mcu_set_mac_enable(struct mt76_dev *dev, int band, bool enable,
				   bool hdr_trans)
{
	struct {
		u8 enable;
		u8 band;
		u8 rsv[2];
	} __packed req_mac = {
		.enable = enable,
		.band = band,
	};

	return mt76_mcu_send_msg(dev, MCU_EXT_CMD_MAC_INIT_CTRL, &req_mac,
				 sizeof(req_mac), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_mac_enable);

int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		u8 bss_idx;
		u8 ps_state; /* 0: device awake
			      * 1: static power save
			      * 2: dynamic power saving
			      */
	} req = {
		.bss_idx = mvif->idx,
		.ps_state = vif->bss_conf.ps ? 2 : 0,
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	return mt76_mcu_send_msg(dev, MCU_CMD_SET_PS_PROFILE, &req,
				 sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_vif_ps);

int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band)
{
	struct {
		u8 prot_idx;
		u8 band;
		u8 rsv[2];
		__le32 len_thresh;
		__le32 pkt_thresh;
	} __packed req = {
		.prot_idx = 1,
		.band = band,
		.len_thresh = cpu_to_le32(val),
		.pkt_thresh = cpu_to_le32(0x2),
	};

	return mt76_mcu_send_msg(dev, MCU_EXT_CMD_PROTECT_CTRL, &req,
				 sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rts_thresh);

void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
				      struct ieee80211_vif *vif)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_connac_beacon_loss_event *event = priv;

	if (mvif->idx != event->bss_idx)
		return;

	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
		return;

	ieee80211_beacon_loss(vif);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_beacon_loss_iter);

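/* TLV helpers: append a nested WTBL TLV (bumping the parent TLV counters),
 * allocate an STA_REC update message, and allocate or extend a WTBL update
 * message.
 */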
struct tlv *
mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
			       void *sta_ntlv, void *sta_wtbl)
{
	struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
	struct tlv *sta_hdr = sta_wtbl;
	struct tlv *ptlv, tlv = {
		.tag = cpu_to_le16(tag),
		.len = cpu_to_le16(len),
	};
	u16 ntlv;

	ptlv = skb_put(skb, len);
	memcpy(ptlv, &tlv, sizeof(tlv));

	ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
	ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);

	if (sta_hdr) {
		u16 size = le16_to_cpu(sta_hdr->len);

		sta_hdr->len = cpu_to_le16(size + len);
	}

	return ptlv;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);

struct sk_buff *
mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
			      struct mt76_wcid *wcid)
{
	struct sta_req_hdr hdr = {
		.bss_idx = mvif->idx,
		.muar_idx = wcid ? mvif->omac_idx : 0,
		.is_tlv_append = 1,
	};
	struct sk_buff *skb;

	mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
				     &hdr.wlan_idx_hi);
	skb = mt76_mcu_msg_alloc(dev, NULL, MT76_CONNAC_STA_UPDATE_MAX_SIZE);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_put_data(skb, &hdr, sizeof(hdr));

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_sta_req);

struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
			       int cmd, void *sta_wtbl, struct sk_buff **skb)
{
	struct tlv *sta_hdr = sta_wtbl;
	struct wtbl_req_hdr hdr = {
		.operation = cmd,
	};
	struct sk_buff *nskb = *skb;

	mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
				     &hdr.wlan_idx_hi);
	if (!nskb) {
		nskb = mt76_mcu_msg_alloc(dev, NULL,
					  MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
		if (!nskb)
			return ERR_PTR(-ENOMEM);

		*skb = nskb;
	}

	if (sta_hdr)
		sta_hdr->len = cpu_to_le16(sizeof(hdr));

	return skb_put_data(nskb, &hdr, sizeof(hdr));
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);

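/* Fill the STA_REC_BASIC TLV: connection state, connection type derived from
 * the interface type (AP/mesh/STA/IBSS, plus P2P GO/GC), AID and peer
 * address. Without a station this describes the broadcast/multicast entry.
 */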
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   bool enable, bool newly)
{
	struct sta_rec_basic *basic;
	struct tlv *tlv;
	int conn_type;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));

	basic = (struct sta_rec_basic *)tlv;
	basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);

	if (enable) {
		if (newly)
			basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
		basic->conn_state = CONN_STATE_PORT_SECURE;
	} else {
		basic->conn_state = CONN_STATE_DISCONNECT;
	}

	if (!sta) {
		basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
		eth_broadcast_addr(basic->peer_addr);
		return;
	}

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_AP:
		if (vif->p2p)
			conn_type = CONNECTION_P2P_GC;
		else
			conn_type = CONNECTION_INFRA_STA;
		basic->conn_type = cpu_to_le32(conn_type);
		basic->aid = cpu_to_le16(sta->aid);
		break;
	case NL80211_IFTYPE_STATION:
		if (vif->p2p)
			conn_type = CONNECTION_P2P_GO;
		else
			conn_type = CONNECTION_INFRA_AP;
		basic->conn_type = cpu_to_le32(conn_type);
		basic->aid = cpu_to_le16(vif->bss_conf.aid);
		break;
	case NL80211_IFTYPE_ADHOC:
		basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
		basic->aid = cpu_to_le16(sta->aid);
		break;
	default:
		WARN_ON(1);
		break;
	}

	memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
	basic->qos = sta->wme;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);

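/* U-APSD configuration for AP interfaces: map the WMM QoS info AC bits to the
 * delivery/trigger bitmaps (VO -> BIT(3), VI -> BIT(2), BE -> BIT(1),
 * BK -> BIT(0)) and copy the station's max service period length.
 */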
static void
mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta)
{
	struct sta_rec_uapsd *uapsd;
	struct tlv *tlv;

	if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
		return;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
	uapsd = (struct sta_rec_uapsd *)tlv;

	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
		uapsd->dac_map |= BIT(3);
		uapsd->tac_map |= BIT(3);
	}
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
		uapsd->dac_map |= BIT(2);
		uapsd->tac_map |= BIT(2);
	}
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
		uapsd->dac_map |= BIT(1);
		uapsd->tac_map |= BIT(1);
	}
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
		uapsd->dac_map |= BIT(0);
		uapsd->tac_map |= BIT(0);
	}
	uapsd->max_sp = sta->max_sp;
}

void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
					struct ieee80211_vif *vif,
					struct mt76_wcid *wcid,
					void *sta_wtbl, void *wtbl_tlv)
{
	struct wtbl_hdr_trans *htr;
	struct tlv *tlv;

	tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS,
					     sizeof(*htr),
					     wtbl_tlv, sta_wtbl);
	htr = (struct wtbl_hdr_trans *)tlv;
	htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);

	if (vif->type == NL80211_IFTYPE_STATION)
		htr->to_ds = true;
	else
		htr->from_ds = true;

	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
		htr->to_ds = true;
		htr->from_ds = true;
	}
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_hdr_trans_tlv);

int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
					 struct ieee80211_vif *vif,
					 struct mt76_wcid *wcid, int cmd)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct wtbl_req_hdr *wtbl_hdr;
	struct tlv *sta_wtbl;
	struct sk_buff *skb;

	skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
					   sizeof(struct tlv));

	wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
						  sta_wtbl, &skb);
	if (IS_ERR(wtbl_hdr))
		return PTR_ERR(wtbl_hdr);

	mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, sta_wtbl, wtbl_hdr);

	return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);

void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
				      struct sk_buff *skb,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta,
				      void *sta_wtbl, void *wtbl_tlv)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct wtbl_generic *generic;
	struct wtbl_rx *rx;
	struct wtbl_spe *spe;
	struct tlv *tlv;

	tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_GENERIC,
					     sizeof(*generic),
					     wtbl_tlv, sta_wtbl);

	generic = (struct wtbl_generic *)tlv;

	if (sta) {
		if (vif->type == NL80211_IFTYPE_STATION)
			generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
		else
			generic->partial_aid = cpu_to_le16(sta->aid);
		memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
		generic->muar_idx = mvif->omac_idx;
		generic->qos = sta->wme;
	} else {
		if (is_mt7921(dev) &&
		    vif->type == NL80211_IFTYPE_STATION)
			memcpy(generic->peer_addr, vif->bss_conf.bssid,
			       ETH_ALEN);
		else
			eth_broadcast_addr(generic->peer_addr);
		generic->muar_idx = 0xe;
	}

	tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
					     wtbl_tlv, sta_wtbl);

	rx = (struct wtbl_rx *)tlv;
	rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
	rx->rca2 = 1;
	rx->rv = 1;

	if (is_mt7921(dev))
		return;

	tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
					     wtbl_tlv, sta_wtbl);
	spe = (struct wtbl_spe *)tlv;
	spe->spe_idx = 24;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_generic_tlv);

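/* Enable hardware A-MSDU aggregation for AP/STA peers that advertise a
 * non-zero max A-MSDU length; max_mpdu_size is set when the peer supports at
 * least the VHT 7991-byte MPDU size.
 */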
static void
mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
			      struct ieee80211_vif *vif)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct sta_rec_amsdu *amsdu;
	struct tlv *tlv;

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!sta->max_amsdu_len)
		return;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
	amsdu = (struct sta_rec_amsdu *)tlv;
	amsdu->max_amsdu_num = 8;
	amsdu->amsdu_en = true;
	amsdu->max_mpdu_size = sta->max_amsdu_len >=
			       IEEE80211_MAX_MPDU_LEN_VHT_7991;
	wcid->amsdu = true;
}

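/* Translate the peer's HE MAC/PHY capability bits and MCS maps into the
 * STA_REC_HE TLV consumed by the firmware.
 */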
#define HE_PHY(p, c)	u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c)	u8_get_bits(c, IEEE80211_HE_MAC_##m)
static void
mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
	struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
	struct sta_rec_he *he;
	struct tlv *tlv;
	u32 cap = 0;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));

	he = (struct sta_rec_he *)tlv;

	if (elem->mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
		cap |= STA_REC_HE_CAP_HTC;

	if (elem->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
		cap |= STA_REC_HE_CAP_BSR;

	if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
		cap |= STA_REC_HE_CAP_OM;

	if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU)
		cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU;

	if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
		cap |= STA_REC_HE_CAP_BQR;

	if (elem->phy_cap_info[0] &
	    (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G |
	     IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
		cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;

	if (elem->phy_cap_info[1] &
	    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
		cap |= STA_REC_HE_CAP_LDPC;

	if (elem->phy_cap_info[1] &
	    IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US)
		cap |= STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI;

	if (elem->phy_cap_info[2] &
	    IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US)
		cap |= STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI;

	if (elem->phy_cap_info[2] &
	    IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ)
		cap |= STA_REC_HE_CAP_LE_EQ_80M_TX_STBC;

	if (elem->phy_cap_info[2] &
	    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
		cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC;

	if (elem->phy_cap_info[6] &
	    IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE)
		cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE;

	if (elem->phy_cap_info[7] &
	    IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
		cap |= STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI;

	if (elem->phy_cap_info[7] &
	    IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ)
		cap |= STA_REC_HE_CAP_GT_80M_TX_STBC;

	if (elem->phy_cap_info[7] &
	    IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
		cap |= STA_REC_HE_CAP_GT_80M_RX_STBC;

	if (elem->phy_cap_info[8] &
	    IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
		cap |= STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI;

	if (elem->phy_cap_info[8] &
	    IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI)
		cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI;

	if (elem->phy_cap_info[9] &
	    IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK)
		cap |= STA_REC_HE_CAP_TRIG_CQI_FK;

	if (elem->phy_cap_info[9] &
	    IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU)
		cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242;

	if (elem->phy_cap_info[9] &
	    IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU)
		cap |= STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242;

	he->he_cap = cpu_to_le32(cap);

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		if (elem->phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			he->max_nss_mcs[CMD_HE_MCS_BW8080] =
				he_cap->he_mcs_nss_supp.rx_mcs_80p80;

		he->max_nss_mcs[CMD_HE_MCS_BW160] =
				he_cap->he_mcs_nss_supp.rx_mcs_160;
		fallthrough;
	default:
		he->max_nss_mcs[CMD_HE_MCS_BW80] =
				he_cap->he_mcs_nss_supp.rx_mcs_80;
		break;
	}

	he->t_frame_dur =
		HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
	he->max_ampdu_exp =
		HE_MAC(CAP3_MAX_AMPDU_LEN_EXP_MASK, elem->mac_cap_info[3]);
	he->bw_set =
		HE_PHY(CAP0_CHANNEL_WIDTH_SET_MASK, elem->phy_cap_info[0]);
	he->device_class =
		HE_PHY(CAP1_DEVICE_CLASS_A, elem->phy_cap_info[1]);
	he->punc_pream_rx =
		HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
	he->dcm_tx_mode =
		HE_PHY(CAP3_DCM_MAX_CONST_TX_MASK, elem->phy_cap_info[3]);
	he->dcm_tx_max_nss =
		HE_PHY(CAP3_DCM_MAX_TX_NSS_2, elem->phy_cap_info[3]);
	he->dcm_rx_mode =
		HE_PHY(CAP3_DCM_MAX_CONST_RX_MASK, elem->phy_cap_info[3]);
	he->dcm_rx_max_nss =
		HE_PHY(CAP3_DCM_MAX_RX_NSS_2, elem->phy_cap_info[3]);
	he->dcm_rx_max_nss =
		HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]);
	he->pkt_ext = 2;
}

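/* Compute the v2 PHY type bitmap (PHY_TYPE_BIT_*) from the peer's HT/VHT/HE
 * capabilities, or from the band capabilities when no station is given.
 */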
static u8
mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
			    enum nl80211_band band, struct ieee80211_sta *sta)
{
	struct ieee80211_sta_ht_cap *ht_cap;
	struct ieee80211_sta_vht_cap *vht_cap;
	const struct ieee80211_sta_he_cap *he_cap;
	u8 mode = 0;

	if (sta) {
		ht_cap = &sta->ht_cap;
		vht_cap = &sta->vht_cap;
		he_cap = &sta->he_cap;
	} else {
		struct ieee80211_supported_band *sband;

		sband = mphy->hw->wiphy->bands[band];
		ht_cap = &sband->ht_cap;
		vht_cap = &sband->vht_cap;
		he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
	}

	if (band == NL80211_BAND_2GHZ) {
		mode |= PHY_TYPE_BIT_HR_DSSS | PHY_TYPE_BIT_ERP;

		if (ht_cap->ht_supported)
			mode |= PHY_TYPE_BIT_HT;

		if (he_cap->has_he)
			mode |= PHY_TYPE_BIT_HE;
	} else if (band == NL80211_BAND_5GHZ) {
		mode |= PHY_TYPE_BIT_OFDM;

		if (ht_cap->ht_supported)
			mode |= PHY_TYPE_BIT_HT;

		if (vht_cap->vht_supported)
			mode |= PHY_TYPE_BIT_VHT;

		if (he_cap->has_he)
			mode |= PHY_TYPE_BIT_HE;
	}

	return mode;
}

void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
			     struct ieee80211_sta *sta,
			     struct ieee80211_vif *vif,
			     u8 rcpi, u8 sta_state)
{
	struct cfg80211_chan_def *chandef = &mphy->chandef;
	enum nl80211_band band = chandef->chan->band;
	struct mt76_dev *dev = mphy->dev;
	struct sta_rec_ra_info *ra_info;
	struct sta_rec_state *state;
	struct sta_rec_phy *phy;
	struct tlv *tlv;

	/* starec ht */
	if (sta->ht_cap.ht_supported) {
		struct sta_rec_ht *ht;

		tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
		ht = (struct sta_rec_ht *)tlv;
		ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
	}

	/* starec vht */
	if (sta->vht_cap.vht_supported) {
		struct sta_rec_vht *vht;
		int len;

		len = is_mt7921(dev) ? sizeof(*vht) : sizeof(*vht) - 4;
		tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, len);
		vht = (struct sta_rec_vht *)tlv;
		vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
		vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
		vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
	}

	/* starec uapsd */
	mt76_connac_mcu_sta_uapsd(skb, vif, sta);

	if (!is_mt7921(dev))
		return;

	if (sta->ht_cap.ht_supported)
		mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif);

	/* starec he */
	if (sta->he_cap.has_he)
		mt76_connac_mcu_sta_he_tlv(skb, sta);

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
	phy = (struct sta_rec_phy *)tlv;
	phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
	phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
	phy->rcpi = rcpi;
	phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
				sta->ht_cap.ampdu_factor) |
		     FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
				sta->ht_cap.ampdu_density);

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
	ra_info = (struct sta_rec_ra_info *)tlv;
	ra_info->legacy = cpu_to_le16((u16)sta->supp_rates[band]);

	if (sta->ht_cap.ht_supported)
		memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask,
		       HT_MCS_MASK_NUM);

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
	state = (struct sta_rec_state *)tlv;
	state->state = sta_state;

	if (sta->vht_cap.vht_supported) {
		state->vht_opmode = sta->bandwidth;
		state->vht_opmode |= (sta->rx_nss - 1) <<
			IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
	}
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_tlv);

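/* WTBL HT/VHT/SMPS TLVs: LDPC, A-MPDU factor/density and SMPS mode; on
 * non-MT7921 chips the short-GI flags are written through a raw WTBL DW5
 * update for HT-capable peers.
 */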
static void
mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
			      void *sta_wtbl, void *wtbl_tlv)
{
	struct wtbl_smps *smps;
	struct tlv *tlv;

	tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
					     wtbl_tlv, sta_wtbl);
	smps = (struct wtbl_smps *)tlv;

	if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		smps->smps = true;
}

void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
				 struct ieee80211_sta *sta, void *sta_wtbl,
				 void *wtbl_tlv)
{
	struct wtbl_ht *ht = NULL;
	struct tlv *tlv;
	u32 flags = 0;

	if (sta->ht_cap.ht_supported) {
		tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
						     wtbl_tlv, sta_wtbl);
		ht = (struct wtbl_ht *)tlv;
		ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
		ht->af = sta->ht_cap.ampdu_factor;
		ht->mm = sta->ht_cap.ampdu_density;
		ht->ht = true;
	}

	if (sta->vht_cap.vht_supported) {
		struct wtbl_vht *vht;
		u8 af;

		tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_VHT,
						     sizeof(*vht), wtbl_tlv,
						     sta_wtbl);
		vht = (struct wtbl_vht *)tlv;
		vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
		vht->vht = true;

		af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
			       sta->vht_cap.cap);
		if (ht)
			ht->af = max(ht->af, af);
	}

	mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);

	if (!is_mt7921(dev) && sta->ht_cap.ht_supported) {
		/* sgi */
		u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
			  MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
		struct wtbl_raw *raw;

		tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_RAW_DATA,
						     sizeof(*raw), wtbl_tlv,
						     sta_wtbl);

		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
			flags |= MT_WTBL_W5_SHORT_GI_20;
		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
			flags |= MT_WTBL_W5_SHORT_GI_40;

		if (sta->vht_cap.vht_supported) {
			if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
				flags |= MT_WTBL_W5_SHORT_GI_80;
			if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
				flags |= MT_WTBL_W5_SHORT_GI_160;
		}
		raw = (struct wtbl_raw *)tlv;
		raw->val = cpu_to_le32(flags);
		raw->msk = cpu_to_le32(~msk);
		raw->wtbl_idx = 1;
		raw->dw = 5;
	}
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);

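/* Build and send a complete station update described by
 * struct mt76_sta_cmd_info: STA_REC basic/capability TLVs plus the nested
 * WTBL TLVs, all in a single MCU message.
 */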
int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
			    struct mt76_sta_cmd_info *info)
{
	struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
	struct mt76_dev *dev = phy->dev;
	struct wtbl_req_hdr *wtbl_hdr;
	struct tlv *sta_wtbl;
	struct sk_buff *skb;

	skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (info->sta || !info->offload_fw)
		mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta,
					      info->enable, info->newly);
	if (info->sta && info->enable)
		mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
					info->vif, info->rcpi,
					info->state);

	sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
					   sizeof(struct tlv));

	wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid,
						  WTBL_RESET_AND_SET,
						  sta_wtbl, &skb);
	if (IS_ERR(wtbl_hdr))
		return PTR_ERR(wtbl_hdr);

	if (info->enable) {
		mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
						 info->sta, sta_wtbl,
						 wtbl_hdr);
		mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
						   sta_wtbl, wtbl_hdr);
		if (info->sta)
			mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
						    sta_wtbl, wtbl_hdr);
	}

	return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_cmd);

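/* Program a WTBL block-ack session: originator (TX) entries carry SSN and
 * window size, recipient (RX) entries reset the per-TID BA state for the
 * peer address.
 */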
void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
				 struct ieee80211_ampdu_params *params,
				 bool enable, bool tx, void *sta_wtbl,
				 void *wtbl_tlv)
{
	struct wtbl_ba *ba;
	struct tlv *tlv;

	tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
					     wtbl_tlv, sta_wtbl);

	ba = (struct wtbl_ba *)tlv;
	ba->tid = params->tid;

	if (tx) {
		ba->ba_type = MT_BA_TYPE_ORIGINATOR;
		ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
		ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
		ba->ba_en = enable;
	} else {
		memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
		ba->ba_type = MT_BA_TYPE_RECIPIENT;
		ba->rst_ba_tid = params->tid;
		ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
		ba->rst_ba_sb = 1;
	}

	if (is_mt7921(dev)) {
		ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
		return;
	}

	if (enable && tx) {
		u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
		int i;

		for (i = 7; i > 0; i--) {
			if (params->buf_size >= ba_range[i])
				break;
		}
		ba->ba_winsize_idx = i;
	}
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv);

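/* Register or remove a device/BSS pair through the UNI commands: on enable
 * the DEV_INFO update is sent before BSS_INFO, on disable the order is
 * reversed so the BSS entry is torn down first.
 */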
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
				struct ieee80211_vif *vif,
				struct mt76_wcid *wcid,
				bool enable)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_dev *dev = phy->dev;
	struct {
		struct {
			u8 omac_idx;
			u8 band_idx;
			__le16 pad;
		} __packed hdr;
		struct req_tlv {
			__le16 tag;
			__le16 len;
			u8 active;
			u8 pad;
			u8 omac_addr[ETH_ALEN];
		} __packed tlv;
	} dev_req = {
		.hdr = {
			.omac_idx = mvif->omac_idx,
			.band_idx = mvif->band_idx,
		},
		.tlv = {
			.tag = cpu_to_le16(DEV_INFO_ACTIVE),
			.len = cpu_to_le16(sizeof(struct req_tlv)),
			.active = enable,
		},
	};
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_bss_basic_tlv basic;
	} basic_req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.basic = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
			.len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
			.omac_idx = mvif->omac_idx,
			.band_idx = mvif->band_idx,
			.wmm_idx = mvif->wmm_idx,
			.active = enable,
			.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx),
			.sta_idx = cpu_to_le16(wcid->idx),
			.conn_state = 1,
		},
	};
	int err, idx, cmd, len;
	void *data;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_MONITOR:
	case NL80211_IFTYPE_AP:
		basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
		break;
	case NL80211_IFTYPE_STATION:
		basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
		break;
	case NL80211_IFTYPE_ADHOC:
		basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
		break;
	default:
		WARN_ON(1);
		break;
	}

	idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
	basic_req.basic.hw_bss_idx = idx;

	memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);

	cmd = enable ? MCU_UNI_CMD_DEV_INFO_UPDATE : MCU_UNI_CMD_BSS_INFO_UPDATE;
	data = enable ? (void *)&dev_req : (void *)&basic_req;
	len = enable ? sizeof(dev_req) : sizeof(basic_req);

	err = mt76_mcu_send_msg(dev, cmd, data, len, true);
	if (err < 0)
		return err;

	cmd = enable ? MCU_UNI_CMD_BSS_INFO_UPDATE : MCU_UNI_CMD_DEV_INFO_UPDATE;
	data = enable ? (void *)&basic_req : (void *)&dev_req;
	len = enable ? sizeof(basic_req) : sizeof(dev_req);

	return mt76_mcu_send_msg(dev, cmd, data, len, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_dev);

void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
				struct ieee80211_ampdu_params *params,
				bool enable, bool tx)
{
	struct sta_rec_ba *ba;
	struct tlv *tlv;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));

	ba = (struct sta_rec_ba *)tlv;
	ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
	ba->winsize = cpu_to_le16(params->buf_size);
	ba->ssn = cpu_to_le16(params->ssn);
	ba->ba_en = enable << params->tid;
	ba->amsdu = params->amsdu;
	ba->tid = params->tid;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);

int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
			   struct ieee80211_ampdu_params *params,
			   bool enable, bool tx)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
	struct wtbl_req_hdr *wtbl_hdr;
	struct tlv *sta_wtbl;
	struct sk_buff *skb;
	int ret;

	skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
					   sizeof(struct tlv));

	wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
						  sta_wtbl, &skb);
	if (IS_ERR(wtbl_hdr))
		return PTR_ERR(wtbl_hdr);

	mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
				    wtbl_hdr);

	ret = mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE, true);
	if (ret)
		return ret;

	skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);

	return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE,
				     true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);

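/* Legacy PHY mode bitmap (PHY_MODE_*) used by the UNI BSS commands; chips
 * other than MT7921 always report the fixed value 0x38.
 */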
static u8
mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
			 enum nl80211_band band,
			 struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;
	const struct ieee80211_sta_he_cap *he_cap;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	u8 mode = 0;

	if (!is_mt7921(dev))
		return 0x38;

	if (sta) {
		ht_cap = &sta->ht_cap;
		vht_cap = &sta->vht_cap;
		he_cap = &sta->he_cap;
	} else {
		struct ieee80211_supported_band *sband;

		sband = phy->hw->wiphy->bands[band];
		ht_cap = &sband->ht_cap;
		vht_cap = &sband->vht_cap;
		he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
	}

	if (band == NL80211_BAND_2GHZ) {
		mode |= PHY_MODE_B | PHY_MODE_G;

		if (ht_cap->ht_supported)
			mode |= PHY_MODE_GN;

		if (he_cap->has_he)
			mode |= PHY_MODE_AX_24G;
	} else if (band == NL80211_BAND_5GHZ) {
		mode |= PHY_MODE_A;

		if (ht_cap->ht_supported)
			mode |= PHY_MODE_AN;

		if (vht_cap->vht_supported)
			mode |= PHY_MODE_AC;

		if (he_cap->has_he)
			mode |= PHY_MODE_AX_5G;
	}

	return mode;
}

static const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
	enum nl80211_band band = phy->chandef.chan->band;
	struct ieee80211_supported_band *sband;

	sband = phy->hw->wiphy->bands[band];

	return ieee80211_get_he_iftype_cap(sband, vif->type);
}

#define DEFAULT_HE_PE_DURATION		4
#define DEFAULT_HE_DURATION_RTS_THRES	1023
static void
mt76_connac_mcu_uni_bss_he_tlv(struct mt76_phy *phy, struct ieee80211_vif *vif,
			       struct tlv *tlv)
{
	const struct ieee80211_sta_he_cap *cap;
	struct bss_info_uni_he *he;

	cap = mt76_connac_get_he_phy_cap(phy, vif);

	he = (struct bss_info_uni_he *)tlv;
	he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
	if (!he->he_pe_duration)
		he->he_pe_duration = DEFAULT_HE_PE_DURATION;

	he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
	if (!he->he_rts_thres)
		he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);

	he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80;
	he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160;
	he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
}

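/* Configure a BSS through MCU_UNI_CMD_BSS_INFO_UPDATE: basic/QoS TLVs, an
 * optional HE + BSS color TLV pair when the BSS supports HE, and the RLM TLV
 * describing the operating channel and bandwidth.
 */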
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
				struct ieee80211_vif *vif,
				struct mt76_wcid *wcid,
				bool enable)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct cfg80211_chan_def *chandef = &phy->chandef;
	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
	enum nl80211_band band = chandef->chan->band;
	struct mt76_dev *mdev = phy->dev;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_bss_basic_tlv basic;
		struct mt76_connac_bss_qos_tlv qos;
	} basic_req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.basic = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
			.len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
			.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
			.dtim_period = vif->bss_conf.dtim_period,
			.omac_idx = mvif->omac_idx,
			.band_idx = mvif->band_idx,
			.wmm_idx = mvif->wmm_idx,
			.active = true, /* keep bss deactivated */
			.phymode = mt76_connac_get_phy_mode(phy, vif, band, NULL),
		},
		.qos = {
			.tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
			.len = cpu_to_le16(sizeof(struct mt76_connac_bss_qos_tlv)),
			.qos = vif->bss_conf.qos,
		},
	};
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct rlm_tlv {
			__le16 tag;
			__le16 len;
			u8 control_channel;
			u8 center_chan;
			u8 center_chan2;
			u8 bw;
			u8 tx_streams;
			u8 rx_streams;
			u8 short_st;
			u8 ht_op_info;
			u8 sco;
			u8 pad[3];
		} __packed rlm;
	} __packed rlm_req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.rlm = {
			.tag = cpu_to_le16(UNI_BSS_INFO_RLM),
			.len = cpu_to_le16(sizeof(struct rlm_tlv)),
			.control_channel = chandef->chan->hw_value,
			.center_chan = ieee80211_frequency_to_channel(freq1),
			.center_chan2 = ieee80211_frequency_to_channel(freq2),
			.tx_streams = hweight8(phy->antenna_mask),
			.ht_op_info = 4, /* set HT 40M allowed */
			.rx_streams = phy->chainmask,
			.short_st = true,
		},
	};
	int err, conn_type;
	u8 idx;

	idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
	basic_req.basic.hw_bss_idx = idx;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_AP:
		if (vif->p2p)
			conn_type = CONNECTION_P2P_GO;
		else
			conn_type = CONNECTION_INFRA_AP;
		basic_req.basic.conn_type = cpu_to_le32(conn_type);
		break;
	case NL80211_IFTYPE_STATION:
		if (vif->p2p)
			conn_type = CONNECTION_P2P_GC;
		else
			conn_type = CONNECTION_INFRA_STA;
		basic_req.basic.conn_type = cpu_to_le32(conn_type);
		break;
	case NL80211_IFTYPE_ADHOC:
		basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
		break;
	default:
		WARN_ON(1);
		break;
	}

	memcpy(basic_req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN);
	basic_req.basic.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx);
	basic_req.basic.sta_idx = cpu_to_le16(wcid->idx);
	basic_req.basic.conn_state = !enable;

	err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &basic_req,
				sizeof(basic_req), true);
	if (err < 0)
		return err;

	if (vif->bss_conf.he_support) {
		struct {
			struct {
				u8 bss_idx;
				u8 pad[3];
			} __packed hdr;
			struct bss_info_uni_he he;
			struct bss_info_uni_bss_color bss_color;
		} he_req = {
			.hdr = {
				.bss_idx = mvif->idx,
			},
			.he = {
				.tag = cpu_to_le16(UNI_BSS_INFO_HE_BASIC),
				.len = cpu_to_le16(sizeof(struct bss_info_uni_he)),
			},
			.bss_color = {
				.tag = cpu_to_le16(UNI_BSS_INFO_BSS_COLOR),
				.len = cpu_to_le16(sizeof(struct bss_info_uni_bss_color)),
				.enable = 0,
				.bss_color = 0,
			},
		};

		if (enable) {
			he_req.bss_color.enable =
				vif->bss_conf.he_bss_color.enabled;
			he_req.bss_color.bss_color =
				vif->bss_conf.he_bss_color.color;
		}

		mt76_connac_mcu_uni_bss_he_tlv(phy, vif,
					       (struct tlv *)&he_req.he);

		err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE,
					&he_req, sizeof(he_req), true);
		if (err < 0)
			return err;
	}

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_40:
		rlm_req.rlm.bw = CMD_CBW_40MHZ;
		break;
	case NL80211_CHAN_WIDTH_80:
		rlm_req.rlm.bw = CMD_CBW_80MHZ;
		break;
	case NL80211_CHAN_WIDTH_80P80:
		rlm_req.rlm.bw = CMD_CBW_8080MHZ;
		break;
	case NL80211_CHAN_WIDTH_160:
		rlm_req.rlm.bw = CMD_CBW_160MHZ;
		break;
	case NL80211_CHAN_WIDTH_5:
		rlm_req.rlm.bw = CMD_CBW_5MHZ;
		break;
	case NL80211_CHAN_WIDTH_10:
		rlm_req.rlm.bw = CMD_CBW_10MHZ;
		break;
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	default:
		rlm_req.rlm.bw = CMD_CBW_20MHZ;
		rlm_req.rlm.ht_op_info = 0;
		break;
	}

	if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
		rlm_req.rlm.sco = 1; /* SCA */
	else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
		rlm_req.rlm.sco = 3; /* SCB */

	return mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &rlm_req,
				 sizeof(rlm_req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);

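/* Trigger a hardware scan: up to 32 channels fit in the base request, any
 * extra channels go into the ext_channels array, and passive scans (no SSIDs)
 * double the per-channel dwell time.
 */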
#define MT76_CONNAC_SCAN_CHANNEL_TIME		60

int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
			    struct ieee80211_scan_request *scan_req)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct cfg80211_scan_request *sreq = &scan_req->req;
	int n_ssids = 0, err, i, duration;
	int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
	struct ieee80211_channel **scan_list = sreq->channels;
	struct mt76_dev *mdev = phy->dev;
	bool ext_phy = phy == mdev->phy2;
	struct mt76_connac_mcu_scan_channel *chan;
	struct mt76_connac_hw_scan_req *req;
	struct sk_buff *skb;

	skb = mt76_mcu_msg_alloc(mdev, NULL, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	set_bit(MT76_HW_SCANNING, &phy->state);
	mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;

	req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));

	req->seq_num = mvif->scan_seq_num | ext_phy << 7;
	req->bss_idx = mvif->idx;
	req->scan_type = sreq->n_ssids ? 1 : 0;
	req->probe_req_num = sreq->n_ssids ? 2 : 0;
	req->version = 1;

	for (i = 0; i < sreq->n_ssids; i++) {
		if (!sreq->ssids[i].ssid_len)
			continue;

		req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
		memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
		       sreq->ssids[i].ssid_len);
		n_ssids++;
	}
	req->ssid_type = n_ssids ? BIT(2) : BIT(0);
	req->ssid_type_ext = n_ssids ? BIT(0) : 0;
	req->ssids_num = n_ssids;

	duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
	/* increase channel time for passive scan */
	if (!sreq->n_ssids)
		duration *= 2;
	req->timeout_value = cpu_to_le16(sreq->n_channels * duration);
	req->channel_min_dwell_time = cpu_to_le16(duration);
	req->channel_dwell_time = cpu_to_le16(duration);

	req->channels_num = min_t(u8, sreq->n_channels, 32);
	req->ext_channels_num = min_t(u8, ext_channels_num, 32);
	for (i = 0; i < req->channels_num + req->ext_channels_num; i++) {
		if (i >= 32)
			chan = &req->ext_channels[i - 32];
		else
			chan = &req->channels[i];

		chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
		chan->channel_num = scan_list[i]->hw_value;
	}
	req->channel_type = sreq->n_channels ? 4 : 0;

	if (sreq->ie_len > 0) {
		memcpy(req->ies, sreq->ie, sreq->ie_len);
		req->ies_len = cpu_to_le16(sreq->ie_len);
	}

	if (is_mt7921(phy->dev))
		req->scan_func |= SCAN_FUNC_SPLIT_SCAN;

	memcpy(req->bssid, sreq->bssid, ETH_ALEN);
	if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		get_random_mask_addr(req->random_mac, sreq->mac_addr,
				     sreq->mac_addr_mask);
		req->scan_func |= SCAN_FUNC_RANDOM_MAC;
	}

	err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_START_HW_SCAN, false);
	if (err < 0)
		clear_bit(MT76_HW_SCANNING, &phy->state);

	return err;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_hw_scan);

int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
				   struct ieee80211_vif *vif)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		u8 seq_num;
		u8 is_ext_channel;
		u8 rsv[2];
	} __packed req = {
		.seq_num = mvif->scan_seq_num,
	};

	if (test_and_clear_bit(MT76_HW_SCANNING, &phy->state)) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		ieee80211_scan_completed(phy->hw, &info);
	}

	return mt76_mcu_send_msg(phy->dev, MCU_CMD_CANCEL_HW_SCAN, &req,
				 sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_cancel_hw_scan);

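/* Build the scheduled (offloaded) scan request: SSIDs, match sets, channel
 * list, scan plan intervals and optional probe IEs, with random MAC address
 * support for MT7663 and MT7921.
 */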
  1266. int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
  1267. struct ieee80211_vif *vif,
  1268. struct cfg80211_sched_scan_request *sreq)
  1269. {
  1270. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  1271. struct ieee80211_channel **scan_list = sreq->channels;
  1272. struct mt76_connac_mcu_scan_channel *chan;
  1273. struct mt76_connac_sched_scan_req *req;
  1274. struct mt76_dev *mdev = phy->dev;
  1275. bool ext_phy = phy == mdev->phy2;
  1276. struct cfg80211_match_set *match;
  1277. struct cfg80211_ssid *ssid;
  1278. struct sk_buff *skb;
  1279. int i;
  1280. skb = mt76_mcu_msg_alloc(mdev, NULL, sizeof(*req) + sreq->ie_len);
  1281. if (!skb)
  1282. return -ENOMEM;
  1283. mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
  1284. req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
  1285. req->version = 1;
  1286. req->seq_num = mvif->scan_seq_num | ext_phy << 7;
  1287. if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
  1288. u8 *addr = is_mt7663(phy->dev) ? req->mt7663.random_mac
  1289. : req->mt7921.random_mac;
  1290. req->scan_func = 1;
  1291. get_random_mask_addr(addr, sreq->mac_addr,
  1292. sreq->mac_addr_mask);
  1293. }
  1294. if (is_mt7921(phy->dev))
  1295. req->mt7921.bss_idx = mvif->idx;
  1296. req->ssids_num = sreq->n_ssids;
  1297. for (i = 0; i < req->ssids_num; i++) {
  1298. ssid = &sreq->ssids[i];
  1299. memcpy(req->ssids[i].ssid, ssid->ssid, ssid->ssid_len);
  1300. req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len);
  1301. }
  1302. req->match_num = sreq->n_match_sets;
  1303. for (i = 0; i < req->match_num; i++) {
  1304. match = &sreq->match_sets[i];
  1305. memcpy(req->match[i].ssid, match->ssid.ssid,
  1306. match->ssid.ssid_len);
  1307. req->match[i].rssi_th = cpu_to_le32(match->rssi_thold);
  1308. req->match[i].ssid_len = match->ssid.ssid_len;
  1309. }
  1310. req->channel_type = sreq->n_channels ? 4 : 0;
  1311. req->channels_num = min_t(u8, sreq->n_channels, 64);
  1312. for (i = 0; i < req->channels_num; i++) {
  1313. chan = &req->channels[i];
  1314. chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
  1315. chan->channel_num = scan_list[i]->hw_value;
  1316. }
  1317. req->intervals_num = sreq->n_scan_plans;
  1318. for (i = 0; i < req->intervals_num; i++)
  1319. req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval);
  1320. if (sreq->ie_len > 0) {
  1321. req->ie_len = cpu_to_le16(sreq->ie_len);
  1322. memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len);
  1323. }
  1324. return mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_SCHED_SCAN_REQ, false);
  1325. }
  1326. EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_req);
int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
				      struct ieee80211_vif *vif,
				      bool enable)
{
	struct {
		u8 active; /* 0: enabled 1: disabled */
		u8 rsv[3];
	} __packed req = {
		.active = !enable,
	};

	if (enable)
		set_bit(MT76_HW_SCHED_SCANNING, &phy->state);
	else
		clear_bit(MT76_HW_SCHED_SCANNING, &phy->state);

	return mt76_mcu_send_msg(phy->dev, MCU_CMD_SCHED_SCAN_ENABLE, &req,
				 sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_enable);

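/*
 * Illustrative sketch (not part of the original file): a driver's
 * ->sched_scan_start() callback typically pushes the request and then
 * enables the firmware scheduler, e.g.:
 *
 *	static int mXXXX_start_sched_scan(struct ieee80211_hw *hw,
 *					  struct ieee80211_vif *vif,
 *					  struct cfg80211_sched_scan_request *req,
 *					  struct ieee80211_scan_ies *ies)
 *	{
 *		struct mt76_phy *mphy = hw->priv;
 *		int err;
 *
 *		err = mt76_connac_mcu_sched_scan_req(mphy, vif, req);
 *		if (err < 0)
 *			return err;
 *
 *		return mt76_connac_mcu_sched_scan_enable(mphy, vif, true);
 *	}
 */
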
int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
{
	struct mt76_connac_config req = {
		.resp_type = 0,
	};

	memcpy(req.data, "assert", 7);

	return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
				 false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config);

int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
{
	struct mt76_connac_config req = {
		.resp_type = 0,
	};

	snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !enable);

	return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
				 false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);

/* Deep-sleep policy helper: allow deep sleep once a station is fully
 * authorized (or completely removed) and keep the chip awake while an
 * association is being (re)established.
 */
int mt76_connac_sta_state_dp(struct mt76_dev *dev,
			     enum ieee80211_sta_state old_state,
			     enum ieee80211_sta_state new_state)
{
	if ((old_state == IEEE80211_STA_ASSOC &&
	     new_state == IEEE80211_STA_AUTHORIZED) ||
	    (old_state == IEEE80211_STA_NONE &&
	     new_state == IEEE80211_STA_NOTEXIST))
		mt76_connac_mcu_set_deep_sleep(dev, true);

	if ((old_state == IEEE80211_STA_NOTEXIST &&
	     new_state == IEEE80211_STA_NONE) ||
	    (old_state == IEEE80211_STA_AUTHORIZED &&
	     new_state == IEEE80211_STA_ASSOC))
		mt76_connac_mcu_set_deep_sleep(dev, false);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_connac_sta_state_dp);

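/*
 * Illustrative sketch (not part of the original file): drivers that enable
 * runtime deep sleep can hook the helper above into their ->sta_state()
 * callback before handing off to the common mt76 state machine. The
 * "deep sleep enabled" condition is driver specific and only an assumption
 * here.
 *
 *	static int mXXXX_sta_state(struct ieee80211_hw *hw,
 *				   struct ieee80211_vif *vif,
 *				   struct ieee80211_sta *sta,
 *				   enum ieee80211_sta_state old_state,
 *				   enum ieee80211_sta_state new_state)
 *	{
 *		struct mt76_phy *mphy = hw->priv;
 *
 *		if (deep_sleep_enabled)	 /- driver-specific flag (assumed) -/
 *			mt76_connac_sta_state_dp(mphy->dev, old_state,
 *						 new_state);
 *
 *		return mt76_sta_state(hw, vif, sta, old_state, new_state);
 *	}
 */
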
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
				    struct mt76_connac_coredump *coredump)
{
	spin_lock_bh(&dev->lock);
	__skb_queue_tail(&coredump->msg_list, skb);
	spin_unlock_bh(&dev->lock);

	coredump->last_activity = jiffies;

	queue_delayed_work(dev->wq, &coredump->work,
			   MT76_CONNAC_COREDUMP_TIMEOUT);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);

int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy)
{
	struct mt76_connac_cap_hdr {
		__le16 n_element;
		u8 rsv[2];
	} __packed * hdr;
	struct sk_buff *skb;
	int ret, i;

	ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CMD_GET_NIC_CAPAB, NULL,
					0, true, &skb);
	if (ret)
		return ret;

	hdr = (struct mt76_connac_cap_hdr *)skb->data;
	if (skb->len < sizeof(*hdr)) {
		ret = -EINVAL;
		goto out;
	}

	skb_pull(skb, sizeof(*hdr));

	for (i = 0; i < le16_to_cpu(hdr->n_element); i++) {
		struct tlv_hdr {
			__le32 type;
			__le32 len;
		} __packed * tlv = (struct tlv_hdr *)skb->data;
		int len;

		if (skb->len < sizeof(*tlv))
			break;

		skb_pull(skb, sizeof(*tlv));

		len = le32_to_cpu(tlv->len);
		if (skb->len < len)
			break;

		switch (le32_to_cpu(tlv->type)) {
		case MT_NIC_CAP_6G:
			phy->cap.has_6ghz = skb->data[0];
			break;
		default:
			break;
		}
		skb_pull(skb, len);
	}
out:
	dev_kfree_skb(skb);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_get_nic_capability);

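/*
 * Illustrative sketch (not part of the original file): the capability query
 * is normally issued once during firmware bring-up, before the driver
 * registers its supported bands, so that fields such as phy->cap.has_6ghz
 * are populated. "dev->mphy" stands for the driver's primary struct
 * mt76_phy and is an assumption about the caller's layout:
 *
 *	err = mt76_connac_mcu_get_nic_capability(&dev->mphy);
 *	if (err)
 *		return err;
 *	(dev->mphy.cap can now be consulted when building the wiphy bands)
 */
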
/* Fill one per-channel SKU power-limit table for the firmware: CCK
 * (2 GHz only), OFDM, HT, VHT and, on mt7921, HE rate entries; unset
 * entries default to the chip's maximum power.
 */
static void
mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
			  struct mt76_power_limits *limits,
			  enum nl80211_band band)
{
	int max_power = is_mt7921(dev) ? 127 : 63;
	int i, offset = sizeof(limits->cck);

	memset(sku, max_power, MT_SKU_POWER_LIMIT);

	if (band == NL80211_BAND_2GHZ) {
		/* cck */
		memcpy(sku, limits->cck, sizeof(limits->cck));
	}

	/* ofdm */
	memcpy(&sku[offset], limits->ofdm, sizeof(limits->ofdm));
	offset += sizeof(limits->ofdm);

	/* ht */
	for (i = 0; i < 2; i++) {
		memcpy(&sku[offset], limits->mcs[i], 8);
		offset += 8;
	}
	sku[offset++] = limits->mcs[0][0];

	/* vht */
	for (i = 0; i < ARRAY_SIZE(limits->mcs); i++) {
		memcpy(&sku[offset], limits->mcs[i],
		       ARRAY_SIZE(limits->mcs[i]));
		offset += 12;
	}

	if (!is_mt7921(dev))
		return;

	/* he */
	for (i = 0; i < ARRAY_SIZE(limits->ru); i++) {
		memcpy(&sku[offset], limits->ru[i], ARRAY_SIZE(limits->ru[i]));
		offset += ARRAY_SIZE(limits->ru[i]);
	}
}

static s8 mt76_connac_get_sar_power(struct mt76_phy *phy,
				    struct ieee80211_channel *chan,
				    s8 target_power)
{
	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
	struct mt76_freq_range_power *frp = phy->frp;
	int freq, i;

	if (!capa || !frp)
		return target_power;

	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
	for (i = 0; i < capa->num_freq_ranges; i++) {
		if (frp[i].range &&
		    freq >= frp[i].range->start_freq &&
		    freq < frp[i].range->end_freq) {
			target_power = min_t(s8, frp[i].power, target_power);
			break;
		}
	}

	return target_power;
}

static int
mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
				  enum nl80211_band band)
{
	struct mt76_dev *dev = phy->dev;
	int sku_len, batch_len = is_mt7921(dev) ? 8 : 16;
	static const u8 chan_list_2ghz[] = {
		1, 2, 3, 4, 5, 6, 7,
		8, 9, 10, 11, 12, 13, 14
	};
	static const u8 chan_list_5ghz[] = {
		36, 38, 40, 42, 44, 46, 48,
		50, 52, 54, 56, 58, 60, 62,
		64, 100, 102, 104, 106, 108, 110,
		112, 114, 116, 118, 120, 122, 124,
		126, 128, 132, 134, 136, 138, 140,
		142, 144, 149, 151, 153, 155, 157,
		159, 161, 165
	};
	int i, n_chan, batch_size, idx = 0, tx_power, last_ch;
	struct mt76_connac_sku_tlv sku_tlbv;
	struct mt76_power_limits limits;
	const u8 *ch_list;

	sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
	tx_power = 2 * phy->hw->conf.power_level;
	if (!tx_power)
		tx_power = 127;

	if (band == NL80211_BAND_2GHZ) {
		n_chan = ARRAY_SIZE(chan_list_2ghz);
		ch_list = chan_list_2ghz;
	} else {
		n_chan = ARRAY_SIZE(chan_list_5ghz);
		ch_list = chan_list_5ghz;
	}
	batch_size = DIV_ROUND_UP(n_chan, batch_len);

	if (!phy->cap.has_5ghz)
		last_ch = chan_list_2ghz[n_chan - 1];
	else
		last_ch = chan_list_5ghz[n_chan - 1];

	for (i = 0; i < batch_size; i++) {
		struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {
			.band = band == NL80211_BAND_2GHZ ? 1 : 2,
		};
		int j, err, msg_len, num_ch;
		struct sk_buff *skb;

		num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
		msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
		skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
		if (!skb)
			return -ENOMEM;

		skb_reserve(skb, sizeof(tx_power_tlv));

		BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2));
		memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2));
		tx_power_tlv.n_chan = num_ch;

		for (j = 0; j < num_ch; j++, idx++) {
			struct ieee80211_channel chan = {
				.hw_value = ch_list[idx],
				.band = band,
			};
			s8 sar_power;

			sar_power = mt76_connac_get_sar_power(phy, &chan,
							      tx_power);

			mt76_get_rate_power_limits(phy, &chan, &limits,
						   sar_power);

			tx_power_tlv.last_msg = ch_list[idx] == last_ch;
			sku_tlbv.channel = ch_list[idx];

			mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit,
						  &limits, band);
			skb_put_data(skb, &sku_tlbv, sku_len);
		}
		__skb_push(skb, sizeof(tx_power_tlv));
		memcpy(skb->data, &tx_power_tlv, sizeof(tx_power_tlv));

		err = mt76_mcu_skb_send_msg(dev, skb,
					    MCU_CMD_SET_RATE_TX_POWER, false);
		if (err < 0)
			return err;
	}

	return 0;
}

int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
{
	int err;

	if (phy->cap.has_2ghz) {
		err = mt76_connac_mcu_rate_txpower_band(phy,
							NL80211_BAND_2GHZ);
		if (err < 0)
			return err;
	}
	if (phy->cap.has_5ghz) {
		err = mt76_connac_mcu_rate_txpower_band(phy,
							NL80211_BAND_5GHZ);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);

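/*
 * Illustrative sketch (not part of the original file): drivers typically
 * resend the per-rate power tables after anything that can change the
 * regulatory or SAR limits, e.g. at the end of a channel switch:
 *
 *	mt76_connac_mcu_set_rate_txpower(mphy);
 *
 * where mphy is the struct mt76_phy that was just configured for the new
 * channel.
 */
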
int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
				      struct mt76_vif *vif,
				      struct ieee80211_bss_conf *info)
{
	struct sk_buff *skb;
	int i, len = min_t(int, info->arp_addr_cnt,
			   IEEE80211_BSS_ARP_ADDR_LIST_LEN);
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_arpns_tlv arp;
	} req_hdr = {
		.hdr = {
			.bss_idx = vif->idx,
		},
		.arp = {
			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
			.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
			.ips_num = len,
			.mode = 2, /* update */
			.option = 1,
		},
	};

	skb = mt76_mcu_msg_alloc(dev, NULL,
				 sizeof(req_hdr) + len * sizeof(__be32));
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, &req_hdr, sizeof(req_hdr));
	for (i = 0; i < len; i++) {
		u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));

		memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
	}

	return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_OFFLOAD, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter);

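/*
 * Illustrative sketch (not part of the original file): this helper is meant
 * to be called from a driver's ->bss_info_changed() when mac80211 reports
 * BSS_CHANGED_ARP_FILTER, e.g.:
 *
 *	if (changed & BSS_CHANGED_ARP_FILTER) {
 *		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
 *
 *		mt76_connac_mcu_update_arp_filter(mphy->dev, mvif, info);
 *	}
 */
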
#ifdef CONFIG_PM

const struct wiphy_wowlan_support mt76_connac_wowlan_support = {
	.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_NET_DETECT,
	.n_patterns = 1,
	.pattern_min_len = 1,
	.pattern_max_len = MT76_CONNAC_WOW_PATTEN_MAX_LEN,
	.max_nd_match_sets = 10,
};
EXPORT_SYMBOL_GPL(mt76_connac_wowlan_support);

static void
mt76_connac_mcu_key_iter(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta,
			 struct ieee80211_key_conf *key,
			 void *data)
{
	struct mt76_connac_gtk_rekey_tlv *gtk_tlv = data;
	u32 cipher;

	if (key->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
	    key->cipher != WLAN_CIPHER_SUITE_CCMP &&
	    key->cipher != WLAN_CIPHER_SUITE_TKIP)
		return;

	if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1);
		cipher = BIT(3);
	} else {
		gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2);
		cipher = BIT(4);
	}

	/* we are assuming here to have a single pairwise key */
	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		gtk_tlv->pairwise_cipher = cpu_to_le32(cipher);
		gtk_tlv->group_cipher = cpu_to_le32(cipher);
		gtk_tlv->keyid = key->keyidx;
	}
}

int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct cfg80211_gtk_rekey_data *key)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_connac_gtk_rekey_tlv *gtk_tlv;
	struct mt76_phy *phy = hw->priv;
	struct sk_buff *skb;
	struct {
		u8 bss_idx;
		u8 pad[3];
	} __packed hdr = {
		.bss_idx = mvif->idx,
	};

	skb = mt76_mcu_msg_alloc(phy->dev, NULL,
				 sizeof(hdr) + sizeof(*gtk_tlv));
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, &hdr, sizeof(hdr));
	gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
							       sizeof(*gtk_tlv));
	gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
	gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
	gtk_tlv->rekey_mode = 2;
	gtk_tlv->option = 1;

	rcu_read_lock();
	ieee80211_iter_keys_rcu(hw, vif, mt76_connac_mcu_key_iter, gtk_tlv);
	rcu_read_unlock();

	memcpy(gtk_tlv->kek, key->kek, NL80211_KEK_LEN);
	memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN);
	memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN);

	return mt76_mcu_skb_send_msg(phy->dev, skb, MCU_UNI_CMD_OFFLOAD, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_gtk_rekey);

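/*
 * Illustrative sketch (not part of the original file): a driver's
 * ->set_rekey_data() callback can simply forward the request:
 *
 *	static void mXXXX_set_rekey_data(struct ieee80211_hw *hw,
 *					 struct ieee80211_vif *vif,
 *					 struct cfg80211_gtk_rekey_data *data)
 *	{
 *		mt76_connac_mcu_update_gtk_rekey(hw, vif, data);
 *	}
 */
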
static int
mt76_connac_mcu_set_arp_filter(struct mt76_dev *dev, struct ieee80211_vif *vif,
			       bool suspend)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_arpns_tlv arpns;
	} req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.arpns = {
			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
			.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
			.mode = suspend,
		},
	};

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
				 true);
}

static int
mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif,
			      bool suspend)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_gtk_rekey_tlv gtk_tlv;
	} __packed req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.gtk_tlv = {
			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY),
			.len = cpu_to_le16(sizeof(struct mt76_connac_gtk_rekey_tlv)),
			.rekey_mode = !suspend,
		},
	};

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
				 true);
}

static int
mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev,
				 struct ieee80211_vif *vif,
				 bool enable, u8 mdtim,
				 bool wow_suspend)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_suspend_tlv suspend_tlv;
	} req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.suspend_tlv = {
			.tag = cpu_to_le16(UNI_SUSPEND_MODE_SETTING),
			.len = cpu_to_le16(sizeof(struct mt76_connac_suspend_tlv)),
			.enable = enable,
			.mdtim = mdtim,
			.wow_suspend = wow_suspend,
		},
	};

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req),
				 true);
}

static int
mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
				struct ieee80211_vif *vif,
				u8 index, bool enable,
				struct cfg80211_pkt_pattern *pattern)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_connac_wow_pattern_tlv *ptlv;
	struct sk_buff *skb;
	struct req_hdr {
		u8 bss_idx;
		u8 pad[3];
	} __packed hdr = {
		.bss_idx = mvif->idx,
	};

	skb = mt76_mcu_msg_alloc(dev, NULL, sizeof(hdr) + sizeof(*ptlv));
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, &hdr, sizeof(hdr));
	ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
	ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
	ptlv->len = cpu_to_le16(sizeof(*ptlv));
	ptlv->data_len = pattern->pattern_len;
	ptlv->enable = enable;
	ptlv->index = index;

	memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
	memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8));

	return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
}

static int
mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
			     bool suspend, struct cfg80211_wowlan *wowlan)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_dev *dev = phy->dev;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_wow_ctrl_tlv wow_ctrl_tlv;
		struct mt76_connac_wow_gpio_param_tlv gpio_tlv;
	} req = {
		.hdr = {
			.bss_idx = mvif->idx,
		},
		.wow_ctrl_tlv = {
			.tag = cpu_to_le16(UNI_SUSPEND_WOW_CTRL),
			.len = cpu_to_le16(sizeof(struct mt76_connac_wow_ctrl_tlv)),
			.cmd = suspend ? 1 : 2,
		},
		.gpio_tlv = {
			.tag = cpu_to_le16(UNI_SUSPEND_WOW_GPIO_PARAM),
			.len = cpu_to_le16(sizeof(struct mt76_connac_wow_gpio_param_tlv)),
			.gpio_pin = 0xff, /* follow fw about GPIO pin */
		},
	};

	if (wowlan->magic_pkt)
		req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC;
	if (wowlan->disconnect)
		req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT |
					     UNI_WOW_DETECT_TYPE_BCN_LOST);
	if (wowlan->nd_config) {
		mt76_connac_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
		req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT;
		mt76_connac_mcu_sched_scan_enable(phy, vif, suspend);
	}
	if (wowlan->n_patterns)
		req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP;

	if (mt76_is_mmio(dev))
		req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
	else if (mt76_is_usb(dev))
		req.wow_ctrl_tlv.wakeup_hif = WOW_USB;
	else if (mt76_is_sdio(dev))
		req.wow_ctrl_tlv.wakeup_hif = WOW_GPIO;

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req),
				 true);
}

int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
{
	struct {
		struct {
			u8 hif_type; /* 0x0: HIF_SDIO
				      * 0x1: HIF_USB
				      * 0x2: HIF_PCIE
				      */
			u8 pad[3];
		} __packed hdr;
		struct hif_suspend_tlv {
			__le16 tag;
			__le16 len;
			u8 suspend;
		} __packed hif_suspend;
	} req = {
		.hif_suspend = {
			.tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
			.len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
			.suspend = suspend,
		},
	};

	if (mt76_is_mmio(dev))
		req.hdr.hif_type = 2;
	else if (mt76_is_usb(dev))
		req.hdr.hif_type = 1;
	else if (mt76_is_sdio(dev))
		req.hdr.hif_type = 0;

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD_HIF_CTRL, &req, sizeof(req),
				 true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_hif_suspend);

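/*
 * Illustrative sketch (not part of the original file): bus suspend/resume
 * handlers pair this helper around the low-level bus transition, e.g.:
 *
 *	suspend path:
 *		err = mt76_connac_mcu_set_hif_suspend(mdev, true);
 *		if (err)
 *			return err;
 *		(stop queues, save state, put the bus to sleep)
 *
 *	resume path:
 *		(restore the bus)
 *		err = mt76_connac_mcu_set_hif_suspend(mdev, false);
 */
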
void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
				      struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = priv;
	bool suspend = test_bit(MT76_STATE_SUSPEND, &phy->state);
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
	int i;

	mt76_connac_mcu_set_gtk_rekey(phy->dev, vif, suspend);
	mt76_connac_mcu_set_arp_filter(phy->dev, vif, suspend);

	mt76_connac_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true);

	for (i = 0; i < wowlan->n_patterns; i++)
		mt76_connac_mcu_set_wow_pattern(phy->dev, vif, i, suspend,
						&wowlan->patterns[i]);
	mt76_connac_mcu_set_wow_ctrl(phy, vif, suspend, wowlan);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_suspend_iter);

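/*
 * Illustrative sketch (not part of the original file): the iterator above is
 * intended to be passed to mac80211's interface walker from a driver's WoW
 * suspend/resume path, after setting or clearing MT76_STATE_SUSPEND in
 * phy->state:
 *
 *	ieee80211_iterate_active_interfaces(hw,
 *					    IEEE80211_IFACE_ITER_RESUME_ALL,
 *					    mt76_connac_mcu_set_suspend_iter,
 *					    phy);
 */
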
#endif /* CONFIG_PM */

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");