// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include "mt76x02.h"
  6. #define RADAR_SPEC(m, len, el, eh, wl, wh, \
  7. w_tolerance, tl, th, t_tolerance, \
  8. bl, bh, event_exp, power_jmp) \
  9. { \
  10. .mode = m, \
  11. .avg_len = len, \
  12. .e_low = el, \
  13. .e_high = eh, \
  14. .w_low = wl, \
  15. .w_high = wh, \
  16. .w_margin = w_tolerance, \
  17. .t_low = tl, \
  18. .t_high = th, \
  19. .t_margin = t_tolerance, \
  20. .b_low = bl, \
  21. .b_high = bh, \
  22. .event_expiration = event_exp, \
  23. .pwr_jmp = power_jmp \
  24. }
  25. static const struct mt76x02_radar_specs etsi_radar_specs[] = {
  26. /* 20MHz */
  27. RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
  28. 0x7fffffff, 0x155cc0, 0x19cc),
  29. RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
  30. 0x7fffffff, 0x155cc0, 0x19cc),
  31. RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
  32. 0x7fffffff, 0x155cc0, 0x19dd),
  33. RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
  34. 0x7fffffff, 0x2191c0, 0x15cc),
  35. /* 40MHz */
  36. RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
  37. 0x7fffffff, 0x155cc0, 0x19cc),
  38. RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
  39. 0x7fffffff, 0x155cc0, 0x19cc),
  40. RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
  41. 0x7fffffff, 0x155cc0, 0x19dd),
  42. RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
  43. 0x7fffffff, 0x2191c0, 0x15cc),
  44. /* 80MHz */
  45. RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
  46. 0x7fffffff, 0x155cc0, 0x19cc),
  47. RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
  48. 0x7fffffff, 0x155cc0, 0x19cc),
  49. RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
  50. 0x7fffffff, 0x155cc0, 0x19dd),
  51. RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
  52. 0x7fffffff, 0x2191c0, 0x15cc)
  53. };
  54. static const struct mt76x02_radar_specs fcc_radar_specs[] = {
  55. /* 20MHz */
  56. RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
  57. 0x7fffffff, 0xfe808, 0x13dc),
  58. RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
  59. 0x7fffffff, 0xfe808, 0x19dd),
  60. RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
  61. 0x7fffffff, 0xfe808, 0x12cc),
  62. RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
  63. 0x3938700, 0x57bcf00, 0x1289),
  64. /* 40MHz */
  65. RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
  66. 0x7fffffff, 0xfe808, 0x13dc),
  67. RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
  68. 0x7fffffff, 0xfe808, 0x19dd),
  69. RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
  70. 0x7fffffff, 0xfe808, 0x12cc),
  71. RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
  72. 0x3938700, 0x57bcf00, 0x1289),
  73. /* 80MHz */
  74. RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
  75. 0x7fffffff, 0xfe808, 0x16cc),
  76. RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
  77. 0x7fffffff, 0xfe808, 0x19dd),
  78. RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
  79. 0x7fffffff, 0xfe808, 0x12cc),
  80. RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
  81. 0x3938700, 0x57bcf00, 0x1289)
  82. };
  83. static const struct mt76x02_radar_specs jp_w56_radar_specs[] = {
  84. /* 20MHz */
  85. RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
  86. 0x7fffffff, 0x14c080, 0x13dc),
  87. RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
  88. 0x7fffffff, 0x14c080, 0x19dd),
  89. RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
  90. 0x7fffffff, 0x14c080, 0x12cc),
  91. RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
  92. 0x3938700, 0X57bcf00, 0x1289),
  93. /* 40MHz */
  94. RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
  95. 0x7fffffff, 0x14c080, 0x13dc),
  96. RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
  97. 0x7fffffff, 0x14c080, 0x19dd),
  98. RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
  99. 0x7fffffff, 0x14c080, 0x12cc),
  100. RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
  101. 0x3938700, 0X57bcf00, 0x1289),
  102. /* 80MHz */
  103. RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
  104. 0x7fffffff, 0x14c080, 0x16cc),
  105. RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
  106. 0x7fffffff, 0x14c080, 0x19dd),
  107. RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
  108. 0x7fffffff, 0x14c080, 0x12cc),
  109. RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
  110. 0x3938700, 0X57bcf00, 0x1289)
  111. };
  112. static const struct mt76x02_radar_specs jp_w53_radar_specs[] = {
  113. /* 20MHz */
  114. RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
  115. 0x7fffffff, 0x14c080, 0x16cc),
  116. { 0 },
  117. RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
  118. 0x7fffffff, 0x14c080, 0x16cc),
  119. { 0 },
  120. /* 40MHz */
  121. RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
  122. 0x7fffffff, 0x14c080, 0x16cc),
  123. { 0 },
  124. RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
  125. 0x7fffffff, 0x14c080, 0x16cc),
  126. { 0 },
  127. /* 80MHz */
  128. RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
  129. 0x7fffffff, 0x14c080, 0x16cc),
  130. { 0 },
  131. RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
  132. 0x7fffffff, 0x14c080, 0x16cc),
  133. { 0 }
  134. };
  135. static void
  136. mt76x02_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, u8 enable)
  137. {
  138. u32 data;
  139. data = (1 << 1) | enable;
  140. mt76_wr(dev, MT_BBP(DFS, 36), data);
  141. }
  142. static void mt76x02_dfs_seq_pool_put(struct mt76x02_dev *dev,
  143. struct mt76x02_dfs_sequence *seq)
  144. {
  145. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  146. list_add(&seq->head, &dfs_pd->seq_pool);
  147. dfs_pd->seq_stats.seq_pool_len++;
  148. dfs_pd->seq_stats.seq_len--;
  149. }
  150. static struct mt76x02_dfs_sequence *
  151. mt76x02_dfs_seq_pool_get(struct mt76x02_dev *dev)
  152. {
  153. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  154. struct mt76x02_dfs_sequence *seq;
  155. if (list_empty(&dfs_pd->seq_pool)) {
  156. seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
  157. } else {
  158. seq = list_first_entry(&dfs_pd->seq_pool,
  159. struct mt76x02_dfs_sequence,
  160. head);
  161. list_del(&seq->head);
  162. dfs_pd->seq_stats.seq_pool_len--;
  163. }
  164. if (seq)
  165. dfs_pd->seq_stats.seq_len++;
  166. return seq;
  167. }
  168. static int mt76x02_dfs_get_multiple(int val, int frac, int margin)
  169. {
  170. int remainder, factor;
  171. if (!frac)
  172. return 0;
  173. if (abs(val - frac) <= margin)
  174. return 1;
  175. factor = val / frac;
  176. remainder = val % frac;
  177. if (remainder > margin) {
  178. if ((frac - remainder) <= margin)
  179. factor++;
  180. else
  181. factor = 0;
  182. }
  183. return factor;
  184. }
  185. static void mt76x02_dfs_detector_reset(struct mt76x02_dev *dev)
  186. {
  187. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  188. struct mt76x02_dfs_sequence *seq, *tmp_seq;
  189. int i;
  190. /* reset hw detector */
  191. mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
  192. /* reset sw detector */
  193. for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
  194. dfs_pd->event_rb[i].h_rb = 0;
  195. dfs_pd->event_rb[i].t_rb = 0;
  196. }
  197. list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
  198. list_del_init(&seq->head);
  199. mt76x02_dfs_seq_pool_put(dev, seq);
  200. }
  201. }
  202. static bool mt76x02_dfs_check_chirp(struct mt76x02_dev *dev)
  203. {
  204. bool ret = false;
  205. u32 current_ts, delta_ts;
  206. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  207. current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
  208. delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
  209. dfs_pd->chirp_pulse_ts = current_ts;
  210. /* 12 sec */
  211. if (delta_ts <= (12 * (1 << 20))) {
  212. if (++dfs_pd->chirp_pulse_cnt > 8)
  213. ret = true;
  214. } else {
  215. dfs_pd->chirp_pulse_cnt = 1;
  216. }
  217. return ret;
  218. }
  219. static void mt76x02_dfs_get_hw_pulse(struct mt76x02_dev *dev,
  220. struct mt76x02_dfs_hw_pulse *pulse)
  221. {
  222. u32 data;
  223. /* select channel */
  224. data = (MT_DFS_CH_EN << 16) | pulse->engine;
  225. mt76_wr(dev, MT_BBP(DFS, 0), data);
  226. /* reported period */
  227. pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
  228. /* reported width */
  229. pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
  230. pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
  231. /* reported burst number */
  232. pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
  233. }
  234. static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
  235. struct mt76x02_dfs_hw_pulse *pulse)
  236. {
  237. bool ret = false;
  238. if (!pulse->period || !pulse->w1)
  239. return false;
  240. switch (dev->mt76.region) {
  241. case NL80211_DFS_FCC:
  242. if (pulse->engine > 3)
  243. break;
  244. if (pulse->engine == 3) {
  245. ret = mt76x02_dfs_check_chirp(dev);
  246. break;
  247. }
  248. /* check short pulse*/
  249. if (pulse->w1 < 120)
  250. ret = (pulse->period >= 2900 &&
  251. (pulse->period <= 4700 ||
  252. pulse->period >= 6400) &&
  253. (pulse->period <= 6800 ||
  254. pulse->period >= 10200) &&
  255. pulse->period <= 61600);
  256. else if (pulse->w1 < 130) /* 120 - 130 */
  257. ret = (pulse->period >= 2900 &&
  258. pulse->period <= 61600);
  259. else
  260. ret = (pulse->period >= 3500 &&
  261. pulse->period <= 10100);
  262. break;
  263. case NL80211_DFS_ETSI:
  264. if (pulse->engine >= 3)
  265. break;
  266. ret = (pulse->period >= 4900 &&
  267. (pulse->period <= 10200 ||
  268. pulse->period >= 12400) &&
  269. pulse->period <= 100100);
  270. break;
  271. case NL80211_DFS_JP:
  272. if (dev->mphy.chandef.chan->center_freq >= 5250 &&
  273. dev->mphy.chandef.chan->center_freq <= 5350) {
  274. /* JPW53 */
  275. if (pulse->w1 <= 130)
  276. ret = (pulse->period >= 28360 &&
  277. (pulse->period <= 28700 ||
  278. pulse->period >= 76900) &&
  279. pulse->period <= 76940);
  280. break;
  281. }
  282. if (pulse->engine > 3)
  283. break;
  284. if (pulse->engine == 3) {
  285. ret = mt76x02_dfs_check_chirp(dev);
  286. break;
  287. }
  288. /* check short pulse*/
  289. if (pulse->w1 < 120)
  290. ret = (pulse->period >= 2900 &&
  291. (pulse->period <= 4700 ||
  292. pulse->period >= 6400) &&
  293. (pulse->period <= 6800 ||
  294. pulse->period >= 27560) &&
  295. (pulse->period <= 27960 ||
  296. pulse->period >= 28360) &&
  297. (pulse->period <= 28700 ||
  298. pulse->period >= 79900) &&
  299. pulse->period <= 80100);
  300. else if (pulse->w1 < 130) /* 120 - 130 */
  301. ret = (pulse->period >= 2900 &&
  302. (pulse->period <= 10100 ||
  303. pulse->period >= 27560) &&
  304. (pulse->period <= 27960 ||
  305. pulse->period >= 28360) &&
  306. (pulse->period <= 28700 ||
  307. pulse->period >= 79900) &&
  308. pulse->period <= 80100);
  309. else
  310. ret = (pulse->period >= 3900 &&
  311. pulse->period <= 10100);
  312. break;
  313. case NL80211_DFS_UNSET:
  314. default:
  315. return false;
  316. }
  317. return ret;
  318. }
  319. static bool mt76x02_dfs_fetch_event(struct mt76x02_dev *dev,
  320. struct mt76x02_dfs_event *event)
  321. {
  322. u32 data;
  323. /* 1st: DFS_R37[31]: 0 (engine 0) - 1 (engine 2)
  324. * 2nd: DFS_R37[21:0]: pulse time
  325. * 3rd: DFS_R37[11:0]: pulse width
  326. * 3rd: DFS_R37[25:16]: phase
  327. * 4th: DFS_R37[12:0]: current pwr
  328. * 4th: DFS_R37[21:16]: pwr stable counter
  329. *
  330. * 1st: DFS_R37[31:0] set to 0xffffffff means no event detected
  331. */
  332. data = mt76_rr(dev, MT_BBP(DFS, 37));
  333. if (!MT_DFS_CHECK_EVENT(data))
  334. return false;
  335. event->engine = MT_DFS_EVENT_ENGINE(data);
  336. data = mt76_rr(dev, MT_BBP(DFS, 37));
  337. event->ts = MT_DFS_EVENT_TIMESTAMP(data);
  338. data = mt76_rr(dev, MT_BBP(DFS, 37));
  339. event->width = MT_DFS_EVENT_WIDTH(data);
  340. return true;
  341. }
  342. static bool mt76x02_dfs_check_event(struct mt76x02_dev *dev,
  343. struct mt76x02_dfs_event *event)
  344. {
  345. if (event->engine == 2) {
  346. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  347. struct mt76x02_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
  348. u16 last_event_idx;
  349. u32 delta_ts;
  350. last_event_idx = mt76_decr(event_buff->t_rb,
  351. MT_DFS_EVENT_BUFLEN);
  352. delta_ts = event->ts - event_buff->data[last_event_idx].ts;
  353. if (delta_ts < MT_DFS_EVENT_TIME_MARGIN &&
  354. event_buff->data[last_event_idx].width >= 200)
  355. return false;
  356. }
  357. return true;
  358. }
  359. static void mt76x02_dfs_queue_event(struct mt76x02_dev *dev,
  360. struct mt76x02_dfs_event *event)
  361. {
  362. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  363. struct mt76x02_dfs_event_rb *event_buff;
  364. /* add radar event to ring buffer */
  365. event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
  366. : &dfs_pd->event_rb[0];
  367. event_buff->data[event_buff->t_rb] = *event;
  368. event_buff->data[event_buff->t_rb].fetch_ts = jiffies;
  369. event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN);
  370. if (event_buff->t_rb == event_buff->h_rb)
  371. event_buff->h_rb = mt76_incr(event_buff->h_rb,
  372. MT_DFS_EVENT_BUFLEN);
  373. }
  374. static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev,
  375. struct mt76x02_dfs_event *event,
  376. u16 cur_len)
  377. {
  378. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  379. struct mt76x02_dfs_sw_detector_params *sw_params;
  380. u32 width_delta, with_sum;
  381. struct mt76x02_dfs_sequence seq, *seq_p;
  382. struct mt76x02_dfs_event_rb *event_rb;
  383. struct mt76x02_dfs_event *cur_event;
  384. int i, j, end, pri, factor, cur_pri;
  385. event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
  386. : &dfs_pd->event_rb[0];
  387. i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN);
  388. end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN);
  389. while (i != end) {
  390. cur_event = &event_rb->data[i];
  391. with_sum = event->width + cur_event->width;
  392. sw_params = &dfs_pd->sw_dpd_params;
  393. switch (dev->mt76.region) {
  394. case NL80211_DFS_FCC:
  395. case NL80211_DFS_JP:
  396. if (with_sum < 600)
  397. width_delta = 8;
  398. else
  399. width_delta = with_sum >> 3;
  400. break;
  401. case NL80211_DFS_ETSI:
  402. if (event->engine == 2)
  403. width_delta = with_sum >> 6;
  404. else if (with_sum < 620)
  405. width_delta = 24;
  406. else
  407. width_delta = 8;
  408. break;
  409. case NL80211_DFS_UNSET:
  410. default:
  411. return -EINVAL;
  412. }
  413. pri = event->ts - cur_event->ts;
  414. if (abs(event->width - cur_event->width) > width_delta ||
  415. pri < sw_params->min_pri)
  416. goto next;
  417. if (pri > sw_params->max_pri)
  418. break;
  419. seq.pri = event->ts - cur_event->ts;
  420. seq.first_ts = cur_event->ts;
  421. seq.last_ts = event->ts;
  422. seq.engine = event->engine;
  423. seq.count = 2;
  424. j = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
  425. while (j != end) {
  426. cur_event = &event_rb->data[j];
  427. cur_pri = event->ts - cur_event->ts;
  428. factor = mt76x02_dfs_get_multiple(cur_pri, seq.pri,
  429. sw_params->pri_margin);
  430. if (factor > 0) {
  431. seq.first_ts = cur_event->ts;
  432. seq.count++;
  433. }
  434. j = mt76_decr(j, MT_DFS_EVENT_BUFLEN);
  435. }
  436. if (seq.count <= cur_len)
  437. goto next;
  438. seq_p = mt76x02_dfs_seq_pool_get(dev);
  439. if (!seq_p)
  440. return -ENOMEM;
  441. *seq_p = seq;
  442. INIT_LIST_HEAD(&seq_p->head);
  443. list_add(&seq_p->head, &dfs_pd->sequences);
  444. next:
  445. i = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
  446. }
  447. return 0;
  448. }
  449. static u16 mt76x02_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
  450. struct mt76x02_dfs_event *event)
  451. {
  452. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  453. struct mt76x02_dfs_sw_detector_params *sw_params;
  454. struct mt76x02_dfs_sequence *seq, *tmp_seq;
  455. u16 max_seq_len = 0;
  456. int factor, pri;
  457. sw_params = &dfs_pd->sw_dpd_params;
  458. list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
  459. if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
  460. list_del_init(&seq->head);
  461. mt76x02_dfs_seq_pool_put(dev, seq);
  462. continue;
  463. }
  464. if (event->engine != seq->engine)
  465. continue;
  466. pri = event->ts - seq->last_ts;
  467. factor = mt76x02_dfs_get_multiple(pri, seq->pri,
  468. sw_params->pri_margin);
  469. if (factor > 0) {
  470. seq->last_ts = event->ts;
  471. seq->count++;
  472. max_seq_len = max_t(u16, max_seq_len, seq->count);
  473. }
  474. }
  475. return max_seq_len;
  476. }
  477. static bool mt76x02_dfs_check_detection(struct mt76x02_dev *dev)
  478. {
  479. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  480. struct mt76x02_dfs_sequence *seq;
  481. if (list_empty(&dfs_pd->sequences))
  482. return false;
  483. list_for_each_entry(seq, &dfs_pd->sequences, head) {
  484. if (seq->count > MT_DFS_SEQUENCE_TH) {
  485. dfs_pd->stats[seq->engine].sw_pattern++;
  486. return true;
  487. }
  488. }
  489. return false;
  490. }
  491. static void mt76x02_dfs_add_events(struct mt76x02_dev *dev)
  492. {
  493. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  494. struct mt76x02_dfs_event event;
  495. int i, seq_len;
  496. /* disable debug mode */
  497. mt76x02_dfs_set_capture_mode_ctrl(dev, false);
  498. for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
  499. if (!mt76x02_dfs_fetch_event(dev, &event))
  500. break;
  501. if (dfs_pd->last_event_ts > event.ts)
  502. mt76x02_dfs_detector_reset(dev);
  503. dfs_pd->last_event_ts = event.ts;
  504. if (!mt76x02_dfs_check_event(dev, &event))
  505. continue;
  506. seq_len = mt76x02_dfs_add_event_to_sequence(dev, &event);
  507. mt76x02_dfs_create_sequence(dev, &event, seq_len);
  508. mt76x02_dfs_queue_event(dev, &event);
  509. }
  510. mt76x02_dfs_set_capture_mode_ctrl(dev, true);
  511. }
  512. static void mt76x02_dfs_check_event_window(struct mt76x02_dev *dev)
  513. {
  514. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  515. struct mt76x02_dfs_event_rb *event_buff;
  516. struct mt76x02_dfs_event *event;
  517. int i;
  518. for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
  519. event_buff = &dfs_pd->event_rb[i];
  520. while (event_buff->h_rb != event_buff->t_rb) {
  521. event = &event_buff->data[event_buff->h_rb];
  522. /* sorted list */
  523. if (time_is_after_jiffies(event->fetch_ts +
  524. MT_DFS_EVENT_WINDOW))
  525. break;
  526. event_buff->h_rb = mt76_incr(event_buff->h_rb,
  527. MT_DFS_EVENT_BUFLEN);
  528. }
  529. }
  530. }
  531. static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
  532. {
  533. struct mt76x02_dfs_pattern_detector *dfs_pd = from_tasklet(dfs_pd, t,
  534. dfs_tasklet);
  535. struct mt76x02_dev *dev = container_of(dfs_pd, typeof(*dev), dfs_pd);
  536. u32 engine_mask;
  537. int i;
  538. if (test_bit(MT76_SCANNING, &dev->mphy.state))
  539. goto out;
  540. if (time_is_before_jiffies(dfs_pd->last_sw_check +
  541. MT_DFS_SW_TIMEOUT)) {
  542. bool radar_detected;
  543. dfs_pd->last_sw_check = jiffies;
  544. mt76x02_dfs_add_events(dev);
  545. radar_detected = mt76x02_dfs_check_detection(dev);
  546. if (radar_detected) {
  547. /* sw detector rx radar pattern */
  548. ieee80211_radar_detected(dev->mt76.hw);
  549. mt76x02_dfs_detector_reset(dev);
  550. return;
  551. }
  552. mt76x02_dfs_check_event_window(dev);
  553. }
  554. engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
  555. if (!(engine_mask & 0xf))
  556. goto out;
  557. for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
  558. struct mt76x02_dfs_hw_pulse pulse;
  559. if (!(engine_mask & (1 << i)))
  560. continue;
  561. pulse.engine = i;
  562. mt76x02_dfs_get_hw_pulse(dev, &pulse);
  563. if (!mt76x02_dfs_check_hw_pulse(dev, &pulse)) {
  564. dfs_pd->stats[i].hw_pulse_discarded++;
  565. continue;
  566. }
  567. /* hw detector rx radar pattern */
  568. dfs_pd->stats[i].hw_pattern++;
  569. ieee80211_radar_detected(dev->mt76.hw);
  570. mt76x02_dfs_detector_reset(dev);
  571. return;
  572. }
  573. /* reset hw detector */
  574. mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
  575. out:
  576. mt76x02_irq_enable(dev, MT_INT_GPTIMER);
  577. }
  578. static void mt76x02_dfs_init_sw_detector(struct mt76x02_dev *dev)
  579. {
  580. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  581. switch (dev->mt76.region) {
  582. case NL80211_DFS_FCC:
  583. dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
  584. dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
  585. dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
  586. break;
  587. case NL80211_DFS_ETSI:
  588. dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI;
  589. dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI;
  590. dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2;
  591. break;
  592. case NL80211_DFS_JP:
  593. dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI;
  594. dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI;
  595. dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
  596. break;
  597. case NL80211_DFS_UNSET:
  598. default:
  599. break;
  600. }
  601. }
  602. static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
  603. {
  604. const struct mt76x02_radar_specs *radar_specs;
  605. u8 i, shift;
  606. u32 data;
  607. switch (dev->mphy.chandef.width) {
  608. case NL80211_CHAN_WIDTH_40:
  609. shift = MT_DFS_NUM_ENGINES;
  610. break;
  611. case NL80211_CHAN_WIDTH_80:
  612. shift = 2 * MT_DFS_NUM_ENGINES;
  613. break;
  614. default:
  615. shift = 0;
  616. break;
  617. }
  618. switch (dev->mt76.region) {
  619. case NL80211_DFS_FCC:
  620. radar_specs = &fcc_radar_specs[shift];
  621. break;
  622. case NL80211_DFS_ETSI:
  623. radar_specs = &etsi_radar_specs[shift];
  624. break;
  625. case NL80211_DFS_JP:
  626. if (dev->mphy.chandef.chan->center_freq >= 5250 &&
  627. dev->mphy.chandef.chan->center_freq <= 5350)
  628. radar_specs = &jp_w53_radar_specs[shift];
  629. else
  630. radar_specs = &jp_w56_radar_specs[shift];
  631. break;
  632. case NL80211_DFS_UNSET:
  633. default:
  634. return;
  635. }
  636. data = (MT_DFS_VGA_MASK << 16) |
  637. (MT_DFS_PWR_GAIN_OFFSET << 12) |
  638. (MT_DFS_PWR_DOWN_TIME << 8) |
  639. (MT_DFS_SYM_ROUND << 4) |
  640. (MT_DFS_DELTA_DELAY & 0xf);
  641. mt76_wr(dev, MT_BBP(DFS, 2), data);
  642. data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
  643. mt76_wr(dev, MT_BBP(DFS, 3), data);
  644. for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
  645. /* configure engine */
  646. mt76_wr(dev, MT_BBP(DFS, 0), i);
  647. /* detection mode + avg_len */
  648. data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
  649. (radar_specs[i].mode & 0xf);
  650. mt76_wr(dev, MT_BBP(DFS, 4), data);
  651. /* dfs energy */
  652. data = ((radar_specs[i].e_high & 0x0fff) << 16) |
  653. (radar_specs[i].e_low & 0x0fff);
  654. mt76_wr(dev, MT_BBP(DFS, 5), data);
  655. /* dfs period */
  656. mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
  657. mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
  658. /* dfs burst */
  659. mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
  660. mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
  661. /* dfs width */
  662. data = ((radar_specs[i].w_high & 0x0fff) << 16) |
  663. (radar_specs[i].w_low & 0x0fff);
  664. mt76_wr(dev, MT_BBP(DFS, 14), data);
  665. /* dfs margins */
  666. data = (radar_specs[i].w_margin << 16) |
  667. radar_specs[i].t_margin;
  668. mt76_wr(dev, MT_BBP(DFS, 15), data);
  669. /* dfs event expiration */
  670. mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
  671. /* dfs pwr adj */
  672. mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
  673. }
  674. /* reset status */
  675. mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
  676. mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
  677. /* enable detection*/
  678. mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
  679. mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001);
  680. }
  681. void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev)
  682. {
  683. u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
  684. agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
  685. agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
  686. val_r8 = (agc_r8 & 0x00007e00) >> 9;
  687. val_r4 = agc_r4 & ~0x1f000000;
  688. val_r4 += (((val_r8 + 1) >> 1) << 24);
  689. mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
  690. dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
  691. dfs_r31 += val_r8;
  692. dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
  693. dfs_r31 = (dfs_r31 << 16) | 0x00000307;
  694. mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
  695. if (is_mt76x2(dev)) {
  696. mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
  697. } else {
  698. /* disable hw detector */
  699. mt76_wr(dev, MT_BBP(DFS, 0), 0);
  700. /* enable hw detector */
  701. mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
  702. }
  703. }
  704. EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
  705. void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
  706. {
  707. struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
  708. if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
  709. dev->mt76.region != NL80211_DFS_UNSET) {
  710. mt76x02_dfs_init_sw_detector(dev);
  711. mt76x02_dfs_set_bbp_params(dev);
  712. /* enable debug mode */
  713. mt76x02_dfs_set_capture_mode_ctrl(dev, true);
  714. mt76x02_irq_enable(dev, MT_INT_GPTIMER);
  715. mt76_rmw_field(dev, MT_INT_TIMER_EN,
  716. MT_INT_TIMER_EN_GP_TIMER_EN, 1);
  717. } else {
  718. /* disable hw detector */
  719. mt76_wr(dev, MT_BBP(DFS, 0), 0);
  720. /* clear detector status */
  721. mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
  722. if (mt76_chip(&dev->mt76) == 0x7610 ||
  723. mt76_chip(&dev->mt76) == 0x7630)
  724. mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081);
  725. else
  726. mt76_wr(dev, MT_BBP(IBI, 11), 0);
  727. mt76x02_irq_disable(dev, MT_INT_GPTIMER);
  728. mt76_rmw_field(dev, MT_INT_TIMER_EN,
  729. MT_INT_TIMER_EN_GP_TIMER_EN, 0);
  730. }
  731. }
  732. EXPORT_SYMBOL_GPL(mt76x02_dfs_init_params);
  733. void mt76x02_dfs_init_detector(struct mt76x02_dev *dev)
  734. {
  735. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  736. INIT_LIST_HEAD(&dfs_pd->sequences);
  737. INIT_LIST_HEAD(&dfs_pd->seq_pool);
  738. dev->mt76.region = NL80211_DFS_UNSET;
  739. dfs_pd->last_sw_check = jiffies;
  740. tasklet_setup(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet);
  741. }
  742. static void
  743. mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
  744. enum nl80211_dfs_regions region)
  745. {
  746. struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
  747. mutex_lock(&dev->mt76.mutex);
  748. if (dev->mt76.region != region) {
  749. tasklet_disable(&dfs_pd->dfs_tasklet);
  750. dev->ed_monitor = dev->ed_monitor_enabled &&
  751. region == NL80211_DFS_ETSI;
  752. mt76x02_edcca_init(dev);
  753. dev->mt76.region = region;
  754. mt76x02_dfs_init_params(dev);
  755. tasklet_enable(&dfs_pd->dfs_tasklet);
  756. }
  757. mutex_unlock(&dev->mt76.mutex);
  758. }
  759. void mt76x02_regd_notifier(struct wiphy *wiphy,
  760. struct regulatory_request *request)
  761. {
  762. struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
  763. struct mt76x02_dev *dev = hw->priv;
  764. mt76x02_dfs_set_domain(dev, request->dfs_region);
  765. }