@@ -1184,43 +1184,77 @@ void mt7921_update_channel(struct mt76_dev *mdev)
         mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
 }
 
-static bool
-mt7921_wait_reset_state(struct mt7921_dev *dev, u32 state)
+static int
+mt7921_wfsys_reset(struct mt7921_dev *dev)
 {
-        bool ret;
+        mt76_set(dev, 0x70002600, BIT(0));
+        msleep(200);
+        mt76_clear(dev, 0x70002600, BIT(0));
 
-        ret = wait_event_timeout(dev->reset_wait,
-                                 (READ_ONCE(dev->reset_state) & state),
-                                 MT7921_RESET_TIMEOUT);
-
-        WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
-        return ret;
+        return __mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
+                                WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500);
 }
 
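The new mt7921_wfsys_reset() replaces the old MCU-event handshake: it pulses the WFSYS software-reset bit, sleeps, and then polls for the init-done flag. As a reference for the polling semantics of __mt76_poll_msec() used above, here is a minimal standalone sketch; the reg_read() stub, the demo offset, and the bit position are invented for illustration and are not the driver's real accessors. Note the driver returns the poll result directly, so a zero return means the init-done flag never appeared.

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define WFSYS_SW_INIT_DONE (1u << 4)    /* bit position assumed for the demo */

/* Illustrative stub; in the driver this is an MMIO register read. */
static uint32_t reg_read(uint32_t offset)
{
        (void)offset;
        return WFSYS_SW_INIT_DONE;      /* pretend the reset already completed */
}

/* Poll until (reg & mask) == val or timeout_ms expires, roughly what
 * __mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B, ...) does above. */
static bool poll_msec(uint32_t offset, uint32_t mask, uint32_t val,
                      int timeout_ms)
{
        do {
                if ((reg_read(offset) & mask) == val)
                        return true;
                usleep(10 * 1000);      /* re-check every ~10 ms */
                timeout_ms -= 10;
        } while (timeout_ms > 0);

        return false;                   /* caller treats this as failure */
}

int main(void)
{
        return poll_msec(0x140, WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500)
                ? 0 : 1;
}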
 static void
-mt7921_dma_reset(struct mt7921_phy *phy)
+mt7921_dma_reset(struct mt7921_dev *dev)
 {
-        struct mt7921_dev *dev = phy->dev;
         int i;
 
+        /* reset */
+        mt76_clear(dev, MT_WFDMA0_RST,
+                   MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST);
+
+        mt76_set(dev, MT_WFDMA0_RST,
+                 MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST);
+
+        /* disable WFDMA0 */
         mt76_clear(dev, MT_WFDMA0_GLO_CFG,
-                   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+                   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+                   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+                   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+                   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+                   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
 
-        usleep_range(1000, 2000);
+        mt76_poll(dev, MT_WFDMA0_GLO_CFG,
+                  MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
+                  MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000);
 
-        mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
+        /* reset hw queues */
         for (i = 0; i < __MT_TXQ_MAX; i++)
-                mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
+                mt76_queue_reset(dev, dev->mphy.q_tx[i]);
 
-        mt76_for_each_q_rx(&dev->mt76, i) {
-                mt76_queue_rx_reset(dev, i);
-        }
+        for (i = 0; i < __MT_MCUQ_MAX; i++)
+                mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+
+        mt76_for_each_q_rx(&dev->mt76, i)
+                mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
 
-        /* re-init prefetch settings after reset */
+        /* configure prefetch settings */
         mt7921_dma_prefetch(dev);
 
+        /* reset dma idx */
+        mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+
+        /* configure delay interrupt */
+        mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
+
+        mt76_set(dev, MT_WFDMA0_GLO_CFG,
+                 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
+                 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
+                 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
+                 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+                 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+                 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
         mt76_set(dev, MT_WFDMA0_GLO_CFG,
                  MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+        mt76_set(dev, 0x54000120, BIT(1));
+
+        /* enable interrupts for TX/RX rings */
+        mt7921_irq_enable(dev,
+                          MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+                          MT_INT_MCU_CMD);
 }
 
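The rewritten mt7921_dma_reset() now performs a full quiesce-and-reinit rather than only toggling the DMA enables: logic/DMASHDL reset, disable WFDMA0, poll the busy flags until idle, reset every TX/MCU/RX ring, reprogram prefetch and GLO_CFG, and only then re-enable DMA and unmask the ring interrupts. Below is a condensed, self-contained sketch of that ordering; the single-register model and bit names are invented for the example.

#include <stdbool.h>
#include <stdint.h>

/* Invented one-register model of a DMA engine, for illustration only. */
enum {
        DMA_TX_EN   = 1u << 0,
        DMA_TX_BUSY = 1u << 1,
        DMA_RX_EN   = 1u << 2,
        DMA_RX_BUSY = 1u << 3,
};

static uint32_t glo_cfg = DMA_TX_EN | DMA_RX_EN;

static bool dma_quiesce_and_restart(void)
{
        int timeout = 1000;

        /* 1. stop the engines so no new descriptors are fetched */
        glo_cfg &= ~(DMA_TX_EN | DMA_RX_EN);

        /* 2. bounded wait for in-flight transfers to drain, the role
         *    mt76_poll() plays on the BUSY bits above */
        while ((glo_cfg & (DMA_TX_BUSY | DMA_RX_BUSY)) && --timeout)
                ;
        if (!timeout)
                return false;

        /* 3. safe point: reset rings, reprogram prefetch/GLO_CFG here */

        /* 4. re-enable DMA and, in the driver, unmask the interrupts */
        glo_cfg |= DMA_TX_EN | DMA_RX_EN;

        return true;
}

int main(void)
{
        return dma_quiesce_and_restart() ? 0 : 1;
}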
 void mt7921_tx_token_put(struct mt7921_dev *dev)
@@ -1244,71 +1278,125 @@ void mt7921_tx_token_put(struct mt7921_dev *dev)
         idr_destroy(&dev->token);
 }
 
-/* system error recovery */
-void mt7921_mac_reset_work(struct work_struct *work)
+static void
+mt7921_vif_connect_iter(void *priv, u8 *mac,
+                        struct ieee80211_vif *vif)
 {
-        struct mt7921_dev *dev;
+        struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+        struct mt7921_dev *dev = mvif->phy->dev;
 
-        dev = container_of(work, struct mt7921_dev, reset_work);
+        ieee80211_disconnect(vif, true);
 
-        if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
-                return;
+        mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
+        mt7921_mcu_set_tx(dev, vif);
+}
+
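A whole-chip reset wipes the firmware's association state, so after recovery each active vif is disconnected and re-registered with the MCU, prompting the supplicant to reconnect. The disconnect half of that pattern uses stock mac80211 APIs; here is a minimal driver-context sketch of the same shape (not standalone kernel code, names other than the mac80211 calls are illustrative).

#include <net/mac80211.h>

/* Called once per active interface by mac80211. */
static void reconnect_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
        /* true: ask userspace to re-associate after the disconnect */
        ieee80211_disconnect(vif, true);
}

static void force_reconnect(struct ieee80211_hw *hw)
{
        ieee80211_iterate_active_interfaces(hw,
                                            IEEE80211_IFACE_ITER_RESUME_ALL,
                                            reconnect_iter, NULL);
}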
+static int
+mt7921_mac_reset(struct mt7921_dev *dev)
+{
+        int i, err;
+
+        mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
 
-        ieee80211_stop_queues(mt76_hw(dev));
+        mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+        mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
 
-        set_bit(MT76_RESET, &dev->mphy.state);
         set_bit(MT76_MCU_RESET, &dev->mphy.state);
         wake_up(&dev->mt76.mcu.wait);
-        cancel_delayed_work_sync(&dev->mphy.mac_work);
+        skb_queue_purge(&dev->mt76.mcu.res_q);
 
-        /* lock/unlock all queues to ensure that no tx is pending */
         mt76_txq_schedule_all(&dev->mphy);
 
         mt76_worker_disable(&dev->mt76.tx_worker);
-        napi_disable(&dev->mt76.napi[0]);
-        napi_disable(&dev->mt76.napi[1]);
-        napi_disable(&dev->mt76.napi[2]);
+        napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
+        napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
+        napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
         napi_disable(&dev->mt76.tx_napi);
 
-        mt7921_mutex_acquire(dev);
-
-        mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
-
         mt7921_tx_token_put(dev);
         idr_init(&dev->token);
 
-        if (mt7921_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
-                mt7921_dma_reset(&dev->phy);
+        /* clean up hw queues */
+        for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
+                mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
 
-                mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
-                mt7921_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
-        }
+        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
+                mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
 
-        clear_bit(MT76_MCU_RESET, &dev->mphy.state);
-        clear_bit(MT76_RESET, &dev->mphy.state);
+        mt76_for_each_q_rx(&dev->mt76, i)
+                mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
+
+        mt7921_wfsys_reset(dev);
+        mt7921_dma_reset(dev);
+
+        mt76_for_each_q_rx(&dev->mt76, i) {
+                mt76_queue_rx_reset(dev, i);
+                napi_enable(&dev->mt76.napi[i]);
+                napi_schedule(&dev->mt76.napi[i]);
+        }
 
-        mt76_worker_enable(&dev->mt76.tx_worker);
         napi_enable(&dev->mt76.tx_napi);
         napi_schedule(&dev->mt76.tx_napi);
+        mt76_worker_enable(&dev->mt76.tx_worker);
 
-        napi_enable(&dev->mt76.napi[0]);
-        napi_schedule(&dev->mt76.napi[0]);
+        clear_bit(MT76_MCU_RESET, &dev->mphy.state);
 
-        napi_enable(&dev->mt76.napi[1]);
-        napi_schedule(&dev->mt76.napi[1]);
+        mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+        mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+        mt7921_irq_enable(dev,
+                          MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+                          MT_INT_MCU_CMD);
 
-        napi_enable(&dev->mt76.napi[2]);
-        napi_schedule(&dev->mt76.napi[2]);
+        err = mt7921_run_firmware(dev);
+        if (err)
+                return err;
 
-        ieee80211_wake_queues(mt76_hw(dev));
+        err = mt7921_mcu_set_eeprom(dev);
+        if (err)
+                return err;
 
-        mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
-        mt7921_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+        mt7921_mac_init(dev);
+        return __mt7921_start(&dev->phy);
+}
 
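With the hardware quiesced and reset, mt7921_mac_reset() replays the normal bring-up chain, reload firmware, push the EEPROM, re-run MAC init, restart the PHY, and returns at the first failure so the caller can retry. A trivial standalone illustration of that early-return shape; the stub names are placeholders, not driver functions.

#include <stdio.h>

/* Placeholder stubs for the bring-up steps; each returns 0 on success. */
static int run_firmware(void) { return 0; }
static int set_eeprom(void)   { return 0; }
static int start_phy(void)    { return 0; }

/* Same early-return shape as the tail of mt7921_mac_reset() above. */
static int restart_chain(void)
{
        int err;

        err = run_firmware();
        if (err)
                return err;

        err = set_eeprom();
        if (err)
                return err;

        return start_phy();
}

int main(void)
{
        printf("restart_chain() = %d\n", restart_chain());
        return 0;
}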
-        mt7921_mutex_release(dev);
+/* system error recovery */
+void mt7921_mac_reset_work(struct work_struct *work)
+{
+        struct ieee80211_hw *hw;
+        struct mt7921_dev *dev;
+        int i;
 
-        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
-                                     MT7921_WATCHDOG_TIME);
+        dev = container_of(work, struct mt7921_dev, reset_work);
+        hw = mt76_hw(dev);
+
+        dev_err(dev->mt76.dev, "chip reset\n");
+        ieee80211_stop_queues(hw);
+
+        cancel_delayed_work_sync(&dev->mphy.mac_work);
+        cancel_delayed_work_sync(&dev->pm.ps_work);
+        cancel_work_sync(&dev->pm.wake_work);
+
+        mutex_lock(&dev->mt76.mutex);
+        for (i = 0; i < 10; i++) {
+                if (!mt7921_mac_reset(dev))
+                        break;
+        }
+        mutex_unlock(&dev->mt76.mutex);
+
+        if (i == 10)
+                dev_err(dev->mt76.dev, "chip reset failed\n");
+
+        ieee80211_wake_queues(hw);
+        ieee80211_iterate_active_interfaces(hw,
+                                            IEEE80211_IFACE_ITER_RESUME_ALL,
+                                            mt7921_vif_connect_iter, 0);
+}
+
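The work handler serializes the recovery under dev->mt76.mutex and gives the full mt7921_mac_reset() up to ten attempts; i == 10 after the loop is the everything-failed case. The same bounded-retry shape in a standalone sketch, with an invented stub that fails twice before succeeding:

#include <stdio.h>

/* Invented stub standing in for mt7921_mac_reset(): fails twice, then
 * succeeds, to exercise the retry loop. Returns 0 on success. */
static int try_reset(void)
{
        static int attempts;

        return ++attempts < 3 ? -1 : 0;
}

int main(void)
{
        int i;

        for (i = 0; i < 10; i++) {      /* bounded retry, as in the patch */
                if (!try_reset())
                        break;
        }

        if (i == 10)
                fprintf(stderr, "chip reset failed\n");
        else
                printf("chip reset ok after %d attempt(s)\n", i + 1);

        return 0;
}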
+void mt7921_reset(struct mt76_dev *mdev)
+{
+        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+        queue_work(dev->mt76.wq, &dev->reset_work);
 }
 
 static void
@@ -1505,4 +1593,5 @@ void mt7921_coredump_work(struct work_struct *work)
         }
         dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
                       GFP_KERNEL);
+        mt7921_reset(&dev->mt76);
 }
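Routing the reset through dev->mt76.wq matters because the recovery path sleeps (msleep(), mutexes, firmware reload), while the places that detect the failure, such as this coredump completion or an interrupt handler, may not. A driver-context sketch of the deferral, assuming the work item is bound to the handler once at probe time as is typical for mt76 drivers:

#include <linux/workqueue.h>

/*
 * Assumed probe-time setup (not standalone code): bind the work item
 * to the handler so mt7921_reset() only has to queue it.
 */
static void example_init(struct mt7921_dev *dev)
{
        INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
}

mt7921_reset() itself is then cheap and safe to call from atomic context: it only queues the work, and the heavy lifting runs later in process context.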