This commit is contained in:
刘可亮
2024-09-03 11:16:08 +08:00
parent cf270df8d6
commit 803cac77d5
2931 changed files with 614364 additions and 31222 deletions

View File

@@ -0,0 +1,827 @@
/**
****************************************************************************************
*
* @file uwifi_ap_api.c
*
* @brief Additional API functions for AP mode
*
* Copyright (C) ASR
*
****************************************************************************************
*/
#include "uwifi_common.h"
#include "asr_wlan_api.h"
#include "asr_dbg.h"
#include "hostapd.h"
#include "uwifi_msg_rx.h"
#include "asr_wlan_api.h"
#include "asr_rtos_api.h"
#include "uwifi_wlan_list.h"
#ifdef CFG_ADD_API
uap_config_scan_chan g_scan_results ;
uint8_t g_scan_results_flag;
void Set_Scan_Chan(uap_config_scan_chan _chan)
{
memcpy(&g_scan_results,&_chan,sizeof(uap_config_scan_chan));
}
uap_config_scan_chan Get_Scan_Chan(void)
{
return g_scan_results;
}
void Set_Scan_Flag(uint8_t _flag)
{
g_scan_results_flag = _flag ;
}
uint8_t Get_Scan_Flag(void)
{
return g_scan_results_flag ;
}
INT32 UAP_Stalist(sta_list *list)
{
int i = 0,j=0;
for(i = 0; i < AP_MAX_ASSOC_NUM; i++)
{
if(g_ap_user_info.sta_table[i].aid)
{
memcpy(list->info[j++].mac_address,g_ap_user_info.sta_table[i].mac_addr, MAC_ADDR_LEN);//g_wpa_sta_info
}
}
list->sta_count = g_ap_user_info.connect_peer_num;
//TBD
//power_mfg_status;
//rssi;
// for debug test
dbg(D_ERR, D_UWIFI_CTRL, "%s: sta_count %d\r\n", __func__,list->sta_count);
for(i = 0; i < list->sta_count; i++)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:[%d] %02x %02x %02x %02x %02x %02x\r\n", __func__,i,list->info[i].mac_address[0],list->info[i].mac_address[1],
list->info[i].mac_address[2],list->info[i].mac_address[3],list->info[i].mac_address[4],list->info[i].mac_address[5]);
}
return 0;
}
// UINT16 pkt_fwd_ctl;
// Bit 0: Packet forwarding handled by Host (0) or Firmware (1)
// Bit 1: Intra-BSS broadcast packets are allowed (0) or denied (1)
// Bit 2: Intra-BSS unicast packets are allowed (0) or denied (1)
// Bit 3: Inter-BSS unicast packets are allowed (0) or denied (1)
INT32 UAP_PktFwd_Ctl(uap_pkt_fwd_ctl *param)
{
if(GET_ACTION != param->action && SET_ACTION != param->action)
return -1;
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
if(GET_ACTION == param->action)
{
param->pkt_fwd_ctl = (asr_vif->ap.flags>>0)&1;
}
else if(SET_ACTION == param->action)
{
uint8_t bit0 = (param->pkt_fwd_ctl>>0)&1;
if ((ASR_VIF_TYPE(asr_vif) == NL80211_IFTYPE_AP) && ( bit0 > -1))
{
if (bit0)
asr_vif->ap.flags |= ASR_AP_ISOLATE;
else
asr_vif->ap.flags &= ~ASR_AP_ISOLATE;
}
}
else
return -1;
dbg(D_ERR, D_UWIFI_CTRL, "%s: action=%d,pkt_fwd_ctl=%d,ap.flags=%d \r\n", __func__, param->action,param->pkt_fwd_ctl,asr_vif->ap.flags);
dbg(D_ERR, D_UWIFI_CTRL, "%s: done \r\n", __func__);
return 0;
}
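/*
 * Illustrative usage sketch (not compiled in, guarded by #if 0): how a caller
 * could toggle intra-BSS isolation through bit 0 of pkt_fwd_ctl, following the
 * bit layout documented above. Only names already used in this file are relied
 * on; everything else is an assumption.
 */
#if 0
static int example_enable_ap_isolation(void)
{
    uap_pkt_fwd_ctl ctl;

    memset(&ctl, 0, sizeof(ctl));
    ctl.action = SET_ACTION;
    ctl.pkt_fwd_ctl = 1; /* bit 0 set -> ASR_AP_ISOLATE */
    return UAP_PktFwd_Ctl(&ctl);
}
#endif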
INT32 UAP_GetCurrentChannel(void)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
dbg(D_ERR, D_UWIFI_CTRL, "%s: get channel %d \r\n", __func__,asr_vif->ap.chan_num);
return asr_vif->ap.chan_num;
}
INT32 UAP_Scan_Channels_Config(uap_config_scan_chan *param)
{
if(GET_ACTION != param->action && SET_ACTION != param->action)
return -1;
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
if(asr_vif->iftype != NL80211_IFTYPE_AP)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s: must in ap mode!\r\n",__func__);
return -1;
}
if(GET_ACTION == param->action)
{
param->band = asr_vif->ap.st_chan.band;
param->total_chan = asr_vif->ap.chan_num; // in get mode,total_chan is the current channel
}
else if(SET_ACTION == param->action)
{
Set_Scan_Flag(0);
asr_wlan_start_scan();// blocks until the scan is done
Get_Scan_Flag();
uap_config_scan_chan _scan_results = Get_Scan_Chan();
param->band = asr_vif->ap.st_chan.band ;//2.4GHz
param->total_chan = _scan_results.total_chan;
int i = 0;
uint8_t channel_toset = 1, channel_num_min = _scan_results.chan_num[1];// channel index starts at 1
dbg(D_ERR, D_UWIFI_CTRL, "%s: total_chan=%d\r\n", __func__,param->total_chan);
for(i=1;i<MAX_CHANNELS;++i)
{
param->chan_num[i] = _scan_results.chan_num[i];
if(param->chan_num[i])
dbg(D_ERR, D_UWIFI_CTRL, "%s: [%d] chan_num=%d\r\n", __func__,i,param->chan_num[i]);
if(channel_num_min >param->chan_num[i])
{
channel_num_min = param->chan_num[i];
channel_toset = i;
}
}
dbg(D_ERR, D_UWIFI_CTRL, "%s:channel is %d\r\n", __func__,channel_toset);
uwifi_ap_channel_change(channel_toset);
param->total_chan = channel_toset; //use var total_chan to store channel_toset
}
else
return -1;
dbg(D_ERR, D_UWIFI_CTRL, "%s: done band %d chan %d \r\n", __func__,param->band ,param->total_chan);
return 0;
}
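/*
 * Illustrative sketch (not compiled in): in SET mode the function above runs a
 * blocking scan and moves the AP to the least used channel; the selected
 * channel is returned in total_chan, as noted in the code.
 */
#if 0
static int example_ap_auto_channel(void)
{
    uap_config_scan_chan cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.action = SET_ACTION;
    if (UAP_Scan_Channels_Config(&cfg))
        return -1;
    dbg(D_INF, D_UWIFI_CTRL, "AP moved to channel %d\r\n", cfg.total_chan);
    return 0;
}
#endif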
INT32 UAP_Sta_Deauth(uap_802_11_mac_addr mac)
{
struct uwifi_ap_peer_info *cur,*tmp;
struct asr_vif *asr_vif = NULL;
uint8_t found = 0;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if( NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
if(asr_vif->iftype != NL80211_IFTYPE_AP)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s: must in ap mode!\r\n",__func__);
return -1;
}
list_for_each_entry_safe(cur, tmp, &asr_vif->ap.peer_sta_list, list)
{
if (!memcmp(cur->peer_addr, mac, MAC_ADDR_LEN))
{
found++ ;
break;
}
}
if(!found)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:UAP_Death no mac match \r\n", __func__);
return -1;
}
uwifi_hostapd_handle_deauth_msg((uint32_t)cur);
dbg(D_ERR, D_UWIFI_CTRL, "%s:UAP_Death ", __func__);
return 0;
}
INT32 UAP_Black_List_Onoff( uint8_t onoff)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
asr_vif->ap.black_state = onoff;
dbg(D_CRT,D_UWIFI_CTRL,"%s: onoff=%d \r\n",__func__,asr_vif->ap.black_state);
return 0;
}
INT32 UAP_Black_List_Get(blacklist *black_l)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
uint8_t found = 0,i = 0 ;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
list_for_each_entry_safe(cur, tmp, &asr_vif->ap.peer_black_list, list)
{
if(cur)
{
memcpy(black_l->peer_addr[found],cur->peer_addr,MAC_ADDR_LEN);
found ++;
}
}
black_l->onoff = asr_vif->ap.black_state;
black_l->count = found;
dbg(D_CRT,D_UWIFI_CTRL,"%s:the list len is %d onoff=%d \r\n",__func__,black_l->count,black_l->onoff );
if(!found)
{
dbg(D_CRT, D_UWIFI_CTRL, "%s:black list is not exist \r\n",__func__);
return -1;
}
for(i=0;i<black_l->count;++i)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:the list[%d] mac %02x:%02x:%02x:%02x:%02x:%02x \r\n",__func__,i,
black_l->peer_addr[i][0],black_l->peer_addr[i][1],black_l->peer_addr[i][2],
black_l->peer_addr[i][3],black_l->peer_addr[i][4],black_l->peer_addr[i][5]);
}
return 0;
}
INT32 UAP_Black_List_Add( uap_802_11_mac_addr blackmac)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
uint8_t found = 0,list_len=0;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
list_for_each_entry_safe(cur, tmp, &asr_vif->ap.peer_black_list, list)
{
if(cur)
{
list_len++;
if (!memcmp(cur->peer_addr, blackmac, MAC_ADDR_LEN))
{
found ++;
break;
}
}
}
if(found)
{
dbg(D_CRT, D_UWIFI_CTRL, "%s:black list have added the mac %02x:%02x:%02x:%02x:%02x:%02x\r\n",__func__,
blackmac[0],blackmac[1],blackmac[2],blackmac[3],blackmac[4],blackmac[5]);
return -1;
}
dbg(D_CRT, D_UWIFI_CTRL, "%s: count=%d\r\n",__func__,found);
list_len = list_len%AP_MAX_BLACK_NUM;
memcpy(asr_vif->ap.peer_black[list_len].peer_addr, blackmac, MAC_ADDR_LEN);
list_add_tail(&(asr_vif->ap.peer_black[list_len].list), &asr_vif->ap.peer_black_list);
return 0;
}
INT32 UAP_Black_List_Del( uap_802_11_mac_addr blackmac)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
uint8_t found = 0;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
list_for_each_entry_safe(cur, tmp, &asr_vif->ap.peer_black_list, list)
{
if (!memcmp(cur->peer_addr, blackmac, MAC_ADDR_LEN))
{
found ++;
break;
}
}
if(!found)
{
dbg(D_CRT, D_UWIFI_CTRL, "%s:black list the mac not exist %02x:%02x:%02x:%02x:%02x:%02x\r\n",__func__,
blackmac[0],blackmac[1],blackmac[2],blackmac[3],blackmac[4],blackmac[5]);
return -1;
}
dbg(D_CRT,D_UWIFI_CTRL,"%s: found=%d\r\n",__func__,found);
list_del(&(asr_vif->ap.peer_black[found-1].list));
memset(&(asr_vif->ap.peer_black[found-1]), 0, sizeof(struct list_mac));
return 0;
}
INT32 UAP_Black_List_Clear(void)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
uint8_t found = 0;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
dbg(D_CRT,D_UWIFI_CTRL,"%s: found=%d\r\n",__func__,found);
if(!list_empty(&asr_vif->ap.peer_black_list))
{
list_del_init(&asr_vif->ap.peer_black_list);
}
return 0;
}
INT32 UAP_White_List_Onoff(uint8_t onoff)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
asr_vif->ap.white_state = onoff;
dbg(D_CRT,D_UWIFI_CTRL,"%s: white_state=%d \r\n",__func__,asr_vif->ap.white_state);
return 0;
}
INT32 UAP_White_List_Get(whitelist *white_l)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
uint8_t found = 0,i = 0 ;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
list_for_each_entry_safe(cur, tmp, &asr_vif->ap.peer_white_list, list)
{
if(cur)
{
memcpy(white_l->peer_addr[found],cur->peer_addr,MAC_ADDR_LEN);
found ++;
}
}
white_l->onoff = asr_vif->ap.white_state ;
white_l->count = found;
dbg(D_CRT,D_UWIFI_CTRL,"%s:the list len is %d onoff=%d \r\n",__func__,white_l->count,white_l->onoff );
if(!found)
{
dbg(D_CRT, D_UWIFI_CTRL, "%s:white list is not exist \r\n",__func__);
return -1;
}
for(i=0;i<white_l->count;++i)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:the list[%d] mac %02x:%02x:%02x:%02x:%02x:%02x \r\n",__func__,i,
white_l->peer_addr[i][0],white_l->peer_addr[i][1],white_l->peer_addr[i][2],
white_l->peer_addr[i][3],white_l->peer_addr[i][4],white_l->peer_addr[i][5]);
}
return 0;
}
INT32 UAP_White_List_Add( uap_802_11_mac_addr whitemac)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
uint8_t found = 0,list_len=0;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
list_for_each_entry_safe(cur, tmp, &asr_vif->ap.peer_white_list, list)
{
if(cur)
{
list_len++;
if (!memcmp(cur->peer_addr, whitemac, MAC_ADDR_LEN))
{
found ++;
break;
}
}
}
if(found)
{
dbg(D_CRT, D_UWIFI_CTRL, "%s:black list have added the mac %02x:%02x:%02x:%02x:%02x:%02x\r\n",__func__,
whitemac[0],whitemac[1],whitemac[2],whitemac[3],whitemac[4],whitemac[5]);
return -1;
}
dbg(D_CRT, D_UWIFI_CTRL, "%s: count=%d\r\n",__func__,found);
list_len = list_len % AP_MAX_WHITE_NUM;
memcpy(asr_vif->ap.peer_white[list_len].peer_addr, whitemac, MAC_ADDR_LEN);
list_add_tail(&(asr_vif->ap.peer_white[list_len].list), &asr_vif->ap.peer_white_list);
return 0;
}
INT32 UAP_White_List_Del( uap_802_11_mac_addr whitemac)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
uint8_t found = 0;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
list_for_each_entry_safe(cur, tmp, &asr_vif->ap.peer_white_list, list)
{
if (!memcmp(cur->peer_addr, whitemac, MAC_ADDR_LEN))
{
found ++;
break;
}
}
if(!found)
{
dbg(D_CRT, D_UWIFI_CTRL, "%s:white list the mac not exist %02x:%02x:%02x:%02x:%02x:%02x\r\n",__func__,
whitemac[0],whitemac[1],whitemac[2],whitemac[3],whitemac[4],whitemac[5]);
return -1;
}
dbg(D_CRT,D_UWIFI_CTRL,"%s: found=%d\r\n",__func__,found);
list_del(&(asr_vif->ap.peer_white[found-1].list));
memset(&(asr_vif->ap.peer_white[found-1]), 0, sizeof(struct list_mac));
return 0;
}
INT32 UAP_White_List_Clear(void)
{
struct list_mac *cur,*tmp;
struct asr_vif *asr_vif = NULL;
uint8_t found = 0;
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL\r\n",__func__);
return -1;
}
if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
}
if (NULL == asr_vif)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:no vif!\r\n",__func__);
return -1;
}
dbg(D_CRT,D_UWIFI_CTRL,"%s: found=%d\r\n",__func__,found);
if(!list_empty(&asr_vif->ap.peer_white_list))
{
list_del_init(&asr_vif->ap.peer_white_list);
}
return 0;
}
INT32 UAP_Params_Config(uap_config_param * uap_param)
{
if(GET_ACTION == uap_param->action)
{
whitelist white_l;
blacklist black_l;
UAP_White_List_Get(&white_l);
if(!white_l.onoff)
{// blacklist mode
UAP_Black_List_Get(&black_l);
if(!black_l.onoff)
{
uap_param->filter.filter_mode = DISABLE_FILTER;
// if disable then get nothing
return -1;
}
uap_param->filter.filter_mode = BLACK_FILTER;
uap_param->filter.mac_count = black_l.count;
int i = 0 ;
for(i=0;i<black_l.count;++i)
{
memcpy(uap_param->filter.mac_list[i], black_l.peer_addr[i], MAC_ADDR_LEN);
}
return 0; // black-list mode handled; do not fall through to the white-list path
}
uap_param->filter.filter_mode = WHITE_FILTER;
uap_param->filter.mac_count = white_l.count;
int i = 0 ;
for(i=0;i<white_l.count;++i)
{
memcpy(uap_param->filter.mac_list[i], white_l.peer_addr[i], MAC_ADDR_LEN);
}
}
else if(SET_ACTION == uap_param->action)
{
if(DISABLE_FILTER == uap_param->filter.filter_mode)
{
UAP_Black_List_Onoff(DOFF);
UAP_White_List_Onoff(DOFF);
//if disable then set nothing
return -1;
}
else if(WHITE_FILTER == uap_param->filter.filter_mode)
{
int list_len = uap_param->filter.mac_count;
if(AP_MAX_WHITE_NUM < list_len)
{
//max list is 16
return -1;
}
UAP_Black_List_Onoff(DOFF);
UAP_White_List_Onoff(DON);
UAP_White_List_Clear();
int i = 0 ;
for(i=0;i<list_len;++i)
{
UAP_White_List_Add( uap_param->filter.mac_list[i]);
}
}
else if(BLACK_FILTER == uap_param->filter.filter_mode)
{
int list_len = uap_param->filter.mac_count;
if(AP_MAX_BLACK_NUM < list_len)
{
//max list is 16
return -1;
}
UAP_Black_List_Onoff(DON);
UAP_White_List_Onoff(DOFF);
UAP_Black_List_Clear();
int i = 0 ;
for(i=0;i<list_len;++i)
{
UAP_Black_List_Add( uap_param->filter.mac_list[i]);
}
}
}
else
return -1;
return 0;
}
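/*
 * Illustrative sketch (not compiled in): program a one-entry white list via
 * UAP_Params_Config(). The MAC value is a placeholder; field and macro names
 * are taken from the code above.
 */
#if 0
static int example_set_white_filter(void)
{
    uap_config_param cfg;
    const uint8_t mac[MAC_ADDR_LEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};

    memset(&cfg, 0, sizeof(cfg));
    cfg.action = SET_ACTION;
    cfg.filter.filter_mode = WHITE_FILTER;
    cfg.filter.mac_count = 1;
    memcpy(cfg.filter.mac_list[0], mac, MAC_ADDR_LEN);
    return UAP_Params_Config(&cfg);
}
#endif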
INT32 UAP_BSS_Config(INT32 start_stop)
{
if(DON == start_stop)
{// start: normal mode
asr_send_set_ps_mode(PS_MODE_OFF);
}
else if(DOFF == start_stop)
{// stop: enter power-save mode, not power-off
asr_send_set_ps_mode(PS_MODE_ON_DYN);
}
else
{
return -1;
}
dbg(D_ERR, D_UWIFI_CTRL, "%s: done \r\n", __func__);
return 0;
}
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,341 @@
/**
****************************************************************************************
*
* @file uwifi_cmds.c
*
* @brief Handles commands from LMAC FW
*
* Copyright (C) ASR
*
****************************************************************************************
*/
#include "uwifi_cmds.h"
#include "uwifi_include.h"
#include "uwifi_msg_tx.h"
#include "uwifi_msg.h"
#include "ipc_host.h"
/**
*
*/
static void cmd_dump(const struct asr_cmd *cmd)
{
dbg(D_ERR,D_UWIFI_CTRL,"tkn[%u] flags:%04x result:%d cmd:%4u - reqcfm(%4u)\n",
(unsigned int)cmd->tkn,
cmd->flags,
(int)cmd->result,
(unsigned int)cmd->id,
(unsigned int)cmd->reqid);
}
static void cmd_mgr_drain(struct asr_cmd_mgr *cmd_mgr)
{
struct asr_cmd *cur, *nxt;
asr_rtos_lock_mutex(&cmd_mgr->lock);
list_for_each_entry_safe(cur, nxt, &cmd_mgr->cmds, list) {
list_del(&cur->list);
cmd_mgr->queue_sz--;
if (!(cur->flags & ASR_CMD_FLAG_NONBLOCK)) //block case
asr_rtos_set_semaphore(&cur->semaphore);
}
asr_rtos_unlock_mutex(&cmd_mgr->lock);
}
void cmd_queue_crash_handle(struct asr_hw *asr_hw, const char *func, u32 line, u32 reason)
{
struct asr_cmd_mgr *cmd_mgr = NULL;
if (asr_hw == NULL) {
return;
}
if (asr_test_bit(ASR_DEV_PRE_RESTARTING, &asr_hw->phy_flags) || asr_test_bit(ASR_DEV_RESTARTING, &asr_hw->phy_flags)) {
dbg(D_ERR,D_UWIFI_CTRL, "%s:phy_flags=0X%X\n", __func__, (unsigned int)asr_hw->phy_flags);
return;
}
asr_set_bit(ASR_DEV_PRE_RESTARTING, &asr_hw->phy_flags);
cmd_mgr = &asr_hw->cmd_mgr;
dump_sdio_info(asr_hw, func, line);
//spin_lock_bh(&cmd_mgr->lock);
cmd_mgr->state = ASR_CMD_MGR_STATE_CRASHED;
//spin_unlock_bh(&cmd_mgr->lock);
//send dev restart event
asr_msleep(10);
cmd_mgr_drain(cmd_mgr);
#ifdef ASR_MODULE_RESET_SUPPORT
asr_hw->dev_restart_work.asr_hw = asr_hw;
asr_hw->dev_restart_work.parm1 = reason;
//schedule_work(&asr_hw->dev_restart_work.real_work);
#endif
}
/**
*
*/
static void cmd_complete(struct asr_cmd_mgr *cmd_mgr, struct asr_cmd *cmd)
{
list_del(&cmd->list);
cmd_mgr->queue_sz--;
cmd->flags |= ASR_CMD_FLAG_DONE;
if (cmd->flags & ASR_CMD_FLAG_NONBLOCK) {
asr_rtos_free(cmd);
cmd = NULL;
} else { //block case
if (ASR_CMD_WAIT_COMPLETE(cmd->flags)) {
cmd->result = 0;
asr_rtos_set_semaphore(&cmd->semaphore);
}
}
}
/**
*
*/
int g_cmd_ret;
struct asr_cmd *g_cmd;
asr_semaphore_t *g_sem;
static int cmd_mgr_queue(struct asr_cmd_mgr *cmd_mgr, struct asr_cmd *cmd)
{
struct asr_hw *asr_hw = container_of(cmd_mgr, struct asr_hw, cmd_mgr);
unsigned int tout;
int ret = 0;
asr_semaphore_t cmd_sem;
struct asr_cmd *cmd_check = NULL;
struct asr_cmd *cur = NULL, *nxt = NULL;
dbg(D_ERR, D_UWIFI_CTRL, "dTX (%d,%d)->(%d,%d),flag=0x%x",MSG_T(cmd->id),MSG_I(cmd->id),MSG_T(cmd->reqid),MSG_I(cmd->reqid),cmd->flags);
asr_rtos_init_semaphore(&cmd_sem,0);
asr_rtos_lock_mutex(&cmd_mgr->lock);
if (cmd_mgr->state == ASR_CMD_MGR_STATE_CRASHED)
{
dbg(D_ERR,D_UWIFI_CTRL,"ASR_CMD_MGR_STATE_CRASHED\r\n");
cmd->result = -EPIPE;
asr_rtos_free(cmd->a2e_msg);
cmd->a2e_msg = NULL;
asr_rtos_unlock_mutex(&cmd_mgr->lock);
asr_rtos_deinit_semaphore(&cmd_sem);
return -EPIPE;
}
if (!list_empty(&cmd_mgr->cmds))
{
if (cmd_mgr->queue_sz == cmd_mgr->max_queue_sz)
{
dbg(D_ERR,D_UWIFI_CTRL,"ENOMEM\r\n");
cmd->result = -ENOMEM;
asr_rtos_free(cmd->a2e_msg);
cmd->a2e_msg = NULL;
asr_rtos_unlock_mutex(&cmd_mgr->lock);
asr_rtos_deinit_semaphore(&cmd_sem);
return -ENOMEM;
}
}
if (cmd->flags & ASR_CMD_FLAG_REQ_CFM)
cmd->flags |= ASR_CMD_FLAG_WAIT_CFM;
cmd->tkn = cmd_mgr->next_tkn++;
cmd->result = -EINTR;
if (!(cmd->flags & ASR_CMD_FLAG_NONBLOCK)) //block case
asr_rtos_init_semaphore(&cmd->semaphore, 0);
list_add_tail(&cmd->list, &cmd_mgr->cmds);
cmd_mgr->queue_sz++;
if (SM_DISCONNECT_CFM == cmd->reqid)
{
/* on disconnect, the deauth frame may take more than 3 seconds to be sent out;
remove this special case once TX performance improves */
tout = ASR_NEVER_TIMEOUT;
}
else
{
tout = (ASR_80211_CMD_TIMEOUT_MS * cmd_mgr->queue_sz);
}
asr_rtos_unlock_mutex(&cmd_mgr->lock);
#if 0//ndef MSG_REFINE
g_cmd = cmd;
g_sem = &cmd_sem;
uwifi_sdio_event_set(UWIFI_SDIO_EVENT_MSG);
asr_rtos_get_semaphore(&cmd_sem,ASR_WAIT_FOREVER);
g_cmd = NULL;
g_sem = NULL;
asr_rtos_deinit_semaphore(&cmd_sem);
ret = g_cmd_ret;
#else
asr_rtos_deinit_semaphore(&cmd_sem);
/* Send the cmd to FW over SDIO.
On error, return and release both the msg and the cmd.
ipc_host_msg_push() is called in the uwifi task instead of the uwifi sdio task.
*/
ret = ipc_host_msg_push(asr_hw->ipc_env, (void *)cmd, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len);
#endif
asr_rtos_free(cmd->a2e_msg);
cmd->a2e_msg = NULL;
if (ret) { // has error
asr_rtos_lock_mutex(&cmd_mgr->lock);
list_for_each_entry_safe(cur, nxt, &cmd_mgr->cmds, list) {
if (cmd == cur) {
list_del(&cur->list);
cmd_mgr->queue_sz--;
}
}
asr_rtos_unlock_mutex(&cmd_mgr->lock);
dbg(D_ERR,D_UWIFI_CTRL,"[%s] ipc msg push err,ret=%d\n", __func__, ret);
if (ret != -ENOSYS)
cmd_queue_crash_handle(asr_hw, __func__, __LINE__, ASR_RESTART_REASON_TXMSG_FAIL);
return -EBUSY;
}
if (!(cmd->flags & ASR_CMD_FLAG_NONBLOCK)) //block case
{
int rx_retry = ASR_80211_CMD_TIMEOUT_RETRY;
while(rx_retry--)
{
if (asr_rtos_get_semaphore(&cmd->semaphore, tout))
{
if(rx_retry)
{
tout = ASR_80211_CMD_TIMEOUT_MS;
dbg(D_ERR, D_UWIFI_CTRL, "rx msg retry(%d),(%d,%d)->(%d,%d)\n",rx_retry,MSG_T(cmd->id),MSG_I(cmd->id),MSG_T(cmd->reqid),MSG_I(cmd->reqid));
uwifi_sdio_event_set(UWIFI_SDIO_EVENT_RX);
}
else
{
dbg(D_DBG, D_UWIFI_CTRL, "%s: flags=%d tout=%d reqid=%d queue_sz=%d\r\n", __func__,cmd->flags,tout,cmd->reqid,cmd_mgr->queue_sz);
dbg(D_ERR, D_UWIFI_CTRL, "cmd timed-out (%d,%d)->(%d,%d)\n",MSG_T(cmd->id),MSG_I(cmd->id),MSG_T(cmd->reqid),MSG_I(cmd->reqid));
asr_rtos_deinit_semaphore(&cmd->semaphore);
cmd_dump(cmd);
asr_rtos_lock_mutex(&cmd_mgr->lock);
cmd_mgr->state = ASR_CMD_MGR_STATE_CRASHED;
if (!(cmd->flags & ASR_CMD_FLAG_DONE)) {
cmd->result = -ETIMEDOUT;
cmd_complete(cmd_mgr, cmd);
}
asr_rtos_unlock_mutex(&cmd_mgr->lock);
return -ETIMEDOUT;
}
}
else
{
asr_rtos_deinit_semaphore(&cmd->semaphore);
break;
}
}
} else {
cmd->result = 0;
}
// delete the cmd here when no cfm was requested; otherwise it is removed in cmd_complete() (blocking case).
if(!(cmd->flags & ASR_CMD_FLAG_REQ_CFM))
{
asr_rtos_lock_mutex(&cmd_mgr->lock);
list_del(&cmd->list);
cmd_mgr->queue_sz--;
asr_rtos_unlock_mutex(&cmd_mgr->lock);
}
// sanity check: the cmd should already have been removed from the list at this point.
asr_rtos_lock_mutex(&cmd_mgr->lock);
list_for_each_entry(cmd_check, &cmd_mgr->cmds, list) {
if (cmd_check == cmd) {
// a non-blocking cmd that requests a cfm would be deleted here; if it were, it could not be found in cmd_mgr_msgind() (e.g. ME_TRAFFIC_IND_REQ).
//list_del(&cmd->list);
//cmd_mgr->queue_sz--;
//dbg(D_ERR, D_UWIFI_CTRL, "ERROR: cmd check in! reqid=%d, flags=0x%x \n",cmd->reqid,cmd->flags);
break;
}
}
asr_rtos_unlock_mutex(&cmd_mgr->lock);
return 0;
}
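/*
 * Illustrative sketch (not compiled in): the calling pattern expected by
 * cmd_mgr_queue() through the queue hook installed in asr_cmd_mgr_init().
 * The caller is assumed to have filled cmd->id, cmd->reqid, cmd->flags and
 * cmd->a2e_msg beforehand (real callers live in uwifi_msg_tx.c).
 */
#if 0
static int example_send_blocking_cmd(struct asr_hw *asr_hw, struct asr_cmd *cmd)
{
    /* A blocking command that expects a confirmation sets ASR_CMD_FLAG_REQ_CFM
     * and leaves ASR_CMD_FLAG_NONBLOCK cleared, so cmd_mgr_queue() waits on
     * cmd->semaphore until cmd_mgr_msgind() completes it. */
    return asr_hw->cmd_mgr.queue(&asr_hw->cmd_mgr, cmd);
}
#endif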
/**
*
*/
static int cmd_mgr_msgind(struct asr_cmd_mgr *cmd_mgr, struct ipc_e2a_msg *msg, msg_cb_fct cb)
{
struct asr_hw *asr_hw = container_of(cmd_mgr, struct asr_hw, cmd_mgr);
struct asr_cmd *cmd;
bool found = false;
asr_rtos_lock_mutex(&cmd_mgr->lock);
list_for_each_entry(cmd, &cmd_mgr->cmds, list) {
if (cmd->reqid == msg->id &&
(cmd->flags & ASR_CMD_FLAG_WAIT_CFM)) {
if (!cb || (cb && !cb(asr_hw, cmd, msg))) {
found = true;
cmd->flags &= ~ASR_CMD_FLAG_WAIT_CFM;
if (cmd->e2a_msg && msg->param_len)
memcpy(cmd->e2a_msg, &msg->param, msg->param_len);
if (ASR_CMD_WAIT_COMPLETE(cmd->flags))
cmd_complete(cmd_mgr, cmd);
break;
}
}
}
asr_rtos_unlock_mutex(&cmd_mgr->lock);
if (!found && cb)
{
//dbg(D_ERR, D_UWIFI_CTRL, "cmd->flags:0x%x,(%d %d)", cmd->flags,MSG_T(msg->id),MSG_I(msg->id));
cb(asr_hw, NULL, msg);
}
return 0;
}
/**
*
*/
int asr_cmd_mgr_init(struct asr_cmd_mgr *cmd_mgr)
{
INIT_LIST_HEAD(&cmd_mgr->cmds);
if(asr_rtos_init_mutex(&cmd_mgr->lock))
return -1;
cmd_mgr->max_queue_sz = ASR_CMD_MAX_QUEUED;
cmd_mgr->queue = &cmd_mgr_queue;
//cmd_mgr->print = &cmd_mgr_print;
//cmd_mgr->drain = &cmd_mgr_drain;
//cmd_mgr->llind = &cmd_mgr_llind;
cmd_mgr->msgind = &cmd_mgr_msgind;
return 0;
}
/**
*
*/
void asr_cmd_mgr_deinit(struct asr_cmd_mgr *cmd_mgr)
{
//cmd_mgr->print(cmd_mgr);
//cmd_mgr->drain(cmd_mgr);
//cmd_mgr->print(cmd_mgr);
asr_rtos_deinit_mutex(&cmd_mgr->lock);
memset(cmd_mgr, 0, sizeof(*cmd_mgr));
}

View File

@@ -0,0 +1,168 @@
#if (defined CFG_SNIFFER_SUPPORT || defined CFG_CUS_FRAME)
#include "uwifi_idle_mode.h"
#include "uwifi_platform.h"
struct asr_txq *asr_txq_vif_get_idle_mode(struct asr_hw *asr_hw, uint8_t ac, int *idx)
{
if (idx)
*idx = ac;
return &asr_hw->txq[ac];
}
void asr_txq_init_idle_mode(struct asr_txq *txq, int idx, uint8_t status,
struct asr_hwq *hwq, int tid)
{
int i;
txq->idx = idx;
txq->status = status;
txq->credits = NX_TXQ_INITIAL_CREDITS;
txq->pkt_sent = 0;
skb_queue_head_init(&txq->sk_list);
txq->last_retry_skb = NULL;
txq->nb_retry = 0;
txq->hwq = hwq;
for (i = 0; i < CONFIG_USER_MAX ; i++)
txq->pkt_pushed[i] = 0;
//txq->baw.agg_on = false;
}
/* called after idle mode add interface */
void asr_txq_vif_init_idle_mode(struct asr_hw *asr_hw, uint8_t status)
{
int i;
int idx;
struct asr_txq *txq;
txq = asr_txq_vif_get_idle_mode(asr_hw, 0, &idx);
for (i = 0; i < NX_NB_TXQ_MAX_IDLE_MODE; i++, idx++, txq++)
{
asr_txq_init_idle_mode(txq, idx, status, &asr_hw->hwq[i], -1);
}
}
/**
* asr_txq_vif_deinit_idle_mode - Deinitialize TX queues for a vif in idle mode
*
* @asr_hw: Main driver data
* @asr_vif: vif for which tx queues need to be deinitialized
*/
void asr_txq_vif_deinit_idle_mode(struct asr_hw *asr_hw, struct asr_vif *asr_vif)
{
int i;
int idx;
struct asr_txq *txq;
txq = asr_txq_vif_get_idle_mode(asr_hw, 0, &idx);
for (i = 0; i < NX_NB_TXQ_MAX_IDLE_MODE; i++, txq++) {
asr_txq_deinit(asr_hw, txq);
}
}
/*start after channel switch*/
void asr_txq_vif_start_idle_mode(struct asr_vif *asr_vif, u16 reason,
struct asr_hw *asr_hw)
{
struct asr_txq *txq;
int i;
asr_rtos_lock_mutex(&asr_hw->tx_lock);
txq = asr_txq_vif_get_idle_mode(asr_hw, 0, NULL);
for (i = 0; i < NX_NB_TXQ_MAX_IDLE_MODE; i++, txq++)
{
asr_txq_start(txq, reason);
}
asr_rtos_unlock_mutex(&asr_hw->tx_lock);
}
/*stop before channel switch*/
void asr_txq_vif_stop_idle_mode(struct asr_vif *asr_vif, uint16_t reason,
struct asr_hw *asr_hw)
{
struct asr_txq *txq;
int i;
asr_rtos_lock_mutex(&asr_hw->tx_lock);
txq = asr_txq_vif_get_idle_mode(asr_hw, 0, NULL);
for (i = 0; i < NX_NB_TXQ_MAX_IDLE_MODE; i++, txq++)
{
asr_txq_stop(txq, reason);
}
asr_rtos_unlock_mutex(&asr_hw->tx_lock);
}
/**
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
* struct net_device *dev);
* Called when a packet needs to be transmitted.
* Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
* (can also return NETDEV_TX_LOCKED if NETIF_F_LLTX)
*
* - Initialize the descriptor for this pkt (stored in skb before data)
* - Push the pkt in the corresponding Txq
* - If possible (i.e. credit available and not in PS) the pkt is pushed
* to fw
*/
void asr_start_xmit_idle(uint8_t *frame, uint32_t len)
{
//uint16_t freq;
uint64_t cookie; //useless?
struct asr_vif *asr_vif;
struct cfg80211_mgmt_tx_params tx_params = {0};
struct asr_hw *asr_hw = uwifi_get_asr_hw();
if(NULL == asr_hw)
{
dbg(D_CRT,D_UWIFI_CTRL,"%s:asr_hw is NULL",__func__);
return;
}
// 1. get asr_vif and freq
if(asr_hw->monitor_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->monitor_vif_idx];
//if(asr_vif->sniffer.chan_num > 0)
// freq = 2407 + 5*(asr_vif->sniffer.chan_num);
//else
// freq = 2437;
tx_params.chan = &asr_vif->sniffer.st_chan;
}
else if(asr_hw->sta_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->sta_vif_idx];
tx_params.chan = asr_vif->asr_hw->chanctx_table[asr_vif->ch_index].chan_def.chan;
}
else if(asr_hw->ap_vif_idx != 0xff)
{
asr_vif = asr_hw->vif_table[asr_hw->ap_vif_idx];
tx_params.chan = asr_vif->asr_hw->chanctx_table[asr_vif->ch_index].chan_def.chan;
}
else
{
dbg(D_CRT, D_UWIFI_CTRL, "vif not support, return");
return;
}
// 2. get tx_params
tx_params.buf = frame;
tx_params.len = len;
tx_params.cus_flags = 1;
// 3. send mgmt frame
if(uwifi_cfg80211_ops.mgmt_tx(asr_vif, &tx_params, &cookie))
{
dbg(D_CRT, D_UWIFI_CTRL, "custom mgmt frame tx fail");
asr_rtos_free((uint8_t *)tx_params.buf);
tx_params.buf = NULL;
return;
}
}
void uwifi_tx_dev_custom_mgmtframe(uint8_t *pframe, uint32_t len)
{
asr_start_xmit_idle(pframe,len);
}
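/*
 * Illustrative sketch (not compiled in): sending a raw management frame
 * through uwifi_tx_dev_custom_mgmtframe(). On tx failure the buffer is freed
 * inside asr_start_xmit_idle(), so the frame is assumed to come from
 * asr_rtos_malloc() and ownership passes to the lower layer on success.
 */
#if 0
static void example_tx_custom_mgmt(const uint8_t *frame, uint32_t len)
{
    uint8_t *buf = asr_rtos_malloc(len);

    if (buf == NULL)
        return;
    memcpy(buf, frame, len);
    uwifi_tx_dev_custom_mgmtframe(buf, len);
}
#endif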
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,398 @@
/**
******************************************************************************
*
* @file uwifi_kernel.c
*
* @brief Linux-kernel-like helper functions (skb and list wrappers)
*
* Copyright (C) ASR
*
******************************************************************************
*/
#include "uwifi_kernel.h"
#include "uwifi_include.h"
extern uint32_t current_iftype;
typedef struct
{
uint8_t buf[MAX_SKB_BUF_SIZE];
}sk_buffer_t;
struct skb_priv gskb_priv={0};
//uint8_t g_sk_buf_t[SK_BUFF_T_SIZE * TOTAL_SKB_NUM + 4];
//sk_buffer_t g_sk_buf_pool[TOTAL_SKB_NUM];
/*
* the following is os independent
*/
void os_init_listhead(_list *list)
{
INIT_LIST_HEAD(list);
}
uint32_t os_list_empty(_list *phead)
{
return list_empty(phead);
}
void os_list_insert_head(_list *plist, _list *phead)
{
list_add(plist, phead);
}
void os_list_insert_tail(_list *plist, _list *phead)
{
list_add_tail(plist, phead);
}
void os_list_delete(_list *plist)
{
list_del_init(plist);
}
void os_init_queue(_queue *pqueue)
{
os_init_listhead(&(pqueue->queue));
asr_rtos_init_mutex(&(pqueue->lock));
}
void os_deinit_queue(_queue *pqueue)
{
asr_rtos_deinit_mutex(&(pqueue->lock));
}
uint32_t os_queue_empty(_queue *pqueue)
{
return (os_list_empty(&(pqueue->queue)));
}
uint32_t os_end_of_queue(_list *head, _list *plist)
{
if (head == plist)
return true;
else
return false;
}
#if 0
//skb buffer management
struct sk_buff *_alloc_skb(struct skb_priv *pskb_priv)
{
uint32_t i;
struct sk_buff *skb = NULL;
/* maybe we should align the data size */
pskb_priv->allocated_skb_addr = g_sk_buf_t;
memset(pskb_priv->allocated_skb_addr, 0, sizeof(g_sk_buf_t[SK_BUFF_T_SIZE * TOTAL_SKB_NUM + 4]));
pskb_priv->skb_addr = pskb_priv->allocated_skb_addr + 4 -
((unsigned long) (pskb_priv->allocated_skb_addr) & 3);
skb = (struct sk_buff *) pskb_priv->skb_addr;
memset(g_sk_buf_pool, 0, sizeof(g_sk_buf_pool[TOTAL_SKB_NUM]));
for (i = 0; i < TOTAL_SKB_NUM; i++)
{
skb->tail = skb->data = g_sk_buf_pool[i].buf;
skb->head = skb->data;
skb->end = skb->head + MAX_SKB_BUF_SIZE;
//skb->len = MAX_SKB_BUF_SIZE;
//os_memset(skb->data, 0, size);
os_init_listhead(&(skb->list));
os_list_insert_tail(&skb->list, &(pskb_priv->free_skb_queue.queue));
pskb_priv->free_skb_cnt++;
skb++;
}
return (struct sk_buff *) pskb_priv->skb_addr;
}
void _free_skb(struct skb_priv *pskb_priv)
{
uint32_t i;
struct sk_buff *skb = (struct sk_buff *)pskb_priv->skb_addr;;
for (i=0; i<TOTAL_SKB_NUM; i++)
{
if (skb)
{
if (skb->head)
{
//asr_rtos_free(skb->head);
skb->head = NULL;
skb->data = NULL;
}
}
else
{
break;
}
skb++;
}
if (pskb_priv->allocated_skb_addr)
{
//asr_rtos_free(pskb_priv->allocated_skb_addr);
pskb_priv->allocated_skb_addr = NULL;
}
}
#endif
int32_t wlan_init_skb_priv(void)
{
struct skb_priv *pskb_priv = &gskb_priv;
memset((uint8_t *)pskb_priv, 0, sizeof(struct skb_priv));
os_init_queue(&pskb_priv->free_skb_queue);
return _SUCCESS;
}
void wlan_free_skb_priv(void)
{
struct skb_priv *pskb_priv = &gskb_priv;
os_deinit_queue(&pskb_priv->free_skb_queue);
}
extern int g_amsdu;
int g_amsdu_free_cnt = 0;
int g_amsdu_malloc_cnt = 0;
struct sk_buff *wifi_alloc_pktbuf(uint32_t size, uint8_t is_tx)
{
struct skb_priv *pskb_priv = &gskb_priv;
struct sk_buff *pskb = NULL;
_queue *pfree_skb_queue = &(pskb_priv->free_skb_queue);
if(pfree_skb_queue->lock == NULL)
return NULL;
asr_rtos_lock_mutex(&pfree_skb_queue->lock);
{
pskb = asr_rtos_malloc(sizeof(struct sk_buff));
if (NULL == pskb)
{
dbg(D_ERR, D_UWIFI_DATA,"skb malloc from stack failed\n");
asr_rtos_unlock_mutex(&pfree_skb_queue->lock);
return NULL;
}
memset(pskb, 0, sizeof(struct sk_buff));
pskb->data = asr_rtos_malloc(size);
if (NULL == pskb->data)
{
dbg(D_ERR, D_UWIFI_DATA,"data malloc from stack failed\n");
asr_rtos_free(pskb);
asr_rtos_unlock_mutex(&pfree_skb_queue->lock);
return NULL;
}
memset(pskb->data, 0, size);
pskb->tail = pskb->data;
pskb->head = pskb->data;
pskb->end = pskb->head + size;
pskb_priv->tx_skb_cnt++;
}
asr_rtos_unlock_mutex(&pfree_skb_queue->lock);
if(is_tx==55)
g_amsdu_malloc_cnt++;
return pskb;
}
int32_t wifi_free_pktbuf(struct sk_buff *skb, uint8_t is_tx)
{
struct skb_priv *pskb_priv = &gskb_priv;
_queue *pfree_skb_queue = &pskb_priv->free_skb_queue;
if (skb==NULL) {
return _FAIL;
}
if(pfree_skb_queue->lock == NULL)
return _FAIL;
asr_rtos_lock_mutex(&pfree_skb_queue->lock);
if(skb->head){
asr_rtos_free(skb->head);
skb->head = NULL;
skb->data = NULL;
}
if(skb)
asr_rtos_free(skb);
skb = NULL;
asr_rtos_unlock_mutex(&pfree_skb_queue->lock);
if(is_tx == 55)
g_amsdu_free_cnt++;
return _SUCCESS;
}
uint8_t *skb_pull(struct sk_buff *skb, uint32_t len)
{
//return skb->data += len;
if (len > skb->len)
{
dbg(D_ERR, D_UWIFI_DATA, "skb_pull len is more than skb len, pull len:%u, skb len:%u\n", (unsigned int)len,
(unsigned int)skb->len);
return NULL;
}
skb->len -= len;
skb->data = (uint8_t *)(((uint32_t)skb->data) + len);
return skb->data;
}
uint8_t *skb_push(struct sk_buff *skb, uint32_t len)
{
if ((skb->data-len) < skb->head)
{
dbg(D_ERR, D_UWIFI_DATA, "skb_push len is more than skb head, skb data:%x, skb head:%x, len:%u\n",
(unsigned int)skb->data, (unsigned int)skb->head, (unsigned int)len);
return NULL;
}
skb->len += len;
skb->data = (uint8_t *)(((uint32_t)skb->data) - len);
return skb->data;
}
/**
* skb_put - add data to a buffer
* @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer.
* A pointer to the first byte of the extra data is returned.
*/
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb->tail;
skb->tail += len;
skb->len += len;
if(skb->tail>skb->end)
{
dbg(D_ERR, D_UWIFI_DATA, "skb_put tail over the end, head:%x, data:%x, tail:%x, end:%x, len:%d\n",
(unsigned int)skb->head,
(unsigned int)skb->data,
(unsigned int)skb->tail,
(unsigned int)skb->end, len);
return NULL;
}
return tmp;
}
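/*
 * Illustrative sketch (not compiled in): typical life cycle of a packet buffer
 * with the helpers above: allocate, append payload with skb_put(), release.
 */
#if 0
static void example_pktbuf_usage(const uint8_t *payload, uint32_t len)
{
    struct sk_buff *skb = wifi_alloc_pktbuf(len, 0);
    unsigned char *p;

    if (skb == NULL)
        return;
    p = skb_put(skb, len);      /* extend the used data area by len bytes */
    if (p)
        memcpy(p, payload, len);
    wifi_free_pktbuf(skb, 0);
}
#endif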
uint8_t *skb_reinit(struct sk_buff *skb)
{
uint8_t *phead = NULL,*pend = NULL;
if (!skb)
{
return NULL;
}
phead = skb->head;
pend = skb->end;
memset(skb, 0, sizeof(struct sk_buff));
memset(phead, 0, (uint32_t)pend - (uint32_t)phead);
skb->data = phead;
skb->tail = skb->data;
skb->head = skb->data;
skb->end = pend;
return skb->data;
}
/**
* skb_queue_tail - queue a buffer at the list tail
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the tail of the list. This function takes the
* list lock and can be used safely with other locking &sk_buff functions
* safely.
*
* A buffer cannot be placed on two lists at the same time.
*/
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
asr_rtos_lock_mutex(&list->lock);
__skb_queue_tail(list, newsk);
asr_rtos_unlock_mutex(&list->lock);
}
/**
* skb_append - append a buffer
* @old: buffer to insert after
* @newsk: buffer to insert
* @list: list to use
*
* Place a packet after a given packet in a list. The list locks are taken
* and this function is atomic with respect to other list locked calls.
* A buffer cannot be placed on two lists at the same time.
*/
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
asr_rtos_lock_mutex(&list->lock);
__skb_queue_after(list, old, newsk);
asr_rtos_unlock_mutex(&list->lock);
}
/**
* skb_queue_head - queue a buffer at the list head
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the start of the list. This function takes the
* list lock and can be used safely with other locking &sk_buff functions
* safely.
*
* A buffer cannot be placed on two lists at the same time.
*/
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
asr_rtos_lock_mutex(&list->lock);
__skb_queue_head(list, newsk);
asr_rtos_unlock_mutex(&list->lock);
}
/**
* skb_unlink - remove a buffer from a list
* @skb: buffer to remove
* @list: list to use
*
* Remove a packet from a list. The list locks are taken and this
* function is atomic with respect to other list locked calls
*
* You must know what list the SKB is on.
*/
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
asr_rtos_lock_mutex(&list->lock);
__skb_unlink(skb, list);
asr_rtos_unlock_mutex(&list->lock);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,174 @@
/**
******************************************************************************
*
* @file uwifi_notify.cpp
*
* @brief uwifi_notify implementation
*
* Copyright (C) ASR
*
******************************************************************************
*/
#include "uwifi_ieee80211.h"
#include "uwifi_notify.h"
//#include "at_callback.h"
#include "wifi_config.h"
#if UWIFI_TEST
#include "uwifi_msg.h"
#include "uwifi_basic_test.h"
#endif
#include "asr_dbg.h"
#include "uwifi_msg.h"
#ifdef LWIP
#include "lwipopts.h"
//#include "lwip_comm_wifi.h"
#endif
#include "asr_wlan_api_aos.h"
#include "hostapd.h"
extern void dhcps_record_client_wifi_status(uint8_t ip, uint8_t *client_mac);
#ifdef WIFI_TEST_LINUX
extern int wifi_test_result;
extern sem_t open_close_completion;
extern sem_t scan_result_completion;
extern sem_t connect_result_completion;
void wifi_test_linux_scan_cb(asr_wlan_scan_result_t *pApList)
{
uint8_t i;
printf("wifi_test_linux_scan_cb:AP num:%d\n", pApList->ap_num);
for(i=0;i<pApList->ap_num;i++)
{
printf("[ap:%d] name = %s | rssi=%d \r\n",
i,pApList->ap_list[i].ssid, (int8_t)pApList->ap_list[i].ap_power);
}
if (0 == pApList->ap_num)
{
wifi_test_result = -1;
}
else if (0 != memcmp(pApList->ap_list[0].ssid, "000", 3))
{
wifi_test_result = -1;
}
else
{
wifi_test_result = 0;
}
sem_post(&scan_result_completion);
}
#endif
void IOpenCloseStateCallback(enum wifi_open_close_state state)
{
#ifdef WIFI_TEST_LINUX
wifi_test_result = state;
sem_post(&open_close_completion);
#endif
}
// FIXME:
//#ifndef ALIOS_SUPPORT
asr_wlan_event_cb_t asr_wlan_event_cb_handle = {0};
int g_wifi_sta_up_down_flag = 0; // 1: up, 2: down
void wifi_event_cb(asr_wlan_event_e evt, void* info)
{
switch (evt)
{
#ifdef CFG_STATION_SUPPORT
case WLAN_EVENT_SCAN_COMPLETED:
if(asr_wlan_event_cb_handle.scan_compeleted)
asr_wlan_event_cb_handle.scan_compeleted((asr_wlan_scan_result_t *)info);
break;
case WLAN_EVENT_AUTH:
if(asr_wlan_event_cb_handle.stat_chg)
{
asr_wlan_event_cb_handle.stat_chg(EVENT_STATION_AUTH);
dbg(D_ERR,D_UWIFI_CTRL,"%s:STA AUTH\r\n",__func__);
}
break;
case WLAN_EVENT_ASSOCIATED:
{
asr_wlan_ap_info_adv_t *ap_info = asr_wlan_get_associated_apinfo();
if(asr_wlan_event_cb_handle.associated_ap)
asr_wlan_event_cb_handle.associated_ap(ap_info);
}
break;
#endif
case WLAN_EVENT_4WAY_HANDSHAKE:
if(asr_wlan_event_cb_handle.stat_chg)
{
asr_wlan_event_cb_handle.stat_chg(EVENT_STATION_4WAY_HANDSHAKE);
}
break;
case WLAN_EVENT_4WAY_HANDSHAKE_DONE:
if(asr_wlan_event_cb_handle.stat_chg)
{
asr_wlan_event_cb_handle.stat_chg(EVENT_STATION_4WAY_HANDSHAKE_DONE);
}
break;
#ifdef CFG_STATION_SUPPORT
case WLAN_EVENT_CONNECTED:
if(g_wifi_sta_up_down_flag == 1)
return;
if(asr_wlan_event_cb_handle.stat_chg)
{
asr_wlan_event_cb_handle.stat_chg(EVENT_STATION_UP);
dbg(D_ERR,D_UWIFI_CTRL,"%s:EVENT_STATION_UP uped\r\n",__func__);
g_wifi_sta_up_down_flag = 1;
}
break;
case WLAN_EVENT_IP_GOT:
if(asr_wlan_event_cb_handle.ip_got)
asr_wlan_event_cb_handle.ip_got((asr_wlan_ip_stat_t *)info);
break;
case WLAN_EVENT_DISCONNECTED:
if(g_wifi_sta_up_down_flag == 2)
return;
if(asr_wlan_event_cb_handle.stat_chg)
{
asr_wlan_event_cb_handle.stat_chg(EVENT_STATION_DOWN);
dbg(D_ERR,D_UWIFI_CTRL,"%s:EVENT_STATION_DOWN downed\r\n",__func__);
g_wifi_sta_up_down_flag = 2;
}
break;
#endif
case WLAN_EVENT_AP_UP:
if(asr_wlan_event_cb_handle.stat_chg)
asr_wlan_event_cb_handle.stat_chg(EVENT_AP_UP);
break;
case WLAN_EVENT_AP_DOWN:
if(asr_wlan_event_cb_handle.stat_chg)
asr_wlan_event_cb_handle.stat_chg(EVENT_AP_DOWN);
break;
case WLAN_EVENT_RSSI_LEVEL:
if (asr_wlan_event_cb_handle.rssi_chg)
asr_wlan_event_cb_handle.rssi_chg(*(uint8_t *)info);
break;
case WLAN_EVENT_STA_CLOSE:
if(asr_wlan_event_cb_handle.stat_chg)
asr_wlan_event_cb_handle.stat_chg(EVENT_STA_CLOSE);
break;
case WLAN_EVENT_AP_PEER_UP:
if(asr_wlan_event_cb_handle.ap_add_dev)
asr_wlan_event_cb_handle.ap_add_dev(((struct peer_sta_user_info *)info)->mac_addr);
break;
case WLAN_EVENT_AP_PEER_DOWN:
if(asr_wlan_event_cb_handle.ap_del_dev)
asr_wlan_event_cb_handle.ap_del_dev(((struct peer_sta_user_info *)info)->mac_addr);
#ifndef JXC_SDK
uint8_t *mac = ((struct peer_sta_user_info *)info)->mac_addr;
dbg(D_INF,D_UWIFI_CTRL,"%s:AP_PEER_DOWN,mac=%X:%X:%X:%X:%X:%X\r\n",__func__,mac[0],mac[1],mac[2],mac[3],mac[4],mac[5]);
dhcps_record_client_wifi_status(0, ((struct peer_sta_user_info *)info)->mac_addr);
#endif
break;
default:
break;
}
}
//#endif
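/*
 * Illustrative sketch (not compiled in): hooking the callbacks consumed by
 * wifi_event_cb() above. The handler signatures are inferred from the calls
 * in this file; the handler names themselves are placeholders.
 */
#if 0
static void example_on_scan_done(asr_wlan_scan_result_t *res) { (void)res; }
static void example_on_ip_got(asr_wlan_ip_stat_t *ip) { (void)ip; }

static void example_register_wifi_callbacks(void)
{
    asr_wlan_event_cb_handle.scan_compeleted = example_on_scan_done;
    asr_wlan_event_cb_handle.ip_got = example_on_ip_got;
}
#endif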

File diff suppressed because it is too large

View File

@@ -0,0 +1,872 @@
/**
******************************************************************************
*
* @file uwifi_platform.c
*
* @brief platform related implement
*
* Copyright (C) ASR
*
******************************************************************************
*/
#include "uwifi_include.h"
#include "uwifi_rx.h"
#include "uwifi_msg_rx.h"
#include "uwifi_platform.h"
#include "uwifi_kernel.h"
#include "uwifi_tx.h"
#include "uwifi_msg.h"
#include "asr_rtos.h"
#include "asr_sdio.h"
#include "uwifi_sdio.h"
#include "uwifi_ops_adapter.h"
#include "uwifi_hif.h"
#include "tasks_info.h"
#ifdef THREADX
#include "loadtable.h"
#else
#include "firmware.h"
#endif
extern asr_thread_hand_t lwifi_task_handle;
bool txlogen = 0;
bool rxlogen = 0;
int lalalaen = 0;
#ifdef CONFIG_ASR_KEY_DBG
int tx_status_debug = 20; // default 20s.
#endif
//int setscheduler = 0;
//int dbg_type = 0;
//32*512 = 16384, so tx/rx aggr num can't exceed 16384/1696 = 9.66, so the maximum is 8
//int tx_aggr = 8; //8 will error; 7 is the maximum since port0 is used for msg
//int rx_aggr_max_num = 8;//but when wrap exceeds the maximum index 15, 6 is the maximum rx_aggr_max_num, as port indexes 0 and 1 are reserved
//int rx_thread_timer = 800;//the smaller rx_thread_timer is, the more ipc host irqs; msg/tx/rx are all handled in sdio_irq_main, so too many ipc host irqs are noisy
int flow_ctrl_high = 150;
int flow_ctrl_low = 80;
//int tx_conserve = 1; //if tx_conserve == 1, tx_aggr = 4 will be better
void ipc_host_init(struct ipc_host_env_tag *env,
struct ipc_host_cb_tag *cb, void *pthis)
{
// Reset the IPC Host environment
memset(env, 0, sizeof(struct ipc_host_env_tag));
// Save the callbacks in our own environment
env->cb = *cb;
// Save the pointer to the register base
env->pthis = pthis;
// Initialize buffers numbers and buffers sizes needed for DMA Receptions
env->rx_bufnb = IPC_RXBUF_CNT;
env->rx_bufsz = IPC_RXBUF_SIZE;
env->rx_msgbufnb = IPC_RXMSGBUF_CNT;
env->rx_msgbufsz = IPC_RXMSGBUF_SIZE;
#ifdef SDIO_DEAGGR
env->rx_bufnb_sdio_deagg = IPC_RXBUF_CNT_SDIO_DEAGG;
env->rx_bufsz_sdio_deagg = IPC_RXBUF_SIZE_SDIO_DEAGG;
#endif
#ifdef CFG_AMSDU_TEST
env->rx_bufnb_split = IPC_RXBUF_CNT_SPLIT;
env->rx_bufsz_split = IPC_RXBUF_SIZE_SPLIT;
#endif
}
int asr_rxbuff_alloc(struct asr_hw *asr_hw, uint32_t len, struct sk_buff **skb)
{
struct sk_buff *skb_new;
skb_new = dev_alloc_skb_rx(len);
if (!skb_new) {
dbg(D_ERR,D_UWIFI_DATA," skb alloc of size %u failed\n\n", (unsigned int)len);
return -ENOMEM;
}
*skb = skb_new;
return 0;
}
static void asr_elems_deallocs(struct asr_hw *asr_hw)
{
// Get first element
struct sk_buff *skb;
while (!skb_queue_empty(&asr_hw->rx_data_sk_list))
{
skb = __skb_dequeue(&asr_hw->rx_data_sk_list);
if(skb)
{
dev_kfree_skb_rx(skb);
skb = NULL;
}
}
while (!skb_queue_empty(&asr_hw->rx_msg_sk_list))
{
skb = __skb_dequeue(&asr_hw->rx_msg_sk_list);
if(skb)
{
dev_kfree_skb_rx(skb);
skb = NULL;
}
}
#ifdef CFG_AMSDU_TEST
// used for reconstruct amsdu.
while (!skb_queue_empty(&asr_hw->rx_sk_split_list))
{
dbg(D_ERR, D_UWIFI_CTRL, "%s:%d: rx_sk_split_list deleted\n", __func__, __LINE__);
skb = __skb_dequeue(&asr_hw->rx_sk_split_list);
if(skb)
{
dev_kfree_skb_rx(skb);
skb = NULL;
}
}
#endif
#ifdef SDIO_DEAGGR
while (!skb_queue_empty(&asr_hw->rx_sk_sdio_deaggr_list)) {
skb = skb_dequeue(&asr_hw->rx_sk_sdio_deaggr_list);
if (skb) {
dev_kfree_skb_rx(skb);
skb = NULL;
}
}
#endif
}
/**
* @brief Allocate storage elements.
*
* This function allocates all the elements required for communications with
* LMAC, such as Rx Data elements, MSGs elements, ...
*
* This function should be called in correspondence with the deallocation
* function.
*
* @param[in] asr_hw Pointer to main structure storing all the relevant
* information
*/
static int asr_elems_allocs(struct asr_hw *asr_hw)
{
struct sk_buff *skb;
int i;
dbg(D_ERR, D_UWIFI_CTRL, "%s, rx_bufnb = %d rx_msgbufnb = %d rx_bufsz = %d , rx_msgbufsz = %d\r\n",__func__,
asr_hw->ipc_env->rx_bufnb,asr_hw->ipc_env->rx_msgbufnb,
asr_hw->ipc_env->rx_bufsz,asr_hw->ipc_env->rx_msgbufsz);
for (i = 0; i < asr_hw->ipc_env->rx_bufnb; i++) {
// Allocate a new sk buff
if (asr_rxbuff_alloc(asr_hw, asr_hw->ipc_env->rx_bufsz, &skb)) {
dbg(D_ERR, D_UWIFI_CTRL, "%s:%d: MEM ALLOC FAILED\n", __func__, __LINE__);
goto err_alloc;
}
// Add the sk buffer structure in the table of rx buffer
skb_queue_tail(&asr_hw->rx_data_sk_list, skb);
}
for (i = 0; i < asr_hw->ipc_env->rx_msgbufnb; i++) {
// Allocate a new sk buff
if (asr_rxbuff_alloc(asr_hw, asr_hw->ipc_env->rx_msgbufsz, &skb)) {
dbg(D_ERR, D_UWIFI_CTRL, "%s:%d: MEM ALLOC FAILED\n", __func__, __LINE__);
goto err_alloc;
}
// Add the sk buffer structure in the table of rx buffer
skb_queue_tail(&asr_hw->rx_msg_sk_list, skb);
}
#ifdef CFG_AMSDU_TEST
for (i = 0; i < asr_hw->ipc_env->rx_bufnb_split; i++) {
// Allocate a new sk buff
if (asr_rxbuff_alloc(asr_hw, asr_hw->ipc_env->rx_bufsz_split, &skb)) {
dbg(D_ERR, D_UWIFI_CTRL, "%s:%d: MEM ALLOC FAILED\n", __func__, __LINE__);
goto err_alloc;
}
// Add the sk buffer structure in the table of rx buffer
dbg(D_INF, D_UWIFI_CTRL, "%s:%d: rx_sk_split_list added\n", __func__, __LINE__);
skb_queue_tail(&asr_hw->rx_sk_split_list, skb);
}
#endif
#ifdef SDIO_DEAGGR
for (i = 0; i < asr_hw->ipc_env->rx_bufnb_sdio_deagg; i++) {
// Allocate a new sk buff
if (asr_rxbuff_alloc(asr_hw, asr_hw->ipc_env->rx_bufsz_sdio_deagg, &skb)) {
dbg(D_ERR, D_UWIFI_CTRL, "%s:%d: MEM ALLOC FAILED\n", __func__, __LINE__);
goto err_alloc;
}
memset(skb->data, 0, asr_hw->ipc_env->rx_bufsz_sdio_deagg);
// Add the sk buffer structure in the table of rx buffer
skb_queue_tail(&asr_hw->rx_sk_sdio_deaggr_list, skb);
}
#endif
return 0;
err_alloc:
asr_elems_deallocs(asr_hw);
return -ENOMEM;
}
// tx hif buf
int asr_txhifbuffs_alloc(struct asr_hw *asr_hw, u32 len, struct sk_buff **skb)
{
struct sk_buff *skb_new;
skb_new = dev_alloc_skb_tx(len);
if (skb_new == NULL) {
dbg(D_ERR, D_UWIFI_CTRL, "%s:%d: skb alloc of size %u failed\n\n", __func__, __LINE__, len);
return -ENOMEM;
}
*skb = skb_new;
return 0;
}
void asr_txhifbuffs_dealloc(struct asr_hw *asr_hw)
{
// Get first element
struct sk_buff *skb = NULL;
while (!skb_queue_empty(&asr_hw->tx_sk_free_list)) {
skb = __skb_dequeue(&asr_hw->tx_sk_free_list);
if (skb) {
dev_kfree_skb_tx(skb);
skb = NULL;
}
}
while (!skb_queue_empty(&asr_hw->tx_sk_list)) {
skb = __skb_dequeue(&asr_hw->tx_sk_list);
if (skb) {
dev_kfree_skb_tx(skb);
skb = NULL;
}
}
while (!skb_queue_empty(&asr_hw->tx_hif_skb_list)) {
skb = __skb_dequeue(&asr_hw->tx_hif_skb_list);
if (skb) {
dev_kfree_skb_tx(skb);
skb = NULL;
}
}
while (!skb_queue_empty(&asr_hw->tx_hif_free_buf_list)) {
skb = __skb_dequeue(&asr_hw->tx_hif_free_buf_list);
if (skb) {
dev_kfree_skb_tx(skb);
skb = NULL;
}
}
}
/**
* WLAN driver call-back function for message reception indication
*/
extern struct asr_traffic_status g_asr_traffic_sts;
uint8_t asr_msgind(void *pthis, void *hostid)
{
struct asr_hw *asr_hw = (struct asr_hw *)pthis;
struct sk_buff *skb = (struct sk_buff*)hostid;
struct ipc_e2a_msg *msg;
uint8_t ret = 0;
bool is_ps_change_ind_msg = false;
/* Retrieve the message structure */
msg = (struct ipc_e2a_msg *)skb->data;
/* Relay further actions to the msg parser */
asr_rx_handle_msg(asr_hw, msg);
is_ps_change_ind_msg = ((MSG_T(msg->id) == TASK_MM) && (MSG_I(msg->id) == MM_PS_CHANGE_IND));
skb_queue_tail(&asr_hw->rx_msg_sk_list, skb);
//dbg(D_ERR, D_UWIFI_CTRL, "%s:%d: msg use cnt %d\n", __func__, __LINE__,msg_buf_cnt_use);
if (is_ps_change_ind_msg == true) {
// move traffic sts msg send here.
if (g_asr_traffic_sts.send) {
struct asr_sta *asr_sta_tmp = g_asr_traffic_sts.asr_sta_ps;
// send msg may schedule.
if (g_asr_traffic_sts.ps_id_bits & LEGACY_PS_ID)
asr_set_traffic_status(asr_hw, asr_sta_tmp, g_asr_traffic_sts.tx_ava, LEGACY_PS_ID);
if (g_asr_traffic_sts.ps_id_bits & UAPSD_ID)
asr_set_traffic_status(asr_hw, asr_sta_tmp, g_asr_traffic_sts.tx_ava, UAPSD_ID);
dbg(D_INF,D_UWIFI_CTRL," [ps]tx_ava=%d:sta-%d, uapsd=0x%x, (%d , %d) \r\n",
g_asr_traffic_sts.tx_ava,
asr_sta_tmp->sta_idx,asr_sta_tmp->uapsd_tids,
asr_sta_tmp->ps.pkt_ready[LEGACY_PS_ID],
asr_sta_tmp->ps.pkt_ready[UAPSD_ID]);
}
}
return ret;
}
/**
* WLAN driver call-back function for primary TBTT indication
*/
void asr_prim_tbtt_ind(void *pthis)
{
}
asr_semaphore_t g_asr_mgmt_sem;
int asr_ipc_init(struct asr_hw *asr_hw)
{
struct ipc_host_cb_tag cb;
/* initialize the API interface */
cb.recv_data_ind = asr_rxdataind;
cb.recv_msg_ind = asr_msgind;
//cb.recv_msgack_ind = asr_msgackind;
//cb.send_data_cfm = asr_txdatacfm;
//cb.prim_tbtt_ind = asr_prim_tbtt_ind;
cb.recv_dbg_ind = asr_dbgind;
/* set the IPC environment */
asr_hw->ipc_env = (struct ipc_host_env_tag *)
asr_rtos_malloc(sizeof(struct ipc_host_env_tag));
if(NULL == asr_hw->ipc_env)
{
dbg(D_ERR,D_UWIFI_CTRL,"%s:alloc ipc_host_env_tag failed",__func__);
return -ENOMEM;
}
/* call the initialization of the IPC */
ipc_host_init(asr_hw->ipc_env, &cb, asr_hw);
/* rx mgmt semaphore */
asr_rtos_init_semaphore(&g_asr_mgmt_sem, 1);
if(asr_cmd_mgr_init(&asr_hw->cmd_mgr))
return -1;
else
return asr_elems_allocs(asr_hw);
}
extern struct asr_hw g_asr_hw;
extern struct asr_hw *sp_asr_hw;
#ifdef CONFIG_ASR595X
int asr_download_bootloader(struct asr_hw *asr_hw)
{
int ret;
struct asr_firmware asr_bootld;
request_firmware(&asr_bootld,ASR_BOOTLOADER);
ret = asr_sdio_download_firmware(asr_hw, (uint8_t *)asr_bootld.data, asr_bootld.size);
if(ret)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s download bootloader fail",__func__);
return ret;
}
return ret;
}
#endif
int asr_download_fw(struct asr_hw *asr_hw)
{
int ret;
#ifdef THREADX
uint32_t fw_img_ptr_loaded = get_asrbin_begin_address();//(unsigned char *)(0x81C00000 + 680 * 1024);
#else
struct asr_firmware asr_fw;
#endif
asr_sdio_set_block_size(SDIO_BLOCK_SIZE_DLD);// TODO: confirm this also configures the block size in the hardware
#ifdef CONFIG_ASR595X
ret = asr_download_bootloader(asr_hw);
if(ret)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s download bootloader fail",__func__);
return ret;
}
#endif
#ifndef THREADX
request_firmware(&asr_fw,ASR_FIRMWARE);
#endif
#ifdef THREADX
ret = asr_sdio_download_firmware(asr_hw, (uint8_t *)fw_img_ptr_loaded, 163840);
#else
ret = asr_sdio_download_firmware(asr_hw, (uint8_t *)asr_fw.data, asr_fw.size);
#endif
if(ret)
{
dbg(D_ERR, D_UWIFI_CTRL, "%s download fw fail",__func__);
return ret;
}
return ret;
}
int asr_platform_on(struct asr_hw *asr_hw)
{
int ret;
//ipc_shenv = (struct ipc_shared_env_tag *)&ipc_shared_env;
if ((ret = asr_ipc_init(asr_hw))) {
dbg(D_ERR, D_UWIFI_CTRL, "asr ipc init failed\r\n");
return ret;
}
//init sdio irq
if (init_sdio_task(asr_hw) != kNoErr) {
dbg(D_ERR, D_UWIFI_CTRL, "asr init rx uwifi failed\r\n");
return -1;
}
//init rx to os task
if (init_rx_to_os(asr_hw) != kNoErr) {
dbg(D_ERR, D_UWIFI_CTRL, "asr init rx to os failed\r\n");
return -1;
}
return 0;
}
void asr_ipc_deinit(struct asr_hw *asr_hw)
{
asr_cmd_mgr_deinit(&asr_hw->cmd_mgr);
asr_elems_deallocs(asr_hw);
asr_rtos_deinit_semaphore(&g_asr_mgmt_sem);
asr_rtos_free(asr_hw->ipc_env);
asr_hw->ipc_env = NULL;
}
/**
* asr_platform_off - Stop the platform
*
* @asr_hw Main driver data
*
* Called by 802.11 part
*/
void asr_platform_off(struct asr_hw *asr_hw)
{
deinit_sdio_task();
deinit_rx_to_os();
asr_ipc_deinit(asr_hw);
}
//the following tx_iq_comp_2_4G/rx_iq_comp_2_4G values need further check
//checked: 0x01000000
int asr_parse_phy_configfile(struct asr_hw *asr_hw,
struct asr_phy_conf_file *config)
{
/* Get Trident path mapping */
config->trd.path_mapping = asr_hw->mod_params->phy_cfg;
/* Get DC offset compensation */
config->trd.tx_dc_off_comp = 0;
/* Get Karst TX IQ compensation value for path0 on 2.4GHz */
config->karst.tx_iq_comp_2_4G[0] = 0x01000000;
/* Get Karst TX IQ compensation value for path1 on 2.4GHz */
config->karst.tx_iq_comp_2_4G[1] = 0x01000000;
/* Get Karst RX IQ compensation value for path0 on 2.4GHz */
config->karst.rx_iq_comp_2_4G[0] = 0x01000000;
/* Get Karst RX IQ compensation value for path1 on 2.4GHz */
config->karst.rx_iq_comp_2_4G[1] = 0x01000000;
/* Get Karst default path */
config->karst.path_used = asr_hw->mod_params->phy_cfg;
return 0;
}
static int asr_check_fw_hw_feature(struct asr_hw *asr_hw,
struct wiphy *wiphy)
{
uint32_t sys_feat = asr_hw->version_cfm.features;
//uint32_t mac_feat = asr_hw->version_cfm.version_machw_1;
uint32_t phy_feat = asr_hw->version_cfm.version_phy_1;
if (!(sys_feat & BIT(MM_FEAT_UMAC_BIT))) {
dbg(D_ERR,D_UWIFI_CTRL,"Loading softmac firmware with fullmac driver\n");
return -1;
}
if (!(sys_feat & BIT(MM_FEAT_PS_BIT))) {
asr_hw->mod_params->ps_on = false;
}
/* AMSDU (non)support implies different shared structure definition
so ensure that fw and drv use consistent compilation options */
if (sys_feat & BIT(MM_FEAT_AMSDU_BIT)) {
#if !NX_AMSDU_TX
dbg(D_ERR,D_UWIFI_CTRL,"AMSDU enabled in firmware but support not compiled in driver\n");
return -1;
#else
if (asr_hw->mod_params->amsdu_maxnb > NX_TX_PAYLOAD_MAX)
asr_hw->mod_params->amsdu_maxnb = NX_TX_PAYLOAD_MAX;
#endif
} else {
#if NX_AMSDU_TX
dbg(D_ERR,D_UWIFI_CTRL,"AMSDU disabled in firmware but support compiled in driver\n");
return -1;
#endif
}
if (!(sys_feat & BIT(MM_FEAT_UAPSD_BIT))) {
asr_hw->mod_params->uapsd_timeout = 0;
}
if (sys_feat & BIT(MM_FEAT_BCN_BIT))
{
}else
{
dbg(D_ERR,D_UWIFI_CTRL,"disabled in firmware but support compiled in driver\n");
return -1;
}
switch (__MDM_PHYCFG_FROM_VERS(phy_feat)) {
case MDM_PHY_CONFIG_TRIDENT:
case MDM_PHY_CONFIG_ELMA:
asr_hw->mod_params->nss = 1;
break;
case MDM_PHY_CONFIG_KARST:
{
int nss_supp = (phy_feat & MDM_NSS_MASK) >> MDM_NSS_LSB;
if (asr_hw->mod_params->nss > nss_supp)
asr_hw->mod_params->nss = nss_supp;
}
break;
default:
break;
}
if (asr_hw->mod_params->nss < 1 || asr_hw->mod_params->nss > 2)
asr_hw->mod_params->nss = 1;
return 0;
}
#ifdef CONFIG_ASR595X
static void asr_set_he_capa(struct asr_hw *asr_hw, struct wiphy *wiphy)
{
struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ];
int i;
int nss = asr_hw->mod_params->nss;
struct ieee80211_sta_he_cap *he_cap;
int mcs_map;
if (!asr_hw->mod_params->he_on) {
band_2GHz->iftype_data = NULL;
band_2GHz->n_iftype_data = 0;
return;
}
he_cap = (struct ieee80211_sta_he_cap *)&band_2GHz->iftype_data->he_cap;
he_cap->has_he = true;
if (asr_hw->mod_params->twt_request)
he_cap->he_cap_elem.mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_REQ;
he_cap->he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_ALL_ACK;
if (asr_hw->mod_params->use_2040) {
he_cap->he_cap_elem.phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
he_cap->ppe_thres[0] |= 0x10;
}
if (asr_hw->mod_params->use_80) {
he_cap->he_cap_elem.phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
}
if (asr_hw->mod_params->ldpc_on) {
he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
} else {
// If no LDPC is supported, we have to limit to MCS0_9, as LDPC is mandatory
// for MCS 10 and 11
asr_hw->mod_params->he_mcs_map = min_t(int, asr_hw->mod_params->mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9);
}
he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US |
IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
he_cap->he_cap_elem.phy_cap_info[2] |=
IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS |
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_DOPPLER_RX;
if (asr_hw->mod_params->stbc_on)
he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 | IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA;
if (asr_hw->mod_params->bfmee) {
he_cap->he_cap_elem.phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
he_cap->he_cap_elem.phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
}
he_cap->he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT | IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
he_cap->he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
he_cap->he_cap_elem.phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G;
he_cap->he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
mcs_map = asr_hw->mod_params->he_mcs_map;
memset(&he_cap->he_mcs_nss_supp, 0, sizeof(he_cap->he_mcs_nss_supp));
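    /*
     * Encoding note (standard ieee80211 HE MCS/NSS map layout, for reference):
     * each rx/tx field packs 2 bits per spatial stream, streams 1..8 from LSB
     * to MSB, each holding IEEE80211_HE_MCS_SUPPORT_0_7/_0_9/_0_11 or
     * IEEE80211_HE_MCS_NOT_SUPPORTED. For example, with nss = 1 and
     * he_mcs_map = IEEE80211_HE_MCS_SUPPORT_0_11, rx_mcs_80 ends up as 0xFFFE:
     * MCS0-11 on stream 1, "not supported" on streams 2-8.
     */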
for (i = 0; i < nss; i++) {
uint16_t unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
he_cap->he_mcs_nss_supp.rx_mcs_80 |= cpu_to_le16(mcs_map << (i * 2));
he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss;
he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss;
mcs_map = IEEE80211_HE_MCS_SUPPORT_0_9;
}
for (; i < 8; i++) {
uint16_t unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
he_cap->he_mcs_nss_supp.rx_mcs_80 |= unsup_for_ss;
he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss;
he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss;
}
mcs_map = asr_hw->mod_params->he_mcs_map;
for (i = 0; i < nss; i++) {
uint16_t unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
he_cap->he_mcs_nss_supp.tx_mcs_80 |= cpu_to_le16(mcs_map << (i * 2));
he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss;
he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss;
mcs_map = min_t(int, asr_hw->mod_params->he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9);
}
for (; i < 8; i++) {
uint16_t unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
he_cap->he_mcs_nss_supp.tx_mcs_80 |= unsup_for_ss;
he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss;
he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss;
}
}
#endif
int asr_handle_dynparams(struct asr_hw *asr_hw, struct wiphy *wiphy)
{
struct ieee80211_supported_band *band_2GHz =
wiphy->bands[IEEE80211_BAND_2GHZ];
uint32_t mdm_phy_cfg;
int i, ret;
int nss;
ret = asr_check_fw_hw_feature(asr_hw, wiphy);
if (ret)
return ret;
if (asr_hw->mod_params->phy_cfg < 0 || asr_hw->mod_params->phy_cfg > 5)
asr_hw->mod_params->phy_cfg = 2;
if (asr_hw->mod_params->mcs_map < 0 || asr_hw->mod_params->mcs_map > 2)
asr_hw->mod_params->mcs_map = 0;
mdm_phy_cfg = __MDM_PHYCFG_FROM_VERS(asr_hw->version_cfm.version_phy_1);
if (mdm_phy_cfg == MDM_PHY_CONFIG_TRIDENT)
{
struct asr_phy_conf_file phy_conf;
// Retrieve the Trident configuration
asr_parse_phy_configfile(asr_hw, &phy_conf);
memcpy(&asr_hw->phy_config, &phy_conf.trd, sizeof(phy_conf.trd));
} else if (mdm_phy_cfg == MDM_PHY_CONFIG_ELMA) {
} else if (mdm_phy_cfg == MDM_PHY_CONFIG_KARST)
{
struct asr_phy_conf_file phy_conf;
// We use the NSS parameter as is
// Retrieve the Karst configuration
asr_parse_phy_configfile(asr_hw, &phy_conf);
memcpy(&asr_hw->phy_config, &phy_conf.karst, sizeof(phy_conf.karst));
} else {
}
nss = asr_hw->mod_params->nss;
/* VHT capabilities */
/*
* MCS map:
 * These capabilities are filled according to the mcs_map module parameter.
* However currently we have some limitations due to FPGA clock constraints
* that prevent always using the range of MCS that is defined by the
* parameter:
* - in RX, 2SS, we support up to MCS7
* - in TX, 2SS, we support up to MCS8
*/
for (i = 0; i < nss; i++)
band_2GHz->ht_cap.mcs.rx_mask[i] = 0xFF;
/* HT capabilities */
band_2GHz->ht_cap.cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
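    /* The rx_highest values below are the single-stream HT MCS7 data rates in
     * Mb/s (65/72 for 20 MHz with long/short GI, 135/150 for 40 MHz), scaled
     * by the number of spatial streams. */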
if (asr_hw->mod_params->use_2040)
{
band_2GHz->ht_cap.mcs.rx_mask[4] = 0x1; /* MCS32 */
band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(135 * nss);
} else
{
band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(65 * nss);
}
if (nss > 1)
band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
if (asr_hw->mod_params->sgi)
{
band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
if (asr_hw->mod_params->use_2040)
{
band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(150 * nss);
} else
band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(72 * nss);
}
band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
if (!asr_hw->mod_params->ht_on)
band_2GHz->ht_cap.ht_supported = false;
/**
* adjust caps with lower layers asr_hw->version_cfm
*/
#ifdef CONFIG_ASR595X
/* Set HE capabilities */
asr_set_he_capa(asr_hw, wiphy);
#endif
return 0;
}
asr_thread_hand_t uwifi_sdio_task_handle={0};
asr_thread_hand_t uwifi_rx_to_os_task_handle={0};
OSStatus init_sdio_task(struct asr_hw *asr_hw)
{
OSStatus status = kNoErr;
asr_task_config_t cfg;
status = uwifi_sdio_event_init();
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"OS Error:asr_rtos_create_event");
return status;
}
if (kNoErr != asr_rtos_task_cfg_get(ASR_TASK_CONFIG_UWIFI_SDIO, &cfg)) {
dbg(D_ERR,D_UWIFI_CTRL,"get uwifi sdio task information fail");
return -1;
}
status = asr_rtos_create_thread(&uwifi_sdio_task_handle, cfg.task_priority,UWIFI_SDIO_TASK_NAME, uwifi_sdio_main,
cfg.stack_size, (asr_thread_arg_t)asr_hw);
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"OS Error:asr_rtos_create_thread");
return status;
}
return status;
}
OSStatus deinit_sdio_task(void)
{
OSStatus status = kNoErr;
status = asr_rtos_delete_thread(&uwifi_sdio_task_handle);
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"[%s]OS Error:asr_rtos_delete_thread",__func__);
return status;
}
dbg(D_ERR,D_UWIFI_CTRL,"thread uwifi_sdio_main deleted");
status = uwifi_sdio_event_deinit();
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"OS Error:asr_rtos_deinit_event");
return status;
}
return status;
}
OSStatus init_rx_to_os(struct asr_hw *asr_hw)
{
OSStatus status = kNoErr;
asr_task_config_t cfg;
status = uwifi_rx_to_os_msg_queue_init();
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"OS Error:uwifi_rx_to_os_msg_queue_init");
return status;
}
if (kNoErr != asr_rtos_task_cfg_get(ASR_TASK_CONFIG_UWIFI_RX_TO_OS, &cfg)) {
dbg(D_ERR,D_UWIFI_CTRL,"get uwifi rx to os task information fail");
return -1;
}
status = asr_rtos_create_thread(&uwifi_rx_to_os_task_handle, cfg.task_priority,UWIFI_RX_TO_OS_TASK_NAME, uwifi_rx_to_os_main,
cfg.stack_size, (asr_thread_arg_t)asr_hw);
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"OS Error:asr_rtos_create_thread");
uwifi_rx_to_os_msg_queue_deinit();
return status;
}
return status;
}
OSStatus deinit_rx_to_os(void)
{
OSStatus status = kNoErr;
status = asr_rtos_delete_thread(&uwifi_rx_to_os_task_handle);
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"[%s]OS Error:asr_rtos_delete_thread",__func__);
return status;
}
status = uwifi_rx_to_os_msg_queue_deinit();
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"OS Error:uwifi_rx_to_os_msg_queue_deinit");
return status;
}
return status;
}
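/*
 * Illustrative sketch only (not part of the driver): one way the task
 * init/deinit helpers above could be paired during bring-up. The caller name
 * wifi_tasks_bringup() is hypothetical.
 */
#if 0
static OSStatus wifi_tasks_bringup(struct asr_hw *asr_hw)
{
    OSStatus status = init_sdio_task(asr_hw);
    if (status != kNoErr)
        return status;
    status = init_rx_to_os(asr_hw);
    if (status != kNoErr) {
        /* roll back the SDIO task if the RX-to-OS task cannot be created */
        deinit_sdio_task();
        return status;
    }
    return kNoErr;
}
#endif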

View File

@@ -0,0 +1,94 @@
/**
****************************************************************************************
*
* @file uwifi_task.c
*
* @brief uwifi task init
*
* Copyright (C) ASR
*
****************************************************************************************
*/
#include "uwifi_task.h"
#include "asr_rtos.h"
#include "asr_dbg.h"
#include "uwifi_msg.h"
#include "tasks_info.h"
#include "uwifi_platform.h"
// uwifi task handle
asr_thread_hand_t uwifi_task_handle={0};
/**
****************************************************************************************
* @brief main entry of UWIFI_TASK.
****************************************************************************************
*/
static void uwifi_main(asr_thread_arg_t arg)
{
uwifi_msg_handle();
}
/**
****************************************************************************************
* @brief api of init UWIFI_TASK.
****************************************************************************************
*/
OSStatus init_uwifi(void)
{
OSStatus status = kNoErr;
asr_task_config_t cfg;
//asr_dump_poolsize();
status = uwifi_msg_queue_init();
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"OS Error:uwifi_msg_queue_init %x",status);
return status;
}
if (kNoErr != asr_rtos_task_cfg_get(ASR_TASK_CONFIG_UWIFI, &cfg)) {
dbg(D_ERR,D_UWIFI_CTRL,"get uwifi task information fail");
return -1;
}
//asr_dump_poolsize();
status = asr_rtos_create_thread(&uwifi_task_handle, cfg.task_priority,UWIFI_TASK_NAME, uwifi_main, cfg.stack_size, 0);
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"OS Error:asr_rtos_create_thread");
uwifi_msg_queue_deinit();
return status;
}
dbg(D_CRT, D_UWIFI_CTRL, "init_uwifi");
return status;
}
/**
****************************************************************************************
* @brief api of deinit UWIFI_TASK.
****************************************************************************************
*/
OSStatus deinit_uwifi(void)
{
OSStatus status = kNoErr;
status = asr_rtos_delete_thread(&uwifi_task_handle);
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"[%s]OS Error:asr_rtos_delete_thread",__func__);
return status;
}
status = uwifi_msg_queue_deinit();
if(status != kNoErr)
{
dbg(D_ERR,D_UWIFI_CTRL,"OS Error:uwifi_msg_queue_deinit");
return status;
}
dbg(D_CRT, D_UWIFI_CTRL, "deinit_uwifi");
return status;
}

View File

@@ -0,0 +1,990 @@
/**
******************************************************************************
*
* @file uwifi_txq.c
*
* @brief txq related operation
*
* Copyright (C) ASR
*
******************************************************************************
*/
#include "uwifi_txq.h"
#include "uwifi_include.h"
#include "uwifi_rx.h"
#include "uwifi_tx.h"
#include "uwifi_kernel.h"
#define ALL_HWQ_MASK ((1 << CONFIG_USER_MAX) - 1)
/******************************************************************************
* Utils inline functions
*****************************************************************************/
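/*
 * Layout of the asr_hw->txq[] array, as assumed by the getters below:
 *  - asr_hw->sta_max_num * NX_NB_TXQ_PER_STA per-STA/per-TID queues first,
 *  - then NX_NB_TXQ_PER_VIF queues per vif (BC/MC and "unknown STA"),
 *  - then a single offchannel queue at the very end
 *    (see asr_txq_offchan_init()).
 */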
struct asr_txq *asr_txq_sta_get(struct asr_sta *sta, uint8_t tid, int *idx,
struct asr_hw * asr_hw)
{
int id;
if (tid >= NX_NB_TXQ_PER_STA)
tid = 0;
if (is_multicast_sta(asr_hw, sta->sta_idx))
id = (asr_hw->sta_max_num * NX_NB_TXQ_PER_STA) + sta->vif_idx;
else
id = (sta->sta_idx * NX_NB_TXQ_PER_STA) + tid;
if (idx)
*idx = id;
return &asr_hw->txq[id];
}
struct asr_txq *asr_txq_vif_get(struct asr_vif *vif, uint8_t type, int *idx)
{
int id;
id = vif->asr_hw->sta_max_num * NX_NB_TXQ_PER_STA + master_vif_idx(vif) +
(type * vif->asr_hw->vif_max_num);
if (idx)
*idx = id;
return &vif->asr_hw->txq[id];
}
#if 0
static struct asr_sta *asr_txq_2_sta(struct asr_txq *txq)
{
return txq->sta;
}
#endif
/******************************************************************************
* Init/Deinit functions
*****************************************************************************/
/**
* asr_txq_init - Initialize a TX queue
*
* @txq: TX queue to be initialized
* @idx: TX queue index
* @status: TX queue initial status
* @hwq: Associated HW queue
 * @sta: STA this queue belongs to
 *       (may be NULL for the per-vif "unknown STA" and offchannel txq)
*
* Each queue is initialized with the credit of @NX_TXQ_INITIAL_CREDITS.
*/
static void asr_txq_init(struct asr_hw *asr_hw, struct asr_txq *txq, int idx, uint8_t status,
struct asr_hwq *hwq,
struct asr_sta *sta)
{
int i;
txq->idx = idx;
txq->status = status;
txq->credits = NX_TXQ_INITIAL_CREDITS;
txq->pkt_sent = 0;
skb_queue_head_init(&txq->sk_list);
txq->last_retry_skb = NULL;
txq->nb_retry = 0;
txq->hwq = hwq;
txq->sta = sta;
for (i = 0; i < CONFIG_USER_MAX ; i++)
txq->pkt_pushed[i] = 0;
txq->ps_id = LEGACY_PS_ID;
txq->push_limit = 0;
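    /* Map this txq to a netdev sub-queue index: per-TID STA queues get their
     * own index, the per-vif BC/MC queues share the single index placed after
     * all the STA queues, and every other queue (non-TID STA, unknown STA,
     * offchannel) is not bound to a netdev queue (NDEV_NO_TXQ). */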
if (idx < asr_hw->sta_max_num * NX_NB_TXQ_PER_STA) {
int sta_idx = sta->sta_idx;
int tid = idx - (sta_idx * NX_NB_TXQ_PER_STA);
if (tid < NX_NB_TID_PER_STA)
txq->ndev_idx = NX_STA_NDEV_IDX(tid, sta_idx);
else
txq->ndev_idx = NDEV_NO_TXQ;
} else if (idx < asr_hw->sta_max_num * NX_NB_TXQ_PER_STA + asr_hw->vif_max_num) {
txq->ndev_idx = NX_NB_TID_PER_STA * asr_hw->sta_max_num;
} else {
txq->ndev_idx = NDEV_NO_TXQ;
}
}
/**
* skb_dequeue - remove from the head of the queue
* @list: list to dequeue from
*
* Remove the head of the list. The list lock is taken so the function
* may be used safely with other locking list functions. The head item is
* returned or %NULL if the list is empty.
*/
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *result;
asr_rtos_lock_mutex(&list->lock);
result = __skb_dequeue(list);
asr_rtos_unlock_mutex(&list->lock);
return result;
}
/**
* asr_txq_flush - Flush all buffers queued for a TXQ
*
* @asr_hw: main driver data
* @txq: txq to flush
*/
void asr_txq_flush(struct asr_hw *asr_hw, struct asr_txq *txq)
{
struct sk_buff *skb;
//struct asr_txhdr *txhdr;
//struct asr_sw_txhdr *sw_hdr;
while(!skb_queue_empty(&txq->sk_list) && (skb = skb_dequeue(&txq->sk_list)) != NULL) {
//struct asr_sw_txhdr *hdr = ((struct asr_txhdr *)skb->data)->sw_hdr;
//dma_unmap_single(asr_hw->dev, hdr->dma_addr, hdr->map_len,
// DMA_TO_DEVICE);
dbg(D_ERR, D_UWIFI_CTRL, "txq flush\r\n");
//txhdr = (struct asr_txhdr *)skb->data;
//sw_hdr = txhdr->sw_hdr;
//skb_pull(skb, sw_hdr->headroom);
//asr_rtos_free(sw_hdr);
//txhdr->sw_hdr = NULL;
dev_kfree_skb_tx(skb);
}
//try to deinit mutex
asr_rtos_deinit_mutex(&txq->sk_list.lock);
}
/**
* asr_txq_deinit - De-initialize a TX queue
*
* @asr_hw: Driver main data
* @txq: TX queue to be de-initialized
* Any buffer stuck in a queue will be freed.
*/
void asr_txq_deinit(struct asr_hw *asr_hw, struct asr_txq *txq)
{
asr_rtos_lock_mutex(&asr_hw->tx_lock);
asr_txq_del_from_hw_list(txq);
txq->idx = TXQ_INACTIVE;
asr_rtos_unlock_mutex(&asr_hw->tx_lock);
asr_txq_flush(asr_hw, txq);
}
/**
* asr_txq_vif_init - Initialize all TXQ linked to a vif
*
* @asr_hw: main driver data
* @asr_vif: Pointer on VIF
 * @status: Initial txq status
*
* Softmac : 1 VIF TXQ per HWQ
*
* Fullmac : 1 VIF TXQ for BC/MC
* 1 VIF TXQ for MGMT to unknown STA
*/
void asr_txq_vif_init(struct asr_hw *asr_hw, struct asr_vif *asr_vif,
uint8_t status)
{
struct asr_txq *txq;
int idx;
txq = asr_txq_vif_get(asr_vif, NX_BCMC_TXQ_TYPE, &idx);
asr_txq_init(asr_hw, txq, idx, status, &asr_hw->hwq[ASR_HWQ_BE],
&asr_hw->sta_table[asr_vif->ap.bcmc_index]);
txq = asr_txq_vif_get(asr_vif, NX_UNK_TXQ_TYPE, &idx);
asr_txq_init(asr_hw, txq, idx, status, &asr_hw->hwq[ASR_HWQ_VO],
NULL);
}
/**
* asr_txq_vif_deinit - Deinitialize all TXQ linked to a vif
*
* @asr_hw: main driver data
* @asr_vif: Pointer on VIF
*/
void asr_txq_vif_deinit(struct asr_hw * asr_hw, struct asr_vif *asr_vif)
{
struct asr_txq *txq;
txq = asr_txq_vif_get(asr_vif, NX_BCMC_TXQ_TYPE, NULL);
asr_txq_deinit(asr_hw, txq);
txq = asr_txq_vif_get(asr_vif, NX_UNK_TXQ_TYPE, NULL);
asr_txq_deinit(asr_hw, txq);
}
/**
* asr_txq_sta_init - Initialize TX queues for a STA
*
* @asr_hw: Main driver data
* @asr_sta: STA for which tx queues need to be initialized
 * @status: Initial txq status
*
* This function initialize all the TXQ associated to a STA.
* Softmac : 1 TXQ per TID
*
* Fullmac : 1 TXQ per TID (limited to 8)
* 1 TXQ for MGMT
*/
void asr_txq_sta_init(struct asr_hw * asr_hw, struct asr_sta *asr_sta,
uint8_t status)
{
struct asr_txq *txq;
int tid, idx;
//struct asr_vif *asr_vif = asr_hw->vif_table[asr_sta->vif_idx];
txq = asr_txq_sta_get(asr_sta, 0, &idx, asr_hw);
for (tid = 0; tid < NX_NB_TXQ_PER_STA; tid++, txq++, idx++) {
asr_txq_init(asr_hw, txq, idx, status, &asr_hw->hwq[asr_tid2hwq[tid]],
asr_sta);
txq->ps_id = asr_sta->uapsd_tids & (1 << tid) ? UAPSD_ID : LEGACY_PS_ID;
}
}
/**
* asr_txq_sta_deinit - Deinitialize TX queues for a STA
*
* @asr_hw: Main driver data
* @asr_sta: STA for which tx queues need to be deinitialized
*/
void asr_txq_sta_deinit(struct asr_hw *asr_hw, struct asr_sta *asr_sta)
{
struct asr_txq *txq;
int i;
txq = asr_txq_sta_get(asr_sta, 0, NULL, asr_hw);
for (i = 0; i < NX_NB_TXQ_PER_STA; i++, txq++) {
asr_txq_deinit(asr_hw, txq);
}
}
/**
 * asr_txq_offchan_init - Initialize the TX queue used for transmission on an offchannel
*
* @vif: Interface for which the queue has to be initialized
*
* NOTE: Offchannel txq is only active for the duration of the ROC
*/
void asr_txq_offchan_init(struct asr_vif *asr_vif)
{
struct asr_hw *asr_hw = asr_vif->asr_hw;
struct asr_txq *txq;
txq = &asr_hw->txq[asr_hw->sta_max_num * NX_NB_TXQ_PER_STA + asr_hw->vif_max_num * NX_NB_TXQ_PER_VIF];
asr_txq_init(asr_hw, txq, asr_hw->sta_max_num * NX_NB_TXQ_PER_STA + asr_hw->vif_max_num * NX_NB_TXQ_PER_VIF, ASR_TXQ_STOP_CHAN,
&asr_hw->hwq[ASR_HWQ_VO], NULL);
}
/**
* asr_deinit_offchan_txq - Deinitialize TX queue for offchannel
*
* @vif: Interface that manages the STA
*
* This function deintialize txq for one STA.
* Any buffer stuck in a queue will be freed.
*/
void asr_txq_offchan_deinit(struct asr_vif *asr_vif)
{
struct asr_txq *txq;
struct asr_hw *asr_hw = asr_vif->asr_hw;
txq = &asr_vif->asr_hw->txq[asr_hw->sta_max_num * NX_NB_TXQ_PER_STA + asr_hw->vif_max_num * NX_NB_TXQ_PER_VIF];
asr_txq_deinit(asr_vif->asr_hw, txq);
}
/******************************************************************************
* Start/Stop functions
*****************************************************************************/
/**
* asr_txq_add_to_hw_list - Add TX queue to a HW queue schedule list.
*
* @txq: TX queue to add
*
* Add the TX queue if not already present in the HW queue list.
* To be called with tx_lock hold
*/
void asr_txq_add_to_hw_list(struct asr_txq *txq)
{
if (!(txq->status & ASR_TXQ_IN_HWQ_LIST)) {
txq->status |= ASR_TXQ_IN_HWQ_LIST;
list_add_tail(&txq->sched_list, &txq->hwq->list);
txq->hwq->need_processing = true;
}
}
/**
* asr_txq_del_from_hw_list - Delete TX queue from a HW queue schedule list.
*
* @txq: TX queue to delete
*
* Remove the TX queue from the HW queue list if present.
* To be called with tx_lock hold
*/
void asr_txq_del_from_hw_list(struct asr_txq *txq)
{
if (txq->status & ASR_TXQ_IN_HWQ_LIST) {
txq->status &= ~ASR_TXQ_IN_HWQ_LIST;
list_del(&txq->sched_list);
}
}
/**
* asr_txq_start - Try to Start one TX queue
*
* @txq: TX queue to start
* @reason: reason why the TX queue is started (among ASR_TXQ_STOP_xxx)
*
* Re-start the TX queue for one reason.
* If after this the txq is no longer stopped and some buffers are ready,
* the TX queue is also added to HW queue list.
* To be called with tx_lock hold
*/
void asr_txq_start(struct asr_txq *txq, uint16_t reason)
{
if (txq->idx != TXQ_INACTIVE && (txq->status & reason))
{
txq->status &= ~reason;
if (!asr_txq_is_stopped(txq) &&
!skb_queue_empty(&txq->sk_list)) {
asr_txq_add_to_hw_list(txq);
}
}
}
/**
* asr_txq_stop - Stop one TX queue
*
* @txq: TX queue to stop
* @reason: reason why the TX queue is stopped (among ASR_TXQ_STOP_xxx)
*
* Stop the TX queue. It will remove the TX queue from HW queue list
* To be called with tx_lock hold
*/
void asr_txq_stop(struct asr_txq *txq, uint16_t reason)
{
    if(txq == NULL)
    {
        dbg(D_ERR,D_UWIFI_CTRL,"%s-txq is NULL\n", __func__);
        return;
    }
if (txq->idx != TXQ_INACTIVE)
{
txq->status |= reason;
asr_txq_del_from_hw_list(txq);
}
}
/**
 * asr_txq_sta_start - Start all the TX queues linked to a STA
*
* @sta: STA whose TX queues must be re-started
* @reason: Reason why the TX queue are restarted (among ASR_TXQ_STOP_xxx)
* @asr_hw: Driver main data
*
* This function will re-start all the TX queues of the STA for the reason
* specified. It can be :
* - ASR_TXQ_STOP_STA_PS: the STA is no longer in power save mode
* - ASR_TXQ_STOP_VIF_PS: the VIF is in power save mode (p2p absence)
* - ASR_TXQ_STOP_CHAN: the STA's VIF is now on the current active channel
*
 * Any TX queue with buffers ready and not stopped for other reasons will be
* added to the HW queue list
* To be called with tx_lock hold
*/
void asr_txq_sta_start(struct asr_sta *asr_sta, uint16_t reason,
struct asr_hw *asr_hw)
{
struct asr_txq *txq;
int i;
int nb_txq;
txq = asr_txq_sta_get(asr_sta, 0, NULL, asr_hw);
if (is_multicast_sta(asr_hw, asr_sta->sta_idx))
nb_txq = 1;
else
nb_txq = NX_NB_TXQ_PER_STA;
for (i = 0; i < nb_txq; i++, txq++)
asr_txq_start(txq, reason);
}
/**
 * asr_txq_sta_stop - Stop all the TX queues linked to a STA
*
* @sta: STA whose TX queues must be stopped
* @reason: Reason why the TX queue are stopped (among ASR_TX_STOP_xxx)
* @asr_hw: Driver main data
*
* This function will stop all the TX queues of the STA for the reason
* specified. It can be :
* - ASR_TXQ_STOP_STA_PS: the STA is in power save mode
* - ASR_TXQ_STOP_VIF_PS: the VIF is in power save mode (p2p absence)
* - ASR_TXQ_STOP_CHAN: the STA's VIF is not on the current active channel
*
* Any TX queue present in a HW queue list will be removed from this list.
* To be called with tx_lock hold
*/
void asr_txq_sta_stop(struct asr_sta *asr_sta, uint16_t reason
, struct asr_hw *asr_hw)
{
struct asr_txq *txq;
int i;
int nb_txq;
if (!asr_sta)
return;
txq = asr_txq_sta_get(asr_sta, 0, NULL, asr_hw);
if (is_multicast_sta(asr_hw, asr_sta->sta_idx))
nb_txq = 1;
else
nb_txq = NX_NB_TXQ_PER_STA;
for (i = 0; i < nb_txq; i++, txq++)
asr_txq_stop(txq, reason);
}
static void asr_txq_vif_for_each_sta(struct asr_hw *asr_hw, struct asr_vif *asr_vif,
void (*f)(struct asr_sta *, uint16_t, struct asr_hw *),
uint16_t reason)
{
switch (ASR_VIF_TYPE(asr_vif)) {
case NL80211_IFTYPE_STATION:
{
f(asr_vif->sta.ap, reason, asr_hw);
break;
}
case NL80211_IFTYPE_AP:
{
struct asr_sta *sta;
list_for_each_entry(sta, &asr_vif->ap.sta_list, list) {
f(sta, reason, asr_hw);
}
break;
}
default:
break;
}
}
bool check_vif_block_flags(struct asr_vif *asr_vif)
{
if (NULL == asr_vif)
return true;
else
return (asr_test_bit(ASR_DEV_STA_OUT_TWTSP, &asr_vif->dev_flags) ||
asr_test_bit(ASR_DEV_TXQ_STOP_CSA, &asr_vif->dev_flags) ||
asr_test_bit(ASR_DEV_TXQ_STOP_VIF_PS, &asr_vif->dev_flags) ||
asr_test_bit(ASR_DEV_TXQ_STOP_CHAN, &asr_vif->dev_flags));
}
/**
* asr_txq_vif_start - START TX queues of all STA associated to the vif
* and vif's TXQ
*
* @vif: Interface to start
* @reason: Start reason (ASR_TXQ_STOP_CHAN or ASR_TXQ_STOP_VIF_PS)
* @asr_hw: Driver main data
*
* Iterate over all the STA associated to the vif and re-start them for the
* reason @reason
* Take tx_lock
*/
void asr_txq_vif_start(struct asr_vif *asr_vif, uint16_t reason,
struct asr_hw *asr_hw)
{
struct asr_txq *txq;
asr_rtos_lock_mutex(&asr_hw->tx_lock);
asr_txq_vif_for_each_sta(asr_hw, asr_vif, asr_txq_sta_start, reason);
txq = asr_txq_vif_get(asr_vif, NX_BCMC_TXQ_TYPE, NULL);
asr_txq_start(txq, reason);
txq = asr_txq_vif_get(asr_vif, NX_UNK_TXQ_TYPE, NULL);
asr_txq_start(txq, reason);
// sdio mode used: clr drv flag for tx task.
#ifdef CONFIG_TWT
if (ASR_TXQ_STOP_TWT == reason) {
asr_clear_bit(ASR_DEV_STA_OUT_TWTSP,&asr_vif->dev_flags);
}
#endif
if (ASR_TXQ_STOP_CSA == reason) {
asr_clear_bit(ASR_DEV_TXQ_STOP_CSA,&asr_vif->dev_flags);
}
if (ASR_TXQ_STOP_CHAN == reason) {
asr_clear_bit(ASR_DEV_TXQ_STOP_CHAN,&asr_vif->dev_flags);
}
if (ASR_TXQ_STOP_VIF_PS == reason) {
asr_clear_bit(ASR_DEV_TXQ_STOP_VIF_PS,&asr_vif->dev_flags);
}
asr_rtos_unlock_mutex(&asr_hw->tx_lock);
// add tx task trigger.
if ( (check_vif_block_flags(asr_vif) == false) ) {
uwifi_sdio_event_set(UWIFI_SDIO_EVENT_TX);
}
}
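/*
 * Typical usage (illustrative, not an exhaustive list of call sites): a
 * channel switch would call asr_txq_vif_stop(vif, ASR_TXQ_STOP_CHAN, hw)
 * before leaving the operating channel and asr_txq_vif_start() with the same
 * reason once the new channel is active, which also clears the matching
 * ASR_DEV_TXQ_STOP_* flag and kicks the SDIO TX task.
 */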
/**
* asr_txq_vif_stop - STOP TX queues of all STA associated to the vif
*
* @vif: Interface to stop
* @arg: Stop reason (ASR_TXQ_STOP_CHAN or ASR_TXQ_STOP_VIF_PS)
* @asr_hw: Driver main data
*
* Iterate over all the STA associated to the vif and stop them for the
* reason ASR_TXQ_STOP_CHAN or ASR_TXQ_STOP_VIF_PS
* Take tx_lock
*/
void asr_txq_vif_stop(struct asr_vif *asr_vif, uint16_t reason,
struct asr_hw *asr_hw)
{
struct asr_txq *txq;
asr_rtos_lock_mutex(&asr_hw->tx_lock);
asr_txq_vif_for_each_sta(asr_hw, asr_vif, asr_txq_sta_stop, reason);
txq = asr_txq_vif_get(asr_vif, NX_BCMC_TXQ_TYPE, NULL);
asr_txq_stop(txq, reason);
txq = asr_txq_vif_get(asr_vif, NX_UNK_TXQ_TYPE, NULL);
asr_txq_stop(txq, reason);
// sdio mode used: set drv flag for tx task stop.
#ifdef CONFIG_TWT
if (ASR_TXQ_STOP_TWT == reason) {
asr_set_bit(ASR_DEV_STA_OUT_TWTSP,&asr_vif->dev_flags);
}
#endif
if (ASR_TXQ_STOP_CSA == reason) {
asr_set_bit(ASR_DEV_TXQ_STOP_CSA,&asr_vif->dev_flags);
}
if (ASR_TXQ_STOP_CHAN == reason) {
asr_set_bit(ASR_DEV_TXQ_STOP_CHAN,&asr_vif->dev_flags);
}
if (ASR_TXQ_STOP_VIF_PS == reason) {
asr_set_bit(ASR_DEV_TXQ_STOP_VIF_PS,&asr_vif->dev_flags);
}
asr_rtos_unlock_mutex(&asr_hw->tx_lock);
}
/**
 * asr_txq_offchan_start - Start the TX queue for offchannel frames
*
* @asr_hw: Driver main data
*/
void asr_txq_offchan_start(struct asr_hw *asr_hw)
{
struct asr_txq *txq;
txq = &asr_hw->txq[asr_hw->sta_max_num * NX_NB_TXQ_PER_STA + asr_hw->vif_max_num * NX_NB_TXQ_PER_VIF];
asr_txq_start(txq, ASR_TXQ_STOP_CHAN);
}
/**
* asr_switch_vif_sta_txq - Associate TXQ linked to a STA to a new vif
*
* @sta: STA whose txq must be switched
* @old_vif: Vif currently associated to the STA (may no longer be active)
* @new_vif: vif which should be associated to the STA for now on
*
* This function will switch the vif (i.e. the netdev) associated to all STA's
 * TXQ. This is used when AP_VLAN interfaces are created.
 * If one STA is associated to an AP_vlan vif, it will be moved from the master
 * AP vif to the AP_vlan vif.
 * If an AP_vlan vif is removed, then the STA will be moved back to the master AP vif.
*
*/
void asr_txq_sta_switch_vif(struct asr_sta *sta, struct asr_vif *old_vif,
struct asr_vif *new_vif)
{
struct asr_hw *asr_hw = new_vif->asr_hw;
struct asr_txq *txq;
int i;
txq = asr_txq_sta_get(sta, 0, NULL, asr_hw);
for (i = 0; i < NX_NB_TID_PER_STA; i++, txq++) {
}
}
/******************************************************************************
* TXQ queue/schedule functions
*****************************************************************************/
/**
* asr_txq_queue_skb - Queue a buffer in a TX queue
*
* @skb: Buffer to queue
* @txq: TX Queue in which the buffer must be added
* @asr_hw: Driver main data
* @retry: Should it be queued in the retry list
*
 * @return: Return 1 if the txq has been added to the hwq list, 0 otherwise
*
* Add a buffer in the buffer list of the TX queue
* and add this TX queue in the HW queue list if the txq is not stopped.
* If this is a retry packet it is added after the last retry packet or at the
* beginning if there is no retry packet queued.
*
* If the STA is in PS mode and this is the first packet queued for this txq
* update TIM.
*
* To be called with tx_lock hold
*/
int asr_txq_queue_skb(struct sk_buff *skb, struct asr_txq *txq,
struct asr_hw *asr_hw, bool retry)
{
if (txq->sta && txq->sta->ps.active)
{
txq->sta->ps.pkt_ready[txq->ps_id]++;
if (txq->sta->ps.pkt_ready[txq->ps_id] == 1)
{
asr_set_traffic_status(asr_hw, txq->sta, true, txq->ps_id);
}
}
if (!retry) {
/* add buffer in the sk_list */
skb_queue_tail(&txq->sk_list, skb);
} else {
if (txq->last_retry_skb)
skb_append(txq->last_retry_skb, skb, &txq->sk_list);
else
skb_queue_head(&txq->sk_list, skb);
txq->last_retry_skb = skb;
txq->nb_retry++;
}
/* Flowctrl corresponding netdev queue if needed */
/* If too many buffer are queued for this TXQ stop netdev queue */
if ((txq->ndev_idx != NDEV_NO_TXQ) &&
(skb_queue_len(&txq->sk_list) > ASR_NDEV_FLOW_CTRL_STOP)) {
txq->status |= ASR_TXQ_NDEV_FLOW_CTRL;
//netif_stop_subqueue(txq->ndev, txq->ndev_idx);
}
/* add it in the hwq list if not stopped and not yet present */
if (!(txq->status & ASR_TXQ_STOP)) {
asr_txq_add_to_hw_list(txq);
return 1;
}
return 0;
}
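/*
 * Illustrative caller pattern (not an actual call site in this file): the TX
 * path is expected to queue under tx_lock and can use the return value to
 * decide whether the SDIO TX task needs a kick, e.g.:
 *
 *   asr_rtos_lock_mutex(&asr_hw->tx_lock);
 *   if (asr_txq_queue_skb(skb, txq, asr_hw, false))
 *       uwifi_sdio_event_set(UWIFI_SDIO_EVENT_TX);
 *   asr_rtos_unlock_mutex(&asr_hw->tx_lock);
 */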
/**
* asr_txq_confirm_any - Process buffer confirmed by fw
*
* @asr_hw: Driver main data
* @txq: TX Queue
* @hwq: HW Queue
* @sw_txhdr: software descriptor of the confirmed packet
*
* Process a buffer returned by the fw. It doesn't check buffer status
* and only does systematic counter update:
* - hw credit
* - buffer pushed to fw
*
* To be called with tx_lock hold
*/
void asr_txq_confirm_any(struct asr_hw *asr_hw, struct asr_txq *txq,
struct asr_hwq *hwq, struct asr_sw_txhdr *sw_txhdr)
{
int user = 0;
if (txq->pkt_pushed[user])
txq->pkt_pushed[user]--;
hwq->credits[user]++;
hwq->need_processing = true;
//asr_hw->stats.cfm_balance[hwq->id]--;
}
/******************************************************************************
* HWQ processing
*****************************************************************************/
#if 0
static int8_t asr_txq_get_credits(struct asr_txq *txq)
{
int8_t cred = txq->credits;
/* if destination is in PS mode, push_limit indicates the maximum
       number of packets that can be pushed on this txq. */
if (txq->push_limit && (cred > txq->push_limit)) {
cred = txq->push_limit;
}
return cred;
}
#endif
/**
* extract the first @nb_elt of @list and append them to @head
 * It is assumed that:
 * - @list contains more than @nb_elt elements
* - There is no need to take @list nor @head lock to modify them
*/
static inline void skb_queue_extract(struct sk_buff_head *list,
struct sk_buff_head *head, int nb_elt)
{
int i;
struct sk_buff *first, *last, *ptr;
first = ptr = list->next;
for (i = 0; i < nb_elt; i++) {
ptr = ptr->next;
}
last = ptr->prev;
/* unlink nb_elt in list */
list->qlen -= nb_elt;
list->next = ptr;
ptr->prev = (struct sk_buff *)list;
/* append nb_elt at end of head */
head->qlen += nb_elt;
last->next = (struct sk_buff *)head;
head->prev->next = first;
first->prev = head->prev;
head->prev = last;
}
/**
* asr_txq_get_skb_to_push - Get list of buffer to push for one txq
*
* @asr_hw: main driver data
 * @hwq: HWQ on which buffers will be pushed
 * @txq: TXQ to get buffers from
 * @user: user position to use
* @sk_list_push: list to update
*
*
 * This function will return a list of buffers to push for one txq.
 * It will take into account the number of credits of the HWQ for this user
 * position and TXQ (and push_limit for fullmac).
 * This allows getting a list that can be pushed without having to test
 * hwq/txq status after each push
*
* If a MU group has been selected for this txq, it will also update the
* counter for the group
*
 * @return true if the txq no longer has buffers ready after the ones returned.
* false otherwise
*/
#if 0
static
bool asr_txq_get_skb_to_push(struct asr_hw *asr_hw, struct asr_hwq *hwq,
struct asr_txq *txq, int user,
struct sk_buff_head *sk_list_push)
{
int nb_ready = skb_queue_len(&txq->sk_list);
int credits = min_t(int, asr_txq_get_credits(txq), hwq->credits[user]);
bool res = false;
__skb_queue_head_init(sk_list_push);
if (credits >= nb_ready) {
skb_queue_splice_init(&txq->sk_list, sk_list_push);
credits = nb_ready;
res = true;
} else {
skb_queue_extract(&txq->sk_list, sk_list_push, credits);
/* When processing PS service period (i.e. push_limit != 0), no longer
   process this txq if this is a legacy PS service period (even if no
packet is pushed) or the SP is complete for this txq */
if (txq->push_limit &&
((txq->ps_id == LEGACY_PS_ID) ||
(credits >= txq->push_limit)))
res = true;
}
//asr_mu_set_active_sta(asr_hw, asr_txq_2_sta(txq), credits);
return res;
}
#endif
/**
* asr_hwq_process - Process one HW queue list
*
* @asr_hw: Driver main data
* @hw_queue: HW queue index to process
*
* The function will iterate over all the TX queues linked in this HW queue
* list. For each TX queue, push as many buffers as possible in the HW queue.
 * (NB: a TX queue has at least 1 buffer, otherwise it wouldn't be in the list)
 * - If the TX queue no longer has buffers, remove it from the list and check
 *   the next TX queue
 * - If the TX queue no longer has credits, or has a push_limit (PS mode) that
 *   is reached, remove it from the list and check the next TX queue
* - If HW queue is full, update list head to start with the next TX queue on
* next call if current TX queue already pushed "too many" pkt in a row, and
* return
*
* To be called when HW queue list is modified:
* - when a buffer is pushed on a TX queue
* - when new credits are received
* - when a STA returns from Power Save mode or receives traffic request.
* - when Channel context change
*
* To be called with tx_lock hold
*/
#if 0
void asr_hwq_process(struct asr_hw *asr_hw, struct asr_hwq *hwq)
{
struct asr_txq *txq, *next;
int user = 0;
int credit_map = 0;
//bool mu_enable = false;
hwq->need_processing = false;
credit_map = ALL_HWQ_MASK - 1;
list_for_each_entry_safe(txq, next, &hwq->list, sched_list) {
struct asr_txhdr *txhdr = NULL;
struct sk_buff_head sk_list_push;
struct sk_buff *skb;
bool txq_empty;
if (!hwq->credits[user]) {
credit_map |= BIT(user);
if (credit_map == ALL_HWQ_MASK)
break;
continue;
}
txq_empty = asr_txq_get_skb_to_push(asr_hw, hwq, txq, user,
&sk_list_push);
while ((skb = __skb_dequeue(&sk_list_push)) != NULL) {
txhdr = (struct asr_txhdr *)skb->data;
asr_tx_push(asr_hw, txhdr, 0);
}
if (txq_empty) {
asr_txq_del_from_hw_list(txq);
txq->pkt_sent = 0;
} else if ((hwq->credits[user] == 0) &&
asr_txq_is_scheduled(txq)) {
/* txq not empty,
- To avoid starving need to process other txq in the list
- For better aggregation, need to send "as many consecutive
pkt as possible" for he same txq
==> Add counter to trigger txq switch
*/
if (txq->pkt_sent > hwq->size) {
txq->pkt_sent = 0;
list_rotate_left(&hwq->list);
}
}
/* Unable to complete PS traffic request because of hwq credit */
if (txq->push_limit && txq->sta) {
if (txq->ps_id == LEGACY_PS_ID) {
                /* for legacy PS, abort the SP and wait for the next ps-poll */
txq->sta->ps.sp_cnt[txq->ps_id] -= txq->push_limit;
txq->push_limit = 0;
}
/* for u-apsd need to complete the SP to send EOSP frame */
}
/* restart netdev queue if number of queued buffer is below threshold */
if ((txq->status & ASR_TXQ_NDEV_FLOW_CTRL) &&
skb_queue_len(&txq->sk_list) < ASR_NDEV_FLOW_CTRL_RESTART) {
txq->status &= ~ASR_TXQ_NDEV_FLOW_CTRL;
//netif_wake_subqueue(txq->ndev, txq->ndev_idx);
}
}
}
#endif
/**
* asr_hwq_process_all - Process all HW queue list
*
* @asr_hw: Driver main data
*
* Loop over all HWQ, and process them if needed
* To be called with tx_lock hold
*/
#if 0
void asr_hwq_process_all(struct asr_hw *asr_hw)
{
int id;
for (id = ARRAY_SIZE(asr_hw->hwq) - 1; id >= 0 ; id--)
{
if (asr_hw->hwq[id].need_processing)
{
asr_hwq_process(asr_hw, &asr_hw->hwq[id]);
}
}
}
#endif
/**
* asr_hwq_init - Initialize all hwq structures
*
* @asr_hw: Driver main data
*
*/
void asr_hwq_init(struct asr_hw *asr_hw)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(asr_hw->hwq); i++) {
struct asr_hwq *hwq = &asr_hw->hwq[i];
for (j = 0 ; j < CONFIG_USER_MAX; j++)
hwq->credits[j] = nx_txdesc_cnt[i];
hwq->id = i;
hwq->size = nx_txdesc_cnt[i];
INIT_LIST_HEAD(&hwq->list);
}
}

View File

@@ -0,0 +1,16 @@
/**
******************************************************************************
*
* @file uwifi_version.c
*
* @brief return asr version info
*
* Copyright (C) ASR
*
******************************************************************************
*/
#define ASR_WIFI_VERSION "ASR_RTOS_WIFI-V2.6.5"
const char *asr_get_wifi_version(void)
{
return ASR_WIFI_VERSION;
}
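/*
 * Example (illustrative only): the version string can be logged at init time,
 * e.g. dbg(D_CRT, D_UWIFI_CTRL, "wifi version: %s", asr_get_wifi_version());
 */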
